Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--drivers/net/ethernet/3com/3c515.c4
-rw-r--r--drivers/net/ethernet/3com/3c574_cs.c4
-rw-r--r--drivers/net/ethernet/3com/3c589_cs.c4
-rw-r--r--drivers/net/ethernet/3com/3c59x.c4
-rw-r--r--drivers/net/ethernet/8390/axnet_cs.c4
-rw-r--r--drivers/net/ethernet/8390/pcnet_cs.c4
-rw-r--r--drivers/net/ethernet/Kconfig3
-rw-r--r--drivers/net/ethernet/Makefile1
-rw-r--r--drivers/net/ethernet/actions/owl-emac.c7
-rw-r--r--drivers/net/ethernet/adi/adin1110.c2
-rw-r--r--drivers/net/ethernet/agere/et131x.c5
-rw-r--r--drivers/net/ethernet/airoha/Kconfig34
-rw-r--r--drivers/net/ethernet/airoha/Makefile9
-rw-r--r--drivers/net/ethernet/airoha/airoha_eth.c (renamed from drivers/net/ethernet/mediatek/airoha_eth.c)2299
-rw-r--r--drivers/net/ethernet/airoha/airoha_eth.h632
-rw-r--r--drivers/net/ethernet/airoha/airoha_npu.c590
-rw-r--r--drivers/net/ethernet/airoha/airoha_npu.h36
-rw-r--r--drivers/net/ethernet/airoha/airoha_ppe.c1343
-rw-r--r--drivers/net/ethernet/airoha/airoha_ppe_debugfs.c186
-rw-r--r--drivers/net/ethernet/airoha/airoha_regs.h933
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c60
-rw-r--r--drivers/net/ethernet/amd/a2065.c4
-rw-r--r--drivers/net/ethernet/amd/amd8111e.c7
-rw-r--r--drivers/net/ethernet/amd/au1000_eth.c2
-rw-r--r--drivers/net/ethernet/amd/declance.c4
-rw-r--r--drivers/net/ethernet/amd/pcnet32.c15
-rw-r--r--drivers/net/ethernet/amd/pds_core/adminq.c40
-rw-r--r--drivers/net/ethernet/amd/pds_core/auxbus.c46
-rw-r--r--drivers/net/ethernet/amd/pds_core/core.c17
-rw-r--r--drivers/net/ethernet/amd/pds_core/core.h12
-rw-r--r--drivers/net/ethernet/amd/pds_core/debugfs.c5
-rw-r--r--drivers/net/ethernet/amd/pds_core/devlink.c13
-rw-r--r--drivers/net/ethernet/amd/pds_core/main.c27
-rw-r--r--drivers/net/ethernet/amd/sunlance.c4
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-common.h124
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dcb.c117
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c136
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-desc.c126
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dev.c292
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c138
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c117
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-i2c.c117
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-main.c117
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-mdio.c130
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-pci.c204
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c117
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c160
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-platform.c117
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-ptp.c117
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-smn.h30
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe.h136
-rw-r--r--drivers/net/ethernet/apm/xgene-v2/main.c4
-rw-r--r--drivers/net/ethernet/apm/xgene-v2/mdio.c18
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_hw.c16
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c10
-rw-r--r--drivers/net/ethernet/apple/bmac.c68
-rw-r--r--drivers/net/ethernet/apple/mace.c6
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c14
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_main.c1
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c14
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h1
-rw-r--r--drivers/net/ethernet/atheros/ag71xx.c4
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c6
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_main.c6
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c85
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.c15
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig6
-rw-r--r--drivers/net/ethernet/broadcom/asp2/bcmasp.c176
-rw-r--r--drivers/net/ethernet/broadcom/asp2/bcmasp.h81
-rw-r--r--drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c75
-rw-r--r--drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c35
-rw-r--r--drivers/net/ethernet/broadcom/asp2/bcmasp_intf_defs.h3
-rw-r--r--drivers/net/ethernet/broadcom/b44.c6
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c10
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c21
-rw-r--r--drivers/net/ethernet/broadcom/bgmac-platform.c5
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c2
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.h3
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c12
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c1474
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h55
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c40
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c112
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c245
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h143
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c29
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c86
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h14
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c18
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c35
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h3
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c1368
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.h84
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c105
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c8
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c120
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.c26
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c35
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad_ethtool.c2
-rw-r--r--drivers/net/ethernet/cadence/macb.h134
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c285
-rw-r--r--drivers/net/ethernet/cavium/Kconfig2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c76
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_device.c16
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_device.h7
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.c8
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c13
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/cxgb2.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/sge.c10
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/sge.c8
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h9
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c51
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c13
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c5
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c8
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c34
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c2
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c5
-rw-r--r--drivers/net/ethernet/cisco/enic/Kconfig1
-rw-r--r--drivers/net/ethernet/cisco/enic/Makefile2
-rw-r--r--drivers/net/ethernet/cisco/enic/cq_desc.h25
-rw-r--r--drivers/net/ethernet/cisco/enic/cq_enet_desc.h142
-rw-r--r--drivers/net/ethernet/cisco/enic/enic.h18
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_clsf.c2
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_clsf.h2
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_ethtool.c51
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c418
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_res.c87
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_res.h11
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_rq.c436
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_rq.h8
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_wq.c117
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_wq.h7
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_cq.h45
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_devcmd.h19
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_enet.h5
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_rq.h4
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_wq.h2
-rw-r--r--drivers/net/ethernet/cortina/gemini.c43
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c3
-rw-r--r--drivers/net/ethernet/dec/tulip/21142.c4
-rw-r--r--drivers/net/ethernet/dec/tulip/de2104x.c10
-rw-r--r--drivers/net/ethernet/dec/tulip/dmfe.c4
-rw-r--r--drivers/net/ethernet/dec/tulip/interrupt.c6
-rw-r--r--drivers/net/ethernet/dec/tulip/pnic.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/pnic2.c8
-rw-r--r--drivers/net/ethernet/dec/tulip/timer.c4
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip_core.c15
-rw-r--r--drivers/net/ethernet/dec/tulip/uli526x.c4
-rw-r--r--drivers/net/ethernet/dec/tulip/winbond-840.c8
-rw-r--r--drivers/net/ethernet/dlink/dl2k.c22
-rw-r--r--drivers/net/ethernet/dlink/dl2k.h20
-rw-r--r--drivers/net/ethernet/ec_bhf.c3
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c205
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c2
-rw-r--r--drivers/net/ethernet/engleder/tsnep_main.c83
-rw-r--r--drivers/net/ethernet/faraday/Kconfig1
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c2
-rw-r--r--drivers/net/ethernet/fealnx.c8
-rw-r--r--drivers/net/ethernet/freescale/Kconfig4
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c43
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c67
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c3
-rw-r--r--drivers/net/ethernet/freescale/enetc/Kconfig16
-rw-r--r--drivers/net/ethernet/freescale/enetc/Makefile4
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.c611
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.h80
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc4_debugfs.c90
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc4_debugfs.h20
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc4_hw.h35
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc4_pf.c371
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_cbdr.c50
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_ethtool.c85
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_hw.h39
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf.c126
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf.h14
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf_common.c106
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf_common.h3
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_vf.c19
-rw-r--r--drivers/net/ethernet/freescale/enetc/ntmp.c462
-rw-r--r--drivers/net/ethernet/freescale/enetc/ntmp_private.h104
-rw-r--r--drivers/net/ethernet/freescale/fec.h2
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c125
-rw-r--r--drivers/net/ethernet/freescale/fec_ptp.c5
-rw-r--r--drivers/net/ethernet/freescale/fman/fman.c35
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_dtsec.c5
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c67
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c604
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.h24
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth_ethtool.c74
-rw-r--r--drivers/net/ethernet/google/gve/gve.h105
-rw-r--r--drivers/net/ethernet/google/gve/gve_adminq.c84
-rw-r--r--drivers/net/ethernet/google/gve/gve_adminq.h1
-rw-r--r--drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c45
-rw-r--r--drivers/net/ethernet/google/gve/gve_ethtool.c100
-rw-r--r--drivers/net/ethernet/google/gve/gve_main.c463
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx.c30
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx_dqo.c110
-rw-r--r--drivers/net/ethernet/google/gve/gve_tx.c87
-rw-r--r--drivers/net/ethernet/google/gve/gve_tx_dqo.c48
-rw-r--r--drivers/net/ethernet/google/gve/gve_utils.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/Makefile3
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h159
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.c168
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.h12
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.c348
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.h11
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c195
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_err.h14
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c482
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.h5
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c65
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.h6
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c75
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c306
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c46
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h147
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c181
-rw-r--r--drivers/net/ethernet/hisilicon/hip04_eth.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c109
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h28
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c67
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c5
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h1
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ethtool.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.c15
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h5
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c122
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c85
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c3
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c63
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c65
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c21
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c9
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c70
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h1
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c9
-rw-r--r--drivers/net/ethernet/hisilicon/hns_mdio.c2
-rw-r--r--drivers/net/ethernet/huawei/Kconfig1
-rw-r--r--drivers/net/ethernet/huawei/Makefile1
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_main.c2
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_port.c2
-rw-r--r--drivers/net/ethernet/huawei/hinic3/Kconfig20
-rw-r--r--drivers/net/ethernet/huawei/hinic3/Makefile21
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_common.c53
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_common.h27
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.c25
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.h53
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.c32
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.h13
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hw_intf.h113
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.c24
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.h81
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hwif.c21
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hwif.h58
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_irq.c62
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_lld.c414
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_lld.h21
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_main.c352
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_mbox.c16
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_mbox.h15
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.h13
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h105
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c78
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c233
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h41
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h82
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c21
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h120
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_queue_common.c68
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_queue_common.h54
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_rx.c341
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_rx.h90
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_tx.c670
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_tx.h135
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_wq.c29
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_wq.h76
-rw-r--r--drivers/net/ethernet/ibm/Kconfig13
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c7
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c369
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.h65
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c52
-rw-r--r--drivers/net/ethernet/intel/Kconfig7
-rw-r--r--drivers/net/ethernet/intel/e100.c6
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c8
-rw-r--r--drivers/net/ethernet/intel/e1000e/defines.h3
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h2
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c80
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.h4
-rw-r--r--drivers/net/ethernet/intel/e1000e/mac.c15
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c103
-rw-r--r--drivers/net/ethernet/intel/e1000e/ptp.c8
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pci.c6
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pf.c120
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pf.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h7
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c10
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c465
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.c13
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c239
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h40
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c13
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h11
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.c34
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.h10
-rw-r--r--drivers/net/ethernet/intel/iavf/Makefile2
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf.h36
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_ethtool.c31
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c571
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_ptp.c485
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_ptp.h47
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_trace.h6
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_txrx.c433
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_txrx.h24
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_type.h239
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_types.h34
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_virtchnl.c220
-rw-r--r--drivers/net/ethernet/intel/ice/Makefile3
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/devlink.c170
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/health.c550
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/health.h71
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/port.c (renamed from drivers/net/ethernet/intel/ice/devlink/devlink_port.c)2
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/port.h (renamed from drivers/net/ethernet/intel/ice/devlink/devlink_port.h)0
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h95
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adapter.c47
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adapter.h6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h103
-rw-r--r--drivers/net/ethernet/intel/ice/ice_arfs.c83
-rw-r--r--drivers/net/ethernet/intel/ice/ice_arfs.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.c26
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c725
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.h19
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_lib.c49
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_lib.h9
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_nl.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ddp.c10
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dpll.c51
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch.c21
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c22
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c21
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fw_update.c14
-rw-r--r--drivers/net/ethernet/intel/ice/ice_gnss.c31
-rw-r--r--drivers/net/ethernet/intel/ice/ice_gnss.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_hw_autogen.h12
-rw-r--r--drivers/net/ethernet/intel/ice/ice_idc.c259
-rw-r--r--drivers/net/ethernet/intel/ice/ice_idc_int.h5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_irq.c275
-rw-r--r--drivers/net/ethernet/intel/ice/ice_irq.h13
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lag.c33
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h58
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c161
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.h8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c416
-rw-r--r--drivers/net/ethernet/intel/ice/ice_parser.h6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_parser_rt.c12
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.c718
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.h23
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_consts.h93
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_hw.c906
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_hw.h109
-rw-r--r--drivers/net/ethernet/intel/ice/ice_repr.c12
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sbq_cmd.h11
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.c181
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sf_eth.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sriov.c163
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c7
-rw-r--r--drivers/net/ethernet/intel/ice/ice_tc_lib.c266
-rw-r--r--drivers/net/ethernet/intel/ice/ice_tc_lib.h11
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c192
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx_lib.c26
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx_lib.h43
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h35
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib.c34
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib.h15
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib_private.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl.c179
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl.h6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c7
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c36
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.c36
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.h8
-rw-r--r--drivers/net/ethernet/intel/idpf/Kconfig1
-rw-r--r--drivers/net/ethernet/intel/idpf/Makefile3
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf.h39
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_controlq.c29
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_controlq_api.h5
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_dev.c17
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_ethtool.c71
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_lan_pf_regs.h4
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h13
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_lib.c228
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_main.c29
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_ptp.c873
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_ptp.h362
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c51
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_txrx.c310
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_txrx.h59
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_vf_dev.c3
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_virtchnl.c188
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_virtchnl.h85
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c615
-rw-r--r--drivers/net/ethernet/intel/idpf/virtchnl2.h314
-rw-r--r--drivers/net/ethernet/intel/igb/Makefile2
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h61
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c366
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c22
-rw-r--r--drivers/net/ethernet/intel/igb/igb_xsk.c563
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c7
-rw-r--r--drivers/net/ethernet/intel/igc/igc.h18
-rw-r--r--drivers/net/ethernet/intel/igc/igc_base.c6
-rw-r--r--drivers/net/ethernet/intel/igc/igc_base.h1
-rw-r--r--drivers/net/ethernet/intel/igc/igc_defines.h61
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ethtool.c81
-rw-r--r--drivers/net/ethernet/intel/igc/igc_hw.h5
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c405
-rw-r--r--drivers/net/ethernet/intel/igc/igc_nvm.c50
-rw-r--r--drivers/net/ethernet/intel/igc/igc_nvm.h2
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ptp.c129
-rw-r--r--drivers/net/ethernet/intel/igc/igc_regs.h16
-rw-r--r--drivers/net/ethernet/intel/igc/igc_tsn.c247
-rw-r--r--drivers/net/ethernet/intel/igc/igc_tsn.h52
-rw-r--r--drivers/net/ethernet/intel/igc/igc_xdp.c21
-rw-r--r--drivers/net/ethernet/intel/ixgbe/Makefile6
-rw-r--r--drivers/net/ethernet/intel/ixgbe/devlink/devlink.c557
-rw-r--r--drivers/net/ethernet/intel/ixgbe/devlink/devlink.h12
-rw-r--r--drivers/net/ethernet/intel/ixgbe/devlink/region.c290
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h37
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c26
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c59
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c3995
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_e610.h100
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c264
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c12
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fw_update.c707
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fw_update.h12
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c72
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c750
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c9
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c13
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c18
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h77
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h1250
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c13
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h7
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c30
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x550.h20
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/defines.h5
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ipsec.c43
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf.h7
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c41
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/mbx.c12
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.c12
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.h4
-rw-r--r--drivers/net/ethernet/intel/libeth/rx.c8
-rw-r--r--drivers/net/ethernet/intel/libie/rx.c4
-rw-r--r--drivers/net/ethernet/korina.c5
-rw-r--r--drivers/net/ethernet/lantiq_etop.c25
-rw-r--r--drivers/net/ethernet/marvell/Kconfig1
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c25
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c148
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2.h8
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c2
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c189
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c201
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c41
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_main.c70
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_main.h7
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c23
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.h6
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_rx.c11
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_rx.h4
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_tx.c7
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_tx.h4
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c29
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c31
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h8
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c9
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.h2
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.c7
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.h2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.c19
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/common.h1
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.c17
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.h6
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/ptp.c3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.c13
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.h6
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c24
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c87
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c71
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c58
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c18
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h88
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/Makefile3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c16
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c1041
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h265
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c7
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c273
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h56
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c1
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c16
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c90
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c277
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h12
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c47
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.c245
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.h24
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/qos.c9
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c24
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/rep.c19
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_counter.c3
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_main.c4
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_pci.c6
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c4
-rw-r--r--drivers/net/ethernet/marvell/skge.c9
-rw-r--r--drivers/net/ethernet/marvell/sky2.c10
-rw-r--r--drivers/net/ethernet/mediatek/Kconfig8
-rw-r--r--drivers/net/ethernet/mediatek/Makefile1
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_path.c45
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c312
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.h83
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe_offload.c22
-rw-r--r--drivers/net/ethernet/mediatek/mtk_star_emac.c24
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/Kconfig1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/alloc.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/catas.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_clock.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c119
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h15
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mr.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Kconfig5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c124
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/dev.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/devlink.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c46
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/dpll.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/fs.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/health.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port.c73
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c119
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rss.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rss.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_hmfs.c292
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c75
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c117
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c34
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tir.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tir.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/trap.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c60
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c184
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h41
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c739
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c159
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c202
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c103
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c136
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c154
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c53
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/events.c36
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c277
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h72
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c309
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_pool.c195
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_pool.h55
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/hwmon.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/hwmon.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c406
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c77
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c59
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c618
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h39
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c45
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c44
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h94
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/port.c165
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/rdma.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/rdma.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/rl.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c298
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.h19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action_ste_pool.c467
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action_ste_pool.h69
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c430
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h32
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c1365
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.h21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c102
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.c43
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.h12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.c99
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c301
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c1648
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h120
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c421
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.h73
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/internal.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c786
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.h52
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c81
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c515
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.h103
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/prm.h42
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.c276
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c165
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c38
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c37
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.h23
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v0.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c259
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.h151
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.c171
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.h168
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v3.c263
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_types.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5_ifc_dr.h40
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5dr.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c43
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wc.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_thermal.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/i2c.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c50
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci_hw.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c304
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h25
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c74
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h48
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c51
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c66
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/trap.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/txheader.h63
-rw-r--r--drivers/net/ethernet/meta/Kconfig1
-rw-r--r--drivers/net/ethernet/meta/fbnic/Makefile3
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic.h38
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_csr.c3
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_csr.h120
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_debugfs.c174
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_devlink.c258
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c1579
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_fw.c702
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_fw.h79
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.c335
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h48
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_hwmon.c2
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_irq.c195
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_mac.c65
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_netdev.c114
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_netdev.h16
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_pci.c26
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_phylink.c3
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_rpc.c363
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_rpc.h35
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_tlv.c55
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_tlv.h39
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_txrx.c507
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_txrx.h49
-rw-r--r--drivers/net/ethernet/micrel/ks8851_spi.c2
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c8
-rw-r--r--drivers/net/ethernet/microchip/Kconfig1
-rw-r--r--drivers/net/ethernet/microchip/Makefile1
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ethtool.c39
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c96
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.h3
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ptp.c56
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ptp.h11
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.c8
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.h8
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c3
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_port.c4
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c63
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c1
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c21
-rw-r--r--drivers/net/ethernet/microchip/lan969x/Kconfig5
-rw-r--r--drivers/net/ethernet/microchip/lan969x/Makefile13
-rw-r--r--drivers/net/ethernet/microchip/sparx5/Kconfig7
-rw-r--r--drivers/net/ethernet/microchip/sparx5/Makefile8
-rw-r--r--drivers/net/ethernet/microchip/sparx5/lan969x/lan969x.c (renamed from drivers/net/ethernet/microchip/lan969x/lan969x.c)18
-rw-r--r--drivers/net/ethernet/microchip/sparx5/lan969x/lan969x.h (renamed from drivers/net/ethernet/microchip/lan969x/lan969x.h)17
-rw-r--r--drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_calendar.c (renamed from drivers/net/ethernet/microchip/lan969x/lan969x_calendar.c)0
-rw-r--r--drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_fdma.c406
-rw-r--r--drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_regs.c (renamed from drivers/net/ethernet/microchip/lan969x/lan969x_regs.c)0
-rw-r--r--drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_rgmii.c224
-rw-r--r--drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_vcap_ag_api.c (renamed from drivers/net/ethernet/microchip/lan969x/lan969x_vcap_ag_api.c)0
-rw-r--r--drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_vcap_impl.c (renamed from drivers/net/ethernet/microchip/lan969x/lan969x_vcap_impl.c)0
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_calendar.c2
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c68
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main.c64
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main.h35
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h145
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_mirror.c3
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_packet.c15
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c16
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_port.c59
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_port.h5
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c1
-rw-r--r--drivers/net/ethernet/microsoft/mana/gdma_main.c128
-rw-r--r--drivers/net/ethernet/microsoft/mana/hw_channel.c25
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_bpf.c2
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_en.c287
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c8
-rw-r--r--drivers/net/ethernet/mscc/ocelot_net.c47
-rw-r--r--drivers/net/ethernet/mscc/ocelot_ptp.c266
-rw-r--r--drivers/net/ethernet/mscc/ocelot_stats.c37
-rw-r--r--drivers/net/ethernet/mscc/ocelot_vsc7514.c2
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c4
-rw-r--r--drivers/net/ethernet/natsemi/natsemi.c8
-rw-r--r--drivers/net/ethernet/natsemi/ns83820.c4
-rw-r--r--drivers/net/ethernet/neterion/s2io.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/cmsg.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/offload.c3
-rw-r--r--drivers/net/ethernet/netronome/nfp/crypto/ipsec.c22
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfd3/dp.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfdk/dp.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_hwmon.c40
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c8
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c44
-rw-r--r--drivers/net/ethernet/oa_tc6.c11
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c6
-rw-r--r--drivers/net/ethernet/packetengines/hamachi.c4
-rw-r--r--drivers/net/ethernet/packetengines/yellowfin.c4
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.c5
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic.h3
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.c7
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_ethtool.c137
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_if.h39
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.c13
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_main.c6
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_txrx.c12
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c20
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed.h1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h31
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_debug.c25
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c19
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hsi.h52
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hw.c11
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hw.h9
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c138
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.c5
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c8
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.c8
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c2
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c4
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c15
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c69
-rw-r--r--drivers/net/ethernet/qualcomm/Kconfig1
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.c26
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.h1
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c9
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c4
-rw-r--r--drivers/net/ethernet/realtek/8139too.c4
-rw-r--r--drivers/net/ethernet/realtek/Kconfig3
-rw-r--r--drivers/net/ethernet/realtek/atp.c4
-rw-r--r--drivers/net/ethernet/realtek/r8169.h9
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c595
-rw-r--r--drivers/net/ethernet/realtek/r8169_phy_config.c227
-rw-r--r--drivers/net/ethernet/realtek/rtase/rtase.h16
-rw-r--r--drivers/net/ethernet/realtek/rtase/rtase_main.c96
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c23
-rw-r--r--drivers/net/ethernet/renesas/ravb_ptp.c12
-rw-r--r--drivers/net/ethernet/renesas/rcar_gen4_ptp.c2
-rw-r--r--drivers/net/ethernet/renesas/rswitch.c281
-rw-r--r--drivers/net/ethernet/renesas/rswitch.h75
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c4
-rw-r--r--drivers/net/ethernet/rocker/rocker_main.c2
-rw-r--r--drivers/net/ethernet/rocker/rocker_ofdpa.c6
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c11
-rw-r--r--drivers/net/ethernet/seeq/ether3.c8
-rw-r--r--drivers/net/ethernet/sfc/Kconfig5
-rw-r--r--drivers/net/ethernet/sfc/Makefile2
-rw-r--r--drivers/net/ethernet/sfc/ef10.c8
-rw-r--r--drivers/net/ethernet/sfc/ef100_netdev.c7
-rw-r--r--drivers/net/ethernet/sfc/ef100_nic.c47
-rw-r--r--drivers/net/ethernet/sfc/efx.c24
-rw-r--r--drivers/net/ethernet/sfc/efx_common.c1
-rw-r--r--drivers/net/ethernet/sfc/efx_devlink.c13
-rw-r--r--drivers/net/ethernet/sfc/efx_reflash.c522
-rw-r--r--drivers/net/ethernet/sfc/efx_reflash.h20
-rw-r--r--drivers/net/ethernet/sfc/falcon/falcon.c6
-rw-r--r--drivers/net/ethernet/sfc/falcon/rx.c5
-rw-r--r--drivers/net/ethernet/sfc/fw_formats.h114
-rw-r--r--drivers/net/ethernet/sfc/io.h24
-rw-r--r--drivers/net/ethernet/sfc/mae.c2
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c121
-rw-r--r--drivers/net/ethernet/sfc/mcdi.h22
-rw-r--r--drivers/net/ethernet/sfc/mcdi_pcol.h13842
-rw-r--r--drivers/net/ethernet/sfc/mcdi_port.c59
-rw-r--r--drivers/net/ethernet/sfc/mcdi_port_common.c11
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h13
-rw-r--r--drivers/net/ethernet/sfc/rx_common.c10
-rw-r--r--drivers/net/ethernet/sfc/siena/mcdi.c6
-rw-r--r--drivers/net/ethernet/sfc/siena/net_driver.h2
-rw-r--r--drivers/net/ethernet/sfc/siena/rx_common.c10
-rw-r--r--drivers/net/ethernet/sfc/tc.c6
-rw-r--r--drivers/net/ethernet/sfc/tc_conntrack.c2
-rw-r--r--drivers/net/ethernet/sgi/ioc3-eth.c10
-rw-r--r--drivers/net/ethernet/sis/sis190.c6
-rw-r--r--drivers/net/ethernet/sis/sis900.c7
-rw-r--r--drivers/net/ethernet/smsc/epic100.c4
-rw-r--r--drivers/net/ethernet/smsc/smc91c92_cs.c4
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c1
-rw-r--r--drivers/net/ethernet/socionext/netsec.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig36
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h35
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c42
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c206
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c80
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c46
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c45
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c275
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h30
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c27
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c159
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c76
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c146
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c594
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c186
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c119
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sophgo.c75
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c38
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c137
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c67
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c20
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c64
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c14
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c46
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c25
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000.h17
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c35
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4.h53
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c393
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h39
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c138
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c18
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.c15
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.h100
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/mmc.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h55
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_est.c5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_est.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c107
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.c174
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.h5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c794
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c24
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c87
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c11
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c374
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.h64
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.h1
-rw-r--r--drivers/net/ethernet/sun/cassini.c4
-rw-r--r--drivers/net/ethernet/sun/ldmvsw.c6
-rw-r--r--drivers/net/ethernet/sun/niu.c73
-rw-r--r--drivers/net/ethernet/sun/niu.h4
-rw-r--r--drivers/net/ethernet/sun/sunbmac.c4
-rw-r--r--drivers/net/ethernet/sun/sungem.c10
-rw-r--r--drivers/net/ethernet/sun/sunhme.c8
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c2
-rw-r--r--drivers/net/ethernet/sun/sunvnet_common.c8
-rw-r--r--drivers/net/ethernet/sunplus/spl2sw_driver.c2
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-net.c5
-rw-r--r--drivers/net/ethernet/tehuti/tn40.c9
-rw-r--r--drivers/net/ethernet/tehuti/tn40.h33
-rw-r--r--drivers/net/ethernet/tehuti/tn40_mdio.c84
-rw-r--r--drivers/net/ethernet/ti/Kconfig2
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c765
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.h8
-rw-r--r--drivers/net/ethernet/ti/cpsw.c37
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.c18
-rw-r--r--drivers/net/ethernet/ti/cpsw_ethtool.c12
-rw-r--r--drivers/net/ethernet/ti/cpsw_new.c18
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.c70
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.h7
-rw-r--r--drivers/net/ethernet/ti/icssg/icss_iep.c141
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_common.c468
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_config.c41
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_config.h1
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth.c575
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth.h64
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c84
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_stats.c20
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_stats.h58
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_switch_map.h33
-rw-r--r--drivers/net/ethernet/ti/netcp_ethss.c4
-rw-r--r--drivers/net/ethernet/ti/tlan.c8
-rw-r--r--drivers/net/ethernet/toshiba/Kconfig11
-rw-r--r--drivers/net/ethernet/toshiba/Makefile2
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.c2556
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.h475
-rw-r--r--drivers/net/ethernet/toshiba/spider_net_ethtool.c174
-rw-r--r--drivers/net/ethernet/tundra/tsi108_eth.c4
-rw-r--r--drivers/net/ethernet/vertexcom/mse102x.c110
-rw-r--r--drivers/net/ethernet/via/via-rhine.c11
-rw-r--r--drivers/net/ethernet/via/via-velocity.c6
-rw-r--r--drivers/net/ethernet/wangxun/Kconfig7
-rw-r--r--drivers/net/ethernet/wangxun/libwx/Makefile2
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_ethtool.c97
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_ethtool.h4
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_hw.c563
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_hw.h6
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_lib.c361
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_lib.h8
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_mbx.c176
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_mbx.h77
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_ptp.c905
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_ptp.h20
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_sriov.c913
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_sriov.h18
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_type.h256
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c2
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_main.c109
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c16
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_type.h10
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/Makefile3
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c386
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_aml.h15
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c40
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.h2
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_fdir.c23
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c18
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c73
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_main.c271
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c55
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h2
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_type.h134
-rw-r--r--drivers/net/ethernet/xilinx/Kconfig1
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet.h32
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c345
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c61
1073 files changed, 75638 insertions, 36024 deletions
diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c
index 4725a8cfd695..ecdea58e6a21 100644
--- a/drivers/net/ethernet/3com/3c515.c
+++ b/drivers/net/ethernet/3com/3c515.c
@@ -859,7 +859,7 @@ static int corkscrew_open(struct net_device *dev)
static void corkscrew_timer(struct timer_list *t)
{
#ifdef AUTOMEDIA
- struct corkscrew_private *vp = from_timer(vp, t, timer);
+ struct corkscrew_private *vp = timer_container_of(vp, t, timer);
struct net_device *dev = vp->our_dev;
int ioaddr = dev->base_addr;
unsigned long flags;
@@ -1414,7 +1414,7 @@ static int corkscrew_close(struct net_device *dev)
dev->name, rx_nocopy, rx_copy, queued_packet);
}
- del_timer_sync(&vp->timer);
+ timer_delete_sync(&vp->timer);
/* Turn off statistics ASAP. We update lp->stats below. */
outw(StatsDisable, ioaddr + EL3_CMD);
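The same mechanical conversion repeats across the drivers in this series: timer callbacks recover their private structure with timer_container_of() instead of from_timer(), and shutdown paths call timer_delete_sync() instead of del_timer_sync(). A minimal sketch of the new-style usage, with an illustrative driver struct (the names below are not taken from any driver touched here):

    struct foo_priv {
        struct timer_list watchdog;
        struct net_device *dev;
    };

    static void foo_watchdog(struct timer_list *t)
    {
        /* Recover the enclosing structure from the timer_list pointer. */
        struct foo_priv *priv = timer_container_of(priv, t, watchdog);

        /* ... poll link state ... */
        mod_timer(&priv->watchdog, jiffies + HZ);
    }

    static int foo_close(struct net_device *dev)
    {
        struct foo_priv *priv = netdev_priv(dev);

        /* Wait for a concurrently running callback before tearing down. */
        timer_delete_sync(&priv->watchdog);
        return 0;
    }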
diff --git a/drivers/net/ethernet/3com/3c574_cs.c b/drivers/net/ethernet/3com/3c574_cs.c
index dc3b7c960611..1f2070497a75 100644
--- a/drivers/net/ethernet/3com/3c574_cs.c
+++ b/drivers/net/ethernet/3com/3c574_cs.c
@@ -858,7 +858,7 @@ static irqreturn_t el3_interrupt(int irq, void *dev_id)
*/
static void media_check(struct timer_list *t)
{
- struct el3_private *lp = from_timer(lp, t, media);
+ struct el3_private *lp = timer_container_of(lp, t, media);
struct net_device *dev = lp->p_dev->priv;
unsigned int ioaddr = dev->base_addr;
unsigned long flags;
@@ -1140,7 +1140,7 @@ static int el3_close(struct net_device *dev)
link->open--;
netif_stop_queue(dev);
- del_timer_sync(&lp->media);
+ timer_delete_sync(&lp->media);
return 0;
}
diff --git a/drivers/net/ethernet/3com/3c589_cs.c b/drivers/net/ethernet/3com/3c589_cs.c
index be58dac0502a..ea49be43b8c3 100644
--- a/drivers/net/ethernet/3com/3c589_cs.c
+++ b/drivers/net/ethernet/3com/3c589_cs.c
@@ -685,7 +685,7 @@ static irqreturn_t el3_interrupt(int irq, void *dev_id)
static void media_check(struct timer_list *t)
{
- struct el3_private *lp = from_timer(lp, t, media);
+ struct el3_private *lp = timer_container_of(lp, t, media);
struct net_device *dev = lp->p_dev->priv;
unsigned int ioaddr = dev->base_addr;
u16 media, errs;
@@ -946,7 +946,7 @@ static int el3_close(struct net_device *dev)
link->open--;
netif_stop_queue(dev);
- del_timer_sync(&lp->media);
+ timer_delete_sync(&lp->media);
return 0;
}
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 790270912913..8c9cc97efd4e 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -1783,7 +1783,7 @@ out:
static void
vortex_timer(struct timer_list *t)
{
- struct vortex_private *vp = from_timer(vp, t, timer);
+ struct vortex_private *vp = timer_container_of(vp, t, timer);
struct net_device *dev = vp->mii.dev;
void __iomem *ioaddr = vp->ioaddr;
int next_tick = 60*HZ;
@@ -2691,7 +2691,7 @@ vortex_down(struct net_device *dev, int final_down)
netdev_reset_queue(dev);
netif_stop_queue(dev);
- del_timer_sync(&vp->timer);
+ timer_delete_sync(&vp->timer);
/* Turn off statistics ASAP. We update dev->stats below. */
iowrite16(StatsDisable, ioaddr + EL3_CMD);
diff --git a/drivers/net/ethernet/8390/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c
index fea489af72fb..7c8213011b5c 100644
--- a/drivers/net/ethernet/8390/axnet_cs.c
+++ b/drivers/net/ethernet/8390/axnet_cs.c
@@ -504,7 +504,7 @@ static int axnet_close(struct net_device *dev)
link->open--;
netif_stop_queue(dev);
- del_timer_sync(&info->watchdog);
+ timer_delete_sync(&info->watchdog);
return 0;
} /* axnet_close */
@@ -550,7 +550,7 @@ static irqreturn_t ei_irq_wrapper(int irq, void *dev_id)
static void ei_watchdog(struct timer_list *t)
{
- struct axnet_dev *info = from_timer(info, t, watchdog);
+ struct axnet_dev *info = timer_container_of(info, t, watchdog);
struct net_device *dev = info->p_dev->priv;
unsigned int nic_base = dev->base_addr;
unsigned int mii_addr = nic_base + AXNET_MII_EEP;
diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c
index 780fb4afb6af..19f9c5db3f3b 100644
--- a/drivers/net/ethernet/8390/pcnet_cs.c
+++ b/drivers/net/ethernet/8390/pcnet_cs.c
@@ -947,7 +947,7 @@ static int pcnet_close(struct net_device *dev)
link->open--;
netif_stop_queue(dev);
- del_timer_sync(&info->watchdog);
+ timer_delete_sync(&info->watchdog);
return 0;
} /* pcnet_close */
@@ -1018,7 +1018,7 @@ static irqreturn_t ei_irq_wrapper(int irq, void *dev_id)
static void ei_watchdog(struct timer_list *t)
{
- struct pcnet_dev *info = from_timer(info, t, watchdog);
+ struct pcnet_dev *info = timer_container_of(info, t, watchdog);
struct net_device *dev = info->p_dev->priv;
unsigned int nic_base = dev->base_addr;
unsigned int mii_addr = nic_base + DLINK_GPIO;
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 9a542e3c9b05..f86d4557d8d7 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -20,6 +20,7 @@ source "drivers/net/ethernet/actions/Kconfig"
source "drivers/net/ethernet/adaptec/Kconfig"
source "drivers/net/ethernet/aeroflex/Kconfig"
source "drivers/net/ethernet/agere/Kconfig"
+source "drivers/net/ethernet/airoha/Kconfig"
source "drivers/net/ethernet/alacritech/Kconfig"
source "drivers/net/ethernet/allwinner/Kconfig"
source "drivers/net/ethernet/alteon/Kconfig"
@@ -159,7 +160,7 @@ config ETHOC
Say Y here if you want to use the OpenCores 10/100 Mbps Ethernet MAC.
config OA_TC6
- tristate "OPEN Alliance TC6 10BASE-T1x MAC-PHY support"
+ tristate "OPEN Alliance TC6 10BASE-T1x MAC-PHY support" if COMPILE_TEST
depends on SPI
select PHYLIB
help
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 99fa180dedb8..67182339469a 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_NET_VENDOR_ADAPTEC) += adaptec/
obj-$(CONFIG_GRETH) += aeroflex/
obj-$(CONFIG_NET_VENDOR_ADI) += adi/
obj-$(CONFIG_NET_VENDOR_AGERE) += agere/
+obj-$(CONFIG_NET_VENDOR_AIROHA) += airoha/
obj-$(CONFIG_NET_VENDOR_ALACRITECH) += alacritech/
obj-$(CONFIG_NET_VENDOR_ALLWINNER) += allwinner/
obj-$(CONFIG_NET_VENDOR_ALTEON) += alteon/
diff --git a/drivers/net/ethernet/actions/owl-emac.c b/drivers/net/ethernet/actions/owl-emac.c
index 115f48b3342c..0a08da799255 100644
--- a/drivers/net/ethernet/actions/owl-emac.c
+++ b/drivers/net/ethernet/actions/owl-emac.c
@@ -1325,15 +1325,10 @@ static int owl_emac_mdio_init(struct net_device *netdev)
struct device_node *mdio_node;
int ret;
- mdio_node = of_get_child_by_name(dev->of_node, "mdio");
+ mdio_node = of_get_available_child_by_name(dev->of_node, "mdio");
if (!mdio_node)
return -ENODEV;
- if (!of_device_is_available(mdio_node)) {
- ret = -ENODEV;
- goto err_put_node;
- }
-
priv->mii = devm_mdiobus_alloc(dev);
if (!priv->mii) {
ret = -ENOMEM;
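of_get_available_child_by_name() folds the child lookup and the status = "okay" check into a single call, which is why the separate of_device_is_available() branch above can be dropped. A short sketch of the resulting pattern (error paths trimmed; the caller still owns the node reference):

    struct device_node *mdio_node;

    /* Returns the child only if it exists and is available ("okay"). */
    mdio_node = of_get_available_child_by_name(dev->of_node, "mdio");
    if (!mdio_node)
        return -ENODEV;

    /* ... allocate and register the MDIO bus under mdio_node ... */

    of_node_put(mdio_node);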
diff --git a/drivers/net/ethernet/adi/adin1110.c b/drivers/net/ethernet/adi/adin1110.c
index 68fad5575fd4..30f9d271e595 100644
--- a/drivers/net/ethernet/adi/adin1110.c
+++ b/drivers/net/ethernet/adi/adin1110.c
@@ -1599,7 +1599,7 @@ static int adin1110_probe_netdevs(struct adin1110_priv *priv)
netdev->netdev_ops = &adin1110_netdev_ops;
netdev->ethtool_ops = &adin1110_ethtool_ops;
netdev->priv_flags |= IFF_UNICAST_FLT;
- netdev->netns_local = true;
+ netdev->netns_immutable = true;
port_priv->phydev = get_phy_device(priv->mii_bus, i + 1, false);
if (IS_ERR(port_priv->phydev)) {
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index b325e0cef120..678eddb36172 100644
--- a/drivers/net/ethernet/agere/et131x.c
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -3076,7 +3076,8 @@ err_out:
*/
static void et131x_error_timer_handler(struct timer_list *t)
{
- struct et131x_adapter *adapter = from_timer(adapter, t, error_timer);
+ struct et131x_adapter *adapter = timer_container_of(adapter, t,
+ error_timer);
struct phy_device *phydev = adapter->netdev->phydev;
if (et1310_in_phy_coma(adapter)) {
@@ -3639,7 +3640,7 @@ static int et131x_close(struct net_device *netdev)
free_irq(adapter->pdev->irq, netdev);
/* Stop the error timer */
- return del_timer_sync(&adapter->error_timer);
+ return timer_delete_sync(&adapter->error_timer);
}
/* et131x_set_packet_filter - Configures the Rx Packet filtering */
diff --git a/drivers/net/ethernet/airoha/Kconfig b/drivers/net/ethernet/airoha/Kconfig
new file mode 100644
index 000000000000..ad3ce501e7a5
--- /dev/null
+++ b/drivers/net/ethernet/airoha/Kconfig
@@ -0,0 +1,34 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config NET_VENDOR_AIROHA
+ bool "Airoha devices"
+ depends on ARCH_AIROHA || COMPILE_TEST
+ help
+ If you have an Airoha SoC with ethernet, say Y.
+
+if NET_VENDOR_AIROHA
+
+config NET_AIROHA_NPU
+ tristate "Airoha NPU support"
+ select WANT_DEV_COREDUMP
+ select REGMAP_MMIO
+ help
+ This driver supports the Airoha Network Processor (NPU) available
+ on the Airoha SoC family.
+
+config NET_AIROHA
+ tristate "Airoha SoC Gigabit Ethernet support"
+ depends on NET_DSA || !NET_DSA
+ select NET_AIROHA_NPU
+ select PAGE_POOL
+ help
+ This driver supports the gigabit ethernet MACs in the
+ Airoha SoC family.
+
+config NET_AIROHA_FLOW_STATS
+ default y
+ bool "Airoha flow stats"
+ depends on NET_AIROHA && NET_AIROHA_NPU
+ help
+ Enable Airoha flowtable statistics counters.
+
+endif #NET_VENDOR_AIROHA
diff --git a/drivers/net/ethernet/airoha/Makefile b/drivers/net/ethernet/airoha/Makefile
new file mode 100644
index 000000000000..94468053e34b
--- /dev/null
+++ b/drivers/net/ethernet/airoha/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the Airoha SoCs built-in ethernet MACs
+#
+
+obj-$(CONFIG_NET_AIROHA) += airoha-eth.o
+airoha-eth-y := airoha_eth.o airoha_ppe.o
+airoha-eth-$(CONFIG_DEBUG_FS) += airoha_ppe_debugfs.o
+obj-$(CONFIG_NET_AIROHA_NPU) += airoha_npu.o
diff --git a/drivers/net/ethernet/mediatek/airoha_eth.c b/drivers/net/ethernet/airoha/airoha_eth.c
index 6c683a12d5aa..06dea3a13e77 100644
--- a/drivers/net/ethernet/mediatek/airoha_eth.c
+++ b/drivers/net/ethernet/airoha/airoha_eth.c
@@ -3,841 +3,31 @@
* Copyright (c) 2024 AIROHA Inc
* Author: Lorenzo Bianconi <lorenzo@kernel.org>
*/
-#include <linux/etherdevice.h>
-#include <linux/iopoll.h>
-#include <linux/kernel.h>
-#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_net.h>
+#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
-#include <linux/reset.h>
#include <linux/tcp.h>
#include <linux/u64_stats_sync.h>
-#include <net/dsa.h>
+#include <net/dst_metadata.h>
#include <net/page_pool/helpers.h>
+#include <net/pkt_cls.h>
#include <uapi/linux/ppp_defs.h>
-#define AIROHA_MAX_NUM_GDM_PORTS 1
-#define AIROHA_MAX_NUM_QDMA 2
-#define AIROHA_MAX_NUM_RSTS 3
-#define AIROHA_MAX_NUM_XSI_RSTS 5
-#define AIROHA_MAX_MTU 2000
-#define AIROHA_MAX_PACKET_SIZE 2048
-#define AIROHA_NUM_TX_RING 32
-#define AIROHA_NUM_RX_RING 32
-#define AIROHA_FE_MC_MAX_VLAN_TABLE 64
-#define AIROHA_FE_MC_MAX_VLAN_PORT 16
-#define AIROHA_NUM_TX_IRQ 2
-#define HW_DSCP_NUM 2048
-#define IRQ_QUEUE_LEN(_n) ((_n) ? 1024 : 2048)
-#define TX_DSCP_NUM 1024
-#define RX_DSCP_NUM(_n) \
- ((_n) == 2 ? 128 : \
- (_n) == 11 ? 128 : \
- (_n) == 15 ? 128 : \
- (_n) == 0 ? 1024 : 16)
-
-#define PSE_RSV_PAGES 128
-#define PSE_QUEUE_RSV_PAGES 64
-
-/* FE */
-#define PSE_BASE 0x0100
-#define CSR_IFC_BASE 0x0200
-#define CDM1_BASE 0x0400
-#define GDM1_BASE 0x0500
-#define PPE1_BASE 0x0c00
-
-#define CDM2_BASE 0x1400
-#define GDM2_BASE 0x1500
-
-#define GDM3_BASE 0x1100
-#define GDM4_BASE 0x2500
-
-#define GDM_BASE(_n) \
- ((_n) == 4 ? GDM4_BASE : \
- (_n) == 3 ? GDM3_BASE : \
- (_n) == 2 ? GDM2_BASE : GDM1_BASE)
-
-#define REG_FE_DMA_GLO_CFG 0x0000
-#define FE_DMA_GLO_L2_SPACE_MASK GENMASK(7, 4)
-#define FE_DMA_GLO_PG_SZ_MASK BIT(3)
-
-#define REG_FE_RST_GLO_CFG 0x0004
-#define FE_RST_GDM4_MBI_ARB_MASK BIT(3)
-#define FE_RST_GDM3_MBI_ARB_MASK BIT(2)
-#define FE_RST_CORE_MASK BIT(0)
-
-#define REG_FE_WAN_MAC_H 0x0030
-#define REG_FE_LAN_MAC_H 0x0040
-
-#define REG_FE_MAC_LMIN(_n) ((_n) + 0x04)
-#define REG_FE_MAC_LMAX(_n) ((_n) + 0x08)
-
-#define REG_FE_CDM1_OQ_MAP0 0x0050
-#define REG_FE_CDM1_OQ_MAP1 0x0054
-#define REG_FE_CDM1_OQ_MAP2 0x0058
-#define REG_FE_CDM1_OQ_MAP3 0x005c
-
-#define REG_FE_PCE_CFG 0x0070
-#define PCE_DPI_EN_MASK BIT(2)
-#define PCE_KA_EN_MASK BIT(1)
-#define PCE_MC_EN_MASK BIT(0)
-
-#define REG_FE_PSE_QUEUE_CFG_WR 0x0080
-#define PSE_CFG_PORT_ID_MASK GENMASK(27, 24)
-#define PSE_CFG_QUEUE_ID_MASK GENMASK(20, 16)
-#define PSE_CFG_WR_EN_MASK BIT(8)
-#define PSE_CFG_OQRSV_SEL_MASK BIT(0)
-
-#define REG_FE_PSE_QUEUE_CFG_VAL 0x0084
-#define PSE_CFG_OQ_RSV_MASK GENMASK(13, 0)
-
-#define PSE_FQ_CFG 0x008c
-#define PSE_FQ_LIMIT_MASK GENMASK(14, 0)
-
-#define REG_FE_PSE_BUF_SET 0x0090
-#define PSE_SHARE_USED_LTHD_MASK GENMASK(31, 16)
-#define PSE_ALLRSV_MASK GENMASK(14, 0)
-
-#define REG_PSE_SHARE_USED_THD 0x0094
-#define PSE_SHARE_USED_MTHD_MASK GENMASK(31, 16)
-#define PSE_SHARE_USED_HTHD_MASK GENMASK(15, 0)
-
-#define REG_GDM_MISC_CFG 0x0148
-#define GDM2_RDM_ACK_WAIT_PREF_MASK BIT(9)
-#define GDM2_CHN_VLD_MODE_MASK BIT(5)
-
-#define REG_FE_CSR_IFC_CFG CSR_IFC_BASE
-#define FE_IFC_EN_MASK BIT(0)
-
-#define REG_FE_VIP_PORT_EN 0x01f0
-#define REG_FE_IFC_PORT_EN 0x01f4
-
-#define REG_PSE_IQ_REV1 (PSE_BASE + 0x08)
-#define PSE_IQ_RES1_P2_MASK GENMASK(23, 16)
-
-#define REG_PSE_IQ_REV2 (PSE_BASE + 0x0c)
-#define PSE_IQ_RES2_P5_MASK GENMASK(15, 8)
-#define PSE_IQ_RES2_P4_MASK GENMASK(7, 0)
-
-#define REG_FE_VIP_EN(_n) (0x0300 + ((_n) << 3))
-#define PATN_FCPU_EN_MASK BIT(7)
-#define PATN_SWP_EN_MASK BIT(6)
-#define PATN_DP_EN_MASK BIT(5)
-#define PATN_SP_EN_MASK BIT(4)
-#define PATN_TYPE_MASK GENMASK(3, 1)
-#define PATN_EN_MASK BIT(0)
-
-#define REG_FE_VIP_PATN(_n) (0x0304 + ((_n) << 3))
-#define PATN_DP_MASK GENMASK(31, 16)
-#define PATN_SP_MASK GENMASK(15, 0)
-
-#define REG_CDM1_VLAN_CTRL CDM1_BASE
-#define CDM1_VLAN_MASK GENMASK(31, 16)
-
-#define REG_CDM1_FWD_CFG (CDM1_BASE + 0x08)
-#define CDM1_VIP_QSEL_MASK GENMASK(24, 20)
-
-#define REG_CDM1_CRSN_QSEL(_n) (CDM1_BASE + 0x10 + ((_n) << 2))
-#define CDM1_CRSN_QSEL_REASON_MASK(_n) \
- GENMASK(4 + (((_n) % 4) << 3), (((_n) % 4) << 3))
-
-#define REG_CDM2_FWD_CFG (CDM2_BASE + 0x08)
-#define CDM2_OAM_QSEL_MASK GENMASK(31, 27)
-#define CDM2_VIP_QSEL_MASK GENMASK(24, 20)
-
-#define REG_CDM2_CRSN_QSEL(_n) (CDM2_BASE + 0x10 + ((_n) << 2))
-#define CDM2_CRSN_QSEL_REASON_MASK(_n) \
- GENMASK(4 + (((_n) % 4) << 3), (((_n) % 4) << 3))
-
-#define REG_GDM_FWD_CFG(_n) GDM_BASE(_n)
-#define GDM_DROP_CRC_ERR BIT(23)
-#define GDM_IP4_CKSUM BIT(22)
-#define GDM_TCP_CKSUM BIT(21)
-#define GDM_UDP_CKSUM BIT(20)
-#define GDM_UCFQ_MASK GENMASK(15, 12)
-#define GDM_BCFQ_MASK GENMASK(11, 8)
-#define GDM_MCFQ_MASK GENMASK(7, 4)
-#define GDM_OCFQ_MASK GENMASK(3, 0)
-
-#define REG_GDM_INGRESS_CFG(_n) (GDM_BASE(_n) + 0x10)
-#define GDM_INGRESS_FC_EN_MASK BIT(1)
-#define GDM_STAG_EN_MASK BIT(0)
-
-#define REG_GDM_LEN_CFG(_n) (GDM_BASE(_n) + 0x14)
-#define GDM_SHORT_LEN_MASK GENMASK(13, 0)
-#define GDM_LONG_LEN_MASK GENMASK(29, 16)
-
-#define REG_FE_CPORT_CFG (GDM1_BASE + 0x40)
-#define FE_CPORT_PAD BIT(26)
-#define FE_CPORT_PORT_XFC_MASK BIT(25)
-#define FE_CPORT_QUEUE_XFC_MASK BIT(24)
-
-#define REG_FE_GDM_MIB_CLEAR(_n) (GDM_BASE(_n) + 0xf0)
-#define FE_GDM_MIB_RX_CLEAR_MASK BIT(1)
-#define FE_GDM_MIB_TX_CLEAR_MASK BIT(0)
-
-#define REG_FE_GDM1_MIB_CFG (GDM1_BASE + 0xf4)
-#define FE_STRICT_RFC2819_MODE_MASK BIT(31)
-#define FE_GDM1_TX_MIB_SPLIT_EN_MASK BIT(17)
-#define FE_GDM1_RX_MIB_SPLIT_EN_MASK BIT(16)
-#define FE_TX_MIB_ID_MASK GENMASK(15, 8)
-#define FE_RX_MIB_ID_MASK GENMASK(7, 0)
-
-#define REG_FE_GDM_TX_OK_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x104)
-#define REG_FE_GDM_TX_OK_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x10c)
-#define REG_FE_GDM_TX_ETH_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x110)
-#define REG_FE_GDM_TX_ETH_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x114)
-#define REG_FE_GDM_TX_ETH_DROP_CNT(_n) (GDM_BASE(_n) + 0x118)
-#define REG_FE_GDM_TX_ETH_BC_CNT(_n) (GDM_BASE(_n) + 0x11c)
-#define REG_FE_GDM_TX_ETH_MC_CNT(_n) (GDM_BASE(_n) + 0x120)
-#define REG_FE_GDM_TX_ETH_RUNT_CNT(_n) (GDM_BASE(_n) + 0x124)
-#define REG_FE_GDM_TX_ETH_LONG_CNT(_n) (GDM_BASE(_n) + 0x128)
-#define REG_FE_GDM_TX_ETH_E64_CNT_L(_n) (GDM_BASE(_n) + 0x12c)
-#define REG_FE_GDM_TX_ETH_L64_CNT_L(_n) (GDM_BASE(_n) + 0x130)
-#define REG_FE_GDM_TX_ETH_L127_CNT_L(_n) (GDM_BASE(_n) + 0x134)
-#define REG_FE_GDM_TX_ETH_L255_CNT_L(_n) (GDM_BASE(_n) + 0x138)
-#define REG_FE_GDM_TX_ETH_L511_CNT_L(_n) (GDM_BASE(_n) + 0x13c)
-#define REG_FE_GDM_TX_ETH_L1023_CNT_L(_n) (GDM_BASE(_n) + 0x140)
-
-#define REG_FE_GDM_RX_OK_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x148)
-#define REG_FE_GDM_RX_FC_DROP_CNT(_n) (GDM_BASE(_n) + 0x14c)
-#define REG_FE_GDM_RX_RC_DROP_CNT(_n) (GDM_BASE(_n) + 0x150)
-#define REG_FE_GDM_RX_OVERFLOW_DROP_CNT(_n) (GDM_BASE(_n) + 0x154)
-#define REG_FE_GDM_RX_ERROR_DROP_CNT(_n) (GDM_BASE(_n) + 0x158)
-#define REG_FE_GDM_RX_OK_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x15c)
-#define REG_FE_GDM_RX_ETH_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x160)
-#define REG_FE_GDM_RX_ETH_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x164)
-#define REG_FE_GDM_RX_ETH_DROP_CNT(_n) (GDM_BASE(_n) + 0x168)
-#define REG_FE_GDM_RX_ETH_BC_CNT(_n) (GDM_BASE(_n) + 0x16c)
-#define REG_FE_GDM_RX_ETH_MC_CNT(_n) (GDM_BASE(_n) + 0x170)
-#define REG_FE_GDM_RX_ETH_CRC_ERR_CNT(_n) (GDM_BASE(_n) + 0x174)
-#define REG_FE_GDM_RX_ETH_FRAG_CNT(_n) (GDM_BASE(_n) + 0x178)
-#define REG_FE_GDM_RX_ETH_JABBER_CNT(_n) (GDM_BASE(_n) + 0x17c)
-#define REG_FE_GDM_RX_ETH_RUNT_CNT(_n) (GDM_BASE(_n) + 0x180)
-#define REG_FE_GDM_RX_ETH_LONG_CNT(_n) (GDM_BASE(_n) + 0x184)
-#define REG_FE_GDM_RX_ETH_E64_CNT_L(_n) (GDM_BASE(_n) + 0x188)
-#define REG_FE_GDM_RX_ETH_L64_CNT_L(_n) (GDM_BASE(_n) + 0x18c)
-#define REG_FE_GDM_RX_ETH_L127_CNT_L(_n) (GDM_BASE(_n) + 0x190)
-#define REG_FE_GDM_RX_ETH_L255_CNT_L(_n) (GDM_BASE(_n) + 0x194)
-#define REG_FE_GDM_RX_ETH_L511_CNT_L(_n) (GDM_BASE(_n) + 0x198)
-#define REG_FE_GDM_RX_ETH_L1023_CNT_L(_n) (GDM_BASE(_n) + 0x19c)
-
-#define REG_PPE1_TB_HASH_CFG (PPE1_BASE + 0x250)
-#define PPE1_SRAM_TABLE_EN_MASK BIT(0)
-#define PPE1_SRAM_HASH1_EN_MASK BIT(8)
-#define PPE1_DRAM_TABLE_EN_MASK BIT(16)
-#define PPE1_DRAM_HASH1_EN_MASK BIT(24)
-
-#define REG_FE_GDM_TX_OK_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x280)
-#define REG_FE_GDM_TX_OK_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x284)
-#define REG_FE_GDM_TX_ETH_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x288)
-#define REG_FE_GDM_TX_ETH_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x28c)
-
-#define REG_FE_GDM_RX_OK_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x290)
-#define REG_FE_GDM_RX_OK_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x294)
-#define REG_FE_GDM_RX_ETH_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x298)
-#define REG_FE_GDM_RX_ETH_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x29c)
-#define REG_FE_GDM_TX_ETH_E64_CNT_H(_n) (GDM_BASE(_n) + 0x2b8)
-#define REG_FE_GDM_TX_ETH_L64_CNT_H(_n) (GDM_BASE(_n) + 0x2bc)
-#define REG_FE_GDM_TX_ETH_L127_CNT_H(_n) (GDM_BASE(_n) + 0x2c0)
-#define REG_FE_GDM_TX_ETH_L255_CNT_H(_n) (GDM_BASE(_n) + 0x2c4)
-#define REG_FE_GDM_TX_ETH_L511_CNT_H(_n) (GDM_BASE(_n) + 0x2c8)
-#define REG_FE_GDM_TX_ETH_L1023_CNT_H(_n) (GDM_BASE(_n) + 0x2cc)
-#define REG_FE_GDM_RX_ETH_E64_CNT_H(_n) (GDM_BASE(_n) + 0x2e8)
-#define REG_FE_GDM_RX_ETH_L64_CNT_H(_n) (GDM_BASE(_n) + 0x2ec)
-#define REG_FE_GDM_RX_ETH_L127_CNT_H(_n) (GDM_BASE(_n) + 0x2f0)
-#define REG_FE_GDM_RX_ETH_L255_CNT_H(_n) (GDM_BASE(_n) + 0x2f4)
-#define REG_FE_GDM_RX_ETH_L511_CNT_H(_n) (GDM_BASE(_n) + 0x2f8)
-#define REG_FE_GDM_RX_ETH_L1023_CNT_H(_n) (GDM_BASE(_n) + 0x2fc)
-
-#define REG_GDM2_CHN_RLS (GDM2_BASE + 0x20)
-#define MBI_RX_AGE_SEL_MASK GENMASK(26, 25)
-#define MBI_TX_AGE_SEL_MASK GENMASK(18, 17)
-
-#define REG_GDM3_FWD_CFG GDM3_BASE
-#define GDM3_PAD_EN_MASK BIT(28)
-
-#define REG_GDM4_FWD_CFG (GDM4_BASE + 0x100)
-#define GDM4_PAD_EN_MASK BIT(28)
-#define GDM4_SPORT_OFFSET0_MASK GENMASK(11, 8)
-
-#define REG_GDM4_SRC_PORT_SET (GDM4_BASE + 0x33c)
-#define GDM4_SPORT_OFF2_MASK GENMASK(19, 16)
-#define GDM4_SPORT_OFF1_MASK GENMASK(15, 12)
-#define GDM4_SPORT_OFF0_MASK GENMASK(11, 8)
-
-#define REG_IP_FRAG_FP 0x2010
-#define IP_ASSEMBLE_PORT_MASK GENMASK(24, 21)
-#define IP_ASSEMBLE_NBQ_MASK GENMASK(20, 16)
-#define IP_FRAGMENT_PORT_MASK GENMASK(8, 5)
-#define IP_FRAGMENT_NBQ_MASK GENMASK(4, 0)
-
-#define REG_MC_VLAN_EN 0x2100
-#define MC_VLAN_EN_MASK BIT(0)
-
-#define REG_MC_VLAN_CFG 0x2104
-#define MC_VLAN_CFG_CMD_DONE_MASK BIT(31)
-#define MC_VLAN_CFG_TABLE_ID_MASK GENMASK(21, 16)
-#define MC_VLAN_CFG_PORT_ID_MASK GENMASK(11, 8)
-#define MC_VLAN_CFG_TABLE_SEL_MASK BIT(4)
-#define MC_VLAN_CFG_RW_MASK BIT(0)
-
-#define REG_MC_VLAN_DATA 0x2108
-
-#define REG_CDM5_RX_OQ1_DROP_CNT 0x29d4
-
-/* QDMA */
-#define REG_QDMA_GLOBAL_CFG 0x0004
-#define GLOBAL_CFG_RX_2B_OFFSET_MASK BIT(31)
-#define GLOBAL_CFG_DMA_PREFERENCE_MASK GENMASK(30, 29)
-#define GLOBAL_CFG_CPU_TXR_RR_MASK BIT(28)
-#define GLOBAL_CFG_DSCP_BYTE_SWAP_MASK BIT(27)
-#define GLOBAL_CFG_PAYLOAD_BYTE_SWAP_MASK BIT(26)
-#define GLOBAL_CFG_MULTICAST_MODIFY_FP_MASK BIT(25)
-#define GLOBAL_CFG_OAM_MODIFY_MASK BIT(24)
-#define GLOBAL_CFG_RESET_MASK BIT(23)
-#define GLOBAL_CFG_RESET_DONE_MASK BIT(22)
-#define GLOBAL_CFG_MULTICAST_EN_MASK BIT(21)
-#define GLOBAL_CFG_IRQ1_EN_MASK BIT(20)
-#define GLOBAL_CFG_IRQ0_EN_MASK BIT(19)
-#define GLOBAL_CFG_LOOPCNT_EN_MASK BIT(18)
-#define GLOBAL_CFG_RD_BYPASS_WR_MASK BIT(17)
-#define GLOBAL_CFG_QDMA_LOOPBACK_MASK BIT(16)
-#define GLOBAL_CFG_LPBK_RXQ_SEL_MASK GENMASK(13, 8)
-#define GLOBAL_CFG_CHECK_DONE_MASK BIT(7)
-#define GLOBAL_CFG_TX_WB_DONE_MASK BIT(6)
-#define GLOBAL_CFG_MAX_ISSUE_NUM_MASK GENMASK(5, 4)
-#define GLOBAL_CFG_RX_DMA_BUSY_MASK BIT(3)
-#define GLOBAL_CFG_RX_DMA_EN_MASK BIT(2)
-#define GLOBAL_CFG_TX_DMA_BUSY_MASK BIT(1)
-#define GLOBAL_CFG_TX_DMA_EN_MASK BIT(0)
-
-#define REG_FWD_DSCP_BASE 0x0010
-#define REG_FWD_BUF_BASE 0x0014
-
-#define REG_HW_FWD_DSCP_CFG 0x0018
-#define HW_FWD_DSCP_PAYLOAD_SIZE_MASK GENMASK(29, 28)
-#define HW_FWD_DSCP_SCATTER_LEN_MASK GENMASK(17, 16)
-#define HW_FWD_DSCP_MIN_SCATTER_LEN_MASK GENMASK(15, 0)
-
-#define REG_INT_STATUS(_n) \
- (((_n) == 4) ? 0x0730 : \
- ((_n) == 3) ? 0x0724 : \
- ((_n) == 2) ? 0x0720 : \
- ((_n) == 1) ? 0x0024 : 0x0020)
-
-#define REG_INT_ENABLE(_n) \
- (((_n) == 4) ? 0x0750 : \
- ((_n) == 3) ? 0x0744 : \
- ((_n) == 2) ? 0x0740 : \
- ((_n) == 1) ? 0x002c : 0x0028)
-
-/* QDMA_CSR_INT_ENABLE1 */
-#define RX15_COHERENT_INT_MASK BIT(31)
-#define RX14_COHERENT_INT_MASK BIT(30)
-#define RX13_COHERENT_INT_MASK BIT(29)
-#define RX12_COHERENT_INT_MASK BIT(28)
-#define RX11_COHERENT_INT_MASK BIT(27)
-#define RX10_COHERENT_INT_MASK BIT(26)
-#define RX9_COHERENT_INT_MASK BIT(25)
-#define RX8_COHERENT_INT_MASK BIT(24)
-#define RX7_COHERENT_INT_MASK BIT(23)
-#define RX6_COHERENT_INT_MASK BIT(22)
-#define RX5_COHERENT_INT_MASK BIT(21)
-#define RX4_COHERENT_INT_MASK BIT(20)
-#define RX3_COHERENT_INT_MASK BIT(19)
-#define RX2_COHERENT_INT_MASK BIT(18)
-#define RX1_COHERENT_INT_MASK BIT(17)
-#define RX0_COHERENT_INT_MASK BIT(16)
-#define TX7_COHERENT_INT_MASK BIT(15)
-#define TX6_COHERENT_INT_MASK BIT(14)
-#define TX5_COHERENT_INT_MASK BIT(13)
-#define TX4_COHERENT_INT_MASK BIT(12)
-#define TX3_COHERENT_INT_MASK BIT(11)
-#define TX2_COHERENT_INT_MASK BIT(10)
-#define TX1_COHERENT_INT_MASK BIT(9)
-#define TX0_COHERENT_INT_MASK BIT(8)
-#define CNT_OVER_FLOW_INT_MASK BIT(7)
-#define IRQ1_FULL_INT_MASK BIT(5)
-#define IRQ1_INT_MASK BIT(4)
-#define HWFWD_DSCP_LOW_INT_MASK BIT(3)
-#define HWFWD_DSCP_EMPTY_INT_MASK BIT(2)
-#define IRQ0_FULL_INT_MASK BIT(1)
-#define IRQ0_INT_MASK BIT(0)
-
-#define TX_DONE_INT_MASK(_n) \
- ((_n) ? IRQ1_INT_MASK | IRQ1_FULL_INT_MASK \
- : IRQ0_INT_MASK | IRQ0_FULL_INT_MASK)
-
-#define INT_TX_MASK \
- (IRQ1_INT_MASK | IRQ1_FULL_INT_MASK | \
- IRQ0_INT_MASK | IRQ0_FULL_INT_MASK)
-
-#define INT_IDX0_MASK \
- (TX0_COHERENT_INT_MASK | TX1_COHERENT_INT_MASK | \
- TX2_COHERENT_INT_MASK | TX3_COHERENT_INT_MASK | \
- TX4_COHERENT_INT_MASK | TX5_COHERENT_INT_MASK | \
- TX6_COHERENT_INT_MASK | TX7_COHERENT_INT_MASK | \
- RX0_COHERENT_INT_MASK | RX1_COHERENT_INT_MASK | \
- RX2_COHERENT_INT_MASK | RX3_COHERENT_INT_MASK | \
- RX4_COHERENT_INT_MASK | RX7_COHERENT_INT_MASK | \
- RX8_COHERENT_INT_MASK | RX9_COHERENT_INT_MASK | \
- RX15_COHERENT_INT_MASK | INT_TX_MASK)
-
-/* QDMA_CSR_INT_ENABLE2 */
-#define RX15_NO_CPU_DSCP_INT_MASK BIT(31)
-#define RX14_NO_CPU_DSCP_INT_MASK BIT(30)
-#define RX13_NO_CPU_DSCP_INT_MASK BIT(29)
-#define RX12_NO_CPU_DSCP_INT_MASK BIT(28)
-#define RX11_NO_CPU_DSCP_INT_MASK BIT(27)
-#define RX10_NO_CPU_DSCP_INT_MASK BIT(26)
-#define RX9_NO_CPU_DSCP_INT_MASK BIT(25)
-#define RX8_NO_CPU_DSCP_INT_MASK BIT(24)
-#define RX7_NO_CPU_DSCP_INT_MASK BIT(23)
-#define RX6_NO_CPU_DSCP_INT_MASK BIT(22)
-#define RX5_NO_CPU_DSCP_INT_MASK BIT(21)
-#define RX4_NO_CPU_DSCP_INT_MASK BIT(20)
-#define RX3_NO_CPU_DSCP_INT_MASK BIT(19)
-#define RX2_NO_CPU_DSCP_INT_MASK BIT(18)
-#define RX1_NO_CPU_DSCP_INT_MASK BIT(17)
-#define RX0_NO_CPU_DSCP_INT_MASK BIT(16)
-#define RX15_DONE_INT_MASK BIT(15)
-#define RX14_DONE_INT_MASK BIT(14)
-#define RX13_DONE_INT_MASK BIT(13)
-#define RX12_DONE_INT_MASK BIT(12)
-#define RX11_DONE_INT_MASK BIT(11)
-#define RX10_DONE_INT_MASK BIT(10)
-#define RX9_DONE_INT_MASK BIT(9)
-#define RX8_DONE_INT_MASK BIT(8)
-#define RX7_DONE_INT_MASK BIT(7)
-#define RX6_DONE_INT_MASK BIT(6)
-#define RX5_DONE_INT_MASK BIT(5)
-#define RX4_DONE_INT_MASK BIT(4)
-#define RX3_DONE_INT_MASK BIT(3)
-#define RX2_DONE_INT_MASK BIT(2)
-#define RX1_DONE_INT_MASK BIT(1)
-#define RX0_DONE_INT_MASK BIT(0)
-
-#define RX_DONE_INT_MASK \
- (RX0_DONE_INT_MASK | RX1_DONE_INT_MASK | \
- RX2_DONE_INT_MASK | RX3_DONE_INT_MASK | \
- RX4_DONE_INT_MASK | RX7_DONE_INT_MASK | \
- RX8_DONE_INT_MASK | RX9_DONE_INT_MASK | \
- RX15_DONE_INT_MASK)
-#define INT_IDX1_MASK \
- (RX_DONE_INT_MASK | \
- RX0_NO_CPU_DSCP_INT_MASK | RX1_NO_CPU_DSCP_INT_MASK | \
- RX2_NO_CPU_DSCP_INT_MASK | RX3_NO_CPU_DSCP_INT_MASK | \
- RX4_NO_CPU_DSCP_INT_MASK | RX7_NO_CPU_DSCP_INT_MASK | \
- RX8_NO_CPU_DSCP_INT_MASK | RX9_NO_CPU_DSCP_INT_MASK | \
- RX15_NO_CPU_DSCP_INT_MASK)
-
-/* QDMA_CSR_INT_ENABLE5 */
-#define TX31_COHERENT_INT_MASK BIT(31)
-#define TX30_COHERENT_INT_MASK BIT(30)
-#define TX29_COHERENT_INT_MASK BIT(29)
-#define TX28_COHERENT_INT_MASK BIT(28)
-#define TX27_COHERENT_INT_MASK BIT(27)
-#define TX26_COHERENT_INT_MASK BIT(26)
-#define TX25_COHERENT_INT_MASK BIT(25)
-#define TX24_COHERENT_INT_MASK BIT(24)
-#define TX23_COHERENT_INT_MASK BIT(23)
-#define TX22_COHERENT_INT_MASK BIT(22)
-#define TX21_COHERENT_INT_MASK BIT(21)
-#define TX20_COHERENT_INT_MASK BIT(20)
-#define TX19_COHERENT_INT_MASK BIT(19)
-#define TX18_COHERENT_INT_MASK BIT(18)
-#define TX17_COHERENT_INT_MASK BIT(17)
-#define TX16_COHERENT_INT_MASK BIT(16)
-#define TX15_COHERENT_INT_MASK BIT(15)
-#define TX14_COHERENT_INT_MASK BIT(14)
-#define TX13_COHERENT_INT_MASK BIT(13)
-#define TX12_COHERENT_INT_MASK BIT(12)
-#define TX11_COHERENT_INT_MASK BIT(11)
-#define TX10_COHERENT_INT_MASK BIT(10)
-#define TX9_COHERENT_INT_MASK BIT(9)
-#define TX8_COHERENT_INT_MASK BIT(8)
-
-#define INT_IDX4_MASK \
- (TX8_COHERENT_INT_MASK | TX9_COHERENT_INT_MASK | \
- TX10_COHERENT_INT_MASK | TX11_COHERENT_INT_MASK | \
- TX12_COHERENT_INT_MASK | TX13_COHERENT_INT_MASK | \
- TX14_COHERENT_INT_MASK | TX15_COHERENT_INT_MASK | \
- TX16_COHERENT_INT_MASK | TX17_COHERENT_INT_MASK | \
- TX18_COHERENT_INT_MASK | TX19_COHERENT_INT_MASK | \
- TX20_COHERENT_INT_MASK | TX21_COHERENT_INT_MASK | \
- TX22_COHERENT_INT_MASK | TX23_COHERENT_INT_MASK | \
- TX24_COHERENT_INT_MASK | TX25_COHERENT_INT_MASK | \
- TX26_COHERENT_INT_MASK | TX27_COHERENT_INT_MASK | \
- TX28_COHERENT_INT_MASK | TX29_COHERENT_INT_MASK | \
- TX30_COHERENT_INT_MASK | TX31_COHERENT_INT_MASK)
-
-#define REG_TX_IRQ_BASE(_n) ((_n) ? 0x0048 : 0x0050)
-
-#define REG_TX_IRQ_CFG(_n) ((_n) ? 0x004c : 0x0054)
-#define TX_IRQ_THR_MASK GENMASK(27, 16)
-#define TX_IRQ_DEPTH_MASK GENMASK(11, 0)
-
-#define REG_IRQ_CLEAR_LEN(_n) ((_n) ? 0x0064 : 0x0058)
-#define IRQ_CLEAR_LEN_MASK GENMASK(7, 0)
-
-#define REG_IRQ_STATUS(_n) ((_n) ? 0x0068 : 0x005c)
-#define IRQ_ENTRY_LEN_MASK GENMASK(27, 16)
-#define IRQ_HEAD_IDX_MASK GENMASK(11, 0)
-
-#define REG_TX_RING_BASE(_n) \
- (((_n) < 8) ? 0x0100 + ((_n) << 5) : 0x0b00 + (((_n) - 8) << 5))
-
-#define REG_TX_RING_BLOCKING(_n) \
- (((_n) < 8) ? 0x0104 + ((_n) << 5) : 0x0b04 + (((_n) - 8) << 5))
-
-#define TX_RING_IRQ_BLOCKING_MAP_MASK BIT(6)
-#define TX_RING_IRQ_BLOCKING_CFG_MASK BIT(4)
-#define TX_RING_IRQ_BLOCKING_TX_DROP_EN_MASK BIT(2)
-#define TX_RING_IRQ_BLOCKING_MAX_TH_TXRING_EN_MASK BIT(1)
-#define TX_RING_IRQ_BLOCKING_MIN_TH_TXRING_EN_MASK BIT(0)
-
-#define REG_TX_CPU_IDX(_n) \
- (((_n) < 8) ? 0x0108 + ((_n) << 5) : 0x0b08 + (((_n) - 8) << 5))
-
-#define TX_RING_CPU_IDX_MASK GENMASK(15, 0)
-
-#define REG_TX_DMA_IDX(_n) \
- (((_n) < 8) ? 0x010c + ((_n) << 5) : 0x0b0c + (((_n) - 8) << 5))
-
-#define TX_RING_DMA_IDX_MASK GENMASK(15, 0)
-
-#define IRQ_RING_IDX_MASK GENMASK(20, 16)
-#define IRQ_DESC_IDX_MASK GENMASK(15, 0)
-
-#define REG_RX_RING_BASE(_n) \
- (((_n) < 16) ? 0x0200 + ((_n) << 5) : 0x0e00 + (((_n) - 16) << 5))
-
-#define REG_RX_RING_SIZE(_n) \
- (((_n) < 16) ? 0x0204 + ((_n) << 5) : 0x0e04 + (((_n) - 16) << 5))
-
-#define RX_RING_THR_MASK GENMASK(31, 16)
-#define RX_RING_SIZE_MASK GENMASK(15, 0)
-
-#define REG_RX_CPU_IDX(_n) \
- (((_n) < 16) ? 0x0208 + ((_n) << 5) : 0x0e08 + (((_n) - 16) << 5))
-
-#define RX_RING_CPU_IDX_MASK GENMASK(15, 0)
-
-#define REG_RX_DMA_IDX(_n) \
- (((_n) < 16) ? 0x020c + ((_n) << 5) : 0x0e0c + (((_n) - 16) << 5))
-
-#define REG_RX_DELAY_INT_IDX(_n) \
- (((_n) < 16) ? 0x0210 + ((_n) << 5) : 0x0e10 + (((_n) - 16) << 5))
-
-#define RX_DELAY_INT_MASK GENMASK(15, 0)
-
-#define RX_RING_DMA_IDX_MASK GENMASK(15, 0)
-
-#define REG_INGRESS_TRTCM_CFG 0x0070
-#define INGRESS_TRTCM_EN_MASK BIT(31)
-#define INGRESS_TRTCM_MODE_MASK BIT(30)
-#define INGRESS_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
-#define INGRESS_FAST_TICK_MASK GENMASK(15, 0)
-
-#define REG_TXQ_DIS_CFG_BASE(_n) ((_n) ? 0x20a0 : 0x00a0)
-#define REG_TXQ_DIS_CFG(_n, _m) (REG_TXQ_DIS_CFG_BASE((_n)) + (_m) << 2)
-
-#define REG_LMGR_INIT_CFG 0x1000
-#define LMGR_INIT_START BIT(31)
-#define LMGR_SRAM_MODE_MASK BIT(30)
-#define HW_FWD_PKTSIZE_OVERHEAD_MASK GENMASK(27, 20)
-#define HW_FWD_DESC_NUM_MASK GENMASK(16, 0)
-
-#define REG_FWD_DSCP_LOW_THR 0x1004
-#define FWD_DSCP_LOW_THR_MASK GENMASK(17, 0)
-
-#define REG_EGRESS_RATE_METER_CFG 0x100c
-#define EGRESS_RATE_METER_EN_MASK BIT(31)
-#define EGRESS_RATE_METER_EQ_RATE_EN_MASK BIT(17)
-#define EGRESS_RATE_METER_WINDOW_SZ_MASK GENMASK(16, 12)
-#define EGRESS_RATE_METER_TIMESLICE_MASK GENMASK(10, 0)
-
-#define REG_EGRESS_TRTCM_CFG 0x1010
-#define EGRESS_TRTCM_EN_MASK BIT(31)
-#define EGRESS_TRTCM_MODE_MASK BIT(30)
-#define EGRESS_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
-#define EGRESS_FAST_TICK_MASK GENMASK(15, 0)
-
-#define REG_TXWRR_MODE_CFG 0x1020
-#define TWRR_WEIGHT_SCALE_MASK BIT(31)
-#define TWRR_WEIGHT_BASE_MASK BIT(3)
-
-#define REG_PSE_BUF_USAGE_CFG 0x1028
-#define PSE_BUF_ESTIMATE_EN_MASK BIT(29)
-
-#define REG_GLB_TRTCM_CFG 0x1080
-#define GLB_TRTCM_EN_MASK BIT(31)
-#define GLB_TRTCM_MODE_MASK BIT(30)
-#define GLB_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
-#define GLB_FAST_TICK_MASK GENMASK(15, 0)
-
-#define REG_TXQ_CNGST_CFG 0x10a0
-#define TXQ_CNGST_DROP_EN BIT(31)
-#define TXQ_CNGST_DEI_DROP_EN BIT(30)
-
-#define REG_SLA_TRTCM_CFG 0x1150
-#define SLA_TRTCM_EN_MASK BIT(31)
-#define SLA_TRTCM_MODE_MASK BIT(30)
-#define SLA_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
-#define SLA_FAST_TICK_MASK GENMASK(15, 0)
-
-/* CTRL */
-#define QDMA_DESC_DONE_MASK BIT(31)
-#define QDMA_DESC_DROP_MASK BIT(30) /* tx: drop - rx: overflow */
-#define QDMA_DESC_MORE_MASK BIT(29) /* more SG elements */
-#define QDMA_DESC_DEI_MASK BIT(25)
-#define QDMA_DESC_NO_DROP_MASK BIT(24)
-#define QDMA_DESC_LEN_MASK GENMASK(15, 0)
-/* DATA */
-#define QDMA_DESC_NEXT_ID_MASK GENMASK(15, 0)
-/* TX MSG0 */
-#define QDMA_ETH_TXMSG_MIC_IDX_MASK BIT(30)
-#define QDMA_ETH_TXMSG_SP_TAG_MASK GENMASK(29, 14)
-#define QDMA_ETH_TXMSG_ICO_MASK BIT(13)
-#define QDMA_ETH_TXMSG_UCO_MASK BIT(12)
-#define QDMA_ETH_TXMSG_TCO_MASK BIT(11)
-#define QDMA_ETH_TXMSG_TSO_MASK BIT(10)
-#define QDMA_ETH_TXMSG_FAST_MASK BIT(9)
-#define QDMA_ETH_TXMSG_OAM_MASK BIT(8)
-#define QDMA_ETH_TXMSG_CHAN_MASK GENMASK(7, 3)
-#define QDMA_ETH_TXMSG_QUEUE_MASK GENMASK(2, 0)
-/* TX MSG1 */
-#define QDMA_ETH_TXMSG_NO_DROP BIT(31)
-#define QDMA_ETH_TXMSG_METER_MASK GENMASK(30, 24) /* 0x7f no meters */
-#define QDMA_ETH_TXMSG_FPORT_MASK GENMASK(23, 20)
-#define QDMA_ETH_TXMSG_NBOQ_MASK GENMASK(19, 15)
-#define QDMA_ETH_TXMSG_HWF_MASK BIT(14)
-#define QDMA_ETH_TXMSG_HOP_MASK BIT(13)
-#define QDMA_ETH_TXMSG_PTP_MASK BIT(12)
-#define QDMA_ETH_TXMSG_ACNT_G1_MASK GENMASK(10, 6) /* 0x1f do not count */
-#define QDMA_ETH_TXMSG_ACNT_G0_MASK GENMASK(5, 0) /* 0x3f do not count */
-
-/* RX MSG1 */
-#define QDMA_ETH_RXMSG_DEI_MASK BIT(31)
-#define QDMA_ETH_RXMSG_IP6_MASK BIT(30)
-#define QDMA_ETH_RXMSG_IP4_MASK BIT(29)
-#define QDMA_ETH_RXMSG_IP4F_MASK BIT(28)
-#define QDMA_ETH_RXMSG_L4_VALID_MASK BIT(27)
-#define QDMA_ETH_RXMSG_L4F_MASK BIT(26)
-#define QDMA_ETH_RXMSG_SPORT_MASK GENMASK(25, 21)
-#define QDMA_ETH_RXMSG_CRSN_MASK GENMASK(20, 16)
-#define QDMA_ETH_RXMSG_PPE_ENTRY_MASK GENMASK(15, 0)
-
-struct airoha_qdma_desc {
- __le32 rsv;
- __le32 ctrl;
- __le32 addr;
- __le32 data;
- __le32 msg0;
- __le32 msg1;
- __le32 msg2;
- __le32 msg3;
-};
-
-/* CTRL0 */
-#define QDMA_FWD_DESC_CTX_MASK BIT(31)
-#define QDMA_FWD_DESC_RING_MASK GENMASK(30, 28)
-#define QDMA_FWD_DESC_IDX_MASK GENMASK(27, 16)
-#define QDMA_FWD_DESC_LEN_MASK GENMASK(15, 0)
-/* CTRL1 */
-#define QDMA_FWD_DESC_FIRST_IDX_MASK GENMASK(15, 0)
-/* CTRL2 */
-#define QDMA_FWD_DESC_MORE_PKT_NUM_MASK GENMASK(2, 0)
-
-struct airoha_qdma_fwd_desc {
- __le32 addr;
- __le32 ctrl0;
- __le32 ctrl1;
- __le32 ctrl2;
- __le32 msg0;
- __le32 msg1;
- __le32 rsv0;
- __le32 rsv1;
-};
-
-enum {
- QDMA_INT_REG_IDX0,
- QDMA_INT_REG_IDX1,
- QDMA_INT_REG_IDX2,
- QDMA_INT_REG_IDX3,
- QDMA_INT_REG_IDX4,
- QDMA_INT_REG_MAX
-};
-
-enum {
- XSI_PCIE0_PORT,
- XSI_PCIE1_PORT,
- XSI_USB_PORT,
- XSI_AE_PORT,
- XSI_ETH_PORT,
-};
-
-enum {
- XSI_PCIE0_VIP_PORT_MASK = BIT(22),
- XSI_PCIE1_VIP_PORT_MASK = BIT(23),
- XSI_USB_VIP_PORT_MASK = BIT(25),
- XSI_ETH_VIP_PORT_MASK = BIT(24),
-};
-
-enum {
- DEV_STATE_INITIALIZED,
-};
-
-enum {
- CDM_CRSN_QSEL_Q1 = 1,
- CDM_CRSN_QSEL_Q5 = 5,
- CDM_CRSN_QSEL_Q6 = 6,
- CDM_CRSN_QSEL_Q15 = 15,
-};
-
-enum {
- CRSN_08 = 0x8,
- CRSN_21 = 0x15, /* KA */
- CRSN_22 = 0x16, /* hit bind and force route to CPU */
- CRSN_24 = 0x18,
- CRSN_25 = 0x19,
-};
-
-enum {
- FE_PSE_PORT_CDM1,
- FE_PSE_PORT_GDM1,
- FE_PSE_PORT_GDM2,
- FE_PSE_PORT_GDM3,
- FE_PSE_PORT_PPE1,
- FE_PSE_PORT_CDM2,
- FE_PSE_PORT_CDM3,
- FE_PSE_PORT_CDM4,
- FE_PSE_PORT_PPE2,
- FE_PSE_PORT_GDM4,
- FE_PSE_PORT_CDM5,
- FE_PSE_PORT_DROP = 0xf,
-};
+#include "airoha_regs.h"
+#include "airoha_eth.h"
-struct airoha_queue_entry {
- union {
- void *buf;
- struct sk_buff *skb;
- };
- dma_addr_t dma_addr;
- u16 dma_len;
-};
-
-struct airoha_queue {
- struct airoha_qdma *qdma;
-
- /* protect concurrent queue accesses */
- spinlock_t lock;
- struct airoha_queue_entry *entry;
- struct airoha_qdma_desc *desc;
- u16 head;
- u16 tail;
-
- int queued;
- int ndesc;
- int free_thr;
- int buf_size;
-
- struct napi_struct napi;
- struct page_pool *page_pool;
-};
-
-struct airoha_tx_irq_queue {
- struct airoha_qdma *qdma;
-
- struct napi_struct napi;
-
- int size;
- u32 *q;
-};
-
-struct airoha_hw_stats {
- /* protect concurrent hw_stats accesses */
- spinlock_t lock;
- struct u64_stats_sync syncp;
-
- /* get_stats64 */
- u64 rx_ok_pkts;
- u64 tx_ok_pkts;
- u64 rx_ok_bytes;
- u64 tx_ok_bytes;
- u64 rx_multicast;
- u64 rx_errors;
- u64 rx_drops;
- u64 tx_drops;
- u64 rx_crc_error;
- u64 rx_over_errors;
- /* ethtool stats */
- u64 tx_broadcast;
- u64 tx_multicast;
- u64 tx_len[7];
- u64 rx_broadcast;
- u64 rx_fragment;
- u64 rx_jabber;
- u64 rx_len[7];
-};
-
-struct airoha_qdma {
- struct airoha_eth *eth;
- void __iomem *regs;
-
- /* protect concurrent irqmask accesses */
- spinlock_t irq_lock;
- u32 irqmask[QDMA_INT_REG_MAX];
- int irq;
-
- struct airoha_tx_irq_queue q_tx_irq[AIROHA_NUM_TX_IRQ];
-
- struct airoha_queue q_tx[AIROHA_NUM_TX_RING];
- struct airoha_queue q_rx[AIROHA_NUM_RX_RING];
-
- /* descriptor and packet buffers for qdma hw forward */
- struct {
- void *desc;
- void *q;
- } hfwd;
-};
-
-struct airoha_gdm_port {
- struct airoha_qdma *qdma;
- struct net_device *dev;
- int id;
-
- struct airoha_hw_stats stats;
-};
-
-struct airoha_eth {
- struct device *dev;
-
- unsigned long state;
- void __iomem *fe_regs;
-
- struct reset_control_bulk_data rsts[AIROHA_MAX_NUM_RSTS];
- struct reset_control_bulk_data xsi_rsts[AIROHA_MAX_NUM_XSI_RSTS];
-
- struct net_device *napi_dev;
-
- struct airoha_qdma qdma[AIROHA_MAX_NUM_QDMA];
- struct airoha_gdm_port *ports[AIROHA_MAX_NUM_GDM_PORTS];
-};
-
-static u32 airoha_rr(void __iomem *base, u32 offset)
+u32 airoha_rr(void __iomem *base, u32 offset)
{
return readl(base + offset);
}
-static void airoha_wr(void __iomem *base, u32 offset, u32 val)
+void airoha_wr(void __iomem *base, u32 offset, u32 val)
{
writel(val, base + offset);
}
-static u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val)
+u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val)
{
val |= (airoha_rr(base, offset) & ~mask);
airoha_wr(base, offset, val);
@@ -845,68 +35,40 @@ static u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val)
return val;
}
-#define airoha_fe_rr(eth, offset) \
- airoha_rr((eth)->fe_regs, (offset))
-#define airoha_fe_wr(eth, offset, val) \
- airoha_wr((eth)->fe_regs, (offset), (val))
-#define airoha_fe_rmw(eth, offset, mask, val) \
- airoha_rmw((eth)->fe_regs, (offset), (mask), (val))
-#define airoha_fe_set(eth, offset, val) \
- airoha_rmw((eth)->fe_regs, (offset), 0, (val))
-#define airoha_fe_clear(eth, offset, val) \
- airoha_rmw((eth)->fe_regs, (offset), (val), 0)
-
-#define airoha_qdma_rr(qdma, offset) \
- airoha_rr((qdma)->regs, (offset))
-#define airoha_qdma_wr(qdma, offset, val) \
- airoha_wr((qdma)->regs, (offset), (val))
-#define airoha_qdma_rmw(qdma, offset, mask, val) \
- airoha_rmw((qdma)->regs, (offset), (mask), (val))
-#define airoha_qdma_set(qdma, offset, val) \
- airoha_rmw((qdma)->regs, (offset), 0, (val))
-#define airoha_qdma_clear(qdma, offset, val) \
- airoha_rmw((qdma)->regs, (offset), (val), 0)
-
-static void airoha_qdma_set_irqmask(struct airoha_qdma *qdma, int index,
- u32 clear, u32 set)
+static void airoha_qdma_set_irqmask(struct airoha_irq_bank *irq_bank,
+ int index, u32 clear, u32 set)
{
+ struct airoha_qdma *qdma = irq_bank->qdma;
+ int bank = irq_bank - &qdma->irq_banks[0];
unsigned long flags;
- if (WARN_ON_ONCE(index >= ARRAY_SIZE(qdma->irqmask)))
+ if (WARN_ON_ONCE(index >= ARRAY_SIZE(irq_bank->irqmask)))
return;
- spin_lock_irqsave(&qdma->irq_lock, flags);
+ spin_lock_irqsave(&irq_bank->irq_lock, flags);
- qdma->irqmask[index] &= ~clear;
- qdma->irqmask[index] |= set;
- airoha_qdma_wr(qdma, REG_INT_ENABLE(index), qdma->irqmask[index]);
+ irq_bank->irqmask[index] &= ~clear;
+ irq_bank->irqmask[index] |= set;
+ airoha_qdma_wr(qdma, REG_INT_ENABLE(bank, index),
+ irq_bank->irqmask[index]);
/* Read irq_enable register in order to guarantee the update above
* completes in the spinlock critical section.
*/
- airoha_qdma_rr(qdma, REG_INT_ENABLE(index));
-
- spin_unlock_irqrestore(&qdma->irq_lock, flags);
-}
+ airoha_qdma_rr(qdma, REG_INT_ENABLE(bank, index));
-static void airoha_qdma_irq_enable(struct airoha_qdma *qdma, int index,
- u32 mask)
-{
- airoha_qdma_set_irqmask(qdma, index, 0, mask);
+ spin_unlock_irqrestore(&irq_bank->irq_lock, flags);
}
-static void airoha_qdma_irq_disable(struct airoha_qdma *qdma, int index,
- u32 mask)
+static void airoha_qdma_irq_enable(struct airoha_irq_bank *irq_bank,
+ int index, u32 mask)
{
- airoha_qdma_set_irqmask(qdma, index, mask, 0);
+ airoha_qdma_set_irqmask(irq_bank, index, 0, mask);
}
-static bool airhoa_is_lan_gdm_port(struct airoha_gdm_port *port)
+static void airoha_qdma_irq_disable(struct airoha_irq_bank *irq_bank,
+ int index, u32 mask)
{
- /* GDM1 port on EN7581 SoC is connected to the lan dsa switch.
- * GDM{2,3,4} can be used as wan port connected to an external
- * phy module.
- */
- return port->id == 1;
+ airoha_qdma_set_irqmask(irq_bank, index, mask, 0);
}
static void airoha_set_macaddr(struct airoha_gdm_port *port, const u8 *addr)
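The hunk above replaces the single per-QDMA interrupt mask with per-bank state: every airoha_irq_bank carries its own lock and mask array, the bank number is derived from the bank's offset in qdma->irq_banks[], and REG_INT_ENABLE() is now indexed by both bank and register. The read-back before unlocking flushes the posted write while the mask still matches the hardware. A condensed sketch of that sequence (field names assumed from the structures introduced elsewhere in this series):

    static void qdma_set_irqmask(struct airoha_irq_bank *bank, int index,
                                 u32 clear, u32 set)
    {
        struct airoha_qdma *qdma = bank->qdma;
        /* Bank number = offset of this bank within the per-QDMA array. */
        int n = bank - &qdma->irq_banks[0];
        unsigned long flags;

        spin_lock_irqsave(&bank->irq_lock, flags);
        bank->irqmask[index] = (bank->irqmask[index] & ~clear) | set;
        writel(bank->irqmask[index], qdma->regs + REG_INT_ENABLE(n, index));
        /* Read back so the write reaches the device inside the lock. */
        readl(qdma->regs + REG_INT_ENABLE(n, index));
        spin_unlock_irqrestore(&bank->irq_lock, flags);
    }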
@@ -922,6 +84,8 @@ static void airoha_set_macaddr(struct airoha_gdm_port *port, const u8 *addr)
val = (addr[3] << 16) | (addr[4] << 8) | addr[5];
airoha_fe_wr(eth, REG_FE_MAC_LMIN(reg), val);
airoha_fe_wr(eth, REG_FE_MAC_LMAX(reg), val);
+
+ airoha_ppe_init_upd_mem(port);
}
static void airoha_set_gdm_port_fwd_cfg(struct airoha_eth *eth, u32 addr,
@@ -937,30 +101,23 @@ static void airoha_set_gdm_port_fwd_cfg(struct airoha_eth *eth, u32 addr,
FIELD_PREP(GDM_UCFQ_MASK, val));
}
-static int airoha_set_gdm_port(struct airoha_eth *eth, int port, bool enable)
+static int airoha_set_vip_for_gdm_port(struct airoha_gdm_port *port,
+ bool enable)
{
- u32 val = enable ? FE_PSE_PORT_PPE1 : FE_PSE_PORT_DROP;
- u32 vip_port, cfg_addr;
+ struct airoha_eth *eth = port->qdma->eth;
+ u32 vip_port;
- switch (port) {
- case XSI_PCIE0_PORT:
+ switch (port->id) {
+ case 3:
+ /* FIXME: handle XSI_PCIE1_PORT */
vip_port = XSI_PCIE0_VIP_PORT_MASK;
- cfg_addr = REG_GDM_FWD_CFG(3);
break;
- case XSI_PCIE1_PORT:
- vip_port = XSI_PCIE1_VIP_PORT_MASK;
- cfg_addr = REG_GDM_FWD_CFG(3);
- break;
- case XSI_USB_PORT:
- vip_port = XSI_USB_VIP_PORT_MASK;
- cfg_addr = REG_GDM_FWD_CFG(4);
- break;
- case XSI_ETH_PORT:
+ case 4:
+ /* FIXME: handle XSI_USB_PORT */
vip_port = XSI_ETH_VIP_PORT_MASK;
- cfg_addr = REG_GDM_FWD_CFG(4);
break;
default:
- return -EINVAL;
+ return 0;
}
if (enable) {
@@ -971,51 +128,17 @@ static int airoha_set_gdm_port(struct airoha_eth *eth, int port, bool enable)
airoha_fe_clear(eth, REG_FE_IFC_PORT_EN, vip_port);
}
- airoha_set_gdm_port_fwd_cfg(eth, cfg_addr, val);
-
- return 0;
-}
-
-static int airoha_set_gdm_ports(struct airoha_eth *eth, bool enable)
-{
- const int port_list[] = {
- XSI_PCIE0_PORT,
- XSI_PCIE1_PORT,
- XSI_USB_PORT,
- XSI_ETH_PORT
- };
- int i, err;
-
- for (i = 0; i < ARRAY_SIZE(port_list); i++) {
- err = airoha_set_gdm_port(eth, port_list[i], enable);
- if (err)
- goto error;
- }
-
return 0;
-
-error:
- for (i--; i >= 0; i--)
- airoha_set_gdm_port(eth, port_list[i], false);
-
- return err;
}
static void airoha_fe_maccr_init(struct airoha_eth *eth)
{
int p;
- for (p = 1; p <= ARRAY_SIZE(eth->ports); p++) {
+ for (p = 1; p <= ARRAY_SIZE(eth->ports); p++)
airoha_fe_set(eth, REG_GDM_FWD_CFG(p),
GDM_TCP_CKSUM | GDM_UDP_CKSUM | GDM_IP4_CKSUM |
GDM_DROP_CRC_ERR);
- airoha_set_gdm_port_fwd_cfg(eth, REG_GDM_FWD_CFG(p),
- FE_PSE_PORT_CDM1);
- airoha_fe_rmw(eth, REG_GDM_LEN_CFG(p),
- GDM_SHORT_LEN_MASK | GDM_LONG_LEN_MASK,
- FIELD_PREP(GDM_SHORT_LEN_MASK, 60) |
- FIELD_PREP(GDM_LONG_LEN_MASK, 4004));
- }
airoha_fe_rmw(eth, REG_CDM1_VLAN_CTRL, CDM1_VLAN_MASK,
FIELD_PREP(CDM1_VLAN_MASK, 0x8100));
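Register fields in this driver are consistently programmed with the bitfield.h helpers: GENMASK() names the field, FIELD_PREP() shifts a value into position, and a read-modify-write merges it into the register, as in the CDM1 VLAN setup just above. A generic, self-contained sketch of that idiom (register offset and field layout are illustrative only):

    #include <linux/bitfield.h>
    #include <linux/io.h>

    #define EXAMPLE_LEN_CFG          0x14               /* illustrative offset */
    #define EXAMPLE_LONG_LEN_MASK    GENMASK(29, 16)    /* illustrative fields */
    #define EXAMPLE_SHORT_LEN_MASK   GENMASK(13, 0)

    static void example_set_len_limits(void __iomem *regs,
                                       u32 short_len, u32 long_len)
    {
        u32 val = readl(regs + EXAMPLE_LEN_CFG);

        /* Clear both fields, then insert the new values at their offsets. */
        val &= ~(EXAMPLE_LONG_LEN_MASK | EXAMPLE_SHORT_LEN_MASK);
        val |= FIELD_PREP(EXAMPLE_LONG_LEN_MASK, long_len) |
               FIELD_PREP(EXAMPLE_SHORT_LEN_MASK, short_len);
        writel(val, regs + EXAMPLE_LEN_CFG);
    }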
@@ -1401,6 +524,25 @@ static int airoha_fe_init(struct airoha_eth *eth)
/* disable IFC by default */
airoha_fe_clear(eth, REG_FE_CSR_IFC_CFG, FE_IFC_EN_MASK);
+ airoha_fe_wr(eth, REG_PPE_DFT_CPORT0(0),
+ FIELD_PREP(DFT_CPORT_MASK(7), FE_PSE_PORT_CDM1) |
+ FIELD_PREP(DFT_CPORT_MASK(6), FE_PSE_PORT_CDM1) |
+ FIELD_PREP(DFT_CPORT_MASK(5), FE_PSE_PORT_CDM1) |
+ FIELD_PREP(DFT_CPORT_MASK(4), FE_PSE_PORT_CDM1) |
+ FIELD_PREP(DFT_CPORT_MASK(3), FE_PSE_PORT_CDM1) |
+ FIELD_PREP(DFT_CPORT_MASK(2), FE_PSE_PORT_CDM1) |
+ FIELD_PREP(DFT_CPORT_MASK(1), FE_PSE_PORT_CDM1) |
+ FIELD_PREP(DFT_CPORT_MASK(0), FE_PSE_PORT_CDM1));
+ airoha_fe_wr(eth, REG_PPE_DFT_CPORT0(1),
+ FIELD_PREP(DFT_CPORT_MASK(7), FE_PSE_PORT_CDM2) |
+ FIELD_PREP(DFT_CPORT_MASK(6), FE_PSE_PORT_CDM2) |
+ FIELD_PREP(DFT_CPORT_MASK(5), FE_PSE_PORT_CDM2) |
+ FIELD_PREP(DFT_CPORT_MASK(4), FE_PSE_PORT_CDM2) |
+ FIELD_PREP(DFT_CPORT_MASK(3), FE_PSE_PORT_CDM2) |
+ FIELD_PREP(DFT_CPORT_MASK(2), FE_PSE_PORT_CDM2) |
+ FIELD_PREP(DFT_CPORT_MASK(1), FE_PSE_PORT_CDM2) |
+ FIELD_PREP(DFT_CPORT_MASK(0), FE_PSE_PORT_CDM2));
+
/* enable 1:N vlan action, init vlan table */
airoha_fe_set(eth, REG_MC_VLAN_EN, MC_VLAN_EN_MASK);
@@ -1463,7 +605,7 @@ static int airoha_qdma_get_gdm_port(struct airoha_eth *eth,
sport = FIELD_GET(QDMA_ETH_RXMSG_SPORT_MASK, msg1);
switch (sport) {
- case 0x10 ... 0x13:
+ case 0x10 ... 0x14:
port = 0;
break;
case 0x2 ... 0x4:
@@ -1487,53 +629,93 @@ static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
while (done < budget) {
struct airoha_queue_entry *e = &q->entry[q->tail];
struct airoha_qdma_desc *desc = &q->desc[q->tail];
- dma_addr_t dma_addr = le32_to_cpu(desc->addr);
+ u32 hash, reason, msg1 = le32_to_cpu(desc->msg1);
+ struct page *page = virt_to_head_page(e->buf);
u32 desc_ctrl = le32_to_cpu(desc->ctrl);
- struct sk_buff *skb;
- int len, p;
+ struct airoha_gdm_port *port;
+ int data_len, len, p;
if (!(desc_ctrl & QDMA_DESC_DONE_MASK))
break;
- if (!dma_addr)
- break;
-
- len = FIELD_GET(QDMA_DESC_LEN_MASK, desc_ctrl);
- if (!len)
- break;
-
q->tail = (q->tail + 1) % q->ndesc;
q->queued--;
- dma_sync_single_for_cpu(eth->dev, dma_addr,
+ dma_sync_single_for_cpu(eth->dev, e->dma_addr,
SKB_WITH_OVERHEAD(q->buf_size), dir);
+ len = FIELD_GET(QDMA_DESC_LEN_MASK, desc_ctrl);
+ data_len = q->skb ? q->buf_size
+ : SKB_WITH_OVERHEAD(q->buf_size);
+ if (!len || data_len < len)
+ goto free_frag;
+
p = airoha_qdma_get_gdm_port(eth, desc);
- if (p < 0 || !eth->ports[p]) {
- page_pool_put_full_page(q->page_pool,
- virt_to_head_page(e->buf),
- true);
- continue;
+ if (p < 0 || !eth->ports[p])
+ goto free_frag;
+
+ port = eth->ports[p];
+ if (!q->skb) { /* first buffer */
+ q->skb = napi_build_skb(e->buf, q->buf_size);
+ if (!q->skb)
+ goto free_frag;
+
+ __skb_put(q->skb, len);
+ skb_mark_for_recycle(q->skb);
+ q->skb->dev = port->dev;
+ q->skb->protocol = eth_type_trans(q->skb, port->dev);
+ q->skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb_record_rx_queue(q->skb, qid);
+ } else { /* scattered frame */
+ struct skb_shared_info *shinfo = skb_shinfo(q->skb);
+ int nr_frags = shinfo->nr_frags;
+
+ if (nr_frags >= ARRAY_SIZE(shinfo->frags))
+ goto free_frag;
+
+ skb_add_rx_frag(q->skb, nr_frags, page,
+ e->buf - page_address(page), len,
+ q->buf_size);
}
- skb = napi_build_skb(e->buf, q->buf_size);
- if (!skb) {
- page_pool_put_full_page(q->page_pool,
- virt_to_head_page(e->buf),
- true);
- break;
+ if (FIELD_GET(QDMA_DESC_MORE_MASK, desc_ctrl))
+ continue;
+
+ if (netdev_uses_dsa(port->dev)) {
+ /* PPE module requires untagged packets to work
+ * properly and it provides DSA port index via the
+ * DMA descriptor. Report DSA tag to the DSA stack
+ * via skb dst info.
+ */
+ u32 sptag = FIELD_GET(QDMA_ETH_RXMSG_SPTAG,
+ le32_to_cpu(desc->msg0));
+
+ if (sptag < ARRAY_SIZE(port->dsa_meta) &&
+ port->dsa_meta[sptag])
+ skb_dst_set_noref(q->skb,
+ &port->dsa_meta[sptag]->dst);
}
- skb_reserve(skb, 2);
- __skb_put(skb, len);
- skb_mark_for_recycle(skb);
- skb->dev = eth->ports[p]->dev;
- skb->protocol = eth_type_trans(skb, skb->dev);
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- skb_record_rx_queue(skb, qid);
- napi_gro_receive(&q->napi, skb);
+ hash = FIELD_GET(AIROHA_RXD4_FOE_ENTRY, msg1);
+ if (hash != AIROHA_RXD4_FOE_ENTRY)
+ skb_set_hash(q->skb, jhash_1word(hash, 0),
+ PKT_HASH_TYPE_L4);
+
+ reason = FIELD_GET(AIROHA_RXD4_PPE_CPU_REASON, msg1);
+ if (reason == PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
+ airoha_ppe_check_skb(eth->ppe, q->skb, hash);
done++;
+ napi_gro_receive(&q->napi, q->skb);
+ q->skb = NULL;
+ continue;
+free_frag:
+ if (q->skb) {
+ dev_kfree_skb(q->skb);
+ q->skb = NULL;
+ } else {
+ page_pool_put_full_page(q->page_pool, page, true);
+ }
}
airoha_qdma_fill_rx_queue(q);
@@ -1550,9 +732,20 @@ static int airoha_qdma_rx_napi_poll(struct napi_struct *napi, int budget)
done += cur;
} while (cur && done < budget);
- if (done < budget && napi_complete(napi))
- airoha_qdma_irq_enable(q->qdma, QDMA_INT_REG_IDX1,
- RX_DONE_INT_MASK);
+ if (done < budget && napi_complete(napi)) {
+ struct airoha_qdma *qdma = q->qdma;
+ int i, qid = q - &qdma->q_rx[0];
+ int intr_reg = qid < RX_DONE_HIGH_OFFSET ? QDMA_INT_REG_IDX1
+ : QDMA_INT_REG_IDX2;
+
+ for (i = 0; i < ARRAY_SIZE(qdma->irq_banks); i++) {
+ if (!(BIT(qid) & RX_IRQ_BANK_PIN_MASK(i)))
+ continue;
+
+ airoha_qdma_irq_enable(&qdma->irq_banks[i], intr_reg,
+ BIT(qid % RX_DONE_HIGH_OFFSET));
+ }
+ }
return done;
}
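The reworked RX path above assembles one skb from several hardware descriptors: the first buffer becomes the skb head via napi_build_skb(), each follow-up buffer is attached as a page fragment, and the frame is only passed to GRO once a descriptor without QDMA_DESC_MORE_MASK arrives. In outline (a simplified restatement of the control flow, not the driver code verbatim):

    if (!q->skb) {
        /* First buffer of the frame becomes the skb head. */
        q->skb = napi_build_skb(e->buf, q->buf_size);
        if (!q->skb)
            goto free_frag;
        __skb_put(q->skb, len);
        skb_mark_for_recycle(q->skb);
    } else {
        /* Follow-up buffers are appended as page fragments. */
        skb_add_rx_frag(q->skb, skb_shinfo(q->skb)->nr_frags, page,
                        e->buf - page_address(page), len, q->buf_size);
    }

    /* Keep collecting buffers while the hardware flags more segments. */
    if (FIELD_GET(QDMA_DESC_MORE_MASK, desc_ctrl))
        continue;

    napi_gro_receive(&q->napi, q->skb);
    q->skb = NULL;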
@@ -1608,6 +801,7 @@ static int airoha_qdma_init_rx_queue(struct airoha_queue *q,
FIELD_PREP(RX_RING_THR_MASK, thr));
airoha_qdma_rmw(qdma, REG_RX_DMA_IDX(qid), RX_RING_DMA_IDX_MASK,
FIELD_PREP(RX_RING_DMA_IDX_MASK, q->head));
+ airoha_qdma_set(qdma, REG_RX_SCATTER_CFG(qid), RX_RING_SG_EN_MASK);
airoha_qdma_fill_rx_queue(q);
@@ -1754,7 +948,7 @@ unlock:
}
if (done < budget && napi_complete(napi))
- airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX0,
+ airoha_qdma_irq_enable(&qdma->irq_banks[0], QDMA_INT_REG_IDX0,
TX_DONE_INT_MASK(id));
return done;
@@ -1789,6 +983,10 @@ static int airoha_qdma_init_tx_queue(struct airoha_queue *q,
WRITE_ONCE(q->desc[i].ctrl, cpu_to_le32(val));
}
+ /* xmit ring drop default setting */
+ airoha_qdma_set(qdma, REG_TX_RING_BLOCKING(qid),
+ TX_RING_IRQ_BLOCKING_TX_DROP_EN_MASK);
+
airoha_qdma_wr(qdma, REG_TX_RING_BASE(qid), dma_addr);
airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK,
FIELD_PREP(TX_RING_CPU_IDX_MASK, q->head));
@@ -1867,37 +1065,64 @@ static void airoha_qdma_cleanup_tx_queue(struct airoha_queue *q)
static int airoha_qdma_init_hfwd_queues(struct airoha_qdma *qdma)
{
+ int size, index, num_desc = HW_DSCP_NUM;
struct airoha_eth *eth = qdma->eth;
+ int id = qdma - &eth->qdma[0];
+ u32 status, buf_size;
dma_addr_t dma_addr;
- u32 status;
- int size;
+ const char *name;
- size = HW_DSCP_NUM * sizeof(struct airoha_qdma_fwd_desc);
- qdma->hfwd.desc = dmam_alloc_coherent(eth->dev, size, &dma_addr,
- GFP_KERNEL);
- if (!qdma->hfwd.desc)
+ name = devm_kasprintf(eth->dev, GFP_KERNEL, "qdma%d-buf", id);
+ if (!name)
return -ENOMEM;
- airoha_qdma_wr(qdma, REG_FWD_DSCP_BASE, dma_addr);
+ buf_size = id ? AIROHA_MAX_PACKET_SIZE / 2 : AIROHA_MAX_PACKET_SIZE;
+ index = of_property_match_string(eth->dev->of_node,
+ "memory-region-names", name);
+ if (index >= 0) {
+ struct reserved_mem *rmem;
+ struct device_node *np;
- size = AIROHA_MAX_PACKET_SIZE * HW_DSCP_NUM;
- qdma->hfwd.q = dmam_alloc_coherent(eth->dev, size, &dma_addr,
- GFP_KERNEL);
- if (!qdma->hfwd.q)
- return -ENOMEM;
+ /* Use the reserved memory region for the hw forwarding buffer
+ * queue if it is available in the DTS
+ */
+ np = of_parse_phandle(eth->dev->of_node, "memory-region",
+ index);
+ if (!np)
+ return -ENODEV;
+
+ rmem = of_reserved_mem_lookup(np);
+ of_node_put(np);
+ dma_addr = rmem->base;
+ /* Compute the number of hw descriptors according to the
+ * reserved memory size and the payload buffer size
+ */
+ num_desc = div_u64(rmem->size, buf_size);
+ } else {
+ size = buf_size * num_desc;
+ if (!dmam_alloc_coherent(eth->dev, size, &dma_addr,
+ GFP_KERNEL))
+ return -ENOMEM;
+ }
airoha_qdma_wr(qdma, REG_FWD_BUF_BASE, dma_addr);
+ size = num_desc * sizeof(struct airoha_qdma_fwd_desc);
+ if (!dmam_alloc_coherent(eth->dev, size, &dma_addr, GFP_KERNEL))
+ return -ENOMEM;
+
+ airoha_qdma_wr(qdma, REG_FWD_DSCP_BASE, dma_addr);
+ /* QDMA0: 2KB. QDMA1: 1KB */
airoha_qdma_rmw(qdma, REG_HW_FWD_DSCP_CFG,
HW_FWD_DSCP_PAYLOAD_SIZE_MASK,
- FIELD_PREP(HW_FWD_DSCP_PAYLOAD_SIZE_MASK, 0));
+ FIELD_PREP(HW_FWD_DSCP_PAYLOAD_SIZE_MASK, !!id));
airoha_qdma_rmw(qdma, REG_FWD_DSCP_LOW_THR, FWD_DSCP_LOW_THR_MASK,
FIELD_PREP(FWD_DSCP_LOW_THR_MASK, 128));
airoha_qdma_rmw(qdma, REG_LMGR_INIT_CFG,
LMGR_INIT_START | LMGR_SRAM_MODE_MASK |
HW_FWD_DESC_NUM_MASK,
- FIELD_PREP(HW_FWD_DESC_NUM_MASK, HW_DSCP_NUM) |
- LMGR_INIT_START);
+ FIELD_PREP(HW_FWD_DESC_NUM_MASK, num_desc) |
+ LMGR_INIT_START | LMGR_SRAM_MODE_MASK);
return read_poll_timeout(airoha_qdma_rr, status,
!(status & LMGR_INIT_START), USEC_PER_MSEC,
@@ -1955,18 +1180,49 @@ static void airoha_qdma_init_qos(struct airoha_qdma *qdma)
FIELD_PREP(SLA_SLOW_TICK_RATIO_MASK, 40));
}
+static void airoha_qdma_init_qos_stats(struct airoha_qdma *qdma)
+{
+ int i;
+
+ for (i = 0; i < AIROHA_NUM_QOS_CHANNELS; i++) {
+ /* Tx-cpu transferred count */
+ airoha_qdma_wr(qdma, REG_CNTR_VAL(i << 1), 0);
+ airoha_qdma_wr(qdma, REG_CNTR_CFG(i << 1),
+ CNTR_EN_MASK | CNTR_ALL_QUEUE_EN_MASK |
+ CNTR_ALL_DSCP_RING_EN_MASK |
+ FIELD_PREP(CNTR_CHAN_MASK, i));
+ /* Tx-fwd transferred count */
+ airoha_qdma_wr(qdma, REG_CNTR_VAL((i << 1) + 1), 0);
+ airoha_qdma_wr(qdma, REG_CNTR_CFG((i << 1) + 1),
+ CNTR_EN_MASK | CNTR_ALL_QUEUE_EN_MASK |
+ CNTR_ALL_DSCP_RING_EN_MASK |
+ FIELD_PREP(CNTR_SRC_MASK, 1) |
+ FIELD_PREP(CNTR_CHAN_MASK, i));
+ }
+}
+
static int airoha_qdma_hw_init(struct airoha_qdma *qdma)
{
int i;
- /* clear pending irqs */
- for (i = 0; i < ARRAY_SIZE(qdma->irqmask); i++)
+ for (i = 0; i < ARRAY_SIZE(qdma->irq_banks); i++) {
+ /* clear pending irqs */
airoha_qdma_wr(qdma, REG_INT_STATUS(i), 0xffffffff);
-
- /* setup irqs */
- airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX0, INT_IDX0_MASK);
- airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX1, INT_IDX1_MASK);
- airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX4, INT_IDX4_MASK);
+ /* setup rx irqs */
+ airoha_qdma_irq_enable(&qdma->irq_banks[i], QDMA_INT_REG_IDX0,
+ INT_RX0_MASK(RX_IRQ_BANK_PIN_MASK(i)));
+ airoha_qdma_irq_enable(&qdma->irq_banks[i], QDMA_INT_REG_IDX1,
+ INT_RX1_MASK(RX_IRQ_BANK_PIN_MASK(i)));
+ airoha_qdma_irq_enable(&qdma->irq_banks[i], QDMA_INT_REG_IDX2,
+ INT_RX2_MASK(RX_IRQ_BANK_PIN_MASK(i)));
+ airoha_qdma_irq_enable(&qdma->irq_banks[i], QDMA_INT_REG_IDX3,
+ INT_RX3_MASK(RX_IRQ_BANK_PIN_MASK(i)));
+ }
+ /* setup tx irqs */
+ airoha_qdma_irq_enable(&qdma->irq_banks[0], QDMA_INT_REG_IDX0,
+ TX_COHERENT_LOW_INT_MASK | INT_TX_MASK);
+ airoha_qdma_irq_enable(&qdma->irq_banks[0], QDMA_INT_REG_IDX4,
+ TX_COHERENT_HIGH_INT_MASK);
/* setup irq binding */
for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
@@ -1982,7 +1238,6 @@ static int airoha_qdma_hw_init(struct airoha_qdma *qdma)
}
airoha_qdma_wr(qdma, REG_QDMA_GLOBAL_CFG,
- GLOBAL_CFG_RX_2B_OFFSET_MASK |
FIELD_PREP(GLOBAL_CFG_DMA_PREFERENCE_MASK, 3) |
GLOBAL_CFG_CPU_TXR_RR_MASK |
GLOBAL_CFG_PAYLOAD_BYTE_SWAP_MASK |
@@ -2005,36 +1260,46 @@ static int airoha_qdma_hw_init(struct airoha_qdma *qdma)
airoha_qdma_set(qdma, REG_TXQ_CNGST_CFG,
TXQ_CNGST_DROP_EN | TXQ_CNGST_DEI_DROP_EN);
+ airoha_qdma_init_qos_stats(qdma);
return 0;
}
static irqreturn_t airoha_irq_handler(int irq, void *dev_instance)
{
- struct airoha_qdma *qdma = dev_instance;
- u32 intr[ARRAY_SIZE(qdma->irqmask)];
+ struct airoha_irq_bank *irq_bank = dev_instance;
+ struct airoha_qdma *qdma = irq_bank->qdma;
+ u32 rx_intr_mask = 0, rx_intr1, rx_intr2;
+ u32 intr[ARRAY_SIZE(irq_bank->irqmask)];
int i;
- for (i = 0; i < ARRAY_SIZE(qdma->irqmask); i++) {
+ for (i = 0; i < ARRAY_SIZE(intr); i++) {
intr[i] = airoha_qdma_rr(qdma, REG_INT_STATUS(i));
- intr[i] &= qdma->irqmask[i];
+ intr[i] &= irq_bank->irqmask[i];
airoha_qdma_wr(qdma, REG_INT_STATUS(i), intr[i]);
}
if (!test_bit(DEV_STATE_INITIALIZED, &qdma->eth->state))
return IRQ_NONE;
- if (intr[1] & RX_DONE_INT_MASK) {
- airoha_qdma_irq_disable(qdma, QDMA_INT_REG_IDX1,
- RX_DONE_INT_MASK);
+ rx_intr1 = intr[1] & RX_DONE_LOW_INT_MASK;
+ if (rx_intr1) {
+ airoha_qdma_irq_disable(irq_bank, QDMA_INT_REG_IDX1, rx_intr1);
+ rx_intr_mask |= rx_intr1;
+ }
- for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
- if (!qdma->q_rx[i].ndesc)
- continue;
+ rx_intr2 = intr[2] & RX_DONE_HIGH_INT_MASK;
+ if (rx_intr2) {
+ airoha_qdma_irq_disable(irq_bank, QDMA_INT_REG_IDX2, rx_intr2);
+ rx_intr_mask |= (rx_intr2 << 16);
+ }
- if (intr[1] & BIT(i))
- napi_schedule(&qdma->q_rx[i].napi);
- }
+ for (i = 0; rx_intr_mask && i < ARRAY_SIZE(qdma->q_rx); i++) {
+ if (!qdma->q_rx[i].ndesc)
+ continue;
+
+ if (rx_intr_mask & BIT(i))
+ napi_schedule(&qdma->q_rx[i].napi);
}
if (intr[0] & INT_TX_MASK) {
@@ -2042,7 +1307,7 @@ static irqreturn_t airoha_irq_handler(int irq, void *dev_instance)
if (!(intr[0] & TX_DONE_INT_MASK(i)))
continue;
- airoha_qdma_irq_disable(qdma, QDMA_INT_REG_IDX0,
+ airoha_qdma_irq_disable(irq_bank, QDMA_INT_REG_IDX0,
TX_DONE_INT_MASK(i));
napi_schedule(&qdma->q_tx_irq[i].napi);
}
@@ -2051,6 +1316,39 @@ static irqreturn_t airoha_irq_handler(int irq, void *dev_instance)
return IRQ_HANDLED;
}
+static int airoha_qdma_init_irq_banks(struct platform_device *pdev,
+ struct airoha_qdma *qdma)
+{
+ struct airoha_eth *eth = qdma->eth;
+ int i, id = qdma - &eth->qdma[0];
+
+ for (i = 0; i < ARRAY_SIZE(qdma->irq_banks); i++) {
+ struct airoha_irq_bank *irq_bank = &qdma->irq_banks[i];
+ int err, irq_index = 4 * id + i;
+ const char *name;
+
+ spin_lock_init(&irq_bank->irq_lock);
+ irq_bank->qdma = qdma;
+
+ irq_bank->irq = platform_get_irq(pdev, irq_index);
+ if (irq_bank->irq < 0)
+ return irq_bank->irq;
+
+ name = devm_kasprintf(eth->dev, GFP_KERNEL,
+ KBUILD_MODNAME ".%d", irq_index);
+ if (!name)
+ return -ENOMEM;
+
+ err = devm_request_irq(eth->dev, irq_bank->irq,
+ airoha_irq_handler, IRQF_SHARED, name,
+ irq_bank);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
static int airoha_qdma_init(struct platform_device *pdev,
struct airoha_eth *eth,
struct airoha_qdma *qdma)
@@ -2058,9 +1356,7 @@ static int airoha_qdma_init(struct platform_device *pdev,
int err, id = qdma - &eth->qdma[0];
const char *res;
- spin_lock_init(&qdma->irq_lock);
qdma->eth = eth;
-
res = devm_kasprintf(eth->dev, GFP_KERNEL, "qdma%d", id);
if (!res)
return -ENOMEM;
@@ -2070,12 +1366,7 @@ static int airoha_qdma_init(struct platform_device *pdev,
return dev_err_probe(eth->dev, PTR_ERR(qdma->regs),
"failed to iomap qdma%d regs\n", id);
- qdma->irq = platform_get_irq(pdev, 4 * id);
- if (qdma->irq < 0)
- return qdma->irq;
-
- err = devm_request_irq(eth->dev, qdma->irq, airoha_irq_handler,
- IRQF_SHARED, KBUILD_MODNAME, qdma);
+ err = airoha_qdma_init_irq_banks(pdev, qdma);
if (err)
return err;
@@ -2125,6 +1416,10 @@ static int airoha_hw_init(struct platform_device *pdev,
return err;
}
+ err = airoha_ppe_init(eth);
+ if (err)
+ return err;
+
set_bit(DEV_STATE_INITIALIZED, &eth->state);
return 0;
@@ -2138,17 +1433,14 @@ static void airoha_hw_cleanup(struct airoha_qdma *qdma)
if (!qdma->q_rx[i].ndesc)
continue;
- napi_disable(&qdma->q_rx[i].napi);
netif_napi_del(&qdma->q_rx[i].napi);
airoha_qdma_cleanup_rx_queue(&qdma->q_rx[i]);
if (qdma->q_rx[i].page_pool)
page_pool_destroy(qdma->q_rx[i].page_pool);
}
- for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
- napi_disable(&qdma->q_tx_irq[i].napi);
+ for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++)
netif_napi_del(&qdma->q_tx_irq[i].napi);
- }
for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
if (!qdma->q_tx[i].ndesc)
@@ -2173,6 +1465,21 @@ static void airoha_qdma_start_napi(struct airoha_qdma *qdma)
}
}
+static void airoha_qdma_stop_napi(struct airoha_qdma *qdma)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++)
+ napi_disable(&qdma->q_tx_irq[i].napi);
+
+ for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
+ if (!qdma->q_rx[i].ndesc)
+ continue;
+
+ napi_disable(&qdma->q_rx[i].napi);
+ }
+}
+
static void airoha_update_hw_stats(struct airoha_gdm_port *port)
{
struct airoha_eth *eth = port->qdma->eth;
@@ -2319,12 +1626,12 @@ static void airoha_update_hw_stats(struct airoha_gdm_port *port)
static int airoha_dev_open(struct net_device *dev)
{
+ int err, len = ETH_HLEN + dev->mtu + ETH_FCS_LEN;
struct airoha_gdm_port *port = netdev_priv(dev);
struct airoha_qdma *qdma = port->qdma;
- int err;
netif_tx_start_all_queues(dev);
- err = airoha_set_gdm_ports(qdma->eth, true);
+ err = airoha_set_vip_for_gdm_port(port, true);
if (err)
return err;
@@ -2335,9 +1642,15 @@ static int airoha_dev_open(struct net_device *dev)
airoha_fe_clear(qdma->eth, REG_GDM_INGRESS_CFG(port->id),
GDM_STAG_EN_MASK);
+ airoha_fe_rmw(qdma->eth, REG_GDM_LEN_CFG(port->id),
+ GDM_SHORT_LEN_MASK | GDM_LONG_LEN_MASK,
+ FIELD_PREP(GDM_SHORT_LEN_MASK, 60) |
+ FIELD_PREP(GDM_LONG_LEN_MASK, len));
+
airoha_qdma_set(qdma, REG_QDMA_GLOBAL_CFG,
GLOBAL_CFG_TX_DMA_EN_MASK |
GLOBAL_CFG_RX_DMA_EN_MASK);
+ atomic_inc(&qdma->users);
return 0;
}
@@ -2349,20 +1662,24 @@ static int airoha_dev_stop(struct net_device *dev)
int i, err;
netif_tx_disable(dev);
- err = airoha_set_gdm_ports(qdma->eth, false);
+ err = airoha_set_vip_for_gdm_port(port, false);
if (err)
return err;
- airoha_qdma_clear(qdma, REG_QDMA_GLOBAL_CFG,
- GLOBAL_CFG_TX_DMA_EN_MASK |
- GLOBAL_CFG_RX_DMA_EN_MASK);
+ for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++)
+ netdev_tx_reset_subqueue(dev, i);
- for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
- if (!qdma->q_tx[i].ndesc)
- continue;
+ if (atomic_dec_and_test(&qdma->users)) {
+ airoha_qdma_clear(qdma, REG_QDMA_GLOBAL_CFG,
+ GLOBAL_CFG_TX_DMA_EN_MASK |
+ GLOBAL_CFG_RX_DMA_EN_MASK);
- airoha_qdma_cleanup_tx_queue(&qdma->q_tx[i]);
- netdev_tx_reset_subqueue(dev, i);
+ for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
+ if (!qdma->q_tx[i].ndesc)
+ continue;
+
+ airoha_qdma_cleanup_tx_queue(&qdma->q_tx[i]);
+ }
}
return 0;
@@ -2382,12 +1699,81 @@ static int airoha_dev_set_macaddr(struct net_device *dev, void *p)
return 0;
}
+static void airhoha_set_gdm2_loopback(struct airoha_gdm_port *port)
+{
+ u32 pse_port = port->id == 3 ? FE_PSE_PORT_GDM3 : FE_PSE_PORT_GDM4;
+ struct airoha_eth *eth = port->qdma->eth;
+ u32 chan = port->id == 3 ? 4 : 0;
+
+ /* Forward the traffic to the proper GDM port */
+ airoha_set_gdm_port_fwd_cfg(eth, REG_GDM_FWD_CFG(2), pse_port);
+ airoha_fe_clear(eth, REG_GDM_FWD_CFG(2), GDM_STRIP_CRC);
+
+ /* Enable GDM2 loopback */
+ airoha_fe_wr(eth, REG_GDM_TXCHN_EN(2), 0xffffffff);
+ airoha_fe_wr(eth, REG_GDM_RXCHN_EN(2), 0xffff);
+ airoha_fe_rmw(eth, REG_GDM_LPBK_CFG(2),
+ LPBK_CHAN_MASK | LPBK_MODE_MASK | LPBK_EN_MASK,
+ FIELD_PREP(LPBK_CHAN_MASK, chan) | LPBK_EN_MASK);
+ airoha_fe_rmw(eth, REG_GDM_LEN_CFG(2),
+ GDM_SHORT_LEN_MASK | GDM_LONG_LEN_MASK,
+ FIELD_PREP(GDM_SHORT_LEN_MASK, 60) |
+ FIELD_PREP(GDM_LONG_LEN_MASK, AIROHA_MAX_MTU));
+
+ /* Disable VIP and IFC for GDM2 */
+ airoha_fe_clear(eth, REG_FE_VIP_PORT_EN, BIT(2));
+ airoha_fe_clear(eth, REG_FE_IFC_PORT_EN, BIT(2));
+
+ if (port->id == 3) {
+ /* FIXME: handle XSI_PCIE1_PORT */
+ airoha_fe_rmw(eth, REG_FE_WAN_PORT,
+ WAN1_EN_MASK | WAN1_MASK | WAN0_MASK,
+ FIELD_PREP(WAN0_MASK, HSGMII_LAN_PCIE0_SRCPORT));
+ airoha_fe_rmw(eth,
+ REG_SP_DFT_CPORT(HSGMII_LAN_PCIE0_SRCPORT >> 3),
+ SP_CPORT_PCIE0_MASK,
+ FIELD_PREP(SP_CPORT_PCIE0_MASK,
+ FE_PSE_PORT_CDM2));
+ } else {
+ /* FIXME: handle XSI_USB_PORT */
+ airoha_fe_rmw(eth, REG_SRC_PORT_FC_MAP6,
+ FC_ID_OF_SRC_PORT24_MASK,
+ FIELD_PREP(FC_ID_OF_SRC_PORT24_MASK, 2));
+ airoha_fe_rmw(eth, REG_FE_WAN_PORT,
+ WAN1_EN_MASK | WAN1_MASK | WAN0_MASK,
+ FIELD_PREP(WAN0_MASK, HSGMII_LAN_ETH_SRCPORT));
+ airoha_fe_rmw(eth,
+ REG_SP_DFT_CPORT(HSGMII_LAN_ETH_SRCPORT >> 3),
+ SP_CPORT_ETH_MASK,
+ FIELD_PREP(SP_CPORT_ETH_MASK, FE_PSE_PORT_CDM2));
+ }
+}
+
static int airoha_dev_init(struct net_device *dev)
{
struct airoha_gdm_port *port = netdev_priv(dev);
+ struct airoha_eth *eth = port->qdma->eth;
+ u32 pse_port;
airoha_set_macaddr(port, dev->dev_addr);
+ switch (port->id) {
+ case 3:
+ case 4:
+ /* If GDM2 is active we can't enable loopback */
+ if (!eth->ports[1])
+ airhoha_set_gdm2_loopback(port);
+ fallthrough;
+ case 2:
+ pse_port = FE_PSE_PORT_PPE2;
+ break;
+ default:
+ pse_port = FE_PSE_PORT_PPE1;
+ break;
+ }
+
+ airoha_set_gdm_port_fwd_cfg(eth, REG_GDM_FWD_CFG(port->id), pse_port);
+
return 0;
}
@@ -2413,21 +1799,103 @@ static void airoha_dev_get_stats64(struct net_device *dev,
} while (u64_stats_fetch_retry(&port->stats.syncp, start));
}
+static int airoha_dev_change_mtu(struct net_device *dev, int mtu)
+{
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ struct airoha_eth *eth = port->qdma->eth;
+ u32 len = ETH_HLEN + mtu + ETH_FCS_LEN;
+
+ airoha_fe_rmw(eth, REG_GDM_LEN_CFG(port->id),
+ GDM_LONG_LEN_MASK,
+ FIELD_PREP(GDM_LONG_LEN_MASK, len));
+ WRITE_ONCE(dev->mtu, mtu);
+
+ return 0;
+}
+
+static u16 airoha_dev_select_queue(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev)
+{
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ int queue, channel;
+
+ /* For DSA devices, select the QoS channel according to the DSA user
+ * port index; otherwise rely on the port id. Select the QoS queue
+ * based on the skb priority.
+ */
+ channel = netdev_uses_dsa(dev) ? skb_get_queue_mapping(skb) : port->id;
+ channel = channel % AIROHA_NUM_QOS_CHANNELS;
+ queue = (skb->priority - 1) % AIROHA_NUM_QOS_QUEUES; /* QoS queue */
+ queue = channel * AIROHA_NUM_QOS_QUEUES + queue;
+
+ return queue < dev->num_tx_queues ? queue : 0;
+}
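
A short worked example of the queue selection above, assuming AIROHA_NUM_QOS_CHANNELS = 4 and AIROHA_NUM_QOS_QUEUES = 8 as defined in airoha_eth.h below; note that an unset (zero) skb->priority wraps to the last QoS queue of the channel because the subtraction is unsigned.

/* Sketch only: txq selection for a non-DSA port, mirroring
 * airoha_dev_select_queue(). E.g. port id 2, skb priority 3:
 * channel = 2, queue = (3 - 1) % 8 = 2, txq = 2 * 8 + 2 = 18.
 */
static u16 example_select_queue(u32 port_id, u32 skb_priority,
				unsigned int num_tx_queues)
{
	u32 channel = port_id % AIROHA_NUM_QOS_CHANNELS;
	u32 queue = (skb_priority - 1) % AIROHA_NUM_QOS_QUEUES;

	queue = channel * AIROHA_NUM_QOS_QUEUES + queue;

	return queue < num_tx_queues ? queue : 0;
}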
+
+static u32 airoha_get_dsa_tag(struct sk_buff *skb, struct net_device *dev)
+{
+#if IS_ENABLED(CONFIG_NET_DSA)
+ struct ethhdr *ehdr;
+ u8 xmit_tpid;
+ u16 tag;
+
+ if (!netdev_uses_dsa(dev))
+ return 0;
+
+ if (dev->dsa_ptr->tag_ops->proto != DSA_TAG_PROTO_MTK)
+ return 0;
+
+ if (skb_cow_head(skb, 0))
+ return 0;
+
+ ehdr = (struct ethhdr *)skb->data;
+ tag = be16_to_cpu(ehdr->h_proto);
+ xmit_tpid = tag >> 8;
+
+ switch (xmit_tpid) {
+ case MTK_HDR_XMIT_TAGGED_TPID_8100:
+ ehdr->h_proto = cpu_to_be16(ETH_P_8021Q);
+ tag &= ~(MTK_HDR_XMIT_TAGGED_TPID_8100 << 8);
+ break;
+ case MTK_HDR_XMIT_TAGGED_TPID_88A8:
+ ehdr->h_proto = cpu_to_be16(ETH_P_8021AD);
+ tag &= ~(MTK_HDR_XMIT_TAGGED_TPID_88A8 << 8);
+ break;
+ default:
+ /* The PPE module requires untagged DSA packets to work properly,
+ * so move the DSA tag to the DMA descriptor.
+ */
+ memmove(skb->data + MTK_HDR_LEN, skb->data, 2 * ETH_ALEN);
+ __skb_pull(skb, MTK_HDR_LEN);
+ break;
+ }
+
+ return tag;
+#else
+ return 0;
+#endif
+}
+
static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
struct net_device *dev)
{
- struct skb_shared_info *sinfo = skb_shinfo(skb);
struct airoha_gdm_port *port = netdev_priv(dev);
- u32 msg0 = 0, msg1, len = skb_headlen(skb);
- int i, qid = skb_get_queue_mapping(skb);
struct airoha_qdma *qdma = port->qdma;
- u32 nr_frags = 1 + sinfo->nr_frags;
+ u32 nr_frags, tag, msg0, msg1, len;
struct netdev_queue *txq;
struct airoha_queue *q;
- void *data = skb->data;
+ void *data;
+ int i, qid;
u16 index;
u8 fport;
+ qid = skb_get_queue_mapping(skb) % ARRAY_SIZE(qdma->q_tx);
+ tag = airoha_get_dsa_tag(skb, dev);
+
+ msg0 = FIELD_PREP(QDMA_ETH_TXMSG_CHAN_MASK,
+ qid / AIROHA_NUM_QOS_QUEUES) |
+ FIELD_PREP(QDMA_ETH_TXMSG_QUEUE_MASK,
+ qid % AIROHA_NUM_QOS_QUEUES) |
+ FIELD_PREP(QDMA_ETH_TXMSG_SP_TAG_MASK, tag);
if (skb->ip_summed == CHECKSUM_PARTIAL)
msg0 |= FIELD_PREP(QDMA_ETH_TXMSG_TCO_MASK, 1) |
FIELD_PREP(QDMA_ETH_TXMSG_UCO_MASK, 1) |
@@ -2438,8 +1906,9 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
if (skb_cow_head(skb, 0))
goto error;
- if (sinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
- __be16 csum = cpu_to_be16(sinfo->gso_size);
+ if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 |
+ SKB_GSO_TCPV6)) {
+ __be16 csum = cpu_to_be16(skb_shinfo(skb)->gso_size);
tcp_hdr(skb)->check = (__force __sum16)csum;
msg0 |= FIELD_PREP(QDMA_ETH_TXMSG_TSO_MASK, 1);
@@ -2457,6 +1926,8 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
spin_lock_bh(&q->lock);
txq = netdev_get_tx_queue(dev, qid);
+ nr_frags = 1 + skb_shinfo(skb)->nr_frags;
+
if (q->queued + nr_frags > q->ndesc) {
/* not enough space in the queue */
netif_tx_stop_queue(txq);
@@ -2464,11 +1935,14 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
return NETDEV_TX_BUSY;
}
+ len = skb_headlen(skb);
+ data = skb->data;
index = q->head;
+
for (i = 0; i < nr_frags; i++) {
struct airoha_qdma_desc *desc = &q->desc[index];
struct airoha_queue_entry *e = &q->entry[index];
- skb_frag_t *frag = &sinfo->frags[i];
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
dma_addr_t addr;
u32 val;
@@ -2597,13 +2071,695 @@ airoha_ethtool_get_rmon_stats(struct net_device *dev,
} while (u64_stats_fetch_retry(&port->stats.syncp, start));
}
+static int airoha_qdma_set_chan_tx_sched(struct airoha_gdm_port *port,
+ int channel, enum tx_sched_mode mode,
+ const u16 *weights, u8 n_weights)
+{
+ int i;
+
+ for (i = 0; i < AIROHA_NUM_TX_RING; i++)
+ airoha_qdma_clear(port->qdma, REG_QUEUE_CLOSE_CFG(channel),
+ TXQ_DISABLE_CHAN_QUEUE_MASK(channel, i));
+
+ for (i = 0; i < n_weights; i++) {
+ u32 status;
+ int err;
+
+ airoha_qdma_wr(port->qdma, REG_TXWRR_WEIGHT_CFG,
+ TWRR_RW_CMD_MASK |
+ FIELD_PREP(TWRR_CHAN_IDX_MASK, channel) |
+ FIELD_PREP(TWRR_QUEUE_IDX_MASK, i) |
+ FIELD_PREP(TWRR_VALUE_MASK, weights[i]));
+ err = read_poll_timeout(airoha_qdma_rr, status,
+ status & TWRR_RW_CMD_DONE,
+ USEC_PER_MSEC, 10 * USEC_PER_MSEC,
+ true, port->qdma,
+ REG_TXWRR_WEIGHT_CFG);
+ if (err)
+ return err;
+ }
+
+ airoha_qdma_rmw(port->qdma, REG_CHAN_QOS_MODE(channel >> 3),
+ CHAN_QOS_MODE_MASK(channel),
+ mode << __ffs(CHAN_QOS_MODE_MASK(channel)));
+
+ return 0;
+}
+
+static int airoha_qdma_set_tx_prio_sched(struct airoha_gdm_port *port,
+ int channel)
+{
+ static const u16 w[AIROHA_NUM_QOS_QUEUES] = {};
+
+ return airoha_qdma_set_chan_tx_sched(port, channel, TC_SCH_SP, w,
+ ARRAY_SIZE(w));
+}
+
+static int airoha_qdma_set_tx_ets_sched(struct airoha_gdm_port *port,
+ int channel,
+ struct tc_ets_qopt_offload *opt)
+{
+ struct tc_ets_qopt_offload_replace_params *p = &opt->replace_params;
+ enum tx_sched_mode mode = TC_SCH_SP;
+ u16 w[AIROHA_NUM_QOS_QUEUES] = {};
+ int i, nstrict = 0;
+
+ if (p->bands > AIROHA_NUM_QOS_QUEUES)
+ return -EINVAL;
+
+ for (i = 0; i < p->bands; i++) {
+ if (!p->quanta[i])
+ nstrict++;
+ }
+
+ /* this configuration is not supported by the hw */
+ if (nstrict == AIROHA_NUM_QOS_QUEUES - 1)
+ return -EINVAL;
+
+ /* The EN7581 SoC supports a fixed QoS band priority scheme where WRR
+ * queues have lower priority than SP ones,
+ * e.g.: WRR0, WRR1, .., WRRm, SP0, SP1, .., SPn
+ */
+ for (i = 0; i < nstrict; i++) {
+ if (p->priomap[p->bands - i - 1] != i)
+ return -EINVAL;
+ }
+
+ for (i = 0; i < p->bands - nstrict; i++) {
+ if (p->priomap[i] != nstrict + i)
+ return -EINVAL;
+
+ w[i] = p->weights[nstrict + i];
+ }
+
+ if (!nstrict)
+ mode = TC_SCH_WRR8;
+ else if (nstrict < AIROHA_NUM_QOS_QUEUES - 1)
+ mode = nstrict + 1;
+
+ return airoha_qdma_set_chan_tx_sched(port, channel, mode, w,
+ ARRAY_SIZE(w));
+}
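
To make the validation rules above concrete, here is a hedged sketch of an ETS configuration that passes all the checks; the field names come from struct tc_ets_qopt_offload_replace_params used above, while the concrete quanta and weight values are purely illustrative.

/* Sketch only: an ETS configuration accepted by the checks above.
 * Eight bands, the first two strict (quanta == 0), bands 2..7 WRR.
 * Priorities 7 and 6 map to the strict bands, priorities 0..5 to the
 * WRR bands, so the hw gets mode = nstrict + 1 = 3 (TC_SCH_WRR6) and
 * per-queue weights w[i] = weights[2 + i].
 */
static void example_ets_params(struct tc_ets_qopt_offload_replace_params *p)
{
	int i;

	p->bands = 8;
	p->quanta[0] = 0;		/* strict band 0 */
	p->quanta[1] = 0;		/* strict band 1 */
	p->priomap[7] = 0;		/* priomap[bands - 1 - i] == i */
	p->priomap[6] = 1;
	for (i = 0; i < 6; i++) {	/* WRR bands 2..7 */
		p->quanta[2 + i] = 1514;
		p->weights[2 + i] = 10 + i;
		p->priomap[i] = 2 + i;	/* priomap[i] == nstrict + i */
	}
}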
+
+static int airoha_qdma_get_tx_ets_stats(struct airoha_gdm_port *port,
+ int channel,
+ struct tc_ets_qopt_offload *opt)
+{
+ u64 cpu_tx_packets = airoha_qdma_rr(port->qdma,
+ REG_CNTR_VAL(channel << 1));
+ u64 fwd_tx_packets = airoha_qdma_rr(port->qdma,
+ REG_CNTR_VAL((channel << 1) + 1));
+ u64 tx_packets = (cpu_tx_packets - port->cpu_tx_packets) +
+ (fwd_tx_packets - port->fwd_tx_packets);
+ _bstats_update(opt->stats.bstats, 0, tx_packets);
+
+ port->cpu_tx_packets = cpu_tx_packets;
+ port->fwd_tx_packets = fwd_tx_packets;
+
+ return 0;
+}
+
+static int airoha_tc_setup_qdisc_ets(struct airoha_gdm_port *port,
+ struct tc_ets_qopt_offload *opt)
+{
+ int channel;
+
+ if (opt->parent == TC_H_ROOT)
+ return -EINVAL;
+
+ channel = TC_H_MAJ(opt->handle) >> 16;
+ channel = channel % AIROHA_NUM_QOS_CHANNELS;
+
+ switch (opt->command) {
+ case TC_ETS_REPLACE:
+ return airoha_qdma_set_tx_ets_sched(port, channel, opt);
+ case TC_ETS_DESTROY:
+ /* PRIO is the default qdisc scheduler */
+ return airoha_qdma_set_tx_prio_sched(port, channel);
+ case TC_ETS_STATS:
+ return airoha_qdma_get_tx_ets_stats(port, channel, opt);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int airoha_qdma_get_rl_param(struct airoha_qdma *qdma, int queue_id,
+ u32 addr, enum trtcm_param_type param,
+ u32 *val_low, u32 *val_high)
+{
+ u32 idx = QDMA_METER_IDX(queue_id), group = QDMA_METER_GROUP(queue_id);
+ u32 val, config = FIELD_PREP(RATE_LIMIT_PARAM_TYPE_MASK, param) |
+ FIELD_PREP(RATE_LIMIT_METER_GROUP_MASK, group) |
+ FIELD_PREP(RATE_LIMIT_PARAM_INDEX_MASK, idx);
+
+ airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config);
+ if (read_poll_timeout(airoha_qdma_rr, val,
+ val & RATE_LIMIT_PARAM_RW_DONE_MASK,
+ USEC_PER_MSEC, 10 * USEC_PER_MSEC, true, qdma,
+ REG_TRTCM_CFG_PARAM(addr)))
+ return -ETIMEDOUT;
+
+ *val_low = airoha_qdma_rr(qdma, REG_TRTCM_DATA_LOW(addr));
+ if (val_high)
+ *val_high = airoha_qdma_rr(qdma, REG_TRTCM_DATA_HIGH(addr));
+
+ return 0;
+}
+
+static int airoha_qdma_set_rl_param(struct airoha_qdma *qdma, int queue_id,
+ u32 addr, enum trtcm_param_type param,
+ u32 val)
+{
+ u32 idx = QDMA_METER_IDX(queue_id), group = QDMA_METER_GROUP(queue_id);
+ u32 config = RATE_LIMIT_PARAM_RW_MASK |
+ FIELD_PREP(RATE_LIMIT_PARAM_TYPE_MASK, param) |
+ FIELD_PREP(RATE_LIMIT_METER_GROUP_MASK, group) |
+ FIELD_PREP(RATE_LIMIT_PARAM_INDEX_MASK, idx);
+
+ airoha_qdma_wr(qdma, REG_TRTCM_DATA_LOW(addr), val);
+ airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config);
+
+ return read_poll_timeout(airoha_qdma_rr, val,
+ val & RATE_LIMIT_PARAM_RW_DONE_MASK,
+ USEC_PER_MSEC, 10 * USEC_PER_MSEC, true,
+ qdma, REG_TRTCM_CFG_PARAM(addr));
+}
+
+static int airoha_qdma_set_rl_config(struct airoha_qdma *qdma, int queue_id,
+ u32 addr, bool enable, u32 enable_mask)
+{
+ u32 val;
+ int err;
+
+ err = airoha_qdma_get_rl_param(qdma, queue_id, addr, TRTCM_MISC_MODE,
+ &val, NULL);
+ if (err)
+ return err;
+
+ val = enable ? val | enable_mask : val & ~enable_mask;
+
+ return airoha_qdma_set_rl_param(qdma, queue_id, addr, TRTCM_MISC_MODE,
+ val);
+}
+
+static int airoha_qdma_set_rl_token_bucket(struct airoha_qdma *qdma,
+ int queue_id, u32 rate_val,
+ u32 bucket_size)
+{
+ u32 val, config, tick, unit, rate, rate_frac;
+ int err;
+
+ err = airoha_qdma_get_rl_param(qdma, queue_id, REG_INGRESS_TRTCM_CFG,
+ TRTCM_MISC_MODE, &config, NULL);
+ if (err)
+ return err;
+
+ val = airoha_qdma_rr(qdma, REG_INGRESS_TRTCM_CFG);
+ tick = FIELD_GET(INGRESS_FAST_TICK_MASK, val);
+ if (config & TRTCM_TICK_SEL)
+ tick *= FIELD_GET(INGRESS_SLOW_TICK_RATIO_MASK, val);
+ if (!tick)
+ return -EINVAL;
+
+ unit = (config & TRTCM_PKT_MODE) ? 1000000 / tick : 8000 / tick;
+ if (!unit)
+ return -EINVAL;
+
+ rate = rate_val / unit;
+ rate_frac = rate_val % unit;
+ rate_frac = FIELD_PREP(TRTCM_TOKEN_RATE_MASK, rate_frac) / unit;
+ rate = FIELD_PREP(TRTCM_TOKEN_RATE_MASK, rate) |
+ FIELD_PREP(TRTCM_TOKEN_RATE_FRACTION_MASK, rate_frac);
+
+ err = airoha_qdma_set_rl_param(qdma, queue_id, REG_INGRESS_TRTCM_CFG,
+ TRTCM_TOKEN_RATE_MODE, rate);
+ if (err)
+ return err;
+
+ val = bucket_size;
+ if (!(config & TRTCM_PKT_MODE))
+ val = max_t(u32, val, MIN_TOKEN_SIZE);
+ val = min_t(u32, __fls(val), MAX_TOKEN_SIZE_OFFSET);
+
+ return airoha_qdma_set_rl_param(qdma, queue_id, REG_INGRESS_TRTCM_CFG,
+ TRTCM_BUCKETSIZE_SHIFT_MODE, val);
+}
+
+static int airoha_qdma_init_rl_config(struct airoha_qdma *qdma, int queue_id,
+ bool enable, enum trtcm_unit_type unit)
+{
+ bool tick_sel = queue_id == 0 || queue_id == 2 || queue_id == 8;
+ enum trtcm_param mode = TRTCM_METER_MODE;
+ int err;
+
+ mode |= unit == TRTCM_PACKET_UNIT ? TRTCM_PKT_MODE : 0;
+ err = airoha_qdma_set_rl_config(qdma, queue_id, REG_INGRESS_TRTCM_CFG,
+ enable, mode);
+ if (err)
+ return err;
+
+ return airoha_qdma_set_rl_config(qdma, queue_id, REG_INGRESS_TRTCM_CFG,
+ tick_sel, TRTCM_TICK_SEL);
+}
+
+static int airoha_qdma_get_trtcm_param(struct airoha_qdma *qdma, int channel,
+ u32 addr, enum trtcm_param_type param,
+ enum trtcm_mode_type mode,
+ u32 *val_low, u32 *val_high)
+{
+ u32 idx = QDMA_METER_IDX(channel), group = QDMA_METER_GROUP(channel);
+ u32 val, config = FIELD_PREP(TRTCM_PARAM_TYPE_MASK, param) |
+ FIELD_PREP(TRTCM_METER_GROUP_MASK, group) |
+ FIELD_PREP(TRTCM_PARAM_INDEX_MASK, idx) |
+ FIELD_PREP(TRTCM_PARAM_RATE_TYPE_MASK, mode);
+
+ airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config);
+ if (read_poll_timeout(airoha_qdma_rr, val,
+ val & TRTCM_PARAM_RW_DONE_MASK,
+ USEC_PER_MSEC, 10 * USEC_PER_MSEC, true,
+ qdma, REG_TRTCM_CFG_PARAM(addr)))
+ return -ETIMEDOUT;
+
+ *val_low = airoha_qdma_rr(qdma, REG_TRTCM_DATA_LOW(addr));
+ if (val_high)
+ *val_high = airoha_qdma_rr(qdma, REG_TRTCM_DATA_HIGH(addr));
+
+ return 0;
+}
+
+static int airoha_qdma_set_trtcm_param(struct airoha_qdma *qdma, int channel,
+ u32 addr, enum trtcm_param_type param,
+ enum trtcm_mode_type mode, u32 val)
+{
+ u32 idx = QDMA_METER_IDX(channel), group = QDMA_METER_GROUP(channel);
+ u32 config = TRTCM_PARAM_RW_MASK |
+ FIELD_PREP(TRTCM_PARAM_TYPE_MASK, param) |
+ FIELD_PREP(TRTCM_METER_GROUP_MASK, group) |
+ FIELD_PREP(TRTCM_PARAM_INDEX_MASK, idx) |
+ FIELD_PREP(TRTCM_PARAM_RATE_TYPE_MASK, mode);
+
+ airoha_qdma_wr(qdma, REG_TRTCM_DATA_LOW(addr), val);
+ airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config);
+
+ return read_poll_timeout(airoha_qdma_rr, val,
+ val & TRTCM_PARAM_RW_DONE_MASK,
+ USEC_PER_MSEC, 10 * USEC_PER_MSEC, true,
+ qdma, REG_TRTCM_CFG_PARAM(addr));
+}
+
+static int airoha_qdma_set_trtcm_config(struct airoha_qdma *qdma, int channel,
+ u32 addr, enum trtcm_mode_type mode,
+ bool enable, u32 enable_mask)
+{
+ u32 val;
+
+ if (airoha_qdma_get_trtcm_param(qdma, channel, addr, TRTCM_MISC_MODE,
+ mode, &val, NULL))
+ return -EINVAL;
+
+ val = enable ? val | enable_mask : val & ~enable_mask;
+
+ return airoha_qdma_set_trtcm_param(qdma, channel, addr, TRTCM_MISC_MODE,
+ mode, val);
+}
+
+static int airoha_qdma_set_trtcm_token_bucket(struct airoha_qdma *qdma,
+ int channel, u32 addr,
+ enum trtcm_mode_type mode,
+ u32 rate_val, u32 bucket_size)
+{
+ u32 val, config, tick, unit, rate, rate_frac;
+ int err;
+
+ if (airoha_qdma_get_trtcm_param(qdma, channel, addr, TRTCM_MISC_MODE,
+ mode, &config, NULL))
+ return -EINVAL;
+
+ val = airoha_qdma_rr(qdma, addr);
+ tick = FIELD_GET(INGRESS_FAST_TICK_MASK, val);
+ if (config & TRTCM_TICK_SEL)
+ tick *= FIELD_GET(INGRESS_SLOW_TICK_RATIO_MASK, val);
+ if (!tick)
+ return -EINVAL;
+
+ unit = (config & TRTCM_PKT_MODE) ? 1000000 / tick : 8000 / tick;
+ if (!unit)
+ return -EINVAL;
+
+ rate = rate_val / unit;
+ rate_frac = rate_val % unit;
+ rate_frac = FIELD_PREP(TRTCM_TOKEN_RATE_MASK, rate_frac) / unit;
+ rate = FIELD_PREP(TRTCM_TOKEN_RATE_MASK, rate) |
+ FIELD_PREP(TRTCM_TOKEN_RATE_FRACTION_MASK, rate_frac);
+
+ err = airoha_qdma_set_trtcm_param(qdma, channel, addr,
+ TRTCM_TOKEN_RATE_MODE, mode, rate);
+ if (err)
+ return err;
+
+ val = max_t(u32, bucket_size, MIN_TOKEN_SIZE);
+ val = min_t(u32, __fls(val), MAX_TOKEN_SIZE_OFFSET);
+
+ return airoha_qdma_set_trtcm_param(qdma, channel, addr,
+ TRTCM_BUCKETSIZE_SHIFT_MODE,
+ mode, val);
+}
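
A minimal sketch of the token-rate and bucket-size encoding performed above (byte mode); TRTCM_TOKEN_RATE_MASK, TRTCM_TOKEN_RATE_FRACTION_MASK, MIN_TOKEN_SIZE and MAX_TOKEN_SIZE_OFFSET are the definitions from airoha_eth.h below, and 'unit' is the amount of rate carried by one token per tick as computed from the fast/slow tick configuration.

/* Sketch only: the rate is split into an integer number of units plus a
 * 6-bit fraction, and the bucket size becomes a power-of-two shift.
 * E.g. bucket_size = 10000 bytes: max(10000, MIN_TOKEN_SIZE) = 10000,
 * __fls(10000) = 13, capped at MAX_TOKEN_SIZE_OFFSET (17) -> shift 13.
 */
static void example_trtcm_encode(u32 rate_val, u32 unit, u32 bucket_size,
				 u32 *rate_field, u32 *bucket_shift)
{
	u32 rate = rate_val / unit;
	u32 frac = FIELD_PREP(TRTCM_TOKEN_RATE_MASK, rate_val % unit) / unit;

	*rate_field = FIELD_PREP(TRTCM_TOKEN_RATE_MASK, rate) |
		      FIELD_PREP(TRTCM_TOKEN_RATE_FRACTION_MASK, frac);
	*bucket_shift = min_t(u32, __fls(max_t(u32, bucket_size, MIN_TOKEN_SIZE)),
			      MAX_TOKEN_SIZE_OFFSET);
}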
+
+static int airoha_qdma_set_tx_rate_limit(struct airoha_gdm_port *port,
+ int channel, u32 rate,
+ u32 bucket_size)
+{
+ int i, err;
+
+ for (i = 0; i <= TRTCM_PEAK_MODE; i++) {
+ err = airoha_qdma_set_trtcm_config(port->qdma, channel,
+ REG_EGRESS_TRTCM_CFG, i,
+ !!rate, TRTCM_METER_MODE);
+ if (err)
+ return err;
+
+ err = airoha_qdma_set_trtcm_token_bucket(port->qdma, channel,
+ REG_EGRESS_TRTCM_CFG,
+ i, rate, bucket_size);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int airoha_tc_htb_alloc_leaf_queue(struct airoha_gdm_port *port,
+ struct tc_htb_qopt_offload *opt)
+{
+ u32 channel = TC_H_MIN(opt->classid) % AIROHA_NUM_QOS_CHANNELS;
+ u32 rate = div_u64(opt->rate, 1000) << 3; /* kbps */
+ struct net_device *dev = port->dev;
+ int num_tx_queues = dev->real_num_tx_queues;
+ int err;
+
+ if (opt->parent_classid != TC_HTB_CLASSID_ROOT) {
+ NL_SET_ERR_MSG_MOD(opt->extack, "invalid parent classid");
+ return -EINVAL;
+ }
+
+ err = airoha_qdma_set_tx_rate_limit(port, channel, rate, opt->quantum);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(opt->extack,
+ "failed configuring htb offload");
+ return err;
+ }
+
+ if (opt->command == TC_HTB_NODE_MODIFY)
+ return 0;
+
+ err = netif_set_real_num_tx_queues(dev, num_tx_queues + 1);
+ if (err) {
+ airoha_qdma_set_tx_rate_limit(port, channel, 0, opt->quantum);
+ NL_SET_ERR_MSG_MOD(opt->extack,
+ "failed setting real_num_tx_queues");
+ return err;
+ }
+
+ set_bit(channel, port->qos_sq_bmap);
+ opt->qid = AIROHA_NUM_TX_RING + channel;
+
+ return 0;
+}
+
+static int airoha_qdma_set_rx_meter(struct airoha_gdm_port *port,
+ u32 rate, u32 bucket_size,
+ enum trtcm_unit_type unit_type)
+{
+ struct airoha_qdma *qdma = port->qdma;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
+ int err;
+
+ if (!qdma->q_rx[i].ndesc)
+ continue;
+
+ err = airoha_qdma_init_rl_config(qdma, i, !!rate, unit_type);
+ if (err)
+ return err;
+
+ err = airoha_qdma_set_rl_token_bucket(qdma, i, rate,
+ bucket_size);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int airoha_tc_matchall_act_validate(struct tc_cls_matchall_offload *f)
+{
+ const struct flow_action *actions = &f->rule->action;
+ const struct flow_action_entry *act;
+
+ if (!flow_action_has_entries(actions)) {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "filter run with no actions");
+ return -EINVAL;
+ }
+
+ if (!flow_offload_has_one_action(actions)) {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "only one action per filter is supported");
+ return -EOPNOTSUPP;
+ }
+
+ act = &actions->entries[0];
+ if (act->id != FLOW_ACTION_POLICE) {
+ NL_SET_ERR_MSG_MOD(f->common.extack, "unsupported action");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "invalid exceed action id");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "invalid notexceed action id");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
+ !flow_action_is_last_entry(actions, act)) {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "action accept must be last");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.peakrate_bytes_ps || act->police.avrate ||
+ act->police.overhead || act->police.mtu) {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "peakrate/avrate/overhead/mtu unsupported");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int airoha_dev_tc_matchall(struct net_device *dev,
+ struct tc_cls_matchall_offload *f)
+{
+ enum trtcm_unit_type unit_type = TRTCM_BYTE_UNIT;
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ u32 rate = 0, bucket_size = 0;
+
+ switch (f->command) {
+ case TC_CLSMATCHALL_REPLACE: {
+ const struct flow_action_entry *act;
+ int err;
+
+ err = airoha_tc_matchall_act_validate(f);
+ if (err)
+ return err;
+
+ act = &f->rule->action.entries[0];
+ if (act->police.rate_pkt_ps) {
+ rate = act->police.rate_pkt_ps;
+ bucket_size = act->police.burst_pkt;
+ unit_type = TRTCM_PACKET_UNIT;
+ } else {
+ rate = div_u64(act->police.rate_bytes_ps, 1000);
+ rate = rate << 3; /* Kbps */
+ bucket_size = act->police.burst;
+ }
+ fallthrough;
+ }
+ case TC_CLSMATCHALL_DESTROY:
+ return airoha_qdma_set_rx_meter(port, rate, bucket_size,
+ unit_type);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
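
A quick numeric check of the policer rate conversion above: in byte mode the shaper is programmed in kbit/s, so the conversion is divide by 1000 and multiply by 8 (the `<< 3`).

/* Sketch only: act->police.rate_bytes_ps converted to kbit/s.
 * E.g. 125000000 bytes/s (1 Gbit/s): 125000000 / 1000 = 125000,
 * then << 3 gives 1000000 kbit/s.
 */
static u32 example_police_rate_kbps(u64 rate_bytes_ps)
{
	return div_u64(rate_bytes_ps, 1000) << 3;
}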
+
+static int airoha_dev_setup_tc_block_cb(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
+{
+ struct net_device *dev = cb_priv;
+
+ if (!tc_can_offload(dev))
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return airoha_ppe_setup_tc_block_cb(dev, type_data);
+ case TC_SETUP_CLSMATCHALL:
+ return airoha_dev_tc_matchall(dev, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int airoha_dev_setup_tc_block(struct airoha_gdm_port *port,
+ struct flow_block_offload *f)
+{
+ flow_setup_cb_t *cb = airoha_dev_setup_tc_block_cb;
+ static LIST_HEAD(block_cb_list);
+ struct flow_block_cb *block_cb;
+
+ if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ return -EOPNOTSUPP;
+
+ f->driver_block_list = &block_cb_list;
+ switch (f->command) {
+ case FLOW_BLOCK_BIND:
+ block_cb = flow_block_cb_lookup(f->block, cb, port->dev);
+ if (block_cb) {
+ flow_block_cb_incref(block_cb);
+ return 0;
+ }
+ block_cb = flow_block_cb_alloc(cb, port->dev, port->dev, NULL);
+ if (IS_ERR(block_cb))
+ return PTR_ERR(block_cb);
+
+ flow_block_cb_incref(block_cb);
+ flow_block_cb_add(block_cb, f);
+ list_add_tail(&block_cb->driver_list, &block_cb_list);
+ return 0;
+ case FLOW_BLOCK_UNBIND:
+ block_cb = flow_block_cb_lookup(f->block, cb, port->dev);
+ if (!block_cb)
+ return -ENOENT;
+
+ if (!flow_block_cb_decref(block_cb)) {
+ flow_block_cb_remove(block_cb, f);
+ list_del(&block_cb->driver_list);
+ }
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void airoha_tc_remove_htb_queue(struct airoha_gdm_port *port, int queue)
+{
+ struct net_device *dev = port->dev;
+
+ netif_set_real_num_tx_queues(dev, dev->real_num_tx_queues - 1);
+ airoha_qdma_set_tx_rate_limit(port, queue + 1, 0, 0);
+ clear_bit(queue, port->qos_sq_bmap);
+}
+
+static int airoha_tc_htb_delete_leaf_queue(struct airoha_gdm_port *port,
+ struct tc_htb_qopt_offload *opt)
+{
+ u32 channel = TC_H_MIN(opt->classid) % AIROHA_NUM_QOS_CHANNELS;
+
+ if (!test_bit(channel, port->qos_sq_bmap)) {
+ NL_SET_ERR_MSG_MOD(opt->extack, "invalid queue id");
+ return -EINVAL;
+ }
+
+ airoha_tc_remove_htb_queue(port, channel);
+
+ return 0;
+}
+
+static int airoha_tc_htb_destroy(struct airoha_gdm_port *port)
+{
+ int q;
+
+ for_each_set_bit(q, port->qos_sq_bmap, AIROHA_NUM_QOS_CHANNELS)
+ airoha_tc_remove_htb_queue(port, q);
+
+ return 0;
+}
+
+static int airoha_tc_get_htb_get_leaf_queue(struct airoha_gdm_port *port,
+ struct tc_htb_qopt_offload *opt)
+{
+ u32 channel = TC_H_MIN(opt->classid) % AIROHA_NUM_QOS_CHANNELS;
+
+ if (!test_bit(channel, port->qos_sq_bmap)) {
+ NL_SET_ERR_MSG_MOD(opt->extack, "invalid queue id");
+ return -EINVAL;
+ }
+
+ opt->qid = AIROHA_NUM_TX_RING + channel;
+
+ return 0;
+}
+
+static int airoha_tc_setup_qdisc_htb(struct airoha_gdm_port *port,
+ struct tc_htb_qopt_offload *opt)
+{
+ switch (opt->command) {
+ case TC_HTB_CREATE:
+ break;
+ case TC_HTB_DESTROY:
+ return airoha_tc_htb_destroy(port);
+ case TC_HTB_NODE_MODIFY:
+ case TC_HTB_LEAF_ALLOC_QUEUE:
+ return airoha_tc_htb_alloc_leaf_queue(port, opt);
+ case TC_HTB_LEAF_DEL:
+ case TC_HTB_LEAF_DEL_LAST:
+ case TC_HTB_LEAF_DEL_LAST_FORCE:
+ return airoha_tc_htb_delete_leaf_queue(port, opt);
+ case TC_HTB_LEAF_QUERY_QUEUE:
+ return airoha_tc_get_htb_get_leaf_queue(port, opt);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int airoha_dev_tc_setup(struct net_device *dev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct airoha_gdm_port *port = netdev_priv(dev);
+
+ switch (type) {
+ case TC_SETUP_QDISC_ETS:
+ return airoha_tc_setup_qdisc_ets(port, type_data);
+ case TC_SETUP_QDISC_HTB:
+ return airoha_tc_setup_qdisc_htb(port, type_data);
+ case TC_SETUP_BLOCK:
+ case TC_SETUP_FT:
+ return airoha_dev_setup_tc_block(port, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static const struct net_device_ops airoha_netdev_ops = {
.ndo_init = airoha_dev_init,
.ndo_open = airoha_dev_open,
.ndo_stop = airoha_dev_stop,
+ .ndo_change_mtu = airoha_dev_change_mtu,
+ .ndo_select_queue = airoha_dev_select_queue,
.ndo_start_xmit = airoha_dev_xmit,
.ndo_get_stats64 = airoha_dev_get_stats64,
.ndo_set_mac_address = airoha_dev_set_macaddr,
+ .ndo_setup_tc = airoha_dev_tc_setup,
};
static const struct ethtool_ops airoha_ethtool_ops = {
@@ -2612,13 +2768,58 @@ static const struct ethtool_ops airoha_ethtool_ops = {
.get_rmon_stats = airoha_ethtool_get_rmon_stats,
};
-static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np)
+static int airoha_metadata_dst_alloc(struct airoha_gdm_port *port)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(port->dsa_meta); i++) {
+ struct metadata_dst *md_dst;
+
+ md_dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
+ GFP_KERNEL);
+ if (!md_dst)
+ return -ENOMEM;
+
+ md_dst->u.port_info.port_id = i;
+ port->dsa_meta[i] = md_dst;
+ }
+
+ return 0;
+}
+
+static void airoha_metadata_dst_free(struct airoha_gdm_port *port)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(port->dsa_meta); i++) {
+ if (!port->dsa_meta[i])
+ continue;
+
+ metadata_dst_free(port->dsa_meta[i]);
+ }
+}
+
+bool airoha_is_valid_gdm_port(struct airoha_eth *eth,
+ struct airoha_gdm_port *port)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
+ if (eth->ports[i] == port)
+ return true;
+ }
+
+ return false;
+}
+
+static int airoha_alloc_gdm_port(struct airoha_eth *eth,
+ struct device_node *np, int index)
{
const __be32 *id_ptr = of_get_property(np, "reg", NULL);
struct airoha_gdm_port *port;
struct airoha_qdma *qdma;
struct net_device *dev;
- int err, index;
+ int err, p;
u32 id;
if (!id_ptr) {
@@ -2627,20 +2828,21 @@ static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np)
}
id = be32_to_cpup(id_ptr);
- index = id - 1;
+ p = id - 1;
if (!id || id > ARRAY_SIZE(eth->ports)) {
dev_err(eth->dev, "invalid gdm port id: %d\n", id);
return -EINVAL;
}
- if (eth->ports[index]) {
+ if (eth->ports[p]) {
dev_err(eth->dev, "duplicate gdm port id: %d\n", id);
return -EINVAL;
}
dev = devm_alloc_etherdev_mqs(eth->dev, sizeof(*port),
- AIROHA_NUM_TX_RING, AIROHA_NUM_RX_RING);
+ AIROHA_NUM_NETDEV_TX_RINGS,
+ AIROHA_NUM_RX_RING);
if (!dev) {
dev_err(eth->dev, "alloc_etherdev failed\n");
return -ENOMEM;
@@ -2653,12 +2855,19 @@ static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np)
dev->watchdog_timeo = 5 * HZ;
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
NETIF_F_TSO6 | NETIF_F_IPV6_CSUM |
- NETIF_F_SG | NETIF_F_TSO;
+ NETIF_F_SG | NETIF_F_TSO |
+ NETIF_F_HW_TC;
dev->features |= dev->hw_features;
+ dev->vlan_features = dev->hw_features;
dev->dev.of_node = np;
- dev->irq = qdma->irq;
+ dev->irq = qdma->irq_banks[0].irq;
SET_NETDEV_DEV(dev, eth->dev);
+ /* reserve hw queues for HTB offloading */
+ err = netif_set_real_num_tx_queues(dev, AIROHA_NUM_TX_RING);
+ if (err)
+ return err;
+
err = of_get_ethdev_address(np, dev);
if (err) {
if (err == -EPROBE_DEFER)
@@ -2675,9 +2884,21 @@ static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np)
port->qdma = qdma;
port->dev = dev;
port->id = id;
- eth->ports[index] = port;
+ eth->ports[p] = port;
- return register_netdev(dev);
+ err = airoha_metadata_dst_alloc(port);
+ if (err)
+ return err;
+
+ err = register_netdev(dev);
+ if (err)
+ goto free_metadata_dst;
+
+ return 0;
+
+free_metadata_dst:
+ airoha_metadata_dst_free(port);
+ return err;
}
static int airoha_probe(struct platform_device *pdev)
@@ -2738,11 +2959,12 @@ static int airoha_probe(struct platform_device *pdev)
err = airoha_hw_init(pdev, eth);
if (err)
- goto error;
+ goto error_hw_cleanup;
for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
airoha_qdma_start_napi(&eth->qdma[i]);
+ i = 0;
for_each_child_of_node(pdev->dev.of_node, np) {
if (!of_device_is_compatible(np, "airoha,eth-mac"))
continue;
@@ -2750,24 +2972,29 @@ static int airoha_probe(struct platform_device *pdev)
if (!of_device_is_available(np))
continue;
- err = airoha_alloc_gdm_port(eth, np);
+ err = airoha_alloc_gdm_port(eth, np, i++);
if (err) {
of_node_put(np);
- goto error;
+ goto error_napi_stop;
}
}
return 0;
-error:
+error_napi_stop:
+ for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
+ airoha_qdma_stop_napi(&eth->qdma[i]);
+error_hw_cleanup:
for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
airoha_hw_cleanup(&eth->qdma[i]);
for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
struct airoha_gdm_port *port = eth->ports[i];
- if (port && port->dev->reg_state == NETREG_REGISTERED)
+ if (port && port->dev->reg_state == NETREG_REGISTERED) {
unregister_netdev(port->dev);
+ airoha_metadata_dst_free(port);
+ }
}
free_netdev(eth->napi_dev);
platform_set_drvdata(pdev, NULL);
@@ -2780,8 +3007,10 @@ static void airoha_remove(struct platform_device *pdev)
struct airoha_eth *eth = platform_get_drvdata(pdev);
int i;
- for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
+ for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) {
+ airoha_qdma_stop_napi(&eth->qdma[i]);
airoha_hw_cleanup(&eth->qdma[i]);
+ }
for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
struct airoha_gdm_port *port = eth->ports[i];
@@ -2791,9 +3020,11 @@ static void airoha_remove(struct platform_device *pdev)
airoha_dev_stop(port->dev);
unregister_netdev(port->dev);
+ airoha_metadata_dst_free(port);
}
free_netdev(eth->napi_dev);
+ airoha_ppe_deinit(eth);
platform_set_drvdata(pdev, NULL);
}
diff --git a/drivers/net/ethernet/airoha/airoha_eth.h b/drivers/net/ethernet/airoha/airoha_eth.h
new file mode 100644
index 000000000000..a970b789cf23
--- /dev/null
+++ b/drivers/net/ethernet/airoha/airoha_eth.h
@@ -0,0 +1,632 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024 AIROHA Inc
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#ifndef AIROHA_ETH_H
+#define AIROHA_ETH_H
+
+#include <linux/debugfs.h>
+#include <linux/etherdevice.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/reset.h>
+#include <net/dsa.h>
+
+#define AIROHA_MAX_NUM_GDM_PORTS 4
+#define AIROHA_MAX_NUM_QDMA 2
+#define AIROHA_MAX_NUM_IRQ_BANKS 4
+#define AIROHA_MAX_DSA_PORTS 7
+#define AIROHA_MAX_NUM_RSTS 3
+#define AIROHA_MAX_NUM_XSI_RSTS 5
+#define AIROHA_MAX_MTU 9216
+#define AIROHA_MAX_PACKET_SIZE 2048
+#define AIROHA_NUM_QOS_CHANNELS 4
+#define AIROHA_NUM_QOS_QUEUES 8
+#define AIROHA_NUM_TX_RING 32
+#define AIROHA_NUM_RX_RING 32
+#define AIROHA_NUM_NETDEV_TX_RINGS (AIROHA_NUM_TX_RING + \
+ AIROHA_NUM_QOS_CHANNELS)
+#define AIROHA_FE_MC_MAX_VLAN_TABLE 64
+#define AIROHA_FE_MC_MAX_VLAN_PORT 16
+#define AIROHA_NUM_TX_IRQ 2
+#define HW_DSCP_NUM 2048
+#define IRQ_QUEUE_LEN(_n) ((_n) ? 1024 : 2048)
+#define TX_DSCP_NUM 1024
+#define RX_DSCP_NUM(_n) \
+ ((_n) == 2 ? 128 : \
+ (_n) == 11 ? 128 : \
+ (_n) == 15 ? 128 : \
+ (_n) == 0 ? 1024 : 16)
+
+#define PSE_RSV_PAGES 128
+#define PSE_QUEUE_RSV_PAGES 64
+
+#define QDMA_METER_IDX(_n) ((_n) & 0xff)
+#define QDMA_METER_GROUP(_n) (((_n) >> 8) & 0x3)
+
+#define PPE_NUM 2
+#define PPE1_SRAM_NUM_ENTRIES (8 * 1024)
+#define PPE_SRAM_NUM_ENTRIES (2 * PPE1_SRAM_NUM_ENTRIES)
+#ifdef CONFIG_NET_AIROHA_FLOW_STATS
+#define PPE1_STATS_NUM_ENTRIES (4 * 1024)
+#else
+#define PPE1_STATS_NUM_ENTRIES 0
+#endif /* CONFIG_NET_AIROHA_FLOW_STATS */
+#define PPE_STATS_NUM_ENTRIES (2 * PPE1_STATS_NUM_ENTRIES)
+#define PPE1_SRAM_NUM_DATA_ENTRIES (PPE1_SRAM_NUM_ENTRIES - PPE1_STATS_NUM_ENTRIES)
+#define PPE_SRAM_NUM_DATA_ENTRIES (2 * PPE1_SRAM_NUM_DATA_ENTRIES)
+#define PPE_DRAM_NUM_ENTRIES (16 * 1024)
+#define PPE_NUM_ENTRIES (PPE_SRAM_NUM_ENTRIES + PPE_DRAM_NUM_ENTRIES)
+#define PPE_HASH_MASK (PPE_NUM_ENTRIES - 1)
+#define PPE_ENTRY_SIZE 80
+#define PPE_RAM_NUM_ENTRIES_SHIFT(_n) (__ffs((_n) >> 10))
+
+#define MTK_HDR_LEN 4
+#define MTK_HDR_XMIT_TAGGED_TPID_8100 1
+#define MTK_HDR_XMIT_TAGGED_TPID_88A8 2
+
+enum {
+ QDMA_INT_REG_IDX0,
+ QDMA_INT_REG_IDX1,
+ QDMA_INT_REG_IDX2,
+ QDMA_INT_REG_IDX3,
+ QDMA_INT_REG_IDX4,
+ QDMA_INT_REG_MAX
+};
+
+enum {
+ HSGMII_LAN_PCIE0_SRCPORT = 0x16,
+ HSGMII_LAN_PCIE1_SRCPORT,
+ HSGMII_LAN_ETH_SRCPORT,
+ HSGMII_LAN_USB_SRCPORT,
+};
+
+enum {
+ XSI_PCIE0_VIP_PORT_MASK = BIT(22),
+ XSI_PCIE1_VIP_PORT_MASK = BIT(23),
+ XSI_USB_VIP_PORT_MASK = BIT(25),
+ XSI_ETH_VIP_PORT_MASK = BIT(24),
+};
+
+enum {
+ DEV_STATE_INITIALIZED,
+};
+
+enum {
+ CDM_CRSN_QSEL_Q1 = 1,
+ CDM_CRSN_QSEL_Q5 = 5,
+ CDM_CRSN_QSEL_Q6 = 6,
+ CDM_CRSN_QSEL_Q15 = 15,
+};
+
+enum {
+ CRSN_08 = 0x8,
+ CRSN_21 = 0x15, /* KA */
+ CRSN_22 = 0x16, /* hit bind and force route to CPU */
+ CRSN_24 = 0x18,
+ CRSN_25 = 0x19,
+};
+
+enum {
+ FE_PSE_PORT_CDM1,
+ FE_PSE_PORT_GDM1,
+ FE_PSE_PORT_GDM2,
+ FE_PSE_PORT_GDM3,
+ FE_PSE_PORT_PPE1,
+ FE_PSE_PORT_CDM2,
+ FE_PSE_PORT_CDM3,
+ FE_PSE_PORT_CDM4,
+ FE_PSE_PORT_PPE2,
+ FE_PSE_PORT_GDM4,
+ FE_PSE_PORT_CDM5,
+ FE_PSE_PORT_DROP = 0xf,
+};
+
+enum tx_sched_mode {
+ TC_SCH_WRR8,
+ TC_SCH_SP,
+ TC_SCH_WRR7,
+ TC_SCH_WRR6,
+ TC_SCH_WRR5,
+ TC_SCH_WRR4,
+ TC_SCH_WRR3,
+ TC_SCH_WRR2,
+};
+
+enum trtcm_unit_type {
+ TRTCM_BYTE_UNIT,
+ TRTCM_PACKET_UNIT,
+};
+
+enum trtcm_param_type {
+ TRTCM_MISC_MODE, /* meter_en, pps_mode, tick_sel */
+ TRTCM_TOKEN_RATE_MODE,
+ TRTCM_BUCKETSIZE_SHIFT_MODE,
+ TRTCM_BUCKET_COUNTER_MODE,
+};
+
+enum trtcm_mode_type {
+ TRTCM_COMMIT_MODE,
+ TRTCM_PEAK_MODE,
+};
+
+enum trtcm_param {
+ TRTCM_TICK_SEL = BIT(0),
+ TRTCM_PKT_MODE = BIT(1),
+ TRTCM_METER_MODE = BIT(2),
+};
+
+#define MIN_TOKEN_SIZE 4096
+#define MAX_TOKEN_SIZE_OFFSET 17
+#define TRTCM_TOKEN_RATE_MASK GENMASK(23, 6)
+#define TRTCM_TOKEN_RATE_FRACTION_MASK GENMASK(5, 0)
+
+struct airoha_queue_entry {
+ union {
+ void *buf;
+ struct sk_buff *skb;
+ };
+ dma_addr_t dma_addr;
+ u16 dma_len;
+};
+
+struct airoha_queue {
+ struct airoha_qdma *qdma;
+
+ /* protect concurrent queue accesses */
+ spinlock_t lock;
+ struct airoha_queue_entry *entry;
+ struct airoha_qdma_desc *desc;
+ u16 head;
+ u16 tail;
+
+ int queued;
+ int ndesc;
+ int free_thr;
+ int buf_size;
+
+ struct napi_struct napi;
+ struct page_pool *page_pool;
+ struct sk_buff *skb;
+};
+
+struct airoha_tx_irq_queue {
+ struct airoha_qdma *qdma;
+
+ struct napi_struct napi;
+
+ int size;
+ u32 *q;
+};
+
+struct airoha_hw_stats {
+ /* protect concurrent hw_stats accesses */
+ spinlock_t lock;
+ struct u64_stats_sync syncp;
+
+ /* get_stats64 */
+ u64 rx_ok_pkts;
+ u64 tx_ok_pkts;
+ u64 rx_ok_bytes;
+ u64 tx_ok_bytes;
+ u64 rx_multicast;
+ u64 rx_errors;
+ u64 rx_drops;
+ u64 tx_drops;
+ u64 rx_crc_error;
+ u64 rx_over_errors;
+ /* ethtool stats */
+ u64 tx_broadcast;
+ u64 tx_multicast;
+ u64 tx_len[7];
+ u64 rx_broadcast;
+ u64 rx_fragment;
+ u64 rx_jabber;
+ u64 rx_len[7];
+};
+
+enum {
+ PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED = 0x0f,
+};
+
+enum {
+ AIROHA_FOE_STATE_INVALID,
+ AIROHA_FOE_STATE_UNBIND,
+ AIROHA_FOE_STATE_BIND,
+ AIROHA_FOE_STATE_FIN
+};
+
+enum {
+ PPE_PKT_TYPE_IPV4_HNAPT = 0,
+ PPE_PKT_TYPE_IPV4_ROUTE = 1,
+ PPE_PKT_TYPE_BRIDGE = 2,
+ PPE_PKT_TYPE_IPV4_DSLITE = 3,
+ PPE_PKT_TYPE_IPV6_ROUTE_3T = 4,
+ PPE_PKT_TYPE_IPV6_ROUTE_5T = 5,
+ PPE_PKT_TYPE_IPV6_6RD = 7,
+};
+
+#define AIROHA_FOE_MAC_SMAC_ID GENMASK(20, 16)
+#define AIROHA_FOE_MAC_PPPOE_ID GENMASK(15, 0)
+
+struct airoha_foe_mac_info_common {
+ u16 vlan1;
+ u16 etype;
+
+ u32 dest_mac_hi;
+
+ u16 vlan2;
+ u16 dest_mac_lo;
+
+ u32 src_mac_hi;
+};
+
+struct airoha_foe_mac_info {
+ struct airoha_foe_mac_info_common common;
+
+ u16 pppoe_id;
+ u16 src_mac_lo;
+
+ u32 meter;
+};
+
+#define AIROHA_FOE_IB1_UNBIND_PREBIND BIT(24)
+#define AIROHA_FOE_IB1_UNBIND_PACKETS GENMASK(23, 8)
+#define AIROHA_FOE_IB1_UNBIND_TIMESTAMP GENMASK(7, 0)
+
+#define AIROHA_FOE_IB1_BIND_STATIC BIT(31)
+#define AIROHA_FOE_IB1_BIND_UDP BIT(30)
+#define AIROHA_FOE_IB1_BIND_STATE GENMASK(29, 28)
+#define AIROHA_FOE_IB1_BIND_PACKET_TYPE GENMASK(27, 25)
+#define AIROHA_FOE_IB1_BIND_TTL BIT(24)
+#define AIROHA_FOE_IB1_BIND_TUNNEL_DECAP BIT(23)
+#define AIROHA_FOE_IB1_BIND_PPPOE BIT(22)
+#define AIROHA_FOE_IB1_BIND_VPM GENMASK(21, 20)
+#define AIROHA_FOE_IB1_BIND_VLAN_LAYER GENMASK(19, 16)
+#define AIROHA_FOE_IB1_BIND_KEEPALIVE BIT(15)
+#define AIROHA_FOE_IB1_BIND_TIMESTAMP GENMASK(14, 0)
+
+#define AIROHA_FOE_IB2_DSCP GENMASK(31, 24)
+#define AIROHA_FOE_IB2_PORT_AG GENMASK(23, 13)
+#define AIROHA_FOE_IB2_PCP BIT(12)
+#define AIROHA_FOE_IB2_MULTICAST BIT(11)
+#define AIROHA_FOE_IB2_FAST_PATH BIT(10)
+#define AIROHA_FOE_IB2_PSE_QOS BIT(9)
+#define AIROHA_FOE_IB2_PSE_PORT GENMASK(8, 5)
+#define AIROHA_FOE_IB2_NBQ GENMASK(4, 0)
+
+#define AIROHA_FOE_ACTDP GENMASK(31, 24)
+#define AIROHA_FOE_SHAPER_ID GENMASK(23, 16)
+#define AIROHA_FOE_CHANNEL GENMASK(15, 11)
+#define AIROHA_FOE_QID GENMASK(10, 8)
+#define AIROHA_FOE_DPI BIT(7)
+#define AIROHA_FOE_TUNNEL BIT(6)
+#define AIROHA_FOE_TUNNEL_ID GENMASK(5, 0)
+
+#define AIROHA_FOE_TUNNEL_MTU GENMASK(31, 16)
+#define AIROHA_FOE_ACNT_GRP3 GENMASK(15, 9)
+#define AIROHA_FOE_METER_GRP3 GENMASK(8, 5)
+#define AIROHA_FOE_METER_GRP2 GENMASK(4, 0)
+
+struct airoha_foe_bridge {
+ u32 dest_mac_hi;
+
+ u16 src_mac_hi;
+ u16 dest_mac_lo;
+
+ u32 src_mac_lo;
+
+ u32 ib2;
+
+ u32 rsv[5];
+
+ u32 data;
+
+ struct airoha_foe_mac_info l2;
+};
+
+struct airoha_foe_ipv4_tuple {
+ u32 src_ip;
+ u32 dest_ip;
+ union {
+ struct {
+ u16 dest_port;
+ u16 src_port;
+ };
+ struct {
+ u8 protocol;
+ u8 _pad[3]; /* fill with 0xa5a5a5 */
+ };
+ u32 ports;
+ };
+};
+
+struct airoha_foe_ipv4 {
+ struct airoha_foe_ipv4_tuple orig_tuple;
+
+ u32 ib2;
+
+ struct airoha_foe_ipv4_tuple new_tuple;
+
+ u32 rsv[2];
+
+ u32 data;
+
+ struct airoha_foe_mac_info l2;
+};
+
+struct airoha_foe_ipv4_dslite {
+ struct airoha_foe_ipv4_tuple ip4;
+
+ u32 ib2;
+
+ u8 flow_label[3];
+ u8 priority;
+
+ u32 rsv[4];
+
+ u32 data;
+
+ struct airoha_foe_mac_info l2;
+};
+
+struct airoha_foe_ipv6 {
+ u32 src_ip[4];
+ u32 dest_ip[4];
+
+ union {
+ struct {
+ u16 dest_port;
+ u16 src_port;
+ };
+ struct {
+ u8 protocol;
+ u8 pad[3];
+ };
+ u32 ports;
+ };
+
+ u32 data;
+
+ u32 ib2;
+
+ struct airoha_foe_mac_info_common l2;
+
+ u32 meter;
+};
+
+struct airoha_foe_entry {
+ union {
+ struct {
+ u32 ib1;
+ union {
+ struct airoha_foe_bridge bridge;
+ struct airoha_foe_ipv4 ipv4;
+ struct airoha_foe_ipv4_dslite dslite;
+ struct airoha_foe_ipv6 ipv6;
+ DECLARE_FLEX_ARRAY(u32, d);
+ };
+ };
+ u8 data[PPE_ENTRY_SIZE];
+ };
+};
+
+struct airoha_foe_stats {
+ u32 bytes;
+ u32 packets;
+};
+
+struct airoha_foe_stats64 {
+ u64 bytes;
+ u64 packets;
+};
+
+struct airoha_flow_data {
+ struct ethhdr eth;
+
+ union {
+ struct {
+ __be32 src_addr;
+ __be32 dst_addr;
+ } v4;
+
+ struct {
+ struct in6_addr src_addr;
+ struct in6_addr dst_addr;
+ } v6;
+ };
+
+ __be16 src_port;
+ __be16 dst_port;
+
+ struct {
+ struct {
+ u16 id;
+ __be16 proto;
+ } hdr[2];
+ u8 num;
+ } vlan;
+ struct {
+ u16 sid;
+ u8 num;
+ } pppoe;
+};
+
+enum airoha_flow_entry_type {
+ FLOW_TYPE_L4,
+ FLOW_TYPE_L2,
+ FLOW_TYPE_L2_SUBFLOW,
+};
+
+struct airoha_flow_table_entry {
+ union {
+ struct hlist_node list; /* PPE L3 flow entry */
+ struct {
+ struct rhash_head l2_node; /* L2 flow entry */
+ struct hlist_head l2_flows; /* PPE L2 subflows list */
+ };
+ };
+
+ struct airoha_foe_entry data;
+ struct hlist_node l2_subflow_node; /* PPE L2 subflow entry */
+ u32 hash;
+
+ struct airoha_foe_stats64 stats;
+ enum airoha_flow_entry_type type;
+
+ struct rhash_head node;
+ unsigned long cookie;
+};
+
+/* RX queue to IRQ mapping: BIT(q) in IRQ(n) */
+#define RX_IRQ0_BANK_PIN_MASK 0x839f
+#define RX_IRQ1_BANK_PIN_MASK 0x7fe00000
+#define RX_IRQ2_BANK_PIN_MASK 0x20
+#define RX_IRQ3_BANK_PIN_MASK 0x40
+#define RX_IRQ_BANK_PIN_MASK(_n) \
+ (((_n) == 3) ? RX_IRQ3_BANK_PIN_MASK : \
+ ((_n) == 2) ? RX_IRQ2_BANK_PIN_MASK : \
+ ((_n) == 1) ? RX_IRQ1_BANK_PIN_MASK : \
+ RX_IRQ0_BANK_PIN_MASK)
+
+struct airoha_irq_bank {
+ struct airoha_qdma *qdma;
+
+ /* protect concurrent irqmask accesses */
+ spinlock_t irq_lock;
+ u32 irqmask[QDMA_INT_REG_MAX];
+ int irq;
+};
+
+struct airoha_qdma {
+ struct airoha_eth *eth;
+ void __iomem *regs;
+
+ atomic_t users;
+
+ struct airoha_irq_bank irq_banks[AIROHA_MAX_NUM_IRQ_BANKS];
+
+ struct airoha_tx_irq_queue q_tx_irq[AIROHA_NUM_TX_IRQ];
+
+ struct airoha_queue q_tx[AIROHA_NUM_TX_RING];
+ struct airoha_queue q_rx[AIROHA_NUM_RX_RING];
+};
+
+struct airoha_gdm_port {
+ struct airoha_qdma *qdma;
+ struct net_device *dev;
+ int id;
+
+ struct airoha_hw_stats stats;
+
+ DECLARE_BITMAP(qos_sq_bmap, AIROHA_NUM_QOS_CHANNELS);
+
+ /* qos stats counters */
+ u64 cpu_tx_packets;
+ u64 fwd_tx_packets;
+
+ struct metadata_dst *dsa_meta[AIROHA_MAX_DSA_PORTS];
+};
+
+#define AIROHA_RXD4_PPE_CPU_REASON GENMASK(20, 16)
+#define AIROHA_RXD4_FOE_ENTRY GENMASK(15, 0)
+
+struct airoha_ppe {
+ struct airoha_eth *eth;
+
+ void *foe;
+ dma_addr_t foe_dma;
+
+ struct rhashtable l2_flows;
+
+ struct hlist_head *foe_flow;
+ u16 foe_check_time[PPE_NUM_ENTRIES];
+
+ struct airoha_foe_stats *foe_stats;
+ dma_addr_t foe_stats_dma;
+
+ struct dentry *debugfs_dir;
+};
+
+struct airoha_eth {
+ struct device *dev;
+
+ unsigned long state;
+ void __iomem *fe_regs;
+
+ struct airoha_npu __rcu *npu;
+
+ struct airoha_ppe *ppe;
+ struct rhashtable flow_table;
+
+ struct reset_control_bulk_data rsts[AIROHA_MAX_NUM_RSTS];
+ struct reset_control_bulk_data xsi_rsts[AIROHA_MAX_NUM_XSI_RSTS];
+
+ struct net_device *napi_dev;
+
+ struct airoha_qdma qdma[AIROHA_MAX_NUM_QDMA];
+ struct airoha_gdm_port *ports[AIROHA_MAX_NUM_GDM_PORTS];
+};
+
+u32 airoha_rr(void __iomem *base, u32 offset);
+void airoha_wr(void __iomem *base, u32 offset, u32 val);
+u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val);
+
+#define airoha_fe_rr(eth, offset) \
+ airoha_rr((eth)->fe_regs, (offset))
+#define airoha_fe_wr(eth, offset, val) \
+ airoha_wr((eth)->fe_regs, (offset), (val))
+#define airoha_fe_rmw(eth, offset, mask, val) \
+ airoha_rmw((eth)->fe_regs, (offset), (mask), (val))
+#define airoha_fe_set(eth, offset, val) \
+ airoha_rmw((eth)->fe_regs, (offset), 0, (val))
+#define airoha_fe_clear(eth, offset, val) \
+ airoha_rmw((eth)->fe_regs, (offset), (val), 0)
+
+#define airoha_qdma_rr(qdma, offset) \
+ airoha_rr((qdma)->regs, (offset))
+#define airoha_qdma_wr(qdma, offset, val) \
+ airoha_wr((qdma)->regs, (offset), (val))
+#define airoha_qdma_rmw(qdma, offset, mask, val) \
+ airoha_rmw((qdma)->regs, (offset), (mask), (val))
+#define airoha_qdma_set(qdma, offset, val) \
+ airoha_rmw((qdma)->regs, (offset), 0, (val))
+#define airoha_qdma_clear(qdma, offset, val) \
+ airoha_rmw((qdma)->regs, (offset), (val), 0)
+
+static inline bool airhoa_is_lan_gdm_port(struct airoha_gdm_port *port)
+{
+ /* The GDM1 port on the EN7581 SoC is connected to the lan dsa switch.
+ * GDM{2,3,4} can be used as wan ports connected to an external
+ * phy module.
+ */
+ return port->id == 1;
+}
+
+bool airoha_is_valid_gdm_port(struct airoha_eth *eth,
+ struct airoha_gdm_port *port);
+
+void airoha_ppe_check_skb(struct airoha_ppe *ppe, struct sk_buff *skb,
+ u16 hash);
+int airoha_ppe_setup_tc_block_cb(struct net_device *dev, void *type_data);
+int airoha_ppe_init(struct airoha_eth *eth);
+void airoha_ppe_deinit(struct airoha_eth *eth);
+void airoha_ppe_init_upd_mem(struct airoha_gdm_port *port);
+struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe,
+ u32 hash);
+void airoha_ppe_foe_entry_get_stats(struct airoha_ppe *ppe, u32 hash,
+ struct airoha_foe_stats64 *stats);
+
+#ifdef CONFIG_DEBUG_FS
+int airoha_ppe_debugfs_init(struct airoha_ppe *ppe);
+#else
+static inline int airoha_ppe_debugfs_init(struct airoha_ppe *ppe)
+{
+ return 0;
+}
+#endif
+
+#endif /* AIROHA_ETH_H */
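
For reference, a hedged usage sketch of the QDMA register helpers above: both the set and clear variants reduce to the single airoha_rmw() read-modify-write primitive implemented in airoha_eth.c (clear the mask, then OR in the value).

/* Sketch only: toggling a bitfield through the helpers above. */
static inline void example_update_bits(struct airoha_qdma *qdma, u32 offset,
				       u32 bits, bool enable)
{
	if (enable)
		airoha_qdma_set(qdma, offset, bits);	/* rmw(regs, offset, 0, bits) */
	else
		airoha_qdma_clear(qdma, offset, bits);	/* rmw(regs, offset, bits, 0) */
}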
diff --git a/drivers/net/ethernet/airoha/airoha_npu.c b/drivers/net/ethernet/airoha/airoha_npu.c
new file mode 100644
index 000000000000..0e5b8c21b9aa
--- /dev/null
+++ b/drivers/net/ethernet/airoha/airoha_npu.c
@@ -0,0 +1,590 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 AIROHA Inc
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#include <linux/devcoredump.h>
+#include <linux/firmware.h>
+#include <linux/platform_device.h>
+#include <linux/of_net.h>
+#include <linux/of_platform.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/regmap.h>
+
+#include "airoha_eth.h"
+#include "airoha_npu.h"
+
+#define NPU_EN7581_FIRMWARE_DATA "airoha/en7581_npu_data.bin"
+#define NPU_EN7581_FIRMWARE_RV32 "airoha/en7581_npu_rv32.bin"
+#define NPU_EN7581_FIRMWARE_RV32_MAX_SIZE 0x200000
+#define NPU_EN7581_FIRMWARE_DATA_MAX_SIZE 0x10000
+#define NPU_DUMP_SIZE 512
+
+#define REG_NPU_LOCAL_SRAM 0x0
+
+#define NPU_PC_BASE_ADDR 0x305000
+#define REG_PC_DBG(_n) (NPU_PC_BASE_ADDR + ((_n) * 0x100))
+
+#define NPU_CLUSTER_BASE_ADDR 0x306000
+
+#define REG_CR_BOOT_TRIGGER (NPU_CLUSTER_BASE_ADDR + 0x000)
+#define REG_CR_BOOT_CONFIG (NPU_CLUSTER_BASE_ADDR + 0x004)
+#define REG_CR_BOOT_BASE(_n) (NPU_CLUSTER_BASE_ADDR + 0x020 + ((_n) << 2))
+
+#define NPU_MBOX_BASE_ADDR 0x30c000
+
+#define REG_CR_MBOX_INT_STATUS (NPU_MBOX_BASE_ADDR + 0x000)
+#define MBOX_INT_STATUS_MASK BIT(8)
+
+#define REG_CR_MBOX_INT_MASK(_n) (NPU_MBOX_BASE_ADDR + 0x004 + ((_n) << 2))
+#define REG_CR_MBQ0_CTRL(_n) (NPU_MBOX_BASE_ADDR + 0x030 + ((_n) << 2))
+#define REG_CR_MBQ8_CTRL(_n) (NPU_MBOX_BASE_ADDR + 0x0b0 + ((_n) << 2))
+#define REG_CR_NPU_MIB(_n) (NPU_MBOX_BASE_ADDR + 0x140 + ((_n) << 2))
+
+#define NPU_TIMER_BASE_ADDR 0x310100
+#define REG_WDT_TIMER_CTRL(_n) (NPU_TIMER_BASE_ADDR + ((_n) * 0x100))
+#define WDT_EN_MASK BIT(25)
+#define WDT_INTR_MASK BIT(21)
+
+enum {
+ NPU_OP_SET = 1,
+ NPU_OP_SET_NO_WAIT,
+ NPU_OP_GET,
+ NPU_OP_GET_NO_WAIT,
+};
+
+enum {
+ NPU_FUNC_WIFI,
+ NPU_FUNC_TUNNEL,
+ NPU_FUNC_NOTIFY,
+ NPU_FUNC_DBA,
+ NPU_FUNC_TR471,
+ NPU_FUNC_PPE,
+};
+
+enum {
+ NPU_MBOX_ERROR,
+ NPU_MBOX_SUCCESS,
+};
+
+enum {
+ PPE_FUNC_SET_WAIT,
+ PPE_FUNC_SET_WAIT_HWNAT_INIT,
+ PPE_FUNC_SET_WAIT_HWNAT_DEINIT,
+ PPE_FUNC_SET_WAIT_API,
+ PPE_FUNC_SET_WAIT_FLOW_STATS_SETUP,
+};
+
+enum {
+ PPE2_SRAM_SET_ENTRY,
+ PPE_SRAM_SET_ENTRY,
+ PPE_SRAM_SET_VAL,
+ PPE_SRAM_RESET_VAL,
+};
+
+enum {
+ QDMA_WAN_ETHER = 1,
+ QDMA_WAN_PON_XDSL,
+};
+
+#define MBOX_MSG_FUNC_ID GENMASK(14, 11)
+#define MBOX_MSG_STATIC_BUF BIT(5)
+#define MBOX_MSG_STATUS GENMASK(4, 2)
+#define MBOX_MSG_DONE BIT(1)
+#define MBOX_MSG_WAIT_RSP BIT(0)
+
+#define PPE_TYPE_L2B_IPV4 2
+#define PPE_TYPE_L2B_IPV4_IPV6 3
+
+struct ppe_mbox_data {
+ u32 func_type;
+ u32 func_id;
+ union {
+ struct {
+ u8 cds;
+ u8 xpon_hal_api;
+ u8 wan_xsi;
+ u8 ct_joyme4;
+ u8 max_packet;
+ u8 rsv[3];
+ u32 ppe_type;
+ u32 wan_mode;
+ u32 wan_sel;
+ } init_info;
+ struct {
+ u32 func_id;
+ u32 size;
+ u32 data;
+ } set_info;
+ struct {
+ u32 npu_stats_addr;
+ u32 foe_stats_addr;
+ } stats_info;
+ };
+};
+
+static int airoha_npu_send_msg(struct airoha_npu *npu, int func_id,
+ void *p, int size)
+{
+ u16 core = 0; /* FIXME */
+ u32 val, offset = core << 4;
+ dma_addr_t dma_addr;
+ int ret;
+
+ dma_addr = dma_map_single(npu->dev, p, size, DMA_TO_DEVICE);
+ ret = dma_mapping_error(npu->dev, dma_addr);
+ if (ret)
+ return ret;
+
+ spin_lock_bh(&npu->cores[core].lock);
+
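+ /* post the message to the core mailbox: descriptor DMA address,
+ * length, incremented sequence counter and a doorbell write carrying
+ * the target function id
+ */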
+ regmap_write(npu->regmap, REG_CR_MBQ0_CTRL(0) + offset, dma_addr);
+ regmap_write(npu->regmap, REG_CR_MBQ0_CTRL(1) + offset, size);
+ regmap_read(npu->regmap, REG_CR_MBQ0_CTRL(2) + offset, &val);
+ regmap_write(npu->regmap, REG_CR_MBQ0_CTRL(2) + offset, val + 1);
+ val = FIELD_PREP(MBOX_MSG_FUNC_ID, func_id) | MBOX_MSG_WAIT_RSP;
+ regmap_write(npu->regmap, REG_CR_MBQ0_CTRL(3) + offset, val);
+
+ ret = regmap_read_poll_timeout_atomic(npu->regmap,
+ REG_CR_MBQ0_CTRL(3) + offset,
+ val, (val & MBOX_MSG_DONE),
+ 100, 100 * MSEC_PER_SEC);
+ if (!ret && FIELD_GET(MBOX_MSG_STATUS, val) != NPU_MBOX_SUCCESS)
+ ret = -EINVAL;
+
+ spin_unlock_bh(&npu->cores[core].lock);
+
+ dma_unmap_single(npu->dev, dma_addr, size, DMA_TO_DEVICE);
+
+ return ret;
+}
+
+static int airoha_npu_run_firmware(struct device *dev, void __iomem *base,
+ struct reserved_mem *rmem)
+{
+ const struct firmware *fw;
+ void __iomem *addr;
+ int ret;
+
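+ /* the RV32 image is copied into the reserved memory region, while
+ * the data image is loaded into the NPU local SRAM
+ */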
+ ret = request_firmware(&fw, NPU_EN7581_FIRMWARE_RV32, dev);
+ if (ret)
+ return ret == -ENOENT ? -EPROBE_DEFER : ret;
+
+ if (fw->size > NPU_EN7581_FIRMWARE_RV32_MAX_SIZE) {
+ dev_err(dev, "%s: fw size exceeds limit (%zu)\n",
+ NPU_EN7581_FIRMWARE_RV32, fw->size);
+ ret = -E2BIG;
+ goto out;
+ }
+
+ addr = devm_ioremap(dev, rmem->base, rmem->size);
+ if (!addr) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ memcpy_toio(addr, fw->data, fw->size);
+ release_firmware(fw);
+
+ ret = request_firmware(&fw, NPU_EN7581_FIRMWARE_DATA, dev);
+ if (ret)
+ return ret == -ENOENT ? -EPROBE_DEFER : ret;
+
+ if (fw->size > NPU_EN7581_FIRMWARE_DATA_MAX_SIZE) {
+ dev_err(dev, "%s: fw size exceeds limit (%zu)\n",
+ NPU_EN7581_FIRMWARE_DATA, fw->size);
+ ret = -E2BIG;
+ goto out;
+ }
+
+ memcpy_toio(base + REG_NPU_LOCAL_SRAM, fw->data, fw->size);
+out:
+ release_firmware(fw);
+
+ return ret;
+}
+
+static irqreturn_t airoha_npu_mbox_handler(int irq, void *npu_instance)
+{
+ struct airoha_npu *npu = npu_instance;
+
+ /* clear mbox interrupt status */
+ regmap_write(npu->regmap, REG_CR_MBOX_INT_STATUS,
+ MBOX_INT_STATUS_MASK);
+
+ /* acknowledge npu */
+ regmap_update_bits(npu->regmap, REG_CR_MBQ8_CTRL(3),
+ MBOX_MSG_STATUS | MBOX_MSG_DONE, MBOX_MSG_DONE);
+
+ return IRQ_HANDLED;
+}
+
+static void airoha_npu_wdt_work(struct work_struct *work)
+{
+ struct airoha_npu_core *core;
+ struct airoha_npu *npu;
+ void *dump;
+ u32 val[3];
+ int c;
+
+ core = container_of(work, struct airoha_npu_core, wdt_work);
+ npu = core->npu;
+
+ dump = vzalloc(NPU_DUMP_SIZE);
+ if (!dump)
+ return;
+
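+ /* dump PC, SP and LR from the core debug registers and report them
+ * through devcoredump
+ */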
+ c = core - &npu->cores[0];
+ regmap_bulk_read(npu->regmap, REG_PC_DBG(c), val, ARRAY_SIZE(val));
+ snprintf(dump, NPU_DUMP_SIZE, "PC: %08x SP: %08x LR: %08x\n",
+ val[0], val[1], val[2]);
+
+ dev_coredumpv(npu->dev, dump, NPU_DUMP_SIZE, GFP_KERNEL);
+}
+
+static irqreturn_t airoha_npu_wdt_handler(int irq, void *core_instance)
+{
+ struct airoha_npu_core *core = core_instance;
+ struct airoha_npu *npu = core->npu;
+ int c = core - &npu->cores[0];
+ u32 val;
+
+ regmap_set_bits(npu->regmap, REG_WDT_TIMER_CTRL(c), WDT_INTR_MASK);
+ if (!regmap_read(npu->regmap, REG_WDT_TIMER_CTRL(c), &val) &&
+ FIELD_GET(WDT_EN_MASK, val))
+ schedule_work(&core->wdt_work);
+
+ return IRQ_HANDLED;
+}
+
+static int airoha_npu_ppe_init(struct airoha_npu *npu)
+{
+ struct ppe_mbox_data *ppe_data;
+ int err;
+
+ ppe_data = kzalloc(sizeof(*ppe_data), GFP_KERNEL);
+ if (!ppe_data)
+ return -ENOMEM;
+
+ ppe_data->func_type = NPU_OP_SET;
+ ppe_data->func_id = PPE_FUNC_SET_WAIT_HWNAT_INIT;
+ ppe_data->init_info.ppe_type = PPE_TYPE_L2B_IPV4_IPV6;
+ ppe_data->init_info.wan_mode = QDMA_WAN_ETHER;
+
+ err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, ppe_data,
+ sizeof(*ppe_data));
+ kfree(ppe_data);
+
+ return err;
+}
+
+static int airoha_npu_ppe_deinit(struct airoha_npu *npu)
+{
+ struct ppe_mbox_data *ppe_data;
+ int err;
+
+ ppe_data = kzalloc(sizeof(*ppe_data), GFP_KERNEL);
+ if (!ppe_data)
+ return -ENOMEM;
+
+ ppe_data->func_type = NPU_OP_SET;
+ ppe_data->func_id = PPE_FUNC_SET_WAIT_HWNAT_DEINIT;
+
+ err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, ppe_data,
+ sizeof(*ppe_data));
+ kfree(ppe_data);
+
+ return err;
+}
+
+static int airoha_npu_ppe_flush_sram_entries(struct airoha_npu *npu,
+ dma_addr_t foe_addr,
+ int sram_num_entries)
+{
+ struct ppe_mbox_data *ppe_data;
+ int err;
+
+ ppe_data = kzalloc(sizeof(*ppe_data), GFP_KERNEL);
+ if (!ppe_data)
+ return -ENOMEM;
+
+ ppe_data->func_type = NPU_OP_SET;
+ ppe_data->func_id = PPE_FUNC_SET_WAIT_API;
+ ppe_data->set_info.func_id = PPE_SRAM_RESET_VAL;
+ ppe_data->set_info.data = foe_addr;
+ ppe_data->set_info.size = sram_num_entries;
+
+ err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, ppe_data,
+ sizeof(*ppe_data));
+ kfree(ppe_data);
+
+ return err;
+}
+
+static int airoha_npu_foe_commit_entry(struct airoha_npu *npu,
+ dma_addr_t foe_addr,
+ u32 entry_size, u32 hash, bool ppe2)
+{
+ struct ppe_mbox_data *ppe_data;
+ int err;
+
+ ppe_data = kzalloc(sizeof(*ppe_data), GFP_ATOMIC);
+ if (!ppe_data)
+ return -ENOMEM;
+
+ ppe_data->func_type = NPU_OP_SET;
+ ppe_data->func_id = PPE_FUNC_SET_WAIT_API;
+ ppe_data->set_info.data = foe_addr;
+ ppe_data->set_info.size = entry_size;
+ ppe_data->set_info.func_id = ppe2 ? PPE2_SRAM_SET_ENTRY
+ : PPE_SRAM_SET_ENTRY;
+
+ err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, ppe_data,
+ sizeof(*ppe_data));
+ if (err)
+ goto out;
+
+ ppe_data->set_info.func_id = PPE_SRAM_SET_VAL;
+ ppe_data->set_info.data = hash;
+ ppe_data->set_info.size = sizeof(u32);
+
+ err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, ppe_data,
+ sizeof(*ppe_data));
+out:
+ kfree(ppe_data);
+
+ return err;
+}
+
+static int airoha_npu_stats_setup(struct airoha_npu *npu,
+ dma_addr_t foe_stats_addr)
+{
+ int err, size = PPE_STATS_NUM_ENTRIES * sizeof(*npu->stats);
+ struct ppe_mbox_data *ppe_data;
+
+ if (!size) /* flow stats are disabled */
+ return 0;
+
+ ppe_data = kzalloc(sizeof(*ppe_data), GFP_ATOMIC);
+ if (!ppe_data)
+ return -ENOMEM;
+
+ ppe_data->func_type = NPU_OP_SET;
+ ppe_data->func_id = PPE_FUNC_SET_WAIT_FLOW_STATS_SETUP;
+ ppe_data->stats_info.foe_stats_addr = foe_stats_addr;
+
+ err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, ppe_data,
+ sizeof(*ppe_data));
+ if (err)
+ goto out;
+
+ npu->stats = devm_ioremap(npu->dev,
+ ppe_data->stats_info.npu_stats_addr,
+ size);
+ if (!npu->stats)
+ err = -ENOMEM;
+out:
+ kfree(ppe_data);
+
+ return err;
+}
+
+struct airoha_npu *airoha_npu_get(struct device *dev, dma_addr_t *stats_addr)
+{
+ struct platform_device *pdev;
+ struct device_node *np;
+ struct airoha_npu *npu;
+
+ np = of_parse_phandle(dev->of_node, "airoha,npu", 0);
+ if (!np)
+ return ERR_PTR(-ENODEV);
+
+ pdev = of_find_device_by_node(np);
+ if (!pdev) {
+ dev_err(dev, "cannot find device node %s\n", np->name);
+ of_node_put(np);
+ return ERR_PTR(-ENODEV);
+ }
+ of_node_put(np);
+
+ if (!try_module_get(THIS_MODULE)) {
+ dev_err(dev, "failed to get the device driver module\n");
+ npu = ERR_PTR(-ENODEV);
+ goto error_pdev_put;
+ }
+
+ npu = platform_get_drvdata(pdev);
+ if (!npu) {
+ npu = ERR_PTR(-ENODEV);
+ goto error_module_put;
+ }
+
+ if (!device_link_add(dev, &pdev->dev, DL_FLAG_AUTOREMOVE_SUPPLIER)) {
+ dev_err(&pdev->dev,
+ "failed to create device link to consumer %s\n",
+ dev_name(dev));
+ npu = ERR_PTR(-EINVAL);
+ goto error_module_put;
+ }
+
+ if (stats_addr) {
+ int err;
+
+ err = airoha_npu_stats_setup(npu, *stats_addr);
+ if (err) {
+ dev_err(dev, "failed to allocate npu stats buffer\n");
+ npu = ERR_PTR(err);
+ goto error_module_put;
+ }
+ }
+
+ return npu;
+
+error_module_put:
+ module_put(THIS_MODULE);
+error_pdev_put:
+ platform_device_put(pdev);
+
+ return npu;
+}
+EXPORT_SYMBOL_GPL(airoha_npu_get);
+
+void airoha_npu_put(struct airoha_npu *npu)
+{
+ module_put(THIS_MODULE);
+ put_device(npu->dev);
+}
+EXPORT_SYMBOL_GPL(airoha_npu_put);
+
+static const struct of_device_id of_airoha_npu_match[] = {
+ { .compatible = "airoha,en7581-npu" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_airoha_npu_match);
+
+static const struct regmap_config regmap_config = {
+ .name = "npu",
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .disable_locking = true,
+};
+
+static int airoha_npu_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct reserved_mem *rmem;
+ struct airoha_npu *npu;
+ struct device_node *np;
+ void __iomem *base;
+ int i, irq, err;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ npu = devm_kzalloc(dev, sizeof(*npu), GFP_KERNEL);
+ if (!npu)
+ return -ENOMEM;
+
+ npu->dev = dev;
+ npu->ops.ppe_init = airoha_npu_ppe_init;
+ npu->ops.ppe_deinit = airoha_npu_ppe_deinit;
+ npu->ops.ppe_flush_sram_entries = airoha_npu_ppe_flush_sram_entries;
+ npu->ops.ppe_foe_commit_entry = airoha_npu_foe_commit_entry;
+
+ npu->regmap = devm_regmap_init_mmio(dev, base, &regmap_config);
+ if (IS_ERR(npu->regmap))
+ return PTR_ERR(npu->regmap);
+
+ np = of_parse_phandle(dev->of_node, "memory-region", 0);
+ if (!np)
+ return -ENODEV;
+
+ rmem = of_reserved_mem_lookup(np);
+ of_node_put(np);
+
+ if (!rmem)
+ return -ENODEV;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ err = devm_request_irq(dev, irq, airoha_npu_mbox_handler,
+ IRQF_SHARED, "airoha-npu-mbox", npu);
+ if (err)
+ return err;
+
+ for (i = 0; i < ARRAY_SIZE(npu->cores); i++) {
+ struct airoha_npu_core *core = &npu->cores[i];
+
+ spin_lock_init(&core->lock);
+ core->npu = npu;
+
+ irq = platform_get_irq(pdev, i + 1);
+ if (irq < 0)
+ return irq;
+
+ err = devm_request_irq(dev, irq, airoha_npu_wdt_handler,
+ IRQF_SHARED, "airoha-npu-wdt", core);
+ if (err)
+ return err;
+
+ INIT_WORK(&core->wdt_work, airoha_npu_wdt_work);
+ }
+
+ err = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+ if (err)
+ return err;
+
+ err = airoha_npu_run_firmware(dev, base, rmem);
+ if (err)
+ return dev_err_probe(dev, err, "failed to run npu firmware\n");
+
+ regmap_write(npu->regmap, REG_CR_NPU_MIB(10),
+ rmem->base + NPU_EN7581_FIRMWARE_RV32_MAX_SIZE);
+ regmap_write(npu->regmap, REG_CR_NPU_MIB(11), 0x40000); /* SRAM 256K */
+ regmap_write(npu->regmap, REG_CR_NPU_MIB(12), 0);
+ regmap_write(npu->regmap, REG_CR_NPU_MIB(21), 1);
+ msleep(100);
+
+ /* set the boot address for the NPU cores */
+ for (i = 0; i < NPU_NUM_CORES; i++)
+ regmap_write(npu->regmap, REG_CR_BOOT_BASE(i), rmem->base);
+ usleep_range(1000, 2000);
+
+ /* enable the NPU cores; do not start core3 since it is used for
+ * WiFi offloading
+ */
+ regmap_write(npu->regmap, REG_CR_BOOT_CONFIG, 0xf7);
+ regmap_write(npu->regmap, REG_CR_BOOT_TRIGGER, 0x1);
+ msleep(100);
+
+ platform_set_drvdata(pdev, npu);
+
+ return 0;
+}
+
+static void airoha_npu_remove(struct platform_device *pdev)
+{
+ struct airoha_npu *npu = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(npu->cores); i++)
+ cancel_work_sync(&npu->cores[i].wdt_work);
+}
+
+static struct platform_driver airoha_npu_driver = {
+ .probe = airoha_npu_probe,
+ .remove = airoha_npu_remove,
+ .driver = {
+ .name = "airoha-npu",
+ .of_match_table = of_airoha_npu_match,
+ },
+};
+module_platform_driver(airoha_npu_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
+MODULE_DESCRIPTION("Airoha Network Processor Unit driver");
diff --git a/drivers/net/ethernet/airoha/airoha_npu.h b/drivers/net/ethernet/airoha/airoha_npu.h
new file mode 100644
index 000000000000..98ec3be74ce4
--- /dev/null
+++ b/drivers/net/ethernet/airoha/airoha_npu.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2025 AIROHA Inc
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#define NPU_NUM_CORES 8
+
+struct airoha_npu {
+ struct device *dev;
+ struct regmap *regmap;
+
+ struct airoha_npu_core {
+ struct airoha_npu *npu;
+ /* protect concurrent npu memory accesses */
+ spinlock_t lock;
+ struct work_struct wdt_work;
+ } cores[NPU_NUM_CORES];
+
+ struct airoha_foe_stats __iomem *stats;
+
+ struct {
+ int (*ppe_init)(struct airoha_npu *npu);
+ int (*ppe_deinit)(struct airoha_npu *npu);
+ int (*ppe_flush_sram_entries)(struct airoha_npu *npu,
+ dma_addr_t foe_addr,
+ int sram_num_entries);
+ int (*ppe_foe_commit_entry)(struct airoha_npu *npu,
+ dma_addr_t foe_addr,
+ u32 entry_size, u32 hash,
+ bool ppe2);
+ } ops;
+};
+
+struct airoha_npu *airoha_npu_get(struct device *dev, dma_addr_t *stats_addr);
+void airoha_npu_put(struct airoha_npu *npu);
diff --git a/drivers/net/ethernet/airoha/airoha_ppe.c b/drivers/net/ethernet/airoha/airoha_ppe.c
new file mode 100644
index 000000000000..0e217acfc5ef
--- /dev/null
+++ b/drivers/net/ethernet/airoha/airoha_ppe.c
@@ -0,0 +1,1343 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 AIROHA Inc
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/rhashtable.h>
+#include <net/ipv6.h>
+#include <net/pkt_cls.h>
+
+#include "airoha_npu.h"
+#include "airoha_regs.h"
+#include "airoha_eth.h"
+
+static DEFINE_MUTEX(flow_offload_mutex);
+static DEFINE_SPINLOCK(ppe_lock);
+
+static const struct rhashtable_params airoha_flow_table_params = {
+ .head_offset = offsetof(struct airoha_flow_table_entry, node),
+ .key_offset = offsetof(struct airoha_flow_table_entry, cookie),
+ .key_len = sizeof(unsigned long),
+ .automatic_shrinking = true,
+};
+
+static const struct rhashtable_params airoha_l2_flow_table_params = {
+ .head_offset = offsetof(struct airoha_flow_table_entry, l2_node),
+ .key_offset = offsetof(struct airoha_flow_table_entry, data.bridge),
+ .key_len = 2 * ETH_ALEN,
+ .automatic_shrinking = true,
+};
+
+static bool airoha_ppe2_is_enabled(struct airoha_eth *eth)
+{
+ return airoha_fe_rr(eth, REG_PPE_GLO_CFG(1)) & PPE_GLO_CFG_EN_MASK;
+}
+
+static u32 airoha_ppe_get_timestamp(struct airoha_ppe *ppe)
+{
+ u16 timestamp = airoha_fe_rr(ppe->eth, REG_FE_FOE_TS);
+
+ return FIELD_GET(AIROHA_FOE_IB1_BIND_TIMESTAMP, timestamp);
+}
+
+static void airoha_ppe_hw_init(struct airoha_ppe *ppe)
+{
+ u32 sram_tb_size, sram_num_entries, dram_num_entries;
+ struct airoha_eth *eth = ppe->eth;
+ int i;
+
+ sram_tb_size = PPE_SRAM_NUM_ENTRIES * sizeof(struct airoha_foe_entry);
+ dram_num_entries = PPE_RAM_NUM_ENTRIES_SHIFT(PPE_DRAM_NUM_ENTRIES);
+
+ for (i = 0; i < PPE_NUM; i++) {
+ int p;
+
+ airoha_fe_wr(eth, REG_PPE_TB_BASE(i),
+ ppe->foe_dma + sram_tb_size);
+
+ airoha_fe_rmw(eth, REG_PPE_BND_AGE0(i),
+ PPE_BIND_AGE0_DELTA_NON_L4 |
+ PPE_BIND_AGE0_DELTA_UDP,
+ FIELD_PREP(PPE_BIND_AGE0_DELTA_NON_L4, 1) |
+ FIELD_PREP(PPE_BIND_AGE0_DELTA_UDP, 12));
+ airoha_fe_rmw(eth, REG_PPE_BND_AGE1(i),
+ PPE_BIND_AGE1_DELTA_TCP_FIN |
+ PPE_BIND_AGE1_DELTA_TCP,
+ FIELD_PREP(PPE_BIND_AGE1_DELTA_TCP_FIN, 1) |
+ FIELD_PREP(PPE_BIND_AGE1_DELTA_TCP, 7));
+
+ airoha_fe_rmw(eth, REG_PPE_TB_HASH_CFG(i),
+ PPE_SRAM_TABLE_EN_MASK |
+ PPE_SRAM_HASH1_EN_MASK |
+ PPE_DRAM_TABLE_EN_MASK |
+ PPE_SRAM_HASH0_MODE_MASK |
+ PPE_SRAM_HASH1_MODE_MASK |
+ PPE_DRAM_HASH0_MODE_MASK |
+ PPE_DRAM_HASH1_MODE_MASK,
+ FIELD_PREP(PPE_SRAM_TABLE_EN_MASK, 1) |
+ FIELD_PREP(PPE_SRAM_HASH1_EN_MASK, 1) |
+ FIELD_PREP(PPE_SRAM_HASH1_MODE_MASK, 1) |
+ FIELD_PREP(PPE_DRAM_HASH1_MODE_MASK, 3));
+
+ airoha_fe_rmw(eth, REG_PPE_TB_CFG(i),
+ PPE_TB_CFG_SEARCH_MISS_MASK |
+ PPE_TB_CFG_KEEPALIVE_MASK |
+ PPE_TB_ENTRY_SIZE_MASK,
+ FIELD_PREP(PPE_TB_CFG_SEARCH_MISS_MASK, 3) |
+ FIELD_PREP(PPE_TB_ENTRY_SIZE_MASK, 0));
+
+ airoha_fe_wr(eth, REG_PPE_HASH_SEED(i), PPE_HASH_SEED);
+
+ for (p = 0; p < ARRAY_SIZE(eth->ports); p++)
+ airoha_fe_rmw(eth, REG_PPE_MTU(i, p),
+ FP0_EGRESS_MTU_MASK |
+ FP1_EGRESS_MTU_MASK,
+ FIELD_PREP(FP0_EGRESS_MTU_MASK,
+ AIROHA_MAX_MTU) |
+ FIELD_PREP(FP1_EGRESS_MTU_MASK,
+ AIROHA_MAX_MTU));
+ }
+
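+ /* when PPE2 is enabled the SRAM entries are split between the two
+ * PPE blocks, otherwise the first PPE owns the whole SRAM table
+ */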
+ if (airoha_ppe2_is_enabled(eth)) {
+ sram_num_entries =
+ PPE_RAM_NUM_ENTRIES_SHIFT(PPE1_SRAM_NUM_DATA_ENTRIES);
+ airoha_fe_rmw(eth, REG_PPE_TB_CFG(0),
+ PPE_SRAM_TB_NUM_ENTRY_MASK |
+ PPE_DRAM_TB_NUM_ENTRY_MASK,
+ FIELD_PREP(PPE_SRAM_TB_NUM_ENTRY_MASK,
+ sram_num_entries) |
+ FIELD_PREP(PPE_DRAM_TB_NUM_ENTRY_MASK,
+ dram_num_entries));
+ airoha_fe_rmw(eth, REG_PPE_TB_CFG(1),
+ PPE_SRAM_TB_NUM_ENTRY_MASK |
+ PPE_DRAM_TB_NUM_ENTRY_MASK,
+ FIELD_PREP(PPE_SRAM_TB_NUM_ENTRY_MASK,
+ sram_num_entries) |
+ FIELD_PREP(PPE_DRAM_TB_NUM_ENTRY_MASK,
+ dram_num_entries));
+ } else {
+ sram_num_entries =
+ PPE_RAM_NUM_ENTRIES_SHIFT(PPE_SRAM_NUM_DATA_ENTRIES);
+ airoha_fe_rmw(eth, REG_PPE_TB_CFG(0),
+ PPE_SRAM_TB_NUM_ENTRY_MASK |
+ PPE_DRAM_TB_NUM_ENTRY_MASK,
+ FIELD_PREP(PPE_SRAM_TB_NUM_ENTRY_MASK,
+ sram_num_entries) |
+ FIELD_PREP(PPE_DRAM_TB_NUM_ENTRY_MASK,
+ dram_num_entries));
+ }
+}
+
+static void airoha_ppe_flow_mangle_eth(const struct flow_action_entry *act, void *eth)
+{
+ void *dest = eth + act->mangle.offset;
+ const void *src = &act->mangle.val;
+
+ if (act->mangle.offset > 8)
+ return;
+
+ if (act->mangle.mask == 0xffff) {
+ src += 2;
+ dest += 2;
+ }
+
+ memcpy(dest, src, act->mangle.mask ? 2 : 4);
+}
+
+static int airoha_ppe_flow_mangle_ports(const struct flow_action_entry *act,
+ struct airoha_flow_data *data)
+{
+ u32 val = be32_to_cpu((__force __be32)act->mangle.val);
+
+ switch (act->mangle.offset) {
+ case 0:
+ if ((__force __be32)act->mangle.mask == ~cpu_to_be32(0xffff))
+ data->dst_port = cpu_to_be16(val);
+ else
+ data->src_port = cpu_to_be16(val >> 16);
+ break;
+ case 2:
+ data->dst_port = cpu_to_be16(val);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int airoha_ppe_flow_mangle_ipv4(const struct flow_action_entry *act,
+ struct airoha_flow_data *data)
+{
+ __be32 *dest;
+
+ switch (act->mangle.offset) {
+ case offsetof(struct iphdr, saddr):
+ dest = &data->v4.src_addr;
+ break;
+ case offsetof(struct iphdr, daddr):
+ dest = &data->v4.dst_addr;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ memcpy(dest, &act->mangle.val, sizeof(u32));
+
+ return 0;
+}
+
+static int airoha_get_dsa_port(struct net_device **dev)
+{
+#if IS_ENABLED(CONFIG_NET_DSA)
+ struct dsa_port *dp = dsa_port_from_netdev(*dev);
+
+ if (IS_ERR(dp))
+ return -ENODEV;
+
+ *dev = dsa_port_to_conduit(dp);
+ return dp->index;
+#else
+ return -ENODEV;
+#endif
+}
+
+static void airoha_ppe_foe_set_bridge_addrs(struct airoha_foe_bridge *br,
+ struct ethhdr *eh)
+{
+ br->dest_mac_hi = get_unaligned_be32(eh->h_dest);
+ br->dest_mac_lo = get_unaligned_be16(eh->h_dest + 4);
+ br->src_mac_hi = get_unaligned_be16(eh->h_source);
+ br->src_mac_lo = get_unaligned_be32(eh->h_source + 2);
+}
+
+static int airoha_ppe_foe_entry_prepare(struct airoha_eth *eth,
+ struct airoha_foe_entry *hwe,
+ struct net_device *dev, int type,
+ struct airoha_flow_data *data,
+ int l4proto)
+{
+ int dsa_port = airoha_get_dsa_port(&dev);
+ struct airoha_foe_mac_info_common *l2;
+ u32 qdata, ports_pad, val;
+ u8 smac_id = 0xf;
+
+ memset(hwe, 0, sizeof(*hwe));
+
+ val = FIELD_PREP(AIROHA_FOE_IB1_BIND_STATE, AIROHA_FOE_STATE_BIND) |
+ FIELD_PREP(AIROHA_FOE_IB1_BIND_PACKET_TYPE, type) |
+ FIELD_PREP(AIROHA_FOE_IB1_BIND_UDP, l4proto == IPPROTO_UDP) |
+ FIELD_PREP(AIROHA_FOE_IB1_BIND_VLAN_LAYER, data->vlan.num) |
+ FIELD_PREP(AIROHA_FOE_IB1_BIND_VPM, data->vlan.num) |
+ AIROHA_FOE_IB1_BIND_TTL;
+ hwe->ib1 = val;
+
+ val = FIELD_PREP(AIROHA_FOE_IB2_PORT_AG, 0x1f) |
+ AIROHA_FOE_IB2_PSE_QOS;
+ if (dsa_port >= 0)
+ val |= FIELD_PREP(AIROHA_FOE_IB2_NBQ, dsa_port);
+
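+ /* rate-limit hw table refreshes for this hash to once every 100ms */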
+ if (dev) {
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ u8 pse_port;
+
+ if (!airoha_is_valid_gdm_port(eth, port))
+ return -EINVAL;
+
+ if (dsa_port >= 0)
+ pse_port = port->id == 4 ? FE_PSE_PORT_GDM4 : port->id;
+ else
+ pse_port = 2; /* uplink relies on GDM2 loopback */
+ val |= FIELD_PREP(AIROHA_FOE_IB2_PSE_PORT, pse_port);
+
+ /* For downlink traffic, consume SRAM memory for the hw forwarding
+ * descriptor queue.
+ */
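+ /* program the port mac address into the PPE update memory: the
+ * lower four bytes first, then the two most significant ones
+ */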
+ if (airhoa_is_lan_gdm_port(port))
+ val |= AIROHA_FOE_IB2_FAST_PATH;
+
+ smac_id = port->id;
+ }
+
+ if (is_multicast_ether_addr(data->eth.h_dest))
+ val |= AIROHA_FOE_IB2_MULTICAST;
+
+ ports_pad = 0xa5a5a500 | (l4proto & 0xff);
+ if (type == PPE_PKT_TYPE_IPV4_ROUTE)
+ hwe->ipv4.orig_tuple.ports = ports_pad;
+ if (type == PPE_PKT_TYPE_IPV6_ROUTE_3T)
+ hwe->ipv6.ports = ports_pad;
+
+ qdata = FIELD_PREP(AIROHA_FOE_SHAPER_ID, 0x7f);
+ if (type == PPE_PKT_TYPE_BRIDGE) {
+ airoha_ppe_foe_set_bridge_addrs(&hwe->bridge, &data->eth);
+ hwe->bridge.data = qdata;
+ hwe->bridge.ib2 = val;
+ l2 = &hwe->bridge.l2.common;
+ } else if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
+ hwe->ipv6.data = qdata;
+ hwe->ipv6.ib2 = val;
+ l2 = &hwe->ipv6.l2;
+ } else {
+ hwe->ipv4.data = qdata;
+ hwe->ipv4.ib2 = val;
+ l2 = &hwe->ipv4.l2.common;
+ }
+
+ l2->dest_mac_hi = get_unaligned_be32(data->eth.h_dest);
+ l2->dest_mac_lo = get_unaligned_be16(data->eth.h_dest + 4);
+ if (type <= PPE_PKT_TYPE_IPV4_DSLITE) {
+ l2->src_mac_hi = get_unaligned_be32(data->eth.h_source);
+ hwe->ipv4.l2.src_mac_lo =
+ get_unaligned_be16(data->eth.h_source + 4);
+ } else {
+ l2->src_mac_hi = FIELD_PREP(AIROHA_FOE_MAC_SMAC_ID, smac_id);
+ }
+
+ if (data->vlan.num) {
+ l2->etype = dsa_port >= 0 ? BIT(dsa_port) : 0;
+ l2->vlan1 = data->vlan.hdr[0].id;
+ if (data->vlan.num == 2)
+ l2->vlan2 = data->vlan.hdr[1].id;
+ } else if (dsa_port >= 0) {
+ l2->etype = BIT(15) | BIT(dsa_port);
+ } else if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
+ l2->etype = ETH_P_IPV6;
+ } else {
+ l2->etype = ETH_P_IP;
+ }
+
+ return 0;
+}
+
+static int airoha_ppe_foe_entry_set_ipv4_tuple(struct airoha_foe_entry *hwe,
+ struct airoha_flow_data *data,
+ bool egress)
+{
+ int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
+ struct airoha_foe_ipv4_tuple *t;
+
+ switch (type) {
+ case PPE_PKT_TYPE_IPV4_HNAPT:
+ if (egress) {
+ t = &hwe->ipv4.new_tuple;
+ break;
+ }
+ fallthrough;
+ case PPE_PKT_TYPE_IPV4_DSLITE:
+ case PPE_PKT_TYPE_IPV4_ROUTE:
+ t = &hwe->ipv4.orig_tuple;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
+
+ t->src_ip = be32_to_cpu(data->v4.src_addr);
+ t->dest_ip = be32_to_cpu(data->v4.dst_addr);
+
+ if (type != PPE_PKT_TYPE_IPV4_ROUTE) {
+ t->src_port = be16_to_cpu(data->src_port);
+ t->dest_port = be16_to_cpu(data->dst_port);
+ }
+
+ return 0;
+}
+
+static int airoha_ppe_foe_entry_set_ipv6_tuple(struct airoha_foe_entry *hwe,
+ struct airoha_flow_data *data)
+
+{
+ int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
+ u32 *src, *dest;
+
+ switch (type) {
+ case PPE_PKT_TYPE_IPV6_ROUTE_5T:
+ case PPE_PKT_TYPE_IPV6_6RD:
+ hwe->ipv6.src_port = be16_to_cpu(data->src_port);
+ hwe->ipv6.dest_port = be16_to_cpu(data->dst_port);
+ fallthrough;
+ case PPE_PKT_TYPE_IPV6_ROUTE_3T:
+ src = hwe->ipv6.src_ip;
+ dest = hwe->ipv6.dest_ip;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
+
+ ipv6_addr_be32_to_cpu(src, data->v6.src_addr.s6_addr32);
+ ipv6_addr_be32_to_cpu(dest, data->v6.dst_addr.s6_addr32);
+
+ return 0;
+}
+
+static u32 airoha_ppe_foe_get_entry_hash(struct airoha_foe_entry *hwe)
+{
+ int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
+ u32 hash, hv1, hv2, hv3;
+
+ switch (type) {
+ case PPE_PKT_TYPE_IPV4_ROUTE:
+ case PPE_PKT_TYPE_IPV4_HNAPT:
+ hv1 = hwe->ipv4.orig_tuple.ports;
+ hv2 = hwe->ipv4.orig_tuple.dest_ip;
+ hv3 = hwe->ipv4.orig_tuple.src_ip;
+ break;
+ case PPE_PKT_TYPE_IPV6_ROUTE_3T:
+ case PPE_PKT_TYPE_IPV6_ROUTE_5T:
+ hv1 = hwe->ipv6.src_ip[3] ^ hwe->ipv6.dest_ip[3];
+ hv1 ^= hwe->ipv6.ports;
+
+ hv2 = hwe->ipv6.src_ip[2] ^ hwe->ipv6.dest_ip[2];
+ hv2 ^= hwe->ipv6.dest_ip[0];
+
+ hv3 = hwe->ipv6.src_ip[1] ^ hwe->ipv6.dest_ip[1];
+ hv3 ^= hwe->ipv6.src_ip[0];
+ break;
+ case PPE_PKT_TYPE_BRIDGE: {
+ struct airoha_foe_mac_info *l2 = &hwe->bridge.l2;
+
+ hv1 = l2->common.src_mac_hi & 0xffff;
+ hv1 = hv1 << 16 | l2->src_mac_lo;
+
+ hv2 = l2->common.dest_mac_lo;
+ hv2 = hv2 << 16;
+ hv2 = hv2 | ((l2->common.src_mac_hi & 0xffff0000) >> 16);
+
+ hv3 = l2->common.dest_mac_hi;
+ break;
+ }
+ case PPE_PKT_TYPE_IPV4_DSLITE:
+ case PPE_PKT_TYPE_IPV6_6RD:
+ default:
+ WARN_ON_ONCE(1);
+ return PPE_HASH_MASK;
+ }
+
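+ /* fold the three hash words into a single FOE table index */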
+ hash = (hv1 & hv2) | ((~hv1) & hv3);
+ hash = (hash >> 24) | ((hash & 0xffffff) << 8);
+ hash ^= hv1 ^ hv2 ^ hv3;
+ hash ^= hash >> 16;
+ hash &= PPE_NUM_ENTRIES - 1;
+
+ return hash;
+}
+
+static u32 airoha_ppe_foe_get_flow_stats_index(struct airoha_ppe *ppe, u32 hash)
+{
+ if (!airoha_ppe2_is_enabled(ppe->eth))
+ return hash;
+
+ return hash >= PPE_STATS_NUM_ENTRIES ? hash - PPE1_STATS_NUM_ENTRIES
+ : hash;
+}
+
+static void airoha_ppe_foe_flow_stat_entry_reset(struct airoha_ppe *ppe,
+ struct airoha_npu *npu,
+ int index)
+{
+ memset_io(&npu->stats[index], 0, sizeof(*npu->stats));
+ memset(&ppe->foe_stats[index], 0, sizeof(*ppe->foe_stats));
+}
+
+static void airoha_ppe_foe_flow_stats_reset(struct airoha_ppe *ppe,
+ struct airoha_npu *npu)
+{
+ int i;
+
+ for (i = 0; i < PPE_STATS_NUM_ENTRIES; i++)
+ airoha_ppe_foe_flow_stat_entry_reset(ppe, npu, i);
+}
+
+static void airoha_ppe_foe_flow_stats_update(struct airoha_ppe *ppe,
+ struct airoha_npu *npu,
+ struct airoha_foe_entry *hwe,
+ u32 hash)
+{
+ int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
+ u32 index, pse_port, val, *data, *ib2, *meter;
+ u8 nbq;
+
+ index = airoha_ppe_foe_get_flow_stats_index(ppe, hash);
+ if (index >= PPE_STATS_NUM_ENTRIES)
+ return;
+
+ if (type == PPE_PKT_TYPE_BRIDGE) {
+ data = &hwe->bridge.data;
+ ib2 = &hwe->bridge.ib2;
+ meter = &hwe->bridge.l2.meter;
+ } else if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
+ data = &hwe->ipv6.data;
+ ib2 = &hwe->ipv6.ib2;
+ meter = &hwe->ipv6.meter;
+ } else {
+ data = &hwe->ipv4.data;
+ ib2 = &hwe->ipv4.ib2;
+ meter = &hwe->ipv4.l2.meter;
+ }
+
+ airoha_ppe_foe_flow_stat_entry_reset(ppe, npu, index);
+
+ val = FIELD_GET(AIROHA_FOE_CHANNEL | AIROHA_FOE_QID, *data);
+ *data = (*data & ~AIROHA_FOE_ACTDP) |
+ FIELD_PREP(AIROHA_FOE_ACTDP, val);
+
+ val = *ib2 & (AIROHA_FOE_IB2_NBQ | AIROHA_FOE_IB2_PSE_PORT |
+ AIROHA_FOE_IB2_PSE_QOS | AIROHA_FOE_IB2_FAST_PATH);
+ *meter |= FIELD_PREP(AIROHA_FOE_TUNNEL_MTU, val);
+
+ pse_port = FIELD_GET(AIROHA_FOE_IB2_PSE_PORT, *ib2);
+ nbq = pse_port == 1 ? 6 : 5;
+ *ib2 &= ~(AIROHA_FOE_IB2_NBQ | AIROHA_FOE_IB2_PSE_PORT |
+ AIROHA_FOE_IB2_PSE_QOS);
+ *ib2 |= FIELD_PREP(AIROHA_FOE_IB2_PSE_PORT, 6) |
+ FIELD_PREP(AIROHA_FOE_IB2_NBQ, nbq);
+}
+
+struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe,
+ u32 hash)
+{
+ if (hash < PPE_SRAM_NUM_ENTRIES) {
+ u32 *hwe = ppe->foe + hash * sizeof(struct airoha_foe_entry);
+ struct airoha_eth *eth = ppe->eth;
+ bool ppe2;
+ u32 val;
+ int i;
+
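+ /* SRAM entries are not directly accessible: request a copy through
+ * the RAM control register and read it back into the DMA-coherent
+ * shadow table
+ */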
+ ppe2 = airoha_ppe2_is_enabled(ppe->eth) &&
+ hash >= PPE1_SRAM_NUM_ENTRIES;
+ airoha_fe_wr(ppe->eth, REG_PPE_RAM_CTRL(ppe2),
+ FIELD_PREP(PPE_SRAM_CTRL_ENTRY_MASK, hash) |
+ PPE_SRAM_CTRL_REQ_MASK);
+ if (read_poll_timeout_atomic(airoha_fe_rr, val,
+ val & PPE_SRAM_CTRL_ACK_MASK,
+ 10, 100, false, eth,
+ REG_PPE_RAM_CTRL(ppe2)))
+ return NULL;
+
+ for (i = 0; i < sizeof(struct airoha_foe_entry) / 4; i++)
+ hwe[i] = airoha_fe_rr(eth,
+ REG_PPE_RAM_ENTRY(ppe2, i));
+ }
+
+ return ppe->foe + hash * sizeof(struct airoha_foe_entry);
+}
+
+static bool airoha_ppe_foe_compare_entry(struct airoha_flow_table_entry *e,
+ struct airoha_foe_entry *hwe)
+{
+ int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, e->data.ib1);
+ int len;
+
+ if ((hwe->ib1 ^ e->data.ib1) & AIROHA_FOE_IB1_BIND_UDP)
+ return false;
+
+ if (type > PPE_PKT_TYPE_IPV4_DSLITE)
+ len = offsetof(struct airoha_foe_entry, ipv6.data);
+ else
+ len = offsetof(struct airoha_foe_entry, ipv4.ib2);
+
+ return !memcmp(&e->data.d, &hwe->d, len - sizeof(hwe->ib1));
+}
+
+static int airoha_ppe_foe_commit_entry(struct airoha_ppe *ppe,
+ struct airoha_foe_entry *e,
+ u32 hash)
+{
+ struct airoha_foe_entry *hwe = ppe->foe + hash * sizeof(*hwe);
+ u32 ts = airoha_ppe_get_timestamp(ppe);
+ struct airoha_eth *eth = ppe->eth;
+ struct airoha_npu *npu;
+ int err = 0;
+
+ memcpy(&hwe->d, &e->d, sizeof(*hwe) - sizeof(hwe->ib1));
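+ /* ensure the entry body is visible before ib1 marks it valid */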
+ wmb();
+
+ e->ib1 &= ~AIROHA_FOE_IB1_BIND_TIMESTAMP;
+ e->ib1 |= FIELD_PREP(AIROHA_FOE_IB1_BIND_TIMESTAMP, ts);
+ hwe->ib1 = e->ib1;
+
+ rcu_read_lock();
+
+ npu = rcu_dereference(eth->npu);
+ if (!npu) {
+ err = -ENODEV;
+ goto unlock;
+ }
+
+ airoha_ppe_foe_flow_stats_update(ppe, npu, hwe, hash);
+
+ if (hash < PPE_SRAM_NUM_ENTRIES) {
+ dma_addr_t addr = ppe->foe_dma + hash * sizeof(*hwe);
+ bool ppe2 = airoha_ppe2_is_enabled(eth) &&
+ hash >= PPE1_SRAM_NUM_ENTRIES;
+
+ err = npu->ops.ppe_foe_commit_entry(npu, addr, sizeof(*hwe),
+ hash, ppe2);
+ }
+unlock:
+ rcu_read_unlock();
+
+ return err;
+}
+
+static void airoha_ppe_foe_remove_flow(struct airoha_ppe *ppe,
+ struct airoha_flow_table_entry *e)
+{
+ lockdep_assert_held(&ppe_lock);
+
+ hlist_del_init(&e->list);
+ if (e->hash != 0xffff) {
+ e->data.ib1 &= ~AIROHA_FOE_IB1_BIND_STATE;
+ e->data.ib1 |= FIELD_PREP(AIROHA_FOE_IB1_BIND_STATE,
+ AIROHA_FOE_STATE_INVALID);
+ airoha_ppe_foe_commit_entry(ppe, &e->data, e->hash);
+ e->hash = 0xffff;
+ }
+ if (e->type == FLOW_TYPE_L2_SUBFLOW) {
+ hlist_del_init(&e->l2_subflow_node);
+ kfree(e);
+ }
+}
+
+static void airoha_ppe_foe_remove_l2_flow(struct airoha_ppe *ppe,
+ struct airoha_flow_table_entry *e)
+{
+ struct hlist_head *head = &e->l2_flows;
+ struct hlist_node *n;
+
+ lockdep_assert_held(&ppe_lock);
+
+ rhashtable_remove_fast(&ppe->l2_flows, &e->l2_node,
+ airoha_l2_flow_table_params);
+ hlist_for_each_entry_safe(e, n, head, l2_subflow_node)
+ airoha_ppe_foe_remove_flow(ppe, e);
+}
+
+static void airoha_ppe_foe_flow_remove_entry(struct airoha_ppe *ppe,
+ struct airoha_flow_table_entry *e)
+{
+ spin_lock_bh(&ppe_lock);
+
+ if (e->type == FLOW_TYPE_L2)
+ airoha_ppe_foe_remove_l2_flow(ppe, e);
+ else
+ airoha_ppe_foe_remove_flow(ppe, e);
+
+ spin_unlock_bh(&ppe_lock);
+}
+
+static int
+airoha_ppe_foe_commit_subflow_entry(struct airoha_ppe *ppe,
+ struct airoha_flow_table_entry *e,
+ u32 hash)
+{
+ u32 mask = AIROHA_FOE_IB1_BIND_PACKET_TYPE | AIROHA_FOE_IB1_BIND_UDP;
+ struct airoha_foe_entry *hwe_p, hwe;
+ struct airoha_flow_table_entry *f;
+ int type;
+
+ hwe_p = airoha_ppe_foe_get_entry(ppe, hash);
+ if (!hwe_p)
+ return -EINVAL;
+
+ f = kzalloc(sizeof(*f), GFP_ATOMIC);
+ if (!f)
+ return -ENOMEM;
+
+ hlist_add_head(&f->l2_subflow_node, &e->l2_flows);
+ f->type = FLOW_TYPE_L2_SUBFLOW;
+ f->hash = hash;
+
+ memcpy(&hwe, hwe_p, sizeof(*hwe_p));
+ hwe.ib1 = (hwe.ib1 & mask) | (e->data.ib1 & ~mask);
+
+ type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe.ib1);
+ if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
+ memcpy(&hwe.ipv6.l2, &e->data.bridge.l2, sizeof(hwe.ipv6.l2));
+ hwe.ipv6.ib2 = e->data.bridge.ib2;
+ /* setting smac_id to 0xf instructs the hw to keep the original
+ * source mac address
+ */
+ hwe.ipv6.l2.src_mac_hi = FIELD_PREP(AIROHA_FOE_MAC_SMAC_ID,
+ 0xf);
+ } else {
+ memcpy(&hwe.bridge.l2, &e->data.bridge.l2,
+ sizeof(hwe.bridge.l2));
+ hwe.bridge.ib2 = e->data.bridge.ib2;
+ if (type == PPE_PKT_TYPE_IPV4_HNAPT)
+ memcpy(&hwe.ipv4.new_tuple, &hwe.ipv4.orig_tuple,
+ sizeof(hwe.ipv4.new_tuple));
+ }
+
+ hwe.bridge.data = e->data.bridge.data;
+ airoha_ppe_foe_commit_entry(ppe, &hwe, hash);
+
+ return 0;
+}
+
+static void airoha_ppe_foe_insert_entry(struct airoha_ppe *ppe,
+ struct sk_buff *skb,
+ u32 hash)
+{
+ struct airoha_flow_table_entry *e;
+ struct airoha_foe_bridge br = {};
+ struct airoha_foe_entry *hwe;
+ bool commit_done = false;
+ struct hlist_node *n;
+ u32 index, state;
+
+ spin_lock_bh(&ppe_lock);
+
+ hwe = airoha_ppe_foe_get_entry(ppe, hash);
+ if (!hwe)
+ goto unlock;
+
+ state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, hwe->ib1);
+ if (state == AIROHA_FOE_STATE_BIND)
+ goto unlock;
+
+ index = airoha_ppe_foe_get_entry_hash(hwe);
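+ /* walk the software flows hashed to this bucket: drop stale L2
+ * subflows and bind the first entry matching the hw tuple
+ */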
+ hlist_for_each_entry_safe(e, n, &ppe->foe_flow[index], list) {
+ if (e->type == FLOW_TYPE_L2_SUBFLOW) {
+ state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, hwe->ib1);
+ if (state != AIROHA_FOE_STATE_BIND) {
+ e->hash = 0xffff;
+ airoha_ppe_foe_remove_flow(ppe, e);
+ }
+ continue;
+ }
+
+ if (commit_done || !airoha_ppe_foe_compare_entry(e, hwe)) {
+ e->hash = 0xffff;
+ continue;
+ }
+
+ airoha_ppe_foe_commit_entry(ppe, &e->data, hash);
+ commit_done = true;
+ e->hash = hash;
+ }
+
+ if (commit_done)
+ goto unlock;
+
+ airoha_ppe_foe_set_bridge_addrs(&br, eth_hdr(skb));
+ e = rhashtable_lookup_fast(&ppe->l2_flows, &br,
+ airoha_l2_flow_table_params);
+ if (e)
+ airoha_ppe_foe_commit_subflow_entry(ppe, e, hash);
+unlock:
+ spin_unlock_bh(&ppe_lock);
+}
+
+static int
+airoha_ppe_foe_l2_flow_commit_entry(struct airoha_ppe *ppe,
+ struct airoha_flow_table_entry *e)
+{
+ struct airoha_flow_table_entry *prev;
+
+ e->type = FLOW_TYPE_L2;
+ prev = rhashtable_lookup_get_insert_fast(&ppe->l2_flows, &e->l2_node,
+ airoha_l2_flow_table_params);
+ if (!prev)
+ return 0;
+
+ if (IS_ERR(prev))
+ return PTR_ERR(prev);
+
+ return rhashtable_replace_fast(&ppe->l2_flows, &prev->l2_node,
+ &e->l2_node,
+ airoha_l2_flow_table_params);
+}
+
+static int airoha_ppe_foe_flow_commit_entry(struct airoha_ppe *ppe,
+ struct airoha_flow_table_entry *e)
+{
+ int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, e->data.ib1);
+ u32 hash;
+
+ if (type == PPE_PKT_TYPE_BRIDGE)
+ return airoha_ppe_foe_l2_flow_commit_entry(ppe, e);
+
+ hash = airoha_ppe_foe_get_entry_hash(&e->data);
+ e->type = FLOW_TYPE_L4;
+ e->hash = 0xffff;
+
+ spin_lock_bh(&ppe_lock);
+ hlist_add_head(&e->list, &ppe->foe_flow[hash]);
+ spin_unlock_bh(&ppe_lock);
+
+ return 0;
+}
+
+static int airoha_ppe_get_entry_idle_time(struct airoha_ppe *ppe, u32 ib1)
+{
+ u32 state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, ib1);
+ u32 ts, ts_mask, now = airoha_ppe_get_timestamp(ppe);
+ int idle;
+
+ if (state == AIROHA_FOE_STATE_BIND) {
+ ts = FIELD_GET(AIROHA_FOE_IB1_BIND_TIMESTAMP, ib1);
+ ts_mask = AIROHA_FOE_IB1_BIND_TIMESTAMP;
+ } else {
+ ts = FIELD_GET(AIROHA_FOE_IB1_UNBIND_TIMESTAMP, ib1);
+ now = FIELD_GET(AIROHA_FOE_IB1_UNBIND_TIMESTAMP, now);
+ ts_mask = AIROHA_FOE_IB1_UNBIND_TIMESTAMP;
+ }
+ idle = now - ts;
+
+ return idle < 0 ? idle + ts_mask + 1 : idle;
+}
+
+static void
+airoha_ppe_foe_flow_l2_entry_update(struct airoha_ppe *ppe,
+ struct airoha_flow_table_entry *e)
+{
+ int min_idle = airoha_ppe_get_entry_idle_time(ppe, e->data.ib1);
+ struct airoha_flow_table_entry *iter;
+ struct hlist_node *n;
+
+ lockdep_assert_held(&ppe_lock);
+
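+ /* prune unbound subflows and propagate the timestamp of the least
+ * idle subflow to the parent L2 flow
+ */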
+ hlist_for_each_entry_safe(iter, n, &e->l2_flows, l2_subflow_node) {
+ struct airoha_foe_entry *hwe;
+ u32 ib1, state;
+ int idle;
+
+ hwe = airoha_ppe_foe_get_entry(ppe, iter->hash);
+ if (!hwe)
+ continue;
+
+ ib1 = READ_ONCE(hwe->ib1);
+ state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, ib1);
+ if (state != AIROHA_FOE_STATE_BIND) {
+ iter->hash = 0xffff;
+ airoha_ppe_foe_remove_flow(ppe, iter);
+ continue;
+ }
+
+ idle = airoha_ppe_get_entry_idle_time(ppe, ib1);
+ if (idle >= min_idle)
+ continue;
+
+ min_idle = idle;
+ e->data.ib1 &= ~AIROHA_FOE_IB1_BIND_TIMESTAMP;
+ e->data.ib1 |= ib1 & AIROHA_FOE_IB1_BIND_TIMESTAMP;
+ }
+}
+
+static void airoha_ppe_foe_flow_entry_update(struct airoha_ppe *ppe,
+ struct airoha_flow_table_entry *e)
+{
+ struct airoha_foe_entry *hwe_p, hwe = {};
+
+ spin_lock_bh(&ppe_lock);
+
+ if (e->type == FLOW_TYPE_L2) {
+ airoha_ppe_foe_flow_l2_entry_update(ppe, e);
+ goto unlock;
+ }
+
+ if (e->hash == 0xffff)
+ goto unlock;
+
+ hwe_p = airoha_ppe_foe_get_entry(ppe, e->hash);
+ if (!hwe_p)
+ goto unlock;
+
+ memcpy(&hwe, hwe_p, sizeof(*hwe_p));
+ if (!airoha_ppe_foe_compare_entry(e, &hwe)) {
+ e->hash = 0xffff;
+ goto unlock;
+ }
+
+ e->data.ib1 = hwe.ib1;
+unlock:
+ spin_unlock_bh(&ppe_lock);
+}
+
+static int airoha_ppe_entry_idle_time(struct airoha_ppe *ppe,
+ struct airoha_flow_table_entry *e)
+{
+ airoha_ppe_foe_flow_entry_update(ppe, e);
+
+ return airoha_ppe_get_entry_idle_time(ppe, e->data.ib1);
+}
+
+static int airoha_ppe_flow_offload_replace(struct airoha_gdm_port *port,
+ struct flow_cls_offload *f)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ struct airoha_eth *eth = port->qdma->eth;
+ struct airoha_flow_table_entry *e;
+ struct airoha_flow_data data = {};
+ struct net_device *odev = NULL;
+ struct flow_action_entry *act;
+ struct airoha_foe_entry hwe;
+ int err, i, offload_type;
+ u16 addr_type = 0;
+ u8 l4proto = 0;
+
+ if (rhashtable_lookup(&eth->flow_table, &f->cookie,
+ airoha_flow_table_params))
+ return -EEXIST;
+
+ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
+ return -EOPNOTSUPP;
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_match_control match;
+
+ flow_rule_match_control(rule, &match);
+ addr_type = match.key->addr_type;
+ if (flow_rule_has_control_flags(match.mask->flags,
+ f->common.extack))
+ return -EOPNOTSUPP;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
+
+ flow_rule_match_basic(rule, &match);
+ l4proto = match.key->ip_proto;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ switch (addr_type) {
+ case 0:
+ offload_type = PPE_PKT_TYPE_BRIDGE;
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_match_eth_addrs match;
+
+ flow_rule_match_eth_addrs(rule, &match);
+ memcpy(data.eth.h_dest, match.key->dst, ETH_ALEN);
+ memcpy(data.eth.h_source, match.key->src, ETH_ALEN);
+ } else {
+ return -EOPNOTSUPP;
+ }
+ break;
+ case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
+ offload_type = PPE_PKT_TYPE_IPV4_HNAPT;
+ break;
+ case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
+ offload_type = PPE_PKT_TYPE_IPV6_ROUTE_5T;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ flow_action_for_each(i, act, &rule->action) {
+ switch (act->id) {
+ case FLOW_ACTION_MANGLE:
+ if (offload_type == PPE_PKT_TYPE_BRIDGE)
+ return -EOPNOTSUPP;
+
+ if (act->mangle.htype == FLOW_ACT_MANGLE_HDR_TYPE_ETH)
+ airoha_ppe_flow_mangle_eth(act, &data.eth);
+ break;
+ case FLOW_ACTION_REDIRECT:
+ odev = act->dev;
+ break;
+ case FLOW_ACTION_CSUM:
+ break;
+ case FLOW_ACTION_VLAN_PUSH:
+ if (data.vlan.num == 2 ||
+ act->vlan.proto != htons(ETH_P_8021Q))
+ return -EOPNOTSUPP;
+
+ data.vlan.hdr[data.vlan.num].id = act->vlan.vid;
+ data.vlan.hdr[data.vlan.num].proto = act->vlan.proto;
+ data.vlan.num++;
+ break;
+ case FLOW_ACTION_VLAN_POP:
+ break;
+ case FLOW_ACTION_PPPOE_PUSH:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+
+ if (!is_valid_ether_addr(data.eth.h_source) ||
+ !is_valid_ether_addr(data.eth.h_dest))
+ return -EINVAL;
+
+ err = airoha_ppe_foe_entry_prepare(eth, &hwe, odev, offload_type,
+ &data, l4proto);
+ if (err)
+ return err;
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
+ struct flow_match_ports ports;
+
+ if (offload_type == PPE_PKT_TYPE_BRIDGE)
+ return -EOPNOTSUPP;
+
+ flow_rule_match_ports(rule, &ports);
+ data.src_port = ports.key->src;
+ data.dst_port = ports.key->dst;
+ } else if (offload_type != PPE_PKT_TYPE_BRIDGE) {
+ return -EOPNOTSUPP;
+ }
+
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+ struct flow_match_ipv4_addrs addrs;
+
+ flow_rule_match_ipv4_addrs(rule, &addrs);
+ data.v4.src_addr = addrs.key->src;
+ data.v4.dst_addr = addrs.key->dst;
+ airoha_ppe_foe_entry_set_ipv4_tuple(&hwe, &data, false);
+ }
+
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+ struct flow_match_ipv6_addrs addrs;
+
+ flow_rule_match_ipv6_addrs(rule, &addrs);
+
+ data.v6.src_addr = addrs.key->src;
+ data.v6.dst_addr = addrs.key->dst;
+ airoha_ppe_foe_entry_set_ipv6_tuple(&hwe, &data);
+ }
+
+ flow_action_for_each(i, act, &rule->action) {
+ if (act->id != FLOW_ACTION_MANGLE)
+ continue;
+
+ if (offload_type == PPE_PKT_TYPE_BRIDGE)
+ return -EOPNOTSUPP;
+
+ switch (act->mangle.htype) {
+ case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
+ case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
+ err = airoha_ppe_flow_mangle_ports(act, &data);
+ break;
+ case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
+ err = airoha_ppe_flow_mangle_ipv4(act, &data);
+ break;
+ case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
+ /* handled earlier */
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (err)
+ return err;
+ }
+
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+ err = airoha_ppe_foe_entry_set_ipv4_tuple(&hwe, &data, true);
+ if (err)
+ return err;
+ }
+
+ e = kzalloc(sizeof(*e), GFP_KERNEL);
+ if (!e)
+ return -ENOMEM;
+
+ e->cookie = f->cookie;
+ memcpy(&e->data, &hwe, sizeof(e->data));
+
+ err = airoha_ppe_foe_flow_commit_entry(eth->ppe, e);
+ if (err)
+ goto free_entry;
+
+ err = rhashtable_insert_fast(&eth->flow_table, &e->node,
+ airoha_flow_table_params);
+ if (err < 0)
+ goto remove_foe_entry;
+
+ return 0;
+
+remove_foe_entry:
+ airoha_ppe_foe_flow_remove_entry(eth->ppe, e);
+free_entry:
+ kfree(e);
+
+ return err;
+}
+
+static int airoha_ppe_flow_offload_destroy(struct airoha_gdm_port *port,
+ struct flow_cls_offload *f)
+{
+ struct airoha_eth *eth = port->qdma->eth;
+ struct airoha_flow_table_entry *e;
+
+ e = rhashtable_lookup(&eth->flow_table, &f->cookie,
+ airoha_flow_table_params);
+ if (!e)
+ return -ENOENT;
+
+ airoha_ppe_foe_flow_remove_entry(eth->ppe, e);
+ rhashtable_remove_fast(&eth->flow_table, &e->node,
+ airoha_flow_table_params);
+ kfree(e);
+
+ return 0;
+}
+
+void airoha_ppe_foe_entry_get_stats(struct airoha_ppe *ppe, u32 hash,
+ struct airoha_foe_stats64 *stats)
+{
+ u32 index = airoha_ppe_foe_get_flow_stats_index(ppe, hash);
+ struct airoha_eth *eth = ppe->eth;
+ struct airoha_npu *npu;
+
+ if (index >= PPE_STATS_NUM_ENTRIES)
+ return;
+
+ rcu_read_lock();
+
+ npu = rcu_dereference(eth->npu);
+ if (npu) {
+ u64 packets = ppe->foe_stats[index].packets;
+ u64 bytes = ppe->foe_stats[index].bytes;
+ struct airoha_foe_stats npu_stats;
+
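+ /* combine the lower 32 bits from the NPU stats memory with the
+ * upper 32 bits kept in the foe_stats DMA buffer
+ */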
+ memcpy_fromio(&npu_stats, &npu->stats[index],
+ sizeof(*npu->stats));
+ stats->packets = packets << 32 | npu_stats.packets;
+ stats->bytes = bytes << 32 | npu_stats.bytes;
+ }
+
+ rcu_read_unlock();
+}
+
+static int airoha_ppe_flow_offload_stats(struct airoha_gdm_port *port,
+ struct flow_cls_offload *f)
+{
+ struct airoha_eth *eth = port->qdma->eth;
+ struct airoha_flow_table_entry *e;
+ u32 idle;
+
+ e = rhashtable_lookup(&eth->flow_table, &f->cookie,
+ airoha_flow_table_params);
+ if (!e)
+ return -ENOENT;
+
+ idle = airoha_ppe_entry_idle_time(eth->ppe, e);
+ f->stats.lastused = jiffies - idle * HZ;
+
+ if (e->hash != 0xffff) {
+ struct airoha_foe_stats64 stats = {};
+
+ airoha_ppe_foe_entry_get_stats(eth->ppe, e->hash, &stats);
+ f->stats.pkts += (stats.packets - e->stats.packets);
+ f->stats.bytes += (stats.bytes - e->stats.bytes);
+ e->stats = stats;
+ }
+
+ return 0;
+}
+
+static int airoha_ppe_flow_offload_cmd(struct airoha_gdm_port *port,
+ struct flow_cls_offload *f)
+{
+ switch (f->command) {
+ case FLOW_CLS_REPLACE:
+ return airoha_ppe_flow_offload_replace(port, f);
+ case FLOW_CLS_DESTROY:
+ return airoha_ppe_flow_offload_destroy(port, f);
+ case FLOW_CLS_STATS:
+ return airoha_ppe_flow_offload_stats(port, f);
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int airoha_ppe_flush_sram_entries(struct airoha_ppe *ppe,
+ struct airoha_npu *npu)
+{
+ int i, sram_num_entries = PPE_SRAM_NUM_ENTRIES;
+ struct airoha_foe_entry *hwe = ppe->foe;
+
+ if (airoha_ppe2_is_enabled(ppe->eth))
+ sram_num_entries = sram_num_entries / 2;
+
+ for (i = 0; i < sram_num_entries; i++)
+ memset(&hwe[i], 0, sizeof(*hwe));
+
+ return npu->ops.ppe_flush_sram_entries(npu, ppe->foe_dma,
+ PPE_SRAM_NUM_ENTRIES);
+}
+
+static struct airoha_npu *airoha_ppe_npu_get(struct airoha_eth *eth)
+{
+ struct airoha_npu *npu = airoha_npu_get(eth->dev,
+ &eth->ppe->foe_stats_dma);
+
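+ /* the NPU module may not be loaded yet: try to load it and retry
+ * the lookup once
+ */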
+ if (IS_ERR(npu)) {
+ request_module("airoha-npu");
+ npu = airoha_npu_get(eth->dev, &eth->ppe->foe_stats_dma);
+ }
+
+ return npu;
+}
+
+static int airoha_ppe_offload_setup(struct airoha_eth *eth)
+{
+ struct airoha_npu *npu = airoha_ppe_npu_get(eth);
+ int err;
+
+ if (IS_ERR(npu))
+ return PTR_ERR(npu);
+
+ err = npu->ops.ppe_init(npu);
+ if (err)
+ goto error_npu_put;
+
+ airoha_ppe_hw_init(eth->ppe);
+ err = airoha_ppe_flush_sram_entries(eth->ppe, npu);
+ if (err)
+ goto error_npu_put;
+
+ airoha_ppe_foe_flow_stats_reset(eth->ppe, npu);
+
+ rcu_assign_pointer(eth->npu, npu);
+ synchronize_rcu();
+
+ return 0;
+
+error_npu_put:
+ airoha_npu_put(npu);
+
+ return err;
+}
+
+int airoha_ppe_setup_tc_block_cb(struct net_device *dev, void *type_data)
+{
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ struct flow_cls_offload *cls = type_data;
+ struct airoha_eth *eth = port->qdma->eth;
+ int err = 0;
+
+ mutex_lock(&flow_offload_mutex);
+
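+ /* lazily set up the NPU offload path on the first flow request */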
+ if (!eth->npu)
+ err = airoha_ppe_offload_setup(eth);
+ if (!err)
+ err = airoha_ppe_flow_offload_cmd(port, cls);
+
+ mutex_unlock(&flow_offload_mutex);
+
+ return err;
+}
+
+void airoha_ppe_check_skb(struct airoha_ppe *ppe, struct sk_buff *skb,
+ u16 hash)
+{
+ u16 now, diff;
+
+ if (hash > PPE_HASH_MASK)
+ return;
+
+ now = (u16)jiffies;
+ diff = now - ppe->foe_check_time[hash];
+ if (diff < HZ / 10)
+ return;
+
+ ppe->foe_check_time[hash] = now;
+ airoha_ppe_foe_insert_entry(ppe, skb, hash);
+}
+
+void airoha_ppe_init_upd_mem(struct airoha_gdm_port *port)
+{
+ struct airoha_eth *eth = port->qdma->eth;
+ struct net_device *dev = port->dev;
+ const u8 *addr = dev->dev_addr;
+ u32 val;
+
+ val = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) | addr[5];
+ airoha_fe_wr(eth, REG_UPDMEM_DATA(0), val);
+ airoha_fe_wr(eth, REG_UPDMEM_CTRL(0),
+ FIELD_PREP(PPE_UPDMEM_ADDR_MASK, port->id) |
+ PPE_UPDMEM_WR_MASK | PPE_UPDMEM_REQ_MASK);
+
+ val = (addr[0] << 8) | addr[1];
+ airoha_fe_wr(eth, REG_UPDMEM_DATA(0), val);
+ airoha_fe_wr(eth, REG_UPDMEM_CTRL(0),
+ FIELD_PREP(PPE_UPDMEM_ADDR_MASK, port->id) |
+ FIELD_PREP(PPE_UPDMEM_OFFSET_MASK, 1) |
+ PPE_UPDMEM_WR_MASK | PPE_UPDMEM_REQ_MASK);
+}
+
+int airoha_ppe_init(struct airoha_eth *eth)
+{
+ struct airoha_ppe *ppe;
+ int foe_size, err;
+
+ ppe = devm_kzalloc(eth->dev, sizeof(*ppe), GFP_KERNEL);
+ if (!ppe)
+ return -ENOMEM;
+
+ foe_size = PPE_NUM_ENTRIES * sizeof(struct airoha_foe_entry);
+ ppe->foe = dmam_alloc_coherent(eth->dev, foe_size, &ppe->foe_dma,
+ GFP_KERNEL);
+ if (!ppe->foe)
+ return -ENOMEM;
+
+ ppe->eth = eth;
+ eth->ppe = ppe;
+
+ ppe->foe_flow = devm_kzalloc(eth->dev,
+ PPE_NUM_ENTRIES * sizeof(*ppe->foe_flow),
+ GFP_KERNEL);
+ if (!ppe->foe_flow)
+ return -ENOMEM;
+
+ foe_size = PPE_STATS_NUM_ENTRIES * sizeof(*ppe->foe_stats);
+ if (foe_size) {
+ ppe->foe_stats = dmam_alloc_coherent(eth->dev, foe_size,
+ &ppe->foe_stats_dma,
+ GFP_KERNEL);
+ if (!ppe->foe_stats)
+ return -ENOMEM;
+ }
+
+ err = rhashtable_init(&eth->flow_table, &airoha_flow_table_params);
+ if (err)
+ return err;
+
+ err = rhashtable_init(&ppe->l2_flows, &airoha_l2_flow_table_params);
+ if (err)
+ goto error_flow_table_destroy;
+
+ err = airoha_ppe_debugfs_init(ppe);
+ if (err)
+ goto error_l2_flow_table_destroy;
+
+ return 0;
+
+error_l2_flow_table_destroy:
+ rhashtable_destroy(&ppe->l2_flows);
+error_flow_table_destroy:
+ rhashtable_destroy(&eth->flow_table);
+
+ return err;
+}
+
+void airoha_ppe_deinit(struct airoha_eth *eth)
+{
+ struct airoha_npu *npu;
+
+ rcu_read_lock();
+ npu = rcu_dereference(eth->npu);
+ if (npu) {
+ npu->ops.ppe_deinit(npu);
+ airoha_npu_put(npu);
+ }
+ rcu_read_unlock();
+
+ rhashtable_destroy(&eth->ppe->l2_flows);
+ rhashtable_destroy(&eth->flow_table);
+ debugfs_remove(eth->ppe->debugfs_dir);
+}
diff --git a/drivers/net/ethernet/airoha/airoha_ppe_debugfs.c b/drivers/net/ethernet/airoha/airoha_ppe_debugfs.c
new file mode 100644
index 000000000000..05a756233f6a
--- /dev/null
+++ b/drivers/net/ethernet/airoha/airoha_ppe_debugfs.c
@@ -0,0 +1,186 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 AIROHA Inc
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#include "airoha_eth.h"
+
+static void airoha_debugfs_ppe_print_tuple(struct seq_file *m,
+ void *src_addr, void *dest_addr,
+ u16 *src_port, u16 *dest_port,
+ bool ipv6)
+{
+ __be32 n_addr[IPV6_ADDR_WORDS];
+
+ if (ipv6) {
+ ipv6_addr_cpu_to_be32(n_addr, src_addr);
+ seq_printf(m, "%pI6", n_addr);
+ } else {
+ seq_printf(m, "%pI4h", src_addr);
+ }
+ if (src_port)
+ seq_printf(m, ":%d", *src_port);
+
+ seq_puts(m, "->");
+
+ if (ipv6) {
+ ipv6_addr_cpu_to_be32(n_addr, dest_addr);
+ seq_printf(m, "%pI6", n_addr);
+ } else {
+ seq_printf(m, "%pI4h", dest_addr);
+ }
+ if (dest_port)
+ seq_printf(m, ":%d", *dest_port);
+}
+
+static int airoha_ppe_debugfs_foe_show(struct seq_file *m, void *private,
+ bool bind)
+{
+ static const char *const ppe_type_str[] = {
+ [PPE_PKT_TYPE_IPV4_HNAPT] = "IPv4 5T",
+ [PPE_PKT_TYPE_IPV4_ROUTE] = "IPv4 3T",
+ [PPE_PKT_TYPE_BRIDGE] = "L2B",
+ [PPE_PKT_TYPE_IPV4_DSLITE] = "DS-LITE",
+ [PPE_PKT_TYPE_IPV6_ROUTE_3T] = "IPv6 3T",
+ [PPE_PKT_TYPE_IPV6_ROUTE_5T] = "IPv6 5T",
+ [PPE_PKT_TYPE_IPV6_6RD] = "6RD",
+ };
+ static const char *const ppe_state_str[] = {
+ [AIROHA_FOE_STATE_INVALID] = "INV",
+ [AIROHA_FOE_STATE_UNBIND] = "UNB",
+ [AIROHA_FOE_STATE_BIND] = "BND",
+ [AIROHA_FOE_STATE_FIN] = "FIN",
+ };
+ struct airoha_ppe *ppe = m->private;
+ int i;
+
+ for (i = 0; i < PPE_NUM_ENTRIES; i++) {
+ const char *state_str, *type_str = "UNKNOWN";
+ void *src_addr = NULL, *dest_addr = NULL;
+ u16 *src_port = NULL, *dest_port = NULL;
+ struct airoha_foe_mac_info_common *l2;
+ unsigned char h_source[ETH_ALEN] = {};
+ struct airoha_foe_stats64 stats = {};
+ unsigned char h_dest[ETH_ALEN];
+ struct airoha_foe_entry *hwe;
+ u32 type, state, ib2, data;
+ bool ipv6 = false;
+
+ hwe = airoha_ppe_foe_get_entry(ppe, i);
+ if (!hwe)
+ continue;
+
+ state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, hwe->ib1);
+ if (!state)
+ continue;
+
+ if (bind && state != AIROHA_FOE_STATE_BIND)
+ continue;
+
+ state_str = ppe_state_str[state % ARRAY_SIZE(ppe_state_str)];
+ type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
+ if (type < ARRAY_SIZE(ppe_type_str) && ppe_type_str[type])
+ type_str = ppe_type_str[type];
+
+ seq_printf(m, "%05x %s %7s", i, state_str, type_str);
+
+ switch (type) {
+ case PPE_PKT_TYPE_IPV4_HNAPT:
+ case PPE_PKT_TYPE_IPV4_DSLITE:
+ src_port = &hwe->ipv4.orig_tuple.src_port;
+ dest_port = &hwe->ipv4.orig_tuple.dest_port;
+ fallthrough;
+ case PPE_PKT_TYPE_IPV4_ROUTE:
+ src_addr = &hwe->ipv4.orig_tuple.src_ip;
+ dest_addr = &hwe->ipv4.orig_tuple.dest_ip;
+ break;
+ case PPE_PKT_TYPE_IPV6_ROUTE_5T:
+ src_port = &hwe->ipv6.src_port;
+ dest_port = &hwe->ipv6.dest_port;
+ fallthrough;
+ case PPE_PKT_TYPE_IPV6_ROUTE_3T:
+ case PPE_PKT_TYPE_IPV6_6RD:
+ src_addr = &hwe->ipv6.src_ip;
+ dest_addr = &hwe->ipv6.dest_ip;
+ ipv6 = true;
+ break;
+ default:
+ break;
+ }
+
+ if (src_addr && dest_addr) {
+ seq_puts(m, " orig=");
+ airoha_debugfs_ppe_print_tuple(m, src_addr, dest_addr,
+ src_port, dest_port, ipv6);
+ }
+
+ switch (type) {
+ case PPE_PKT_TYPE_IPV4_HNAPT:
+ case PPE_PKT_TYPE_IPV4_DSLITE:
+ src_port = &hwe->ipv4.new_tuple.src_port;
+ dest_port = &hwe->ipv4.new_tuple.dest_port;
+ fallthrough;
+ case PPE_PKT_TYPE_IPV4_ROUTE:
+ src_addr = &hwe->ipv4.new_tuple.src_ip;
+ dest_addr = &hwe->ipv4.new_tuple.dest_ip;
+ seq_puts(m, " new=");
+ airoha_debugfs_ppe_print_tuple(m, src_addr, dest_addr,
+ src_port, dest_port,
+ ipv6);
+ break;
+ default:
+ break;
+ }
+
+ if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
+ data = hwe->ipv6.data;
+ ib2 = hwe->ipv6.ib2;
+ l2 = &hwe->ipv6.l2;
+ } else {
+ data = hwe->ipv4.data;
+ ib2 = hwe->ipv4.ib2;
+ l2 = &hwe->ipv4.l2.common;
+ *((__be16 *)&h_source[4]) =
+ cpu_to_be16(hwe->ipv4.l2.src_mac_lo);
+ }
+
+ airoha_ppe_foe_entry_get_stats(ppe, i, &stats);
+
+ *((__be32 *)h_dest) = cpu_to_be32(l2->dest_mac_hi);
+ *((__be16 *)&h_dest[4]) = cpu_to_be16(l2->dest_mac_lo);
+ *((__be32 *)h_source) = cpu_to_be32(l2->src_mac_hi);
+
+ seq_printf(m, " eth=%pM->%pM etype=%04x data=%08x"
+ " vlan=%d,%d ib1=%08x ib2=%08x"
+ " packets=%llu bytes=%llu\n",
+ h_source, h_dest, l2->etype, data,
+ l2->vlan1, l2->vlan2, hwe->ib1, ib2,
+ stats.packets, stats.bytes);
+ }
+
+ return 0;
+}
+
+static int airoha_ppe_debugfs_foe_all_show(struct seq_file *m, void *private)
+{
+ return airoha_ppe_debugfs_foe_show(m, private, false);
+}
+DEFINE_SHOW_ATTRIBUTE(airoha_ppe_debugfs_foe_all);
+
+static int airoha_ppe_debugfs_foe_bind_show(struct seq_file *m, void *private)
+{
+ return airoha_ppe_debugfs_foe_show(m, private, true);
+}
+DEFINE_SHOW_ATTRIBUTE(airoha_ppe_debugfs_foe_bind);
+
+int airoha_ppe_debugfs_init(struct airoha_ppe *ppe)
+{
+ ppe->debugfs_dir = debugfs_create_dir("ppe", NULL);
+ debugfs_create_file("entries", 0444, ppe->debugfs_dir, ppe,
+ &airoha_ppe_debugfs_foe_all_fops);
+ debugfs_create_file("bind", 0444, ppe->debugfs_dir, ppe,
+ &airoha_ppe_debugfs_foe_bind_fops);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/airoha/airoha_regs.h b/drivers/net/ethernet/airoha/airoha_regs.h
new file mode 100644
index 000000000000..150c85995cc1
--- /dev/null
+++ b/drivers/net/ethernet/airoha/airoha_regs.h
@@ -0,0 +1,933 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024 AIROHA Inc
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#ifndef AIROHA_REGS_H
+#define AIROHA_REGS_H
+
+#include <linux/types.h>
+
+/* FE */
+#define PSE_BASE 0x0100
+#define CSR_IFC_BASE 0x0200
+#define CDM1_BASE 0x0400
+#define GDM1_BASE 0x0500
+#define PPE1_BASE 0x0c00
+#define PPE2_BASE 0x1c00
+
+#define CDM2_BASE 0x1400
+#define GDM2_BASE 0x1500
+
+#define GDM3_BASE 0x1100
+#define GDM4_BASE 0x2500
+
+#define GDM_BASE(_n) \
+ ((_n) == 4 ? GDM4_BASE : \
+ (_n) == 3 ? GDM3_BASE : \
+ (_n) == 2 ? GDM2_BASE : GDM1_BASE)
+
+#define REG_FE_DMA_GLO_CFG 0x0000
+#define FE_DMA_GLO_L2_SPACE_MASK GENMASK(7, 4)
+#define FE_DMA_GLO_PG_SZ_MASK BIT(3)
+
+#define REG_FE_RST_GLO_CFG 0x0004
+#define FE_RST_GDM4_MBI_ARB_MASK BIT(3)
+#define FE_RST_GDM3_MBI_ARB_MASK BIT(2)
+#define FE_RST_CORE_MASK BIT(0)
+
+#define REG_FE_FOE_TS 0x0010
+
+#define REG_FE_WAN_PORT 0x0024
+#define WAN1_EN_MASK BIT(16)
+#define WAN1_MASK GENMASK(12, 8)
+#define WAN0_MASK GENMASK(4, 0)
+
+#define REG_FE_WAN_MAC_H 0x0030
+#define REG_FE_LAN_MAC_H 0x0040
+
+#define REG_FE_MAC_LMIN(_n) ((_n) + 0x04)
+#define REG_FE_MAC_LMAX(_n) ((_n) + 0x08)
+
+#define REG_FE_CDM1_OQ_MAP0 0x0050
+#define REG_FE_CDM1_OQ_MAP1 0x0054
+#define REG_FE_CDM1_OQ_MAP2 0x0058
+#define REG_FE_CDM1_OQ_MAP3 0x005c
+
+#define REG_FE_PCE_CFG 0x0070
+#define PCE_DPI_EN_MASK BIT(2)
+#define PCE_KA_EN_MASK BIT(1)
+#define PCE_MC_EN_MASK BIT(0)
+
+#define REG_FE_PSE_QUEUE_CFG_WR 0x0080
+#define PSE_CFG_PORT_ID_MASK GENMASK(27, 24)
+#define PSE_CFG_QUEUE_ID_MASK GENMASK(20, 16)
+#define PSE_CFG_WR_EN_MASK BIT(8)
+#define PSE_CFG_OQRSV_SEL_MASK BIT(0)
+
+#define REG_FE_PSE_QUEUE_CFG_VAL 0x0084
+#define PSE_CFG_OQ_RSV_MASK GENMASK(13, 0)
+
+#define PSE_FQ_CFG 0x008c
+#define PSE_FQ_LIMIT_MASK GENMASK(14, 0)
+
+#define REG_FE_PSE_BUF_SET 0x0090
+#define PSE_SHARE_USED_LTHD_MASK GENMASK(31, 16)
+#define PSE_ALLRSV_MASK GENMASK(14, 0)
+
+#define REG_PSE_SHARE_USED_THD 0x0094
+#define PSE_SHARE_USED_MTHD_MASK GENMASK(31, 16)
+#define PSE_SHARE_USED_HTHD_MASK GENMASK(15, 0)
+
+#define REG_GDM_MISC_CFG 0x0148
+#define GDM2_RDM_ACK_WAIT_PREF_MASK BIT(9)
+#define GDM2_CHN_VLD_MODE_MASK BIT(5)
+
+#define REG_FE_CSR_IFC_CFG CSR_IFC_BASE
+#define FE_IFC_EN_MASK BIT(0)
+
+#define REG_FE_VIP_PORT_EN 0x01f0
+#define REG_FE_IFC_PORT_EN 0x01f4
+
+#define REG_PSE_IQ_REV1 (PSE_BASE + 0x08)
+#define PSE_IQ_RES1_P2_MASK GENMASK(23, 16)
+
+#define REG_PSE_IQ_REV2 (PSE_BASE + 0x0c)
+#define PSE_IQ_RES2_P5_MASK GENMASK(15, 8)
+#define PSE_IQ_RES2_P4_MASK GENMASK(7, 0)
+
+#define REG_FE_VIP_EN(_n) (0x0300 + ((_n) << 3))
+#define PATN_FCPU_EN_MASK BIT(7)
+#define PATN_SWP_EN_MASK BIT(6)
+#define PATN_DP_EN_MASK BIT(5)
+#define PATN_SP_EN_MASK BIT(4)
+#define PATN_TYPE_MASK GENMASK(3, 1)
+#define PATN_EN_MASK BIT(0)
+
+#define REG_FE_VIP_PATN(_n) (0x0304 + ((_n) << 3))
+#define PATN_DP_MASK GENMASK(31, 16)
+#define PATN_SP_MASK GENMASK(15, 0)
+
+#define REG_CDM1_VLAN_CTRL CDM1_BASE
+#define CDM1_VLAN_MASK GENMASK(31, 16)
+
+#define REG_CDM1_FWD_CFG (CDM1_BASE + 0x08)
+#define CDM1_VIP_QSEL_MASK GENMASK(24, 20)
+
+#define REG_CDM1_CRSN_QSEL(_n) (CDM1_BASE + 0x10 + ((_n) << 2))
+#define CDM1_CRSN_QSEL_REASON_MASK(_n) \
+ GENMASK(4 + (((_n) % 4) << 3), (((_n) % 4) << 3))
+
+#define REG_CDM2_FWD_CFG (CDM2_BASE + 0x08)
+#define CDM2_OAM_QSEL_MASK GENMASK(31, 27)
+#define CDM2_VIP_QSEL_MASK GENMASK(24, 20)
+
+#define REG_CDM2_CRSN_QSEL(_n) (CDM2_BASE + 0x10 + ((_n) << 2))
+#define CDM2_CRSN_QSEL_REASON_MASK(_n) \
+ GENMASK(4 + (((_n) % 4) << 3), (((_n) % 4) << 3))
+
+#define REG_GDM_FWD_CFG(_n) GDM_BASE(_n)
+#define GDM_DROP_CRC_ERR BIT(23)
+#define GDM_IP4_CKSUM BIT(22)
+#define GDM_TCP_CKSUM BIT(21)
+#define GDM_UDP_CKSUM BIT(20)
+#define GDM_STRIP_CRC BIT(16)
+#define GDM_UCFQ_MASK GENMASK(15, 12)
+#define GDM_BCFQ_MASK GENMASK(11, 8)
+#define GDM_MCFQ_MASK GENMASK(7, 4)
+#define GDM_OCFQ_MASK GENMASK(3, 0)
+
+#define REG_GDM_INGRESS_CFG(_n) (GDM_BASE(_n) + 0x10)
+#define GDM_INGRESS_FC_EN_MASK BIT(1)
+#define GDM_STAG_EN_MASK BIT(0)
+
+#define REG_GDM_LEN_CFG(_n) (GDM_BASE(_n) + 0x14)
+#define GDM_SHORT_LEN_MASK GENMASK(13, 0)
+#define GDM_LONG_LEN_MASK GENMASK(29, 16)
+
+#define REG_GDM_LPBK_CFG(_n) (GDM_BASE(_n) + 0x1c)
+#define LPBK_GAP_MASK GENMASK(31, 24)
+#define LPBK_LEN_MASK GENMASK(23, 10)
+#define LPBK_CHAN_MASK GENMASK(8, 4)
+#define LPBK_MODE_MASK GENMASK(3, 1)
+#define LPBK_EN_MASK BIT(0)
+
+#define REG_GDM_TXCHN_EN(_n) (GDM_BASE(_n) + 0x24)
+#define REG_GDM_RXCHN_EN(_n) (GDM_BASE(_n) + 0x28)
+
+#define REG_FE_CPORT_CFG (GDM1_BASE + 0x40)
+#define FE_CPORT_PAD BIT(26)
+#define FE_CPORT_PORT_XFC_MASK BIT(25)
+#define FE_CPORT_QUEUE_XFC_MASK BIT(24)
+
+#define REG_FE_GDM_MIB_CLEAR(_n) (GDM_BASE(_n) + 0xf0)
+#define FE_GDM_MIB_RX_CLEAR_MASK BIT(1)
+#define FE_GDM_MIB_TX_CLEAR_MASK BIT(0)
+
+#define REG_FE_GDM1_MIB_CFG (GDM1_BASE + 0xf4)
+#define FE_STRICT_RFC2819_MODE_MASK BIT(31)
+#define FE_GDM1_TX_MIB_SPLIT_EN_MASK BIT(17)
+#define FE_GDM1_RX_MIB_SPLIT_EN_MASK BIT(16)
+#define FE_TX_MIB_ID_MASK GENMASK(15, 8)
+#define FE_RX_MIB_ID_MASK GENMASK(7, 0)
+
+#define REG_FE_GDM_TX_OK_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x104)
+#define REG_FE_GDM_TX_OK_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x10c)
+#define REG_FE_GDM_TX_ETH_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x110)
+#define REG_FE_GDM_TX_ETH_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x114)
+#define REG_FE_GDM_TX_ETH_DROP_CNT(_n) (GDM_BASE(_n) + 0x118)
+#define REG_FE_GDM_TX_ETH_BC_CNT(_n) (GDM_BASE(_n) + 0x11c)
+#define REG_FE_GDM_TX_ETH_MC_CNT(_n) (GDM_BASE(_n) + 0x120)
+#define REG_FE_GDM_TX_ETH_RUNT_CNT(_n) (GDM_BASE(_n) + 0x124)
+#define REG_FE_GDM_TX_ETH_LONG_CNT(_n) (GDM_BASE(_n) + 0x128)
+#define REG_FE_GDM_TX_ETH_E64_CNT_L(_n) (GDM_BASE(_n) + 0x12c)
+#define REG_FE_GDM_TX_ETH_L64_CNT_L(_n) (GDM_BASE(_n) + 0x130)
+#define REG_FE_GDM_TX_ETH_L127_CNT_L(_n) (GDM_BASE(_n) + 0x134)
+#define REG_FE_GDM_TX_ETH_L255_CNT_L(_n) (GDM_BASE(_n) + 0x138)
+#define REG_FE_GDM_TX_ETH_L511_CNT_L(_n) (GDM_BASE(_n) + 0x13c)
+#define REG_FE_GDM_TX_ETH_L1023_CNT_L(_n) (GDM_BASE(_n) + 0x140)
+
+#define REG_FE_GDM_RX_OK_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x148)
+#define REG_FE_GDM_RX_FC_DROP_CNT(_n) (GDM_BASE(_n) + 0x14c)
+#define REG_FE_GDM_RX_RC_DROP_CNT(_n) (GDM_BASE(_n) + 0x150)
+#define REG_FE_GDM_RX_OVERFLOW_DROP_CNT(_n) (GDM_BASE(_n) + 0x154)
+#define REG_FE_GDM_RX_ERROR_DROP_CNT(_n) (GDM_BASE(_n) + 0x158)
+#define REG_FE_GDM_RX_OK_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x15c)
+#define REG_FE_GDM_RX_ETH_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x160)
+#define REG_FE_GDM_RX_ETH_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x164)
+#define REG_FE_GDM_RX_ETH_DROP_CNT(_n) (GDM_BASE(_n) + 0x168)
+#define REG_FE_GDM_RX_ETH_BC_CNT(_n) (GDM_BASE(_n) + 0x16c)
+#define REG_FE_GDM_RX_ETH_MC_CNT(_n) (GDM_BASE(_n) + 0x170)
+#define REG_FE_GDM_RX_ETH_CRC_ERR_CNT(_n) (GDM_BASE(_n) + 0x174)
+#define REG_FE_GDM_RX_ETH_FRAG_CNT(_n) (GDM_BASE(_n) + 0x178)
+#define REG_FE_GDM_RX_ETH_JABBER_CNT(_n) (GDM_BASE(_n) + 0x17c)
+#define REG_FE_GDM_RX_ETH_RUNT_CNT(_n) (GDM_BASE(_n) + 0x180)
+#define REG_FE_GDM_RX_ETH_LONG_CNT(_n) (GDM_BASE(_n) + 0x184)
+#define REG_FE_GDM_RX_ETH_E64_CNT_L(_n) (GDM_BASE(_n) + 0x188)
+#define REG_FE_GDM_RX_ETH_L64_CNT_L(_n) (GDM_BASE(_n) + 0x18c)
+#define REG_FE_GDM_RX_ETH_L127_CNT_L(_n) (GDM_BASE(_n) + 0x190)
+#define REG_FE_GDM_RX_ETH_L255_CNT_L(_n) (GDM_BASE(_n) + 0x194)
+#define REG_FE_GDM_RX_ETH_L511_CNT_L(_n) (GDM_BASE(_n) + 0x198)
+#define REG_FE_GDM_RX_ETH_L1023_CNT_L(_n) (GDM_BASE(_n) + 0x19c)
+
+#define REG_PPE_GLO_CFG(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x200)
+#define PPE_GLO_CFG_BUSY_MASK BIT(31)
+#define PPE_GLO_CFG_FLOW_DROP_UPDATE_MASK BIT(9)
+#define PPE_GLO_CFG_PSE_HASH_OFS_MASK BIT(6)
+#define PPE_GLO_CFG_PPE_BSWAP_MASK BIT(5)
+#define PPE_GLO_CFG_TTL_DROP_MASK BIT(4)
+#define PPE_GLO_CFG_IP4_CS_DROP_MASK BIT(3)
+#define PPE_GLO_CFG_IP4_L4_CS_DROP_MASK BIT(2)
+#define PPE_GLO_CFG_EN_MASK BIT(0)
+
+#define REG_PPE_PPE_FLOW_CFG(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x204)
+#define PPE_FLOW_CFG_IP6_HASH_GRE_KEY_MASK BIT(20)
+#define PPE_FLOW_CFG_IP4_HASH_GRE_KEY_MASK BIT(19)
+#define PPE_FLOW_CFG_IP4_HASH_FLOW_LABEL_MASK BIT(18)
+#define PPE_FLOW_CFG_IP4_NAT_FRAG_MASK BIT(17)
+#define PPE_FLOW_CFG_IP_PROTO_BLACKLIST_MASK BIT(16)
+#define PPE_FLOW_CFG_IP4_DSLITE_MASK BIT(14)
+#define PPE_FLOW_CFG_IP4_NAPT_MASK BIT(13)
+#define PPE_FLOW_CFG_IP4_NAT_MASK BIT(12)
+#define PPE_FLOW_CFG_IP6_6RD_MASK BIT(10)
+#define PPE_FLOW_CFG_IP6_5T_ROUTE_MASK BIT(9)
+#define PPE_FLOW_CFG_IP6_3T_ROUTE_MASK BIT(8)
+#define PPE_FLOW_CFG_IP4_UDP_FRAG_MASK BIT(7)
+#define PPE_FLOW_CFG_IP4_TCP_FRAG_MASK BIT(6)
+
+#define REG_PPE_IP_PROTO_CHK(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x208)
+#define PPE_IP_PROTO_CHK_IPV4_MASK GENMASK(15, 0)
+#define PPE_IP_PROTO_CHK_IPV6_MASK GENMASK(31, 16)
+
+#define REG_PPE_TB_CFG(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x21c)
+#define PPE_SRAM_TB_NUM_ENTRY_MASK GENMASK(26, 24)
+#define PPE_TB_CFG_KEEPALIVE_MASK GENMASK(13, 12)
+#define PPE_TB_CFG_AGE_TCP_FIN_MASK BIT(11)
+#define PPE_TB_CFG_AGE_UDP_MASK BIT(10)
+#define PPE_TB_CFG_AGE_TCP_MASK BIT(9)
+#define PPE_TB_CFG_AGE_UNBIND_MASK BIT(8)
+#define PPE_TB_CFG_AGE_NON_L4_MASK BIT(7)
+#define PPE_TB_CFG_AGE_PREBIND_MASK BIT(6)
+#define PPE_TB_CFG_SEARCH_MISS_MASK GENMASK(5, 4)
+#define PPE_TB_ENTRY_SIZE_MASK BIT(3)
+#define PPE_DRAM_TB_NUM_ENTRY_MASK GENMASK(2, 0)
+
+#define REG_PPE_TB_BASE(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x220)
+
+#define REG_PPE_BIND_RATE(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x228)
+#define PPE_BIND_RATE_L2B_BIND_MASK GENMASK(31, 16)
+#define PPE_BIND_RATE_BIND_MASK GENMASK(15, 0)
+
+#define REG_PPE_BIND_LIMIT0(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x22c)
+#define PPE_BIND_LIMIT0_HALF_MASK GENMASK(29, 16)
+#define PPE_BIND_LIMIT0_QUARTER_MASK GENMASK(13, 0)
+
+#define REG_PPE_BIND_LIMIT1(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x230)
+#define PPE_BIND_LIMIT1_NON_L4_MASK GENMASK(23, 16)
+#define PPE_BIND_LIMIT1_FULL_MASK GENMASK(13, 0)
+
+#define REG_PPE_BND_AGE0(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x23c)
+#define PPE_BIND_AGE0_DELTA_NON_L4 GENMASK(30, 16)
+#define PPE_BIND_AGE0_DELTA_UDP GENMASK(14, 0)
+
+#define REG_PPE_UNBIND_AGE(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x238)
+#define PPE_UNBIND_AGE_MIN_PACKETS_MASK GENMASK(31, 16)
+#define PPE_UNBIND_AGE_DELTA_MASK GENMASK(7, 0)
+
+#define REG_PPE_BND_AGE1(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x240)
+#define PPE_BIND_AGE1_DELTA_TCP_FIN GENMASK(30, 16)
+#define PPE_BIND_AGE1_DELTA_TCP GENMASK(14, 0)
+
+#define REG_PPE_HASH_SEED(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x244)
+#define PPE_HASH_SEED 0x12345678
+
+#define REG_PPE_DFT_CPORT0(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x248)
+#define DFT_CPORT_MASK(_n) GENMASK(3 + ((_n) << 2), ((_n) << 2))
+
+#define REG_PPE_DFT_CPORT1(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x24c)
+
+#define REG_PPE_TB_HASH_CFG(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x250)
+#define PPE_DRAM_HASH1_MODE_MASK GENMASK(31, 28)
+#define PPE_DRAM_HASH1_EN_MASK BIT(24)
+#define PPE_DRAM_HASH0_MODE_MASK GENMASK(23, 20)
+#define PPE_DRAM_TABLE_EN_MASK BIT(16)
+#define PPE_SRAM_HASH1_MODE_MASK GENMASK(15, 12)
+#define PPE_SRAM_HASH1_EN_MASK BIT(8)
+#define PPE_SRAM_HASH0_MODE_MASK GENMASK(7, 4)
+#define PPE_SRAM_TABLE_EN_MASK BIT(0)
+
+#define REG_PPE_MTU_BASE(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x304)
+#define REG_PPE_MTU(_m, _n) (REG_PPE_MTU_BASE(_m) + ((_n) << 2))
+#define FP1_EGRESS_MTU_MASK GENMASK(29, 16)
+#define FP0_EGRESS_MTU_MASK GENMASK(13, 0)
+
+#define REG_PPE_RAM_CTRL(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x31c)
+#define PPE_SRAM_CTRL_ACK_MASK BIT(31)
+#define PPE_SRAM_CTRL_DUAL_SUCESS_MASK BIT(30)
+#define PPE_SRAM_CTRL_ENTRY_MASK GENMASK(23, 8)
+#define PPE_SRAM_WR_DUAL_DIRECTION_MASK BIT(2)
+#define PPE_SRAM_CTRL_WR_MASK BIT(1)
+#define PPE_SRAM_CTRL_REQ_MASK BIT(0)
+
+#define REG_PPE_RAM_BASE(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x320)
+#define REG_PPE_RAM_ENTRY(_m, _n) (REG_PPE_RAM_BASE(_m) + ((_n) << 2))
+
+#define REG_UPDMEM_CTRL(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x370)
+#define PPE_UPDMEM_ACK_MASK BIT(31)
+#define PPE_UPDMEM_ADDR_MASK GENMASK(11, 8)
+#define PPE_UPDMEM_OFFSET_MASK GENMASK(7, 4)
+#define PPE_UPDMEM_SEL_MASK GENMASK(3, 2)
+#define PPE_UPDMEM_WR_MASK BIT(1)
+#define PPE_UPDMEM_REQ_MASK BIT(0)
+
+#define REG_UPDMEM_DATA(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x374)
+
+#define REG_FE_GDM_TX_OK_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x280)
+#define REG_FE_GDM_TX_OK_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x284)
+#define REG_FE_GDM_TX_ETH_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x288)
+#define REG_FE_GDM_TX_ETH_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x28c)
+
+#define REG_FE_GDM_RX_OK_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x290)
+#define REG_FE_GDM_RX_OK_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x294)
+#define REG_FE_GDM_RX_ETH_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x298)
+#define REG_FE_GDM_RX_ETH_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x29c)
+#define REG_FE_GDM_TX_ETH_E64_CNT_H(_n) (GDM_BASE(_n) + 0x2b8)
+#define REG_FE_GDM_TX_ETH_L64_CNT_H(_n) (GDM_BASE(_n) + 0x2bc)
+#define REG_FE_GDM_TX_ETH_L127_CNT_H(_n) (GDM_BASE(_n) + 0x2c0)
+#define REG_FE_GDM_TX_ETH_L255_CNT_H(_n) (GDM_BASE(_n) + 0x2c4)
+#define REG_FE_GDM_TX_ETH_L511_CNT_H(_n) (GDM_BASE(_n) + 0x2c8)
+#define REG_FE_GDM_TX_ETH_L1023_CNT_H(_n) (GDM_BASE(_n) + 0x2cc)
+#define REG_FE_GDM_RX_ETH_E64_CNT_H(_n) (GDM_BASE(_n) + 0x2e8)
+#define REG_FE_GDM_RX_ETH_L64_CNT_H(_n) (GDM_BASE(_n) + 0x2ec)
+#define REG_FE_GDM_RX_ETH_L127_CNT_H(_n) (GDM_BASE(_n) + 0x2f0)
+#define REG_FE_GDM_RX_ETH_L255_CNT_H(_n) (GDM_BASE(_n) + 0x2f4)
+#define REG_FE_GDM_RX_ETH_L511_CNT_H(_n) (GDM_BASE(_n) + 0x2f8)
+#define REG_FE_GDM_RX_ETH_L1023_CNT_H(_n) (GDM_BASE(_n) + 0x2fc)
+
+#define REG_GDM2_CHN_RLS (GDM2_BASE + 0x20)
+#define MBI_RX_AGE_SEL_MASK GENMASK(26, 25)
+#define MBI_TX_AGE_SEL_MASK GENMASK(18, 17)
+
+#define REG_GDM3_FWD_CFG GDM3_BASE
+#define GDM3_PAD_EN_MASK BIT(28)
+
+#define REG_GDM4_FWD_CFG GDM4_BASE
+#define GDM4_PAD_EN_MASK BIT(28)
+#define GDM4_SPORT_OFFSET0_MASK GENMASK(11, 8)
+
+#define REG_GDM4_SRC_PORT_SET (GDM4_BASE + 0x23c)
+#define GDM4_SPORT_OFF2_MASK GENMASK(19, 16)
+#define GDM4_SPORT_OFF1_MASK GENMASK(15, 12)
+#define GDM4_SPORT_OFF0_MASK GENMASK(11, 8)
+
+#define REG_IP_FRAG_FP 0x2010
+#define IP_ASSEMBLE_PORT_MASK GENMASK(24, 21)
+#define IP_ASSEMBLE_NBQ_MASK GENMASK(20, 16)
+#define IP_FRAGMENT_PORT_MASK GENMASK(8, 5)
+#define IP_FRAGMENT_NBQ_MASK GENMASK(4, 0)
+
+#define REG_MC_VLAN_EN 0x2100
+#define MC_VLAN_EN_MASK BIT(0)
+
+#define REG_MC_VLAN_CFG 0x2104
+#define MC_VLAN_CFG_CMD_DONE_MASK BIT(31)
+#define MC_VLAN_CFG_TABLE_ID_MASK GENMASK(21, 16)
+#define MC_VLAN_CFG_PORT_ID_MASK GENMASK(11, 8)
+#define MC_VLAN_CFG_TABLE_SEL_MASK BIT(4)
+#define MC_VLAN_CFG_RW_MASK BIT(0)
+
+#define REG_MC_VLAN_DATA 0x2108
+
+#define REG_SP_DFT_CPORT(_n) (0x20e0 + ((_n) << 2))
+#define SP_CPORT_PCIE1_MASK GENMASK(31, 28)
+#define SP_CPORT_PCIE0_MASK GENMASK(27, 24)
+#define SP_CPORT_USB_MASK GENMASK(7, 4)
+#define SP_CPORT_ETH_MASK GENMASK(7, 4)
+
+#define REG_SRC_PORT_FC_MAP6 0x2298
+#define FC_ID_OF_SRC_PORT27_MASK GENMASK(28, 24)
+#define FC_ID_OF_SRC_PORT26_MASK GENMASK(20, 16)
+#define FC_ID_OF_SRC_PORT25_MASK GENMASK(12, 8)
+#define FC_ID_OF_SRC_PORT24_MASK GENMASK(4, 0)
+
+#define REG_CDM5_RX_OQ1_DROP_CNT 0x29d4
+
+/* QDMA */
+#define REG_QDMA_GLOBAL_CFG 0x0004
+#define GLOBAL_CFG_RX_2B_OFFSET_MASK BIT(31)
+#define GLOBAL_CFG_DMA_PREFERENCE_MASK GENMASK(30, 29)
+#define GLOBAL_CFG_CPU_TXR_RR_MASK BIT(28)
+#define GLOBAL_CFG_DSCP_BYTE_SWAP_MASK BIT(27)
+#define GLOBAL_CFG_PAYLOAD_BYTE_SWAP_MASK BIT(26)
+#define GLOBAL_CFG_MULTICAST_MODIFY_FP_MASK BIT(25)
+#define GLOBAL_CFG_OAM_MODIFY_MASK BIT(24)
+#define GLOBAL_CFG_RESET_MASK BIT(23)
+#define GLOBAL_CFG_RESET_DONE_MASK BIT(22)
+#define GLOBAL_CFG_MULTICAST_EN_MASK BIT(21)
+#define GLOBAL_CFG_IRQ1_EN_MASK BIT(20)
+#define GLOBAL_CFG_IRQ0_EN_MASK BIT(19)
+#define GLOBAL_CFG_LOOPCNT_EN_MASK BIT(18)
+#define GLOBAL_CFG_RD_BYPASS_WR_MASK BIT(17)
+#define GLOBAL_CFG_QDMA_LOOPBACK_MASK BIT(16)
+#define GLOBAL_CFG_LPBK_RXQ_SEL_MASK GENMASK(13, 8)
+#define GLOBAL_CFG_CHECK_DONE_MASK BIT(7)
+#define GLOBAL_CFG_TX_WB_DONE_MASK BIT(6)
+#define GLOBAL_CFG_MAX_ISSUE_NUM_MASK GENMASK(5, 4)
+#define GLOBAL_CFG_RX_DMA_BUSY_MASK BIT(3)
+#define GLOBAL_CFG_RX_DMA_EN_MASK BIT(2)
+#define GLOBAL_CFG_TX_DMA_BUSY_MASK BIT(1)
+#define GLOBAL_CFG_TX_DMA_EN_MASK BIT(0)
+
+#define REG_FWD_DSCP_BASE 0x0010
+#define REG_FWD_BUF_BASE 0x0014
+
+#define REG_HW_FWD_DSCP_CFG 0x0018
+#define HW_FWD_DSCP_PAYLOAD_SIZE_MASK GENMASK(29, 28)
+#define HW_FWD_DSCP_SCATTER_LEN_MASK GENMASK(17, 16)
+#define HW_FWD_DSCP_MIN_SCATTER_LEN_MASK GENMASK(15, 0)
+
+#define REG_INT_STATUS(_n) \
+ (((_n) == 4) ? 0x0730 : \
+ ((_n) == 3) ? 0x0724 : \
+ ((_n) == 2) ? 0x0720 : \
+ ((_n) == 1) ? 0x0024 : 0x0020)
+
+#define REG_INT_ENABLE(_b, _n) \
+ (((_n) == 4) ? 0x0750 + ((_b) << 5) : \
+ ((_n) == 3) ? 0x0744 + ((_b) << 5) : \
+ ((_n) == 2) ? 0x0740 + ((_b) << 5) : \
+ ((_n) == 1) ? 0x002c + ((_b) << 3) : \
+ 0x0028 + ((_b) << 3))
+
+/* QDMA_CSR_INT_ENABLE1 */
+#define RX15_COHERENT_INT_MASK BIT(31)
+#define RX14_COHERENT_INT_MASK BIT(30)
+#define RX13_COHERENT_INT_MASK BIT(29)
+#define RX12_COHERENT_INT_MASK BIT(28)
+#define RX11_COHERENT_INT_MASK BIT(27)
+#define RX10_COHERENT_INT_MASK BIT(26)
+#define RX9_COHERENT_INT_MASK BIT(25)
+#define RX8_COHERENT_INT_MASK BIT(24)
+#define RX7_COHERENT_INT_MASK BIT(23)
+#define RX6_COHERENT_INT_MASK BIT(22)
+#define RX5_COHERENT_INT_MASK BIT(21)
+#define RX4_COHERENT_INT_MASK BIT(20)
+#define RX3_COHERENT_INT_MASK BIT(19)
+#define RX2_COHERENT_INT_MASK BIT(18)
+#define RX1_COHERENT_INT_MASK BIT(17)
+#define RX0_COHERENT_INT_MASK BIT(16)
+#define TX7_COHERENT_INT_MASK BIT(15)
+#define TX6_COHERENT_INT_MASK BIT(14)
+#define TX5_COHERENT_INT_MASK BIT(13)
+#define TX4_COHERENT_INT_MASK BIT(12)
+#define TX3_COHERENT_INT_MASK BIT(11)
+#define TX2_COHERENT_INT_MASK BIT(10)
+#define TX1_COHERENT_INT_MASK BIT(9)
+#define TX0_COHERENT_INT_MASK BIT(8)
+#define CNT_OVER_FLOW_INT_MASK BIT(7)
+#define IRQ1_FULL_INT_MASK BIT(5)
+#define IRQ1_INT_MASK BIT(4)
+#define HWFWD_DSCP_LOW_INT_MASK BIT(3)
+#define HWFWD_DSCP_EMPTY_INT_MASK BIT(2)
+#define IRQ0_FULL_INT_MASK BIT(1)
+#define IRQ0_INT_MASK BIT(0)
+
+#define RX_COHERENT_LOW_INT_MASK \
+ (RX15_COHERENT_INT_MASK | RX14_COHERENT_INT_MASK | \
+ RX13_COHERENT_INT_MASK | RX12_COHERENT_INT_MASK | \
+ RX11_COHERENT_INT_MASK | RX10_COHERENT_INT_MASK | \
+ RX9_COHERENT_INT_MASK | RX8_COHERENT_INT_MASK | \
+ RX7_COHERENT_INT_MASK | RX6_COHERENT_INT_MASK | \
+ RX5_COHERENT_INT_MASK | RX4_COHERENT_INT_MASK | \
+ RX3_COHERENT_INT_MASK | RX2_COHERENT_INT_MASK | \
+ RX1_COHERENT_INT_MASK | RX0_COHERENT_INT_MASK)
+
+#define RX_COHERENT_LOW_OFFSET __ffs(RX_COHERENT_LOW_INT_MASK)
+#define INT_RX0_MASK(_n) \
+ (((_n) << RX_COHERENT_LOW_OFFSET) & RX_COHERENT_LOW_INT_MASK)
+
+#define TX_COHERENT_LOW_INT_MASK \
+ (TX7_COHERENT_INT_MASK | TX6_COHERENT_INT_MASK | \
+ TX5_COHERENT_INT_MASK | TX4_COHERENT_INT_MASK | \
+ TX3_COHERENT_INT_MASK | TX2_COHERENT_INT_MASK | \
+ TX1_COHERENT_INT_MASK | TX0_COHERENT_INT_MASK)
+
+#define TX_DONE_INT_MASK(_n) \
+ ((_n) ? IRQ1_INT_MASK | IRQ1_FULL_INT_MASK \
+ : IRQ0_INT_MASK | IRQ0_FULL_INT_MASK)
+
+#define INT_TX_MASK \
+ (IRQ1_INT_MASK | IRQ1_FULL_INT_MASK | \
+ IRQ0_INT_MASK | IRQ0_FULL_INT_MASK)
+
+/* QDMA_CSR_INT_ENABLE2 */
+#define RX15_NO_CPU_DSCP_INT_MASK BIT(31)
+#define RX14_NO_CPU_DSCP_INT_MASK BIT(30)
+#define RX13_NO_CPU_DSCP_INT_MASK BIT(29)
+#define RX12_NO_CPU_DSCP_INT_MASK BIT(28)
+#define RX11_NO_CPU_DSCP_INT_MASK BIT(27)
+#define RX10_NO_CPU_DSCP_INT_MASK BIT(26)
+#define RX9_NO_CPU_DSCP_INT_MASK BIT(25)
+#define RX8_NO_CPU_DSCP_INT_MASK BIT(24)
+#define RX7_NO_CPU_DSCP_INT_MASK BIT(23)
+#define RX6_NO_CPU_DSCP_INT_MASK BIT(22)
+#define RX5_NO_CPU_DSCP_INT_MASK BIT(21)
+#define RX4_NO_CPU_DSCP_INT_MASK BIT(20)
+#define RX3_NO_CPU_DSCP_INT_MASK BIT(19)
+#define RX2_NO_CPU_DSCP_INT_MASK BIT(18)
+#define RX1_NO_CPU_DSCP_INT_MASK BIT(17)
+#define RX0_NO_CPU_DSCP_INT_MASK BIT(16)
+#define RX15_DONE_INT_MASK BIT(15)
+#define RX14_DONE_INT_MASK BIT(14)
+#define RX13_DONE_INT_MASK BIT(13)
+#define RX12_DONE_INT_MASK BIT(12)
+#define RX11_DONE_INT_MASK BIT(11)
+#define RX10_DONE_INT_MASK BIT(10)
+#define RX9_DONE_INT_MASK BIT(9)
+#define RX8_DONE_INT_MASK BIT(8)
+#define RX7_DONE_INT_MASK BIT(7)
+#define RX6_DONE_INT_MASK BIT(6)
+#define RX5_DONE_INT_MASK BIT(5)
+#define RX4_DONE_INT_MASK BIT(4)
+#define RX3_DONE_INT_MASK BIT(3)
+#define RX2_DONE_INT_MASK BIT(2)
+#define RX1_DONE_INT_MASK BIT(1)
+#define RX0_DONE_INT_MASK BIT(0)
+
+#define RX_NO_CPU_DSCP_LOW_INT_MASK \
+ (RX15_NO_CPU_DSCP_INT_MASK | RX14_NO_CPU_DSCP_INT_MASK | \
+ RX13_NO_CPU_DSCP_INT_MASK | RX12_NO_CPU_DSCP_INT_MASK | \
+ RX11_NO_CPU_DSCP_INT_MASK | RX10_NO_CPU_DSCP_INT_MASK | \
+ RX9_NO_CPU_DSCP_INT_MASK | RX8_NO_CPU_DSCP_INT_MASK | \
+ RX7_NO_CPU_DSCP_INT_MASK | RX6_NO_CPU_DSCP_INT_MASK | \
+ RX5_NO_CPU_DSCP_INT_MASK | RX4_NO_CPU_DSCP_INT_MASK | \
+ RX3_NO_CPU_DSCP_INT_MASK | RX2_NO_CPU_DSCP_INT_MASK | \
+ RX1_NO_CPU_DSCP_INT_MASK | RX0_NO_CPU_DSCP_INT_MASK)
+
+#define RX_DONE_LOW_INT_MASK \
+ (RX15_DONE_INT_MASK | RX14_DONE_INT_MASK | \
+ RX13_DONE_INT_MASK | RX12_DONE_INT_MASK | \
+ RX11_DONE_INT_MASK | RX10_DONE_INT_MASK | \
+ RX9_DONE_INT_MASK | RX8_DONE_INT_MASK | \
+ RX7_DONE_INT_MASK | RX6_DONE_INT_MASK | \
+ RX5_DONE_INT_MASK | RX4_DONE_INT_MASK | \
+ RX3_DONE_INT_MASK | RX2_DONE_INT_MASK | \
+ RX1_DONE_INT_MASK | RX0_DONE_INT_MASK)
+
+#define RX_NO_CPU_DSCP_LOW_OFFSET __ffs(RX_NO_CPU_DSCP_LOW_INT_MASK)
+#define INT_RX1_MASK(_n) \
+ ((((_n) << RX_NO_CPU_DSCP_LOW_OFFSET) & RX_NO_CPU_DSCP_LOW_INT_MASK) | \
+ (RX_DONE_LOW_INT_MASK & (_n)))
+
+/* QDMA_CSR_INT_ENABLE3 */
+#define RX31_NO_CPU_DSCP_INT_MASK BIT(31)
+#define RX30_NO_CPU_DSCP_INT_MASK BIT(30)
+#define RX29_NO_CPU_DSCP_INT_MASK BIT(29)
+#define RX28_NO_CPU_DSCP_INT_MASK BIT(28)
+#define RX27_NO_CPU_DSCP_INT_MASK BIT(27)
+#define RX26_NO_CPU_DSCP_INT_MASK BIT(26)
+#define RX25_NO_CPU_DSCP_INT_MASK BIT(25)
+#define RX24_NO_CPU_DSCP_INT_MASK BIT(24)
+#define RX23_NO_CPU_DSCP_INT_MASK BIT(23)
+#define RX22_NO_CPU_DSCP_INT_MASK BIT(22)
+#define RX21_NO_CPU_DSCP_INT_MASK BIT(21)
+#define RX20_NO_CPU_DSCP_INT_MASK BIT(20)
+#define RX19_NO_CPU_DSCP_INT_MASK BIT(19)
+#define RX18_NO_CPU_DSCP_INT_MASK BIT(18)
+#define RX17_NO_CPU_DSCP_INT_MASK BIT(17)
+#define RX16_NO_CPU_DSCP_INT_MASK BIT(16)
+#define RX31_DONE_INT_MASK BIT(15)
+#define RX30_DONE_INT_MASK BIT(14)
+#define RX29_DONE_INT_MASK BIT(13)
+#define RX28_DONE_INT_MASK BIT(12)
+#define RX27_DONE_INT_MASK BIT(11)
+#define RX26_DONE_INT_MASK BIT(10)
+#define RX25_DONE_INT_MASK BIT(9)
+#define RX24_DONE_INT_MASK BIT(8)
+#define RX23_DONE_INT_MASK BIT(7)
+#define RX22_DONE_INT_MASK BIT(6)
+#define RX21_DONE_INT_MASK BIT(5)
+#define RX20_DONE_INT_MASK BIT(4)
+#define RX19_DONE_INT_MASK BIT(3)
+#define RX18_DONE_INT_MASK BIT(2)
+#define RX17_DONE_INT_MASK BIT(1)
+#define RX16_DONE_INT_MASK BIT(0)
+
+#define RX_NO_CPU_DSCP_HIGH_INT_MASK \
+ (RX31_NO_CPU_DSCP_INT_MASK | RX30_NO_CPU_DSCP_INT_MASK | \
+ RX29_NO_CPU_DSCP_INT_MASK | RX28_NO_CPU_DSCP_INT_MASK | \
+ RX27_NO_CPU_DSCP_INT_MASK | RX26_NO_CPU_DSCP_INT_MASK | \
+ RX25_NO_CPU_DSCP_INT_MASK | RX24_NO_CPU_DSCP_INT_MASK | \
+ RX23_NO_CPU_DSCP_INT_MASK | RX22_NO_CPU_DSCP_INT_MASK | \
+ RX21_NO_CPU_DSCP_INT_MASK | RX20_NO_CPU_DSCP_INT_MASK | \
+ RX19_NO_CPU_DSCP_INT_MASK | RX18_NO_CPU_DSCP_INT_MASK | \
+ RX17_NO_CPU_DSCP_INT_MASK | RX16_NO_CPU_DSCP_INT_MASK)
+
+#define RX_DONE_HIGH_INT_MASK \
+ (RX31_DONE_INT_MASK | RX30_DONE_INT_MASK | \
+ RX29_DONE_INT_MASK | RX28_DONE_INT_MASK | \
+ RX27_DONE_INT_MASK | RX26_DONE_INT_MASK | \
+ RX25_DONE_INT_MASK | RX24_DONE_INT_MASK | \
+ RX23_DONE_INT_MASK | RX22_DONE_INT_MASK | \
+ RX21_DONE_INT_MASK | RX20_DONE_INT_MASK | \
+ RX19_DONE_INT_MASK | RX18_DONE_INT_MASK | \
+ RX17_DONE_INT_MASK | RX16_DONE_INT_MASK)
+
+#define RX_DONE_HIGH_OFFSET fls(RX_DONE_HIGH_INT_MASK)
+#define RX_DONE_INT_MASK \
+ ((RX_DONE_HIGH_INT_MASK << RX_DONE_HIGH_OFFSET) | RX_DONE_LOW_INT_MASK)
+
+#define INT_RX2_MASK(_n) \
+ ((RX_NO_CPU_DSCP_HIGH_INT_MASK & (_n)) | \
+ (((_n) >> RX_DONE_HIGH_OFFSET) & RX_DONE_HIGH_INT_MASK))
+
+/* QDMA_CSR_INT_ENABLE4 */
+#define RX31_COHERENT_INT_MASK BIT(31)
+#define RX30_COHERENT_INT_MASK BIT(30)
+#define RX29_COHERENT_INT_MASK BIT(29)
+#define RX28_COHERENT_INT_MASK BIT(28)
+#define RX27_COHERENT_INT_MASK BIT(27)
+#define RX26_COHERENT_INT_MASK BIT(26)
+#define RX25_COHERENT_INT_MASK BIT(25)
+#define RX24_COHERENT_INT_MASK BIT(24)
+#define RX23_COHERENT_INT_MASK BIT(23)
+#define RX22_COHERENT_INT_MASK BIT(22)
+#define RX21_COHERENT_INT_MASK BIT(21)
+#define RX20_COHERENT_INT_MASK BIT(20)
+#define RX19_COHERENT_INT_MASK BIT(19)
+#define RX18_COHERENT_INT_MASK BIT(18)
+#define RX17_COHERENT_INT_MASK BIT(17)
+#define RX16_COHERENT_INT_MASK BIT(16)
+
+#define RX_COHERENT_HIGH_INT_MASK \
+ (RX31_COHERENT_INT_MASK | RX30_COHERENT_INT_MASK | \
+ RX29_COHERENT_INT_MASK | RX28_COHERENT_INT_MASK | \
+ RX27_COHERENT_INT_MASK | RX26_COHERENT_INT_MASK | \
+ RX25_COHERENT_INT_MASK | RX24_COHERENT_INT_MASK | \
+ RX23_COHERENT_INT_MASK | RX22_COHERENT_INT_MASK | \
+ RX21_COHERENT_INT_MASK | RX20_COHERENT_INT_MASK | \
+ RX19_COHERENT_INT_MASK | RX18_COHERENT_INT_MASK | \
+ RX17_COHERENT_INT_MASK | RX16_COHERENT_INT_MASK)
+
+#define INT_RX3_MASK(_n) (RX_COHERENT_HIGH_INT_MASK & (_n))
+
+/* QDMA_CSR_INT_ENABLE5 */
+#define TX31_COHERENT_INT_MASK BIT(31)
+#define TX30_COHERENT_INT_MASK BIT(30)
+#define TX29_COHERENT_INT_MASK BIT(29)
+#define TX28_COHERENT_INT_MASK BIT(28)
+#define TX27_COHERENT_INT_MASK BIT(27)
+#define TX26_COHERENT_INT_MASK BIT(26)
+#define TX25_COHERENT_INT_MASK BIT(25)
+#define TX24_COHERENT_INT_MASK BIT(24)
+#define TX23_COHERENT_INT_MASK BIT(23)
+#define TX22_COHERENT_INT_MASK BIT(22)
+#define TX21_COHERENT_INT_MASK BIT(21)
+#define TX20_COHERENT_INT_MASK BIT(20)
+#define TX19_COHERENT_INT_MASK BIT(19)
+#define TX18_COHERENT_INT_MASK BIT(18)
+#define TX17_COHERENT_INT_MASK BIT(17)
+#define TX16_COHERENT_INT_MASK BIT(16)
+#define TX15_COHERENT_INT_MASK BIT(15)
+#define TX14_COHERENT_INT_MASK BIT(14)
+#define TX13_COHERENT_INT_MASK BIT(13)
+#define TX12_COHERENT_INT_MASK BIT(12)
+#define TX11_COHERENT_INT_MASK BIT(11)
+#define TX10_COHERENT_INT_MASK BIT(10)
+#define TX9_COHERENT_INT_MASK BIT(9)
+#define TX8_COHERENT_INT_MASK BIT(8)
+
+#define TX_COHERENT_HIGH_INT_MASK \
+ (TX31_COHERENT_INT_MASK | TX30_COHERENT_INT_MASK | \
+ TX29_COHERENT_INT_MASK | TX28_COHERENT_INT_MASK | \
+ TX27_COHERENT_INT_MASK | TX26_COHERENT_INT_MASK | \
+ TX25_COHERENT_INT_MASK | TX24_COHERENT_INT_MASK | \
+ TX23_COHERENT_INT_MASK | TX22_COHERENT_INT_MASK | \
+ TX21_COHERENT_INT_MASK | TX20_COHERENT_INT_MASK | \
+ TX19_COHERENT_INT_MASK | TX18_COHERENT_INT_MASK | \
+ TX17_COHERENT_INT_MASK | TX16_COHERENT_INT_MASK | \
+ TX15_COHERENT_INT_MASK | TX14_COHERENT_INT_MASK | \
+ TX13_COHERENT_INT_MASK | TX12_COHERENT_INT_MASK | \
+ TX11_COHERENT_INT_MASK | TX10_COHERENT_INT_MASK | \
+ TX9_COHERENT_INT_MASK | TX8_COHERENT_INT_MASK)
+
+#define REG_TX_IRQ_BASE(_n) ((_n) ? 0x0048 : 0x0050)
+
+#define REG_TX_IRQ_CFG(_n) ((_n) ? 0x004c : 0x0054)
+#define TX_IRQ_THR_MASK GENMASK(27, 16)
+#define TX_IRQ_DEPTH_MASK GENMASK(11, 0)
+
+#define REG_IRQ_CLEAR_LEN(_n) ((_n) ? 0x0064 : 0x0058)
+#define IRQ_CLEAR_LEN_MASK GENMASK(7, 0)
+
+#define REG_IRQ_STATUS(_n) ((_n) ? 0x0068 : 0x005c)
+#define IRQ_ENTRY_LEN_MASK GENMASK(27, 16)
+#define IRQ_HEAD_IDX_MASK GENMASK(11, 0)
+
+#define REG_TX_RING_BASE(_n) \
+ (((_n) < 8) ? 0x0100 + ((_n) << 5) : 0x0b00 + (((_n) - 8) << 5))
+
+#define REG_TX_RING_BLOCKING(_n) \
+ (((_n) < 8) ? 0x0104 + ((_n) << 5) : 0x0b04 + (((_n) - 8) << 5))
+
+#define TX_RING_IRQ_BLOCKING_MAP_MASK BIT(6)
+#define TX_RING_IRQ_BLOCKING_CFG_MASK BIT(4)
+#define TX_RING_IRQ_BLOCKING_TX_DROP_EN_MASK BIT(2)
+#define TX_RING_IRQ_BLOCKING_MAX_TH_TXRING_EN_MASK BIT(1)
+#define TX_RING_IRQ_BLOCKING_MIN_TH_TXRING_EN_MASK BIT(0)
+
+#define REG_TX_CPU_IDX(_n) \
+ (((_n) < 8) ? 0x0108 + ((_n) << 5) : 0x0b08 + (((_n) - 8) << 5))
+
+#define TX_RING_CPU_IDX_MASK GENMASK(15, 0)
+
+#define REG_TX_DMA_IDX(_n) \
+ (((_n) < 8) ? 0x010c + ((_n) << 5) : 0x0b0c + (((_n) - 8) << 5))
+
+#define TX_RING_DMA_IDX_MASK GENMASK(15, 0)
+
+#define IRQ_RING_IDX_MASK GENMASK(20, 16)
+#define IRQ_DESC_IDX_MASK GENMASK(15, 0)
+
+#define REG_RX_RING_BASE(_n) \
+ (((_n) < 16) ? 0x0200 + ((_n) << 5) : 0x0e00 + (((_n) - 16) << 5))
+
+#define REG_RX_RING_SIZE(_n) \
+ (((_n) < 16) ? 0x0204 + ((_n) << 5) : 0x0e04 + (((_n) - 16) << 5))
+
+#define RX_RING_THR_MASK GENMASK(31, 16)
+#define RX_RING_SIZE_MASK GENMASK(15, 0)
+
+#define REG_RX_CPU_IDX(_n) \
+ (((_n) < 16) ? 0x0208 + ((_n) << 5) : 0x0e08 + (((_n) - 16) << 5))
+
+#define RX_RING_CPU_IDX_MASK GENMASK(15, 0)
+
+#define REG_RX_DMA_IDX(_n) \
+ (((_n) < 16) ? 0x020c + ((_n) << 5) : 0x0e0c + (((_n) - 16) << 5))
+
+#define REG_RX_DELAY_INT_IDX(_n) \
+ (((_n) < 16) ? 0x0210 + ((_n) << 5) : 0x0e10 + (((_n) - 16) << 5))
+
+#define REG_RX_SCATTER_CFG(_n) \
+ (((_n) < 16) ? 0x0214 + ((_n) << 5) : 0x0e14 + (((_n) - 16) << 5))
+
+#define RX_DELAY_INT_MASK GENMASK(15, 0)
+
+#define RX_RING_DMA_IDX_MASK GENMASK(15, 0)
+
+#define RX_RING_SG_EN_MASK BIT(0)
+
+#define REG_INGRESS_TRTCM_CFG 0x0070
+#define INGRESS_TRTCM_EN_MASK BIT(31)
+#define INGRESS_TRTCM_MODE_MASK BIT(30)
+#define INGRESS_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
+#define INGRESS_FAST_TICK_MASK GENMASK(15, 0)
+
+#define REG_QUEUE_CLOSE_CFG(_n) (0x00a0 + ((_n) & 0xfc))
+#define TXQ_DISABLE_CHAN_QUEUE_MASK(_n, _m) BIT((_m) + (((_n) & 0x3) << 3))
+
+#define REG_TXQ_DIS_CFG_BASE(_n) ((_n) ? 0x20a0 : 0x00a0)
+#define REG_TXQ_DIS_CFG(_n, _m) (REG_TXQ_DIS_CFG_BASE((_n)) + (_m) << 2)
+
+#define REG_CNTR_CFG(_n) (0x0400 + ((_n) << 3))
+#define CNTR_EN_MASK BIT(31)
+#define CNTR_ALL_CHAN_EN_MASK BIT(30)
+#define CNTR_ALL_QUEUE_EN_MASK BIT(29)
+#define CNTR_ALL_DSCP_RING_EN_MASK BIT(28)
+#define CNTR_SRC_MASK GENMASK(27, 24)
+#define CNTR_DSCP_RING_MASK GENMASK(20, 16)
+#define CNTR_CHAN_MASK GENMASK(7, 3)
+#define CNTR_QUEUE_MASK GENMASK(2, 0)
+
+#define REG_CNTR_VAL(_n) (0x0404 + ((_n) << 3))
+
+#define REG_LMGR_INIT_CFG 0x1000
+#define LMGR_INIT_START BIT(31)
+#define LMGR_SRAM_MODE_MASK BIT(30)
+#define HW_FWD_PKTSIZE_OVERHEAD_MASK GENMASK(27, 20)
+#define HW_FWD_DESC_NUM_MASK GENMASK(16, 0)
+
+#define REG_FWD_DSCP_LOW_THR 0x1004
+#define FWD_DSCP_LOW_THR_MASK GENMASK(17, 0)
+
+#define REG_EGRESS_RATE_METER_CFG 0x100c
+#define EGRESS_RATE_METER_EN_MASK BIT(31)
+#define EGRESS_RATE_METER_EQ_RATE_EN_MASK BIT(17)
+#define EGRESS_RATE_METER_WINDOW_SZ_MASK GENMASK(16, 12)
+#define EGRESS_RATE_METER_TIMESLICE_MASK GENMASK(10, 0)
+
+#define REG_EGRESS_TRTCM_CFG 0x1010
+#define EGRESS_TRTCM_EN_MASK BIT(31)
+#define EGRESS_TRTCM_MODE_MASK BIT(30)
+#define EGRESS_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
+#define EGRESS_FAST_TICK_MASK GENMASK(15, 0)
+
+#define TRTCM_PARAM_RW_MASK BIT(31)
+#define TRTCM_PARAM_RW_DONE_MASK BIT(30)
+#define TRTCM_PARAM_TYPE_MASK GENMASK(29, 28)
+#define TRTCM_METER_GROUP_MASK GENMASK(27, 26)
+#define TRTCM_PARAM_INDEX_MASK GENMASK(23, 17)
+#define TRTCM_PARAM_RATE_TYPE_MASK BIT(16)
+
+#define REG_TRTCM_CFG_PARAM(_n) ((_n) + 0x4)
+#define REG_TRTCM_DATA_LOW(_n) ((_n) + 0x8)
+#define REG_TRTCM_DATA_HIGH(_n) ((_n) + 0xc)
+
+#define RATE_LIMIT_PARAM_RW_MASK BIT(31)
+#define RATE_LIMIT_PARAM_RW_DONE_MASK BIT(30)
+#define RATE_LIMIT_PARAM_TYPE_MASK GENMASK(29, 28)
+#define RATE_LIMIT_METER_GROUP_MASK GENMASK(27, 26)
+#define RATE_LIMIT_PARAM_INDEX_MASK GENMASK(23, 16)
+
+#define REG_TXWRR_MODE_CFG 0x1020
+#define TWRR_WEIGHT_SCALE_MASK BIT(31)
+#define TWRR_WEIGHT_BASE_MASK BIT(3)
+
+#define REG_TXWRR_WEIGHT_CFG 0x1024
+#define TWRR_RW_CMD_MASK BIT(31)
+#define TWRR_RW_CMD_DONE BIT(30)
+#define TWRR_CHAN_IDX_MASK GENMASK(23, 19)
+#define TWRR_QUEUE_IDX_MASK GENMASK(18, 16)
+#define TWRR_VALUE_MASK GENMASK(15, 0)
+
+#define REG_PSE_BUF_USAGE_CFG 0x1028
+#define PSE_BUF_ESTIMATE_EN_MASK BIT(29)
+
+#define REG_CHAN_QOS_MODE(_n) (0x1040 + ((_n) << 2))
+#define CHAN_QOS_MODE_MASK(_n) GENMASK(2 + ((_n) << 2), (_n) << 2)
+
+#define REG_GLB_TRTCM_CFG 0x1080
+#define GLB_TRTCM_EN_MASK BIT(31)
+#define GLB_TRTCM_MODE_MASK BIT(30)
+#define GLB_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
+#define GLB_FAST_TICK_MASK GENMASK(15, 0)
+
+#define REG_TXQ_CNGST_CFG 0x10a0
+#define TXQ_CNGST_DROP_EN BIT(31)
+#define TXQ_CNGST_DEI_DROP_EN BIT(30)
+
+#define REG_SLA_TRTCM_CFG 0x1150
+#define SLA_TRTCM_EN_MASK BIT(31)
+#define SLA_TRTCM_MODE_MASK BIT(30)
+#define SLA_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
+#define SLA_FAST_TICK_MASK GENMASK(15, 0)
+
+/* CTRL */
+#define QDMA_DESC_DONE_MASK BIT(31)
+#define QDMA_DESC_DROP_MASK BIT(30) /* tx: drop - rx: overflow */
+#define QDMA_DESC_MORE_MASK BIT(29) /* more SG elements */
+#define QDMA_DESC_DEI_MASK BIT(25)
+#define QDMA_DESC_NO_DROP_MASK BIT(24)
+#define QDMA_DESC_LEN_MASK GENMASK(15, 0)
+/* DATA */
+#define QDMA_DESC_NEXT_ID_MASK GENMASK(15, 0)
+/* TX MSG0 */
+#define QDMA_ETH_TXMSG_MIC_IDX_MASK BIT(30)
+#define QDMA_ETH_TXMSG_SP_TAG_MASK GENMASK(29, 14)
+#define QDMA_ETH_TXMSG_ICO_MASK BIT(13)
+#define QDMA_ETH_TXMSG_UCO_MASK BIT(12)
+#define QDMA_ETH_TXMSG_TCO_MASK BIT(11)
+#define QDMA_ETH_TXMSG_TSO_MASK BIT(10)
+#define QDMA_ETH_TXMSG_FAST_MASK BIT(9)
+#define QDMA_ETH_TXMSG_OAM_MASK BIT(8)
+#define QDMA_ETH_TXMSG_CHAN_MASK GENMASK(7, 3)
+#define QDMA_ETH_TXMSG_QUEUE_MASK GENMASK(2, 0)
+/* TX MSG1 */
+#define QDMA_ETH_TXMSG_NO_DROP BIT(31)
+#define QDMA_ETH_TXMSG_METER_MASK GENMASK(30, 24) /* 0x7f no meters */
+#define QDMA_ETH_TXMSG_FPORT_MASK GENMASK(23, 20)
+#define QDMA_ETH_TXMSG_NBOQ_MASK GENMASK(19, 15)
+#define QDMA_ETH_TXMSG_HWF_MASK BIT(14)
+#define QDMA_ETH_TXMSG_HOP_MASK BIT(13)
+#define QDMA_ETH_TXMSG_PTP_MASK BIT(12)
+#define QDMA_ETH_TXMSG_ACNT_G1_MASK GENMASK(10, 6) /* 0x1f do not count */
+#define QDMA_ETH_TXMSG_ACNT_G0_MASK GENMASK(5, 0) /* 0x3f do not count */
+
+/* RX MSG0 */
+#define QDMA_ETH_RXMSG_SPTAG GENMASK(21, 14)
+/* RX MSG1 */
+#define QDMA_ETH_RXMSG_DEI_MASK BIT(31)
+#define QDMA_ETH_RXMSG_IP6_MASK BIT(30)
+#define QDMA_ETH_RXMSG_IP4_MASK BIT(29)
+#define QDMA_ETH_RXMSG_IP4F_MASK BIT(28)
+#define QDMA_ETH_RXMSG_L4_VALID_MASK BIT(27)
+#define QDMA_ETH_RXMSG_L4F_MASK BIT(26)
+#define QDMA_ETH_RXMSG_SPORT_MASK GENMASK(25, 21)
+#define QDMA_ETH_RXMSG_CRSN_MASK GENMASK(20, 16)
+#define QDMA_ETH_RXMSG_PPE_ENTRY_MASK GENMASK(15, 0)
+
+struct airoha_qdma_desc {
+ __le32 rsv;
+ __le32 ctrl;
+ __le32 addr;
+ __le32 data;
+ __le32 msg0;
+ __le32 msg1;
+ __le32 msg2;
+ __le32 msg3;
+};
+
+/* CTRL0 */
+#define QDMA_FWD_DESC_CTX_MASK BIT(31)
+#define QDMA_FWD_DESC_RING_MASK GENMASK(30, 28)
+#define QDMA_FWD_DESC_IDX_MASK GENMASK(27, 16)
+#define QDMA_FWD_DESC_LEN_MASK GENMASK(15, 0)
+/* CTRL1 */
+#define QDMA_FWD_DESC_FIRST_IDX_MASK GENMASK(15, 0)
+/* CTRL2 */
+#define QDMA_FWD_DESC_MORE_PKT_NUM_MASK GENMASK(2, 0)
+
+struct airoha_qdma_fwd_desc {
+ __le32 addr;
+ __le32 ctrl0;
+ __le32 ctrl1;
+ __le32 ctrl2;
+ __le32 msg0;
+ __le32 msg1;
+ __le32 rsv0;
+ __le32 rsv1;
+};
+
+#endif /* AIROHA_REGS_H */
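
The GENMASK()/BIT() definitions above are meant to be consumed through the <linux/bitfield.h> helpers; a minimal sketch (hypothetical helper name, the driver wraps register access in its own accessors) of how a field such as GDM_LONG_LEN_MASK is packed into its register:

#include <linux/bitfield.h>
#include <linux/io.h>

static void example_set_gdm_len(void __iomem *fe_base, int port,
				u32 short_len, u32 long_len)
{
	u32 val = readl(fe_base + REG_GDM_LEN_CFG(port));

	val &= ~(GDM_SHORT_LEN_MASK | GDM_LONG_LEN_MASK);
	val |= FIELD_PREP(GDM_SHORT_LEN_MASK, short_len) |
	       FIELD_PREP(GDM_LONG_LEN_MASK, long_len);
	writel(val, fe_base + REG_GDM_LEN_CFG(port));
}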
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 63c8a2328142..86fd08f375df 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -5,9 +5,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#ifdef CONFIG_RFS_ACCEL
-#include <linux/cpu_rmap.h>
-#endif /* CONFIG_RFS_ACCEL */
#include <linux/ethtool.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -74,7 +71,7 @@ static void ena_tx_timeout(struct net_device *dev, unsigned int txqueue)
if (threshold < time_since_last_napi && napi_scheduled) {
netdev_err(dev,
"napi handler hasn't been called for a long time but is scheduled\n");
- reset_reason = ENA_REGS_RESET_SUSPECTED_POLL_STARVATION;
+ reset_reason = ENA_REGS_RESET_SUSPECTED_POLL_STARVATION;
}
schedule_reset:
/* Change the state of the device to trigger reset
@@ -162,30 +159,6 @@ int ena_xmit_common(struct ena_adapter *adapter,
return 0;
}
-static int ena_init_rx_cpu_rmap(struct ena_adapter *adapter)
-{
-#ifdef CONFIG_RFS_ACCEL
- u32 i;
- int rc;
-
- adapter->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(adapter->num_io_queues);
- if (!adapter->netdev->rx_cpu_rmap)
- return -ENOMEM;
- for (i = 0; i < adapter->num_io_queues; i++) {
- int irq_idx = ENA_IO_IRQ_IDX(i);
-
- rc = irq_cpu_rmap_add(adapter->netdev->rx_cpu_rmap,
- pci_irq_vector(adapter->pdev, irq_idx));
- if (rc) {
- free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
- adapter->netdev->rx_cpu_rmap = NULL;
- return rc;
- }
- }
-#endif /* CONFIG_RFS_ACCEL */
- return 0;
-}
-
static void ena_init_io_rings_common(struct ena_adapter *adapter,
struct ena_ring *ring, u16 qid)
{
@@ -1596,7 +1569,7 @@ static int ena_enable_msix(struct ena_adapter *adapter)
adapter->num_io_queues = irq_cnt - ENA_ADMIN_MSIX_VEC;
}
- if (ena_init_rx_cpu_rmap(adapter))
+ if (netif_enable_cpu_rmap(adapter->netdev, adapter->num_io_queues))
netif_warn(adapter, probe, adapter->netdev,
"Failed to map IRQs to CPUs\n");
@@ -1742,16 +1715,13 @@ static void ena_free_io_irq(struct ena_adapter *adapter)
struct ena_irq *irq;
int i;
-#ifdef CONFIG_RFS_ACCEL
- if (adapter->msix_vecs >= 1) {
- free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
- adapter->netdev->rx_cpu_rmap = NULL;
- }
-#endif /* CONFIG_RFS_ACCEL */
-
for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++) {
+ struct ena_napi *ena_napi;
+
irq = &adapter->irq_tbl[i];
irq_set_affinity_hint(irq->vector, NULL);
+ ena_napi = irq->data;
+ netif_napi_set_irq(&ena_napi->napi, -1);
free_irq(irq->vector, irq->data);
}
}
@@ -1807,7 +1777,7 @@ static void ena_init_napi_in_range(struct ena_adapter *adapter,
if (ENA_IS_XDP_INDEX(adapter, i))
napi_handler = ena_xdp_io_poll;
- netif_napi_add(adapter->netdev, &napi->napi, napi_handler);
+ netif_napi_add_config(adapter->netdev, &napi->napi, napi_handler, i);
if (!ENA_IS_XDP_INDEX(adapter, i))
napi->rx_ring = rx_ring;
@@ -3275,7 +3245,7 @@ static int ena_destroy_device(struct ena_adapter *adapter, bool graceful)
netif_carrier_off(netdev);
- del_timer_sync(&adapter->timer_service);
+ timer_delete_sync(&adapter->timer_service);
dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
adapter->dev_up_before_reset = dev_up;
@@ -3697,7 +3667,8 @@ static void ena_update_host_info(struct ena_admin_host_info *host_info,
static void ena_timer_service(struct timer_list *t)
{
- struct ena_adapter *adapter = from_timer(adapter, t, timer_service);
+ struct ena_adapter *adapter = timer_container_of(adapter, t,
+ timer_service);
u8 *debug_area = adapter->ena_dev->host_attr.debug_area_virt_addr;
struct ena_admin_host_info *host_info =
adapter->ena_dev->host_attr.host_info;
@@ -4095,7 +4066,7 @@ err_free_msix:
ena_free_mgmnt_irq(adapter);
ena_disable_msix(adapter);
err_worker_destroy:
- del_timer(&adapter->timer_service);
+ timer_delete(&adapter->timer_service);
err_device_destroy:
ena_com_delete_host_info(ena_dev);
ena_com_admin_destroy(ena_dev);
@@ -4131,17 +4102,10 @@ static void __ena_shutoff(struct pci_dev *pdev, bool shutdown)
ena_dev = adapter->ena_dev;
netdev = adapter->netdev;
-#ifdef CONFIG_RFS_ACCEL
- if ((adapter->msix_vecs >= 1) && (netdev->rx_cpu_rmap)) {
- free_irq_cpu_rmap(netdev->rx_cpu_rmap);
- netdev->rx_cpu_rmap = NULL;
- }
-
-#endif /* CONFIG_RFS_ACCEL */
/* Make sure timer and reset routine won't be called after
* freeing device resources.
*/
- del_timer_sync(&adapter->timer_service);
+ timer_delete_sync(&adapter->timer_service);
cancel_work_sync(&adapter->reset_task);
rtnl_lock(); /* lock released inside the below if-else block */
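
The ENA hunks above drop the driver-private cpu_rmap bookkeeping in favour of the core helpers: each NAPI instance gets a persistent config index via netif_napi_add_config(), its IRQ mapping is cleared with netif_napi_set_irq(..., -1) on teardown, and netif_enable_cpu_rmap() lets the networking core allocate and release the ARFS rmap. A minimal sketch of the resulting setup pattern, with hypothetical driver names:

struct example_queue {
	struct napi_struct napi;
	/* ... per-queue state ... */
};

static int example_poll(struct napi_struct *napi, int budget);

static int example_setup_napi(struct net_device *dev,
			      struct example_queue *queues, int nq)
{
	int i;

	for (i = 0; i < nq; i++)
		netif_napi_add_config(dev, &queues[i].napi, example_poll, i);

	/* the core now manages dev->rx_cpu_rmap; no manual
	 * alloc_irq_cpu_rmap()/free_irq_cpu_rmap() calls are needed
	 */
	return netif_enable_cpu_rmap(dev, nq);
}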
diff --git a/drivers/net/ethernet/amd/a2065.c b/drivers/net/ethernet/amd/a2065.c
index 1ca26a8c40eb..ce9445425045 100644
--- a/drivers/net/ethernet/amd/a2065.c
+++ b/drivers/net/ethernet/amd/a2065.c
@@ -486,7 +486,7 @@ static int lance_close(struct net_device *dev)
volatile struct lance_regs *ll = lp->ll;
netif_stop_queue(dev);
- del_timer_sync(&lp->multicast_timer);
+ timer_delete_sync(&lp->multicast_timer);
/* Stop the card */
ll->rap = LE_CSR0;
@@ -636,7 +636,7 @@ static void lance_set_multicast(struct net_device *dev)
static void lance_set_multicast_retry(struct timer_list *t)
{
- struct lance_private *lp = from_timer(lp, t, multicast_timer);
+ struct lance_private *lp = timer_container_of(lp, t, multicast_timer);
lance_set_multicast(lp->dev);
}
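
This is the first of several mechanical timer-API conversions in this series (del_timer_sync() -> timer_delete_sync(), from_timer() -> timer_container_of()); the same substitution repeats in the drivers below. A minimal sketch with hypothetical names:

struct foo_priv {
	struct timer_list watchdog;
	/* ... */
};

static void foo_watchdog(struct timer_list *t)
{
	/* timer_container_of() replaces from_timer(): recover the structure
	 * embedding the timer_list that fired.
	 */
	struct foo_priv *priv = timer_container_of(priv, t, watchdog);

	/* ... periodic work using priv ... */
}

static void foo_stop(struct foo_priv *priv)
{
	timer_delete_sync(&priv->watchdog);	/* was del_timer_sync() */
}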
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index f64f96fa17cf..76e8c13d5985 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -1173,7 +1173,7 @@ static int amd8111e_close(struct net_device *dev)
/* Delete ipg timer */
if (lp->options & OPTION_DYN_IPG_ENABLE)
- del_timer_sync(&lp->ipg_data.ipg_timer);
+ timer_delete_sync(&lp->ipg_data.ipg_timer);
spin_unlock_irq(&lp->lock);
free_irq(dev->irq, dev);
@@ -1598,7 +1598,7 @@ static int __maybe_unused amd8111e_suspend(struct device *dev_d)
/* stop chip */
spin_lock_irq(&lp->lock);
if (lp->options & OPTION_DYN_IPG_ENABLE)
- del_timer_sync(&lp->ipg_data.ipg_timer);
+ timer_delete_sync(&lp->ipg_data.ipg_timer);
amd8111e_stop_chip(lp);
spin_unlock_irq(&lp->lock);
@@ -1641,7 +1641,8 @@ static int __maybe_unused amd8111e_resume(struct device *dev_d)
static void amd8111e_config_ipg(struct timer_list *t)
{
- struct amd8111e_priv *lp = from_timer(lp, t, ipg_data.ipg_timer);
+ struct amd8111e_priv *lp = timer_container_of(lp, t,
+ ipg_data.ipg_timer);
struct ipg_info *ipg_data = &lp->ipg_data;
void __iomem *mmio = lp->mmio;
unsigned int prev_col_cnt = ipg_data->col_cnt;
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index 0671a066913b..9d35ac348ebe 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -571,7 +571,7 @@ static struct db_dest *au1000_GetFreeDB(struct au1000_private *aup)
return pDB;
}
-void au1000_ReleaseDB(struct au1000_private *aup, struct db_dest *pDB)
+static void au1000_ReleaseDB(struct au1000_private *aup, struct db_dest *pDB)
{
struct db_dest *pDBfree = aup->pDBfree;
if (pDBfree)
diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c
index ec8df05e7bf6..8d05a0c5f2d5 100644
--- a/drivers/net/ethernet/amd/declance.c
+++ b/drivers/net/ethernet/amd/declance.c
@@ -842,7 +842,7 @@ static int lance_close(struct net_device *dev)
volatile struct lance_regs *ll = lp->ll;
netif_stop_queue(dev);
- del_timer_sync(&lp->multicast_timer);
+ timer_delete_sync(&lp->multicast_timer);
/* Stop the card */
writereg(&ll->rap, LE_CSR0);
@@ -1004,7 +1004,7 @@ static void lance_set_multicast(struct net_device *dev)
static void lance_set_multicast_retry(struct timer_list *t)
{
- struct lance_private *lp = from_timer(lp, t, multicast_timer);
+ struct lance_private *lp = timer_container_of(lp, t, multicast_timer);
struct net_device *dev = lp->dev;
lance_set_multicast(dev);
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index 72db9f9e7bee..9eaefa0f5e80 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -462,7 +462,7 @@ static void pcnet32_netif_start(struct net_device *dev)
val = lp->a->read_csr(ioaddr, CSR3);
val &= 0x00ff;
lp->a->write_csr(ioaddr, CSR3, val);
- napi_enable(&lp->napi);
+ napi_enable_locked(&lp->napi);
}
/*
@@ -889,6 +889,7 @@ static int pcnet32_set_ringparam(struct net_device *dev,
if (netif_running(dev))
pcnet32_netif_stop(dev);
+ netdev_lock(dev);
spin_lock_irqsave(&lp->lock, flags);
lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
@@ -920,6 +921,7 @@ static int pcnet32_set_ringparam(struct net_device *dev,
}
spin_unlock_irqrestore(&lp->lock, flags);
+ netdev_unlock(dev);
netif_info(lp, drv, dev, "Ring Param Settings: RX: %d, TX: %d\n",
lp->rx_ring_size, lp->tx_ring_size);
@@ -985,6 +987,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
if (netif_running(dev))
pcnet32_netif_stop(dev);
+ netdev_lock(dev);
spin_lock_irqsave(&lp->lock, flags);
lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
@@ -1122,6 +1125,7 @@ clean_up:
lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
}
spin_unlock_irqrestore(&lp->lock, flags);
+ netdev_unlock(dev);
return rc;
} /* end pcnet32_loopback_test */
@@ -2101,6 +2105,7 @@ static int pcnet32_open(struct net_device *dev)
return -EAGAIN;
}
+ netdev_lock(dev);
spin_lock_irqsave(&lp->lock, flags);
/* Check for a valid station address */
if (!is_valid_ether_addr(dev->dev_addr)) {
@@ -2266,7 +2271,7 @@ static int pcnet32_open(struct net_device *dev)
goto err_free_ring;
}
- napi_enable(&lp->napi);
+ napi_enable_locked(&lp->napi);
/* Re-initialize the PCNET32, and start it when done. */
lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
@@ -2300,6 +2305,7 @@ static int pcnet32_open(struct net_device *dev)
lp->a->read_csr(ioaddr, CSR0));
spin_unlock_irqrestore(&lp->lock, flags);
+ netdev_unlock(dev);
return 0; /* Always succeed */
@@ -2315,6 +2321,7 @@ err_free_ring:
err_free_irq:
spin_unlock_irqrestore(&lp->lock, flags);
+ netdev_unlock(dev);
free_irq(dev->irq, dev);
return rc;
}
@@ -2623,7 +2630,7 @@ static int pcnet32_close(struct net_device *dev)
struct pcnet32_private *lp = netdev_priv(dev);
unsigned long flags;
- del_timer_sync(&lp->watchdog_timer);
+ timer_delete_sync(&lp->watchdog_timer);
netif_stop_queue(dev);
napi_disable(&lp->napi);
@@ -2898,7 +2905,7 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
static void pcnet32_watchdog(struct timer_list *t)
{
- struct pcnet32_private *lp = from_timer(lp, t, watchdog_timer);
+ struct pcnet32_private *lp = timer_container_of(lp, t, watchdog_timer);
struct net_device *dev = lp->dev;
unsigned long flags;
diff --git a/drivers/net/ethernet/amd/pds_core/adminq.c b/drivers/net/ethernet/amd/pds_core/adminq.c
index c83a0a80d533..097bb092bdb8 100644
--- a/drivers/net/ethernet/amd/pds_core/adminq.c
+++ b/drivers/net/ethernet/amd/pds_core/adminq.c
@@ -5,11 +5,6 @@
#include "core.h"
-struct pdsc_wait_context {
- struct pdsc_qcq *qcq;
- struct completion wait_completion;
-};
-
static int pdsc_process_notifyq(struct pdsc_qcq *qcq)
{
union pds_core_notifyq_comp *comp;
@@ -109,10 +104,10 @@ void pdsc_process_adminq(struct pdsc_qcq *qcq)
q_info = &q->info[q->tail_idx];
q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
- /* Copy out the completion data */
- memcpy(q_info->dest, comp, sizeof(*comp));
-
- complete_all(&q_info->wc->wait_completion);
+ if (!completion_done(&q_info->completion)) {
+ memcpy(q_info->dest, comp, sizeof(*comp));
+ complete(&q_info->completion);
+ }
if (cq->tail_idx == cq->num_descs - 1)
cq->done_color = !cq->done_color;
@@ -162,8 +157,7 @@ irqreturn_t pdsc_adminq_isr(int irq, void *data)
static int __pdsc_adminq_post(struct pdsc *pdsc,
struct pdsc_qcq *qcq,
union pds_core_adminq_cmd *cmd,
- union pds_core_adminq_comp *comp,
- struct pdsc_wait_context *wc)
+ union pds_core_adminq_comp *comp)
{
struct pdsc_queue *q = &qcq->q;
struct pdsc_q_info *q_info;
@@ -205,9 +199,9 @@ static int __pdsc_adminq_post(struct pdsc *pdsc,
/* Post the request */
index = q->head_idx;
q_info = &q->info[index];
- q_info->wc = wc;
q_info->dest = comp;
memcpy(q_info->desc, cmd, sizeof(*cmd));
+ reinit_completion(&q_info->completion);
dev_dbg(pdsc->dev, "head_idx %d tail_idx %d\n",
q->head_idx, q->tail_idx);
@@ -231,16 +225,13 @@ int pdsc_adminq_post(struct pdsc *pdsc,
union pds_core_adminq_comp *comp,
bool fast_poll)
{
- struct pdsc_wait_context wc = {
- .wait_completion =
- COMPLETION_INITIALIZER_ONSTACK(wc.wait_completion),
- };
- unsigned long poll_interval = 1;
+ unsigned long poll_interval = 200;
unsigned long poll_jiffies;
unsigned long time_limit;
unsigned long time_start;
unsigned long time_done;
unsigned long remaining;
+ struct completion *wc;
int err = 0;
int index;
@@ -250,20 +241,19 @@ int pdsc_adminq_post(struct pdsc *pdsc,
return -ENXIO;
}
- wc.qcq = &pdsc->adminqcq;
- index = __pdsc_adminq_post(pdsc, &pdsc->adminqcq, cmd, comp, &wc);
+ index = __pdsc_adminq_post(pdsc, &pdsc->adminqcq, cmd, comp);
if (index < 0) {
err = index;
goto err_out;
}
+ wc = &pdsc->adminqcq.q.info[index].completion;
time_start = jiffies;
time_limit = time_start + HZ * pdsc->devcmd_timeout;
do {
/* Timeslice the actual wait to catch IO errors etc early */
- poll_jiffies = msecs_to_jiffies(poll_interval);
- remaining = wait_for_completion_timeout(&wc.wait_completion,
- poll_jiffies);
+ poll_jiffies = usecs_to_jiffies(poll_interval);
+ remaining = wait_for_completion_timeout(wc, poll_jiffies);
if (remaining)
break;
@@ -292,9 +282,11 @@ int pdsc_adminq_post(struct pdsc *pdsc,
dev_dbg(pdsc->dev, "%s: elapsed %d msecs\n",
__func__, jiffies_to_msecs(time_done - time_start));
- /* Check the results */
- if (time_after_eq(time_done, time_limit))
+ /* Check the results and clear an un-completed timeout */
+ if (time_after_eq(time_done, time_limit) && !completion_done(wc)) {
err = -ETIMEDOUT;
+ complete(wc);
+ }
dev_dbg(pdsc->dev, "read admin queue completion idx %d:\n", index);
dynamic_hex_dump("comp ", DUMP_PREFIX_OFFSET, 16, 1,
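
The adminq rework above replaces the on-stack pdsc_wait_context with a struct completion embedded in each descriptor slot, so a completion that arrives after the poster has already timed out lands in memory that stays valid rather than in a dead stack frame. A heavily trimmed sketch of the resulting flow, using the names from the patch:

/* queue map time */
init_completion(&q_info->completion);

/* posting side */
reinit_completion(&q_info->completion);
/* ... fill the descriptor and ring the doorbell ... */
remaining = wait_for_completion_timeout(&q_info->completion,
					usecs_to_jiffies(200));

/* interrupt/poll side */
if (!completion_done(&q_info->completion)) {
	memcpy(q_info->dest, comp, sizeof(*comp));
	complete(&q_info->completion);
}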
diff --git a/drivers/net/ethernet/amd/pds_core/auxbus.c b/drivers/net/ethernet/amd/pds_core/auxbus.c
index 2babea110991..92f359f2b449 100644
--- a/drivers/net/ethernet/amd/pds_core/auxbus.c
+++ b/drivers/net/ethernet/amd/pds_core/auxbus.c
@@ -107,9 +107,6 @@ int pds_client_adminq_cmd(struct pds_auxiliary_dev *padev,
dev_dbg(pf->dev, "%s: %s opcode %d\n",
__func__, dev_name(&padev->aux_dev.dev), req->opcode);
- if (pf->state)
- return -ENXIO;
-
/* Wrap the client's request */
cmd.client_request.opcode = PDS_AQ_CMD_CLIENT_CMD;
cmd.client_request.client_id = cpu_to_le16(padev->client_id);
@@ -175,34 +172,31 @@ static struct pds_auxiliary_dev *pdsc_auxbus_dev_register(struct pdsc *cf,
return padev;
}
-int pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf)
+void pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf,
+ struct pds_auxiliary_dev **pd_ptr)
{
struct pds_auxiliary_dev *padev;
- int err = 0;
- if (!cf)
- return -ENODEV;
+ if (!*pd_ptr)
+ return;
mutex_lock(&pf->config_lock);
- padev = pf->vfs[cf->vf_id].padev;
- if (padev) {
- pds_client_unregister(pf, padev->client_id);
- auxiliary_device_delete(&padev->aux_dev);
- auxiliary_device_uninit(&padev->aux_dev);
- padev->client_id = 0;
- }
- pf->vfs[cf->vf_id].padev = NULL;
+ padev = *pd_ptr;
+ pds_client_unregister(pf, padev->client_id);
+ auxiliary_device_delete(&padev->aux_dev);
+ auxiliary_device_uninit(&padev->aux_dev);
+ *pd_ptr = NULL;
mutex_unlock(&pf->config_lock);
- return err;
}
-int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf)
+int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf,
+ enum pds_core_vif_types vt,
+ struct pds_auxiliary_dev **pd_ptr)
{
struct pds_auxiliary_dev *padev;
char devname[PDS_DEVNAME_LEN];
- enum pds_core_vif_types vt;
unsigned long mask;
u16 vt_support;
int client_id;
@@ -211,6 +205,9 @@ int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf)
if (!cf)
return -ENODEV;
+ if (vt >= PDS_DEV_TYPE_MAX)
+ return -EINVAL;
+
mutex_lock(&pf->config_lock);
mask = BIT_ULL(PDSC_S_FW_DEAD) |
@@ -222,17 +219,10 @@ int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf)
goto out_unlock;
}
- /* We only support vDPA so far, so it is the only one to
- * be verified that it is available in the Core device and
- * enabled in the devlink param. In the future this might
- * become a loop for several VIF types.
- */
-
/* Verify that the type is supported and enabled. It is not
- * an error if there is no auxbus device support for this
- * VF, it just means something else needs to happen with it.
+ * an error if the firmware doesn't support the feature, the
+ * driver just won't set up an auxiliary_device for it.
*/
- vt = PDS_DEV_TYPE_VDPA;
vt_support = !!le16_to_cpu(pf->dev_ident.vif_types[vt]);
if (!(vt_support &&
pf->viftype_status[vt].supported &&
@@ -258,7 +248,7 @@ int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf)
err = PTR_ERR(padev);
goto out_unlock;
}
- pf->vfs[cf->vf_id].padev = padev;
+ *pd_ptr = padev;
out_unlock:
mutex_unlock(&pf->config_lock);
diff --git a/drivers/net/ethernet/amd/pds_core/core.c b/drivers/net/ethernet/amd/pds_core/core.c
index 536635e57727..076dfe2910c7 100644
--- a/drivers/net/ethernet/amd/pds_core/core.c
+++ b/drivers/net/ethernet/amd/pds_core/core.c
@@ -167,8 +167,10 @@ static void pdsc_q_map(struct pdsc_queue *q, void *base, dma_addr_t base_pa)
q->base = base;
q->base_pa = base_pa;
- for (i = 0, cur = q->info; i < q->num_descs; i++, cur++)
+ for (i = 0, cur = q->info; i < q->num_descs; i++, cur++) {
cur->desc = base + (i * q->desc_size);
+ init_completion(&cur->completion);
+ }
}
static void pdsc_cq_map(struct pdsc_cq *cq, void *base, dma_addr_t base_pa)
@@ -325,10 +327,7 @@ static int pdsc_core_init(struct pdsc *pdsc)
size_t sz;
int err;
- /* Scale the descriptor ring length based on number of CPUs and VFs */
- numdescs = max_t(int, PDSC_ADMINQ_MIN_LENGTH, num_online_cpus());
- numdescs += 2 * pci_sriov_get_totalvfs(pdsc->pdev);
- numdescs = roundup_pow_of_two(numdescs);
+ numdescs = PDSC_ADMINQ_MAX_LENGTH;
err = pdsc_qcq_alloc(pdsc, PDS_CORE_QTYPE_ADMINQ, 0, "adminq",
PDS_CORE_QCQ_F_CORE | PDS_CORE_QCQ_F_INTR,
numdescs,
@@ -402,6 +401,10 @@ err_out_uninit:
}
static struct pdsc_viftype pdsc_viftype_defaults[] = {
+ [PDS_DEV_TYPE_FWCTL] = { .name = PDS_DEV_TYPE_FWCTL_STR,
+ .enabled = true,
+ .vif_id = PDS_DEV_TYPE_FWCTL,
+ .dl_id = -1 },
[PDS_DEV_TYPE_VDPA] = { .name = PDS_DEV_TYPE_VDPA_STR,
.vif_id = PDS_DEV_TYPE_VDPA,
.dl_id = DEVLINK_PARAM_GENERIC_ID_ENABLE_VNET },
@@ -412,7 +415,8 @@ static int pdsc_viftypes_init(struct pdsc *pdsc)
{
enum pds_core_vif_types vt;
- pdsc->viftype_status = kzalloc(sizeof(pdsc_viftype_defaults),
+ pdsc->viftype_status = kcalloc(ARRAY_SIZE(pdsc_viftype_defaults),
+ sizeof(*pdsc->viftype_status),
GFP_KERNEL);
if (!pdsc->viftype_status)
return -ENOMEM;
@@ -428,6 +432,7 @@ static int pdsc_viftypes_init(struct pdsc *pdsc)
/* See what the Core device has for support */
vt_support = !!le16_to_cpu(pdsc->dev_ident.vif_types[vt]);
+
dev_dbg(pdsc->dev, "VIF %s is %ssupported\n",
pdsc->viftype_status[vt].name,
vt_support ? "" : "not ");
diff --git a/drivers/net/ethernet/amd/pds_core/core.h b/drivers/net/ethernet/amd/pds_core/core.h
index 14522d6d5f86..0b53a1fab46d 100644
--- a/drivers/net/ethernet/amd/pds_core/core.h
+++ b/drivers/net/ethernet/amd/pds_core/core.h
@@ -16,7 +16,7 @@
#define PDSC_WATCHDOG_SECS 5
#define PDSC_QUEUE_NAME_MAX_SZ 16
-#define PDSC_ADMINQ_MIN_LENGTH 16 /* must be a power of two */
+#define PDSC_ADMINQ_MAX_LENGTH 16 /* must be a power of two */
#define PDSC_NOTIFYQ_LENGTH 64 /* must be a power of two */
#define PDSC_TEARDOWN_RECOVERY false
#define PDSC_TEARDOWN_REMOVING true
@@ -96,7 +96,7 @@ struct pdsc_q_info {
unsigned int bytes;
unsigned int nbufs;
struct pdsc_buf_info bufs[PDS_CORE_MAX_FRAGS];
- struct pdsc_wait_context *wc;
+ struct completion completion;
void *dest;
};
@@ -156,6 +156,7 @@ struct pdsc {
struct dentry *dentry;
struct device *dev;
struct pdsc_dev_bar bars[PDS_CORE_BARS_MAX];
+ struct pds_auxiliary_dev *padev;
struct pdsc_vf *vfs;
int num_vfs;
int vf_id;
@@ -303,8 +304,11 @@ void pdsc_health_thread(struct work_struct *work);
int pdsc_register_notify(struct notifier_block *nb);
void pdsc_unregister_notify(struct notifier_block *nb);
void pdsc_notify(unsigned long event, void *data);
-int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf);
-int pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf);
+int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf,
+ enum pds_core_vif_types vt,
+ struct pds_auxiliary_dev **pd_ptr);
+void pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf,
+ struct pds_auxiliary_dev **pd_ptr);
void pdsc_process_adminq(struct pdsc_qcq *qcq);
void pdsc_work_thread(struct work_struct *work);
diff --git a/drivers/net/ethernet/amd/pds_core/debugfs.c b/drivers/net/ethernet/amd/pds_core/debugfs.c
index ac37a4e738ae..04c5e3abd8d7 100644
--- a/drivers/net/ethernet/amd/pds_core/debugfs.c
+++ b/drivers/net/ethernet/amd/pds_core/debugfs.c
@@ -154,8 +154,9 @@ void pdsc_debugfs_add_qcq(struct pdsc *pdsc, struct pdsc_qcq *qcq)
debugfs_create_u32("index", 0400, intr_dentry, &intr->index);
debugfs_create_u32("vector", 0400, intr_dentry, &intr->vector);
- intr_ctrl_regset = kzalloc(sizeof(*intr_ctrl_regset),
- GFP_KERNEL);
+ intr_ctrl_regset = devm_kzalloc(pdsc->dev,
+ sizeof(*intr_ctrl_regset),
+ GFP_KERNEL);
if (!intr_ctrl_regset)
return;
intr_ctrl_regset->regs = intr_ctrl_regs;
diff --git a/drivers/net/ethernet/amd/pds_core/devlink.c b/drivers/net/ethernet/amd/pds_core/devlink.c
index 2681889162a2..d8dc39da4161 100644
--- a/drivers/net/ethernet/amd/pds_core/devlink.c
+++ b/drivers/net/ethernet/amd/pds_core/devlink.c
@@ -56,8 +56,11 @@ int pdsc_dl_enable_set(struct devlink *dl, u32 id,
for (vf_id = 0; vf_id < pdsc->num_vfs; vf_id++) {
struct pdsc *vf = pdsc->vfs[vf_id].vf;
- err = ctx->val.vbool ? pdsc_auxbus_dev_add(vf, pdsc) :
- pdsc_auxbus_dev_del(vf, pdsc);
+ if (ctx->val.vbool)
+ err = pdsc_auxbus_dev_add(vf, pdsc, vt_entry->vif_id,
+ &pdsc->vfs[vf_id].padev);
+ else
+ pdsc_auxbus_dev_del(vf, pdsc, &pdsc->vfs[vf_id].padev);
}
return err;
@@ -102,7 +105,7 @@ int pdsc_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
.fw_control.opcode = PDS_CORE_CMD_FW_CONTROL,
.fw_control.oper = PDS_CORE_FW_GET_LIST,
};
- struct pds_core_fw_list_info fw_list;
+ struct pds_core_fw_list_info fw_list = {};
struct pdsc *pdsc = devlink_priv(dl);
union pds_core_dev_comp comp;
char buf[32];
@@ -115,10 +118,8 @@ int pdsc_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
if (!err)
memcpy_fromio(&fw_list, pdsc->cmd_regs->data, sizeof(fw_list));
mutex_unlock(&pdsc->devcmd_lock);
- if (err && err != -EIO)
- return err;
- listlen = fw_list.num_fw_slots;
+ listlen = min(fw_list.num_fw_slots, ARRAY_SIZE(fw_list.fw_names));
for (i = 0; i < listlen; i++) {
if (i < ARRAY_SIZE(fw_slotnames))
strscpy(buf, fw_slotnames[i], sizeof(buf));
diff --git a/drivers/net/ethernet/amd/pds_core/main.c b/drivers/net/ethernet/amd/pds_core/main.c
index 660268ff9562..9b81e1c260c2 100644
--- a/drivers/net/ethernet/amd/pds_core/main.c
+++ b/drivers/net/ethernet/amd/pds_core/main.c
@@ -23,7 +23,7 @@ MODULE_DEVICE_TABLE(pci, pdsc_id_table);
static void pdsc_wdtimer_cb(struct timer_list *t)
{
- struct pdsc *pdsc = from_timer(pdsc, t, wdtimer);
+ struct pdsc *pdsc = timer_container_of(pdsc, t, wdtimer);
dev_dbg(pdsc->dev, "%s: jiffies %ld\n", __func__, jiffies);
mod_timer(&pdsc->wdtimer,
@@ -190,7 +190,8 @@ static int pdsc_init_vf(struct pdsc *vf)
devl_unlock(dl);
pf->vfs[vf->vf_id].vf = vf;
- err = pdsc_auxbus_dev_add(vf, pf);
+ err = pdsc_auxbus_dev_add(vf, pf, PDS_DEV_TYPE_VDPA,
+ &pf->vfs[vf->vf_id].padev);
if (err) {
devl_lock(dl);
devl_unregister(dl);
@@ -264,6 +265,10 @@ static int pdsc_init_pf(struct pdsc *pdsc)
mutex_unlock(&pdsc->config_lock);
+ err = pdsc_auxbus_dev_add(pdsc, pdsc, PDS_DEV_TYPE_FWCTL, &pdsc->padev);
+ if (err)
+ goto err_out_stop;
+
dl = priv_to_devlink(pdsc);
devl_lock(dl);
err = devl_params_register(dl, pdsc_dl_params,
@@ -272,7 +277,7 @@ static int pdsc_init_pf(struct pdsc *pdsc)
devl_unlock(dl);
dev_warn(pdsc->dev, "Failed to register devlink params: %pe\n",
ERR_PTR(err));
- goto err_out_stop;
+ goto err_out_del_dev;
}
hr = devl_health_reporter_create(dl, &pdsc_fw_reporter_ops, 0, pdsc);
@@ -295,6 +300,8 @@ static int pdsc_init_pf(struct pdsc *pdsc)
err_out_unreg_params:
devlink_params_unregister(dl, pdsc_dl_params,
ARRAY_SIZE(pdsc_dl_params));
+err_out_del_dev:
+ pdsc_auxbus_dev_del(pdsc, pdsc, &pdsc->padev);
err_out_stop:
pdsc_stop(pdsc);
err_out_teardown:
@@ -417,7 +424,7 @@ static void pdsc_remove(struct pci_dev *pdev)
pf = pdsc_get_pf_struct(pdsc->pdev);
if (!IS_ERR(pf)) {
- pdsc_auxbus_dev_del(pdsc, pf);
+ pdsc_auxbus_dev_del(pdsc, pf, &pf->vfs[pdsc->vf_id].padev);
pf->vfs[pdsc->vf_id].vf = NULL;
}
} else {
@@ -426,6 +433,7 @@ static void pdsc_remove(struct pci_dev *pdev)
* shut themselves down.
*/
pdsc_sriov_configure(pdev, 0);
+ pdsc_auxbus_dev_del(pdsc, pdsc, &pdsc->padev);
timer_shutdown_sync(&pdsc->wdtimer);
if (pdsc->wq)
@@ -482,7 +490,10 @@ static void pdsc_reset_prepare(struct pci_dev *pdev)
pf = pdsc_get_pf_struct(pdsc->pdev);
if (!IS_ERR(pf))
- pdsc_auxbus_dev_del(pdsc, pf);
+ pdsc_auxbus_dev_del(pdsc, pf,
+ &pf->vfs[pdsc->vf_id].padev);
+ } else {
+ pdsc_auxbus_dev_del(pdsc, pdsc, &pdsc->padev);
}
pdsc_unmap_bars(pdsc);
@@ -527,7 +538,11 @@ static void pdsc_reset_done(struct pci_dev *pdev)
pf = pdsc_get_pf_struct(pdsc->pdev);
if (!IS_ERR(pf))
- pdsc_auxbus_dev_add(pdsc, pf);
+ pdsc_auxbus_dev_add(pdsc, pf, PDS_DEV_TYPE_VDPA,
+ &pf->vfs[pdsc->vf_id].padev);
+ } else {
+ pdsc_auxbus_dev_add(pdsc, pdsc, PDS_DEV_TYPE_FWCTL,
+ &pdsc->padev);
}
}
diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c
index 0f98b92408ed..0b273327f5a6 100644
--- a/drivers/net/ethernet/amd/sunlance.c
+++ b/drivers/net/ethernet/amd/sunlance.c
@@ -963,7 +963,7 @@ static int lance_close(struct net_device *dev)
struct lance_private *lp = netdev_priv(dev);
netif_stop_queue(dev);
- del_timer_sync(&lp->multicast_timer);
+ timer_delete_sync(&lp->multicast_timer);
STOP_LANCE(lp);
@@ -1246,7 +1246,7 @@ static void lance_set_multicast(struct net_device *dev)
static void lance_set_multicast_retry(struct timer_list *t)
{
- struct lance_private *lp = from_timer(lp, t, multicast_timer);
+ struct lance_private *lp = timer_container_of(lp, t, multicast_timer);
struct net_device *dev = lp->dev;
lance_set_multicast(dev);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index 3b70f6737633..9316de4126cf 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#ifndef __XGBE_COMMON_H__
@@ -900,6 +791,11 @@
#define PCS_V2_RV_WINDOW_SELECT 0x1064
#define PCS_V2_YC_WINDOW_DEF 0x18060
#define PCS_V2_YC_WINDOW_SELECT 0x18064
+#define PCS_V3_RN_WINDOW_DEF 0xf8078
+#define PCS_V3_RN_WINDOW_SELECT 0xf807c
+
+#define PCS_RN_SMN_BASE_ADDR 0x11e00000
+#define PCS_RN_PORT_ADDR_SIZE 0x100000
/* PCS register entry bit positions and sizes */
#define PCS_V2_WINDOW_DEF_OFFSET_INDEX 6
@@ -1373,6 +1269,8 @@
#define MDIO_VEND2_CTRL1_SS13 BIT(13)
#endif
+#define XGBE_VEND2_MAC_AUTO_SW BIT(9)
+
/* MDIO mask values */
#define XGBE_AN_CL73_INT_CMPLT BIT(0)
#define XGBE_AN_CL73_INC_LINK BIT(1)
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
index c68ace804e37..1474df5544fa 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/netdevice.h>
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
index b0a6c96b6ef4..d9157c4acde9 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/debugfs.h>
@@ -505,21 +396,6 @@ void xgbe_debugfs_exit(struct xgbe_prv_data *pdata)
void xgbe_debugfs_rename(struct xgbe_prv_data *pdata)
{
- char *buf;
-
- if (!pdata->xgbe_debugfs)
- return;
-
- buf = kasprintf(GFP_KERNEL, "amd-xgbe-%s", pdata->netdev->name);
- if (!buf)
- return;
-
- if (!strcmp(pdata->xgbe_debugfs->d_name.name, buf))
- goto out;
-
- debugfs_rename(pdata->xgbe_debugfs->d_parent, pdata->xgbe_debugfs,
- pdata->xgbe_debugfs->d_parent, buf);
-
-out:
- kfree(buf);
+ debugfs_change_name(pdata->xgbe_debugfs,
+ "amd-xgbe-%s", pdata->netdev->name);
}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
index 230726d7b74f..7c8a19988a52 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include "xgbe.h"
@@ -373,8 +264,13 @@ static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
}
/* Set up the header page info */
- xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa,
- XGBE_SKB_ALLOC_SIZE);
+ if (pdata->netdev->features & NETIF_F_RXCSUM) {
+ xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa,
+ XGBE_SKB_ALLOC_SIZE);
+ } else {
+ xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa,
+ pdata->rx_buf_size);
+ }
/* Set up the buffer page info */
xgbe_set_buffer_data(&rdata->rx.buf, &ring->rx_buf_pa,
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index f393228d41c7..466b5f6e5578 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/phy.h>
@@ -120,9 +11,11 @@
#include <linux/bitrev.h>
#include <linux/crc32.h>
#include <linux/crc32poly.h>
+#include <linux/pci.h>
#include "xgbe.h"
#include "xgbe-common.h"
+#include "xgbe-smn.h"
static inline unsigned int xgbe_get_max_frame(struct xgbe_prv_data *pdata)
{
@@ -320,6 +213,18 @@ static void xgbe_config_sph_mode(struct xgbe_prv_data *pdata)
XGMAC_IOWRITE_BITS(pdata, MAC_RCR, HDSMS, XGBE_SPH_HDSMS_SIZE);
}
+static void xgbe_disable_sph_mode(struct xgbe_prv_data *pdata)
+{
+ unsigned int i;
+
+ for (i = 0; i < pdata->channel_count; i++) {
+ if (!pdata->channel[i]->rx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, SPH, 0);
+ }
+}
+
static int xgbe_write_rss_reg(struct xgbe_prv_data *pdata, unsigned int type,
unsigned int index, unsigned int val)
{
@@ -1150,18 +1055,19 @@ static int xgbe_set_gpio(struct xgbe_prv_data *pdata, unsigned int gpio)
return 0;
}
-static int xgbe_read_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
- int mmd_reg)
+static unsigned int xgbe_get_mmd_address(struct xgbe_prv_data *pdata,
+ int mmd_reg)
{
- unsigned long flags;
- unsigned int mmd_address, index, offset;
- int mmd_data;
-
- if (mmd_reg & XGBE_ADDR_C45)
- mmd_address = mmd_reg & ~XGBE_ADDR_C45;
- else
- mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
+ return (mmd_reg & XGBE_ADDR_C45) ?
+ mmd_reg & ~XGBE_ADDR_C45 :
+ (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
+}
+
+static void xgbe_get_pcs_index_and_offset(struct xgbe_prv_data *pdata,
+ unsigned int mmd_address,
+ unsigned int *index,
+ unsigned int *offset)
+{
/* The PCS registers are accessed using mmio. The underlying
* management interface uses indirect addressing to access the MMD
* register sets. This requires accessing of the PCS register in two
@@ -1172,8 +1078,98 @@ static int xgbe_read_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
* offset 1 bit and reading 16 bits of data.
*/
mmd_address <<= 1;
- index = mmd_address & ~pdata->xpcs_window_mask;
- offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
+ *index = mmd_address & ~pdata->xpcs_window_mask;
+ *offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
+}
+
+static int xgbe_read_mmd_regs_v3(struct xgbe_prv_data *pdata, int prtad,
+ int mmd_reg)
+{
+ unsigned int mmd_address, index, offset;
+ u32 smn_address;
+ int mmd_data;
+ int ret;
+
+ mmd_address = xgbe_get_mmd_address(pdata, mmd_reg);
+
+ xgbe_get_pcs_index_and_offset(pdata, mmd_address, &index, &offset);
+
+ smn_address = pdata->smn_base + pdata->xpcs_window_sel_reg;
+ ret = amd_smn_write(0, smn_address, index);
+ if (ret)
+ return ret;
+
+ ret = amd_smn_read(0, pdata->smn_base + offset, &mmd_data);
+ if (ret)
+ return ret;
+
+ mmd_data = (offset % 4) ? FIELD_GET(XGBE_GEN_HI_MASK, mmd_data) :
+ FIELD_GET(XGBE_GEN_LO_MASK, mmd_data);
+
+ return mmd_data;
+}
+
+static void xgbe_write_mmd_regs_v3(struct xgbe_prv_data *pdata, int prtad,
+ int mmd_reg, int mmd_data)
+{
+ unsigned int pci_mmd_data, hi_mask, lo_mask;
+ unsigned int mmd_address, index, offset;
+ struct pci_dev *dev;
+ u32 smn_address;
+ int ret;
+
+ dev = pdata->pcidev;
+ mmd_address = xgbe_get_mmd_address(pdata, mmd_reg);
+
+ xgbe_get_pcs_index_and_offset(pdata, mmd_address, &index, &offset);
+
+ smn_address = pdata->smn_base + pdata->xpcs_window_sel_reg;
+ ret = amd_smn_write(0, smn_address, index);
+ if (ret) {
+ pci_err(dev, "Failed to write data 0x%x\n", index);
+ return;
+ }
+
+ ret = amd_smn_read(0, pdata->smn_base + offset, &pci_mmd_data);
+ if (ret) {
+ pci_err(dev, "Failed to read data\n");
+ return;
+ }
+
+ if (offset % 4) {
+ hi_mask = FIELD_PREP(XGBE_GEN_HI_MASK, mmd_data);
+ lo_mask = FIELD_GET(XGBE_GEN_LO_MASK, pci_mmd_data);
+ } else {
+ hi_mask = FIELD_PREP(XGBE_GEN_HI_MASK,
+ FIELD_GET(XGBE_GEN_HI_MASK, pci_mmd_data));
+ lo_mask = FIELD_GET(XGBE_GEN_LO_MASK, mmd_data);
+ }
+
+ pci_mmd_data = hi_mask | lo_mask;
+
+ ret = amd_smn_write(0, smn_address, index);
+ if (ret) {
+ pci_err(dev, "Failed to write data 0x%x\n", index);
+ return;
+ }
+
+ ret = amd_smn_write(0, (pdata->smn_base + offset), pci_mmd_data);
+ if (ret) {
+ pci_err(dev, "Failed to write data 0x%x\n", pci_mmd_data);
+ return;
+ }
+}
+
+static int xgbe_read_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
+ int mmd_reg)
+{
+ unsigned int mmd_address, index, offset;
+ unsigned long flags;
+ int mmd_data;
+
+ mmd_address = xgbe_get_mmd_address(pdata, mmd_reg);
+
+ xgbe_get_pcs_index_and_offset(pdata, mmd_address, &index, &offset);
spin_lock_irqsave(&pdata->xpcs_lock, flags);
XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
@@ -1189,23 +1185,9 @@ static void xgbe_write_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
unsigned long flags;
unsigned int mmd_address, index, offset;
- if (mmd_reg & XGBE_ADDR_C45)
- mmd_address = mmd_reg & ~XGBE_ADDR_C45;
- else
- mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
+ mmd_address = xgbe_get_mmd_address(pdata, mmd_reg);
- /* The PCS registers are accessed using mmio. The underlying
- * management interface uses indirect addressing to access the MMD
- * register sets. This requires accessing of the PCS register in two
- * phases, an address phase and a data phase.
- *
- * The mmio interface is based on 16-bit offsets and values. All
- * register offsets must therefore be adjusted by left shifting the
- * offset 1 bit and writing 16 bits of data.
- */
- mmd_address <<= 1;
- index = mmd_address & ~pdata->xpcs_window_mask;
- offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
+ xgbe_get_pcs_index_and_offset(pdata, mmd_address, &index, &offset);
spin_lock_irqsave(&pdata->xpcs_lock, flags);
XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
@@ -1220,10 +1202,7 @@ static int xgbe_read_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad,
unsigned int mmd_address;
int mmd_data;
- if (mmd_reg & XGBE_ADDR_C45)
- mmd_address = mmd_reg & ~XGBE_ADDR_C45;
- else
- mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
+ mmd_address = xgbe_get_mmd_address(pdata, mmd_reg);
/* The PCS registers are accessed using mmio. The underlying APB3
* management interface uses indirect addressing to access the MMD
@@ -1248,10 +1227,7 @@ static void xgbe_write_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad,
unsigned int mmd_address;
unsigned long flags;
- if (mmd_reg & XGBE_ADDR_C45)
- mmd_address = mmd_reg & ~XGBE_ADDR_C45;
- else
- mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
+ mmd_address = xgbe_get_mmd_address(pdata, mmd_reg);
/* The PCS registers are accessed using mmio. The underlying APB3
* management interface uses indirect addressing to access the MMD
@@ -1278,6 +1254,9 @@ static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
case XGBE_XPCS_ACCESS_V2:
default:
return xgbe_read_mmd_regs_v2(pdata, prtad, mmd_reg);
+
+ case XGBE_XPCS_ACCESS_V3:
+ return xgbe_read_mmd_regs_v3(pdata, prtad, mmd_reg);
}
}
@@ -1288,6 +1267,9 @@ static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
case XGBE_XPCS_ACCESS_V1:
return xgbe_write_mmd_regs_v1(pdata, prtad, mmd_reg, mmd_data);
+ case XGBE_XPCS_ACCESS_V3:
+ return xgbe_write_mmd_regs_v3(pdata, prtad, mmd_reg, mmd_data);
+
case XGBE_XPCS_ACCESS_V2:
default:
return xgbe_write_mmd_regs_v2(pdata, prtad, mmd_reg, mmd_data);
@@ -3545,8 +3527,12 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
xgbe_config_tx_coalesce(pdata);
xgbe_config_rx_buffer_size(pdata);
xgbe_config_tso_mode(pdata);
- xgbe_config_sph_mode(pdata);
- xgbe_config_rss(pdata);
+
+ if (pdata->netdev->features & NETIF_F_RXCSUM) {
+ xgbe_config_sph_mode(pdata);
+ xgbe_config_rss(pdata);
+ }
+
desc_if->wrapper_tx_desc_init(pdata);
desc_if->wrapper_rx_desc_init(pdata);
xgbe_enable_dma_interrupts(pdata);
@@ -3702,5 +3688,9 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
hw_if->disable_vxlan = xgbe_disable_vxlan;
hw_if->set_vxlan_id = xgbe_set_vxlan_id;
+ /* For Split Header */
+ hw_if->enable_sph = xgbe_config_sph_mode;
+ hw_if->disable_sph = xgbe_disable_sph_mode;
+
DBGPR("<--xgbe_init_function_ptrs\n");
}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 5475867708f4..65447f9a0a59 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/module.h>
@@ -643,7 +534,8 @@ static irqreturn_t xgbe_dma_isr(int irq, void *data)
static void xgbe_tx_timer(struct timer_list *t)
{
- struct xgbe_channel *channel = from_timer(channel, t, tx_timer);
+ struct xgbe_channel *channel = timer_container_of(channel, t,
+ tx_timer);
struct xgbe_prv_data *pdata = channel->pdata;
struct napi_struct *napi;
@@ -681,7 +573,8 @@ static void xgbe_service(struct work_struct *work)
static void xgbe_service_timer(struct timer_list *t)
{
- struct xgbe_prv_data *pdata = from_timer(pdata, t, service_timer);
+ struct xgbe_prv_data *pdata = timer_container_of(pdata, t,
+ service_timer);
struct xgbe_channel *channel;
unsigned int i;
@@ -728,7 +621,7 @@ static void xgbe_stop_timers(struct xgbe_prv_data *pdata)
struct xgbe_channel *channel;
unsigned int i;
- del_timer_sync(&pdata->service_timer);
+ timer_delete_sync(&pdata->service_timer);
for (i = 0; i < pdata->channel_count; i++) {
channel = pdata->channel[i];
@@ -736,7 +629,7 @@ static void xgbe_stop_timers(struct xgbe_prv_data *pdata)
break;
/* Deactivate the Tx timer */
- del_timer_sync(&channel->tx_timer);
+ timer_delete_sync(&channel->tx_timer);
channel->tx_timer_active = 0;
}
}
@@ -2257,10 +2150,17 @@ static int xgbe_set_features(struct net_device *netdev,
if (ret)
return ret;
- if ((features & NETIF_F_RXCSUM) && !rxcsum)
+ if ((features & NETIF_F_RXCSUM) && !rxcsum) {
+ hw_if->enable_sph(pdata);
+ hw_if->enable_vxlan(pdata);
hw_if->enable_rx_csum(pdata);
- else if (!(features & NETIF_F_RXCSUM) && rxcsum)
+ schedule_work(&pdata->restart_work);
+ } else if (!(features & NETIF_F_RXCSUM) && rxcsum) {
+ hw_if->disable_sph(pdata);
+ hw_if->disable_vxlan(pdata);
hw_if->disable_rx_csum(pdata);
+ schedule_work(&pdata->restart_work);
+ }
if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan)
hw_if->enable_rx_vlan_stripping(pdata);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
index 4431ab1c18b3..be0d2c7d08dc 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/spinlock.h>
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c b/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
index 7a833894f52a..d40011e8ddf2 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/module.h>
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index 0e8698928e4d..4ebdd123c435 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/module.h>
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
index 07f4f3418d01..1a37ec45e650 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/interrupt.h>
@@ -375,6 +266,10 @@ static void xgbe_an37_set(struct xgbe_prv_data *pdata, bool enable,
reg |= MDIO_VEND2_CTRL1_AN_RESTART;
XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_CTRL1, reg);
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_PCS_DIG_CTRL);
+ reg |= XGBE_VEND2_MAC_AUTO_SW;
+ XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_PCS_DIG_CTRL, reg);
}
static void xgbe_an37_restart(struct xgbe_prv_data *pdata)
@@ -1003,6 +898,11 @@ static void xgbe_an37_init(struct xgbe_prv_data *pdata)
netif_dbg(pdata, link, pdata->netdev, "CL37 AN (%s) initialized\n",
(pdata->an_mode == XGBE_AN_MODE_CL37) ? "BaseX" : "SGMII");
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_CTRL1);
+ reg &= ~MDIO_AN_CTRL1_ENABLE;
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_CTRL1, reg);
+
}
static void xgbe_an73_init(struct xgbe_prv_data *pdata)
@@ -1404,6 +1304,10 @@ static void xgbe_phy_status(struct xgbe_prv_data *pdata)
pdata->phy.link = pdata->phy_if.phy_impl.link_status(pdata,
&an_restart);
+ /* bail out if the link status register read fails */
+ if (pdata->phy.link < 0)
+ return;
+
if (an_restart) {
xgbe_phy_config_aneg(pdata);
goto adjust_link;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
index c636999a6a84..097ec5e4f261 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
@@ -1,123 +1,15 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/module.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/log2.h>
+#include "xgbe-smn.h"
#include "xgbe.h"
#include "xgbe-common.h"
@@ -207,14 +99,14 @@ out:
static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
- struct xgbe_prv_data *pdata;
- struct device *dev = &pdev->dev;
void __iomem * const *iomap_table;
- struct pci_dev *rdev;
+ unsigned int port_addr_size, reg;
+ struct device *dev = &pdev->dev;
+ struct xgbe_prv_data *pdata;
unsigned int ma_lo, ma_hi;
- unsigned int reg;
- int bar_mask;
- int ret;
+ struct pci_dev *rdev;
+ int bar_mask, ret;
+ u32 address;
pdata = xgbe_alloc_pdata(dev);
if (IS_ERR(pdata)) {
@@ -274,20 +166,31 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* Set the PCS indirect addressing definition registers */
rdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
- if (rdev &&
- (rdev->vendor == PCI_VENDOR_ID_AMD) && (rdev->device == 0x15d0)) {
- pdata->xpcs_window_def_reg = PCS_V2_RV_WINDOW_DEF;
- pdata->xpcs_window_sel_reg = PCS_V2_RV_WINDOW_SELECT;
- } else if (rdev && (rdev->vendor == PCI_VENDOR_ID_AMD) &&
- (rdev->device == 0x14b5)) {
- pdata->xpcs_window_def_reg = PCS_V2_YC_WINDOW_DEF;
- pdata->xpcs_window_sel_reg = PCS_V2_YC_WINDOW_SELECT;
-
- /* Yellow Carp devices do not need cdr workaround */
- pdata->vdata->an_cdr_workaround = 0;
-
- /* Yellow Carp devices do not need rrc */
- pdata->vdata->enable_rrc = 0;
+ if (rdev && rdev->vendor == PCI_VENDOR_ID_AMD) {
+ switch (rdev->device) {
+ case XGBE_RV_PCI_DEVICE_ID:
+ pdata->xpcs_window_def_reg = PCS_V2_RV_WINDOW_DEF;
+ pdata->xpcs_window_sel_reg = PCS_V2_RV_WINDOW_SELECT;
+ break;
+ case XGBE_YC_PCI_DEVICE_ID:
+ pdata->xpcs_window_def_reg = PCS_V2_YC_WINDOW_DEF;
+ pdata->xpcs_window_sel_reg = PCS_V2_YC_WINDOW_SELECT;
+
+ /* Yellow Carp devices do not need cdr workaround */
+ pdata->vdata->an_cdr_workaround = 0;
+
+ /* Yellow Carp devices do not need rrc */
+ pdata->vdata->enable_rrc = 0;
+ break;
+ case XGBE_RN_PCI_DEVICE_ID:
+ pdata->xpcs_window_def_reg = PCS_V3_RN_WINDOW_DEF;
+ pdata->xpcs_window_sel_reg = PCS_V3_RN_WINDOW_SELECT;
+ break;
+ default:
+ pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
+ pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
+ break;
+ }
} else {
pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
@@ -295,7 +198,22 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_dev_put(rdev);
/* Configure the PCS indirect addressing support */
- reg = XPCS32_IOREAD(pdata, pdata->xpcs_window_def_reg);
+ if (pdata->vdata->xpcs_access == XGBE_XPCS_ACCESS_V3) {
+ reg = XP_IOREAD(pdata, XP_PROP_0);
+ port_addr_size = PCS_RN_PORT_ADDR_SIZE *
+ XP_GET_BITS(reg, XP_PROP_0, PORT_ID);
+ pdata->smn_base = PCS_RN_SMN_BASE_ADDR + port_addr_size;
+
+ address = pdata->smn_base + (pdata->xpcs_window_def_reg);
+ ret = amd_smn_read(0, address, &reg);
+ if (ret) {
+ pci_err(pdata->pcidev, "Failed to read data\n");
+ goto err_pci_enable;
+ }
+ } else {
+ reg = XPCS32_IOREAD(pdata, pdata->xpcs_window_def_reg);
+ }
+
pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET);
pdata->xpcs_window <<= 6;
pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE);
@@ -473,6 +391,22 @@ static int __maybe_unused xgbe_pci_resume(struct device *dev)
return ret;
}
+static struct xgbe_version_data xgbe_v3 = {
+ .init_function_ptrs_phy_impl = xgbe_init_function_ptrs_phy_v2,
+ .xpcs_access = XGBE_XPCS_ACCESS_V3,
+ .mmc_64bit = 1,
+ .tx_max_fifo_size = 65536,
+ .rx_max_fifo_size = 65536,
+ .tx_tstamp_workaround = 1,
+ .ecc_support = 1,
+ .i2c_support = 1,
+ .irq_reissue_support = 1,
+ .tx_desc_prefetch = 5,
+ .rx_desc_prefetch = 5,
+ .an_cdr_workaround = 0,
+ .enable_rrc = 0,
+};
+
static struct xgbe_version_data xgbe_v2a = {
.init_function_ptrs_phy_impl = xgbe_init_function_ptrs_phy_v2,
.xpcs_access = XGBE_XPCS_ACCESS_V2,
@@ -510,6 +444,8 @@ static const struct pci_device_id xgbe_pci_table[] = {
.driver_data = (kernel_ulong_t)&xgbe_v2a },
{ PCI_VDEVICE(AMD, 0x1459),
.driver_data = (kernel_ulong_t)&xgbe_v2b },
+ { PCI_VDEVICE(AMD, 0x1641),
+ .driver_data = (kernel_ulong_t)&xgbe_v3 },
/* Last entry must be zero */
{ 0, }
};
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c
index d16eae415f72..2e6b8ffe785c 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/module.h>
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
index 6a716337f48b..23c39e92e783 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/module.h>
@@ -923,7 +814,6 @@ static void xgbe_phy_free_phy_device(struct xgbe_prv_data *pdata)
static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, };
struct xgbe_phy_data *phy_data = pdata->phy_data;
unsigned int phy_id = phy_data->phydev->phy_id;
@@ -945,14 +835,7 @@ static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata)
phy_write(phy_data->phydev, 0x04, 0x0d01);
phy_write(phy_data->phydev, 0x00, 0x9140);
- linkmode_set_bit_array(phy_10_100_features_array,
- ARRAY_SIZE(phy_10_100_features_array),
- supported);
- linkmode_set_bit_array(phy_gbit_features_array,
- ARRAY_SIZE(phy_gbit_features_array),
- supported);
-
- linkmode_copy(phy_data->phydev->supported, supported);
+ linkmode_copy(phy_data->phydev->supported, PHY_GBIT_FEATURES);
phy_support_asym_pause(phy_data->phydev);
@@ -964,7 +847,6 @@ static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata)
static bool xgbe_phy_belfuse_phy_quirks(struct xgbe_prv_data *pdata)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, };
struct xgbe_phy_data *phy_data = pdata->phy_data;
struct xgbe_sfp_eeprom *sfp_eeprom = &phy_data->sfp_eeprom;
unsigned int phy_id = phy_data->phydev->phy_id;
@@ -1028,13 +910,7 @@ static bool xgbe_phy_belfuse_phy_quirks(struct xgbe_prv_data *pdata)
reg = phy_read(phy_data->phydev, 0x00);
phy_write(phy_data->phydev, 0x00, reg & ~0x00800);
- linkmode_set_bit_array(phy_10_100_features_array,
- ARRAY_SIZE(phy_10_100_features_array),
- supported);
- linkmode_set_bit_array(phy_gbit_features_array,
- ARRAY_SIZE(phy_gbit_features_array),
- supported);
- linkmode_copy(phy_data->phydev->supported, supported);
+ linkmode_copy(phy_data->phydev->supported, PHY_GBIT_FEATURES);
phy_support_asym_pause(phy_data->phydev);
netif_dbg(pdata, drv, pdata->netdev,
@@ -2870,8 +2746,7 @@ static bool xgbe_phy_valid_speed(struct xgbe_prv_data *pdata, int speed)
static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
- unsigned int reg;
- int ret;
+ int reg, ret;
*an_restart = 0;
@@ -2905,11 +2780,20 @@ static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart)
return 0;
}
- /* Link status is latched low, so read once to clear
- * and then read again to get current state
- */
- reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+ if (reg < 0)
+ return reg;
+
+ /* Link status is latched low so that momentary link drops
+ * can be detected. If link was already down, read again
+ * to get the latest state.
+ */
+
+ if (!pdata->phy.link && !(reg & MDIO_STAT1_LSTATUS)) {
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+ if (reg < 0)
+ return reg;
+ }
if (pdata->en_rx_adap) {
/* if the link is available and adaptation is done,
@@ -2928,9 +2812,7 @@ static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart)
xgbe_phy_set_mode(pdata, phy_data->cur_mode);
}
- /* check again for the link and adaptation status */
- reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
- if ((reg & MDIO_STAT1_LSTATUS) && pdata->rx_adapt_done)
+ if (pdata->rx_adapt_done)
return 1;
} else if (reg & MDIO_STAT1_LSTATUS)
return 1;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-platform.c b/drivers/net/ethernet/amd/xgbe/xgbe-platform.c
index 4365bd62942c..47d53e59ccf6 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-platform.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-platform.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/module.h>
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
index 7051bd7cf6dc..978c4dd01fa0 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/clk.h>
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-smn.h b/drivers/net/ethernet/amd/xgbe/xgbe-smn.h
new file mode 100644
index 000000000000..c6ae127ced03
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-smn.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause) */
+/*
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
+ *
+ * Author: Raju Rangoju <Raju.Rangoju@amd.com>
+ */
+
+#ifndef __SMN_H__
+#define __SMN_H__
+
+#ifdef CONFIG_AMD_NB
+
+#include <asm/amd/nb.h>
+
+#else
+
+static inline int amd_smn_write(u16 node, u32 address, u32 value)
+{
+ return -ENODEV;
+}
+
+static inline int amd_smn_read(u16 node, u32 address, u32 *value)
+{
+ return -ENODEV;
+}
+
+#endif
+#endif
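
The new header keeps SMN users free of #ifdef CONFIG_AMD_NB: when northbridge support is compiled out, amd_smn_read()/amd_smn_write() degrade to inline stubs returning -ENODEV. A hedged usage sketch; the wrapper below is hypothetical (not part of the patch), node 0 is an assumed value, and pdata->smn_base is the field added to struct xgbe_prv_data in the xgbe.h hunk further down:

	#include "xgbe.h"
	#include "xgbe-smn.h"

	/* Hypothetical wrapper: read a register behind the device's SMN
	 * aperture; simply propagates -ENODEV when AMD_NB is disabled.
	 */
	static int xgbe_smn_read(struct xgbe_prv_data *pdata, u32 reg, u32 *val)
	{
		return amd_smn_read(0, pdata->smn_base + reg, val);
	}
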
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index d85386cac8d1..057379cd43ba 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -1,117 +1,8 @@
+/* SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause) */
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#ifndef __XGBE_H__
@@ -292,12 +183,12 @@
#define XGBE_LINK_TIMEOUT 5
#define XGBE_KR_TRAINING_WAIT_ITER 50
-#define XGBE_SGMII_AN_LINK_STATUS BIT(1)
+#define XGBE_SGMII_AN_LINK_DUPLEX BIT(1)
#define XGBE_SGMII_AN_LINK_SPEED (BIT(2) | BIT(3))
#define XGBE_SGMII_AN_LINK_SPEED_10 0x00
#define XGBE_SGMII_AN_LINK_SPEED_100 0x04
#define XGBE_SGMII_AN_LINK_SPEED_1000 0x08
-#define XGBE_SGMII_AN_LINK_DUPLEX BIT(4)
+#define XGBE_SGMII_AN_LINK_STATUS BIT(4)
/* ECC correctable error notification window (seconds) */
#define XGBE_ECC_LIMIT 60
@@ -347,6 +238,15 @@
(_src)->link_modes._sname, \
__ETHTOOL_LINK_MODE_MASK_NBITS)
+/* XGBE PCI device id */
+#define XGBE_RV_PCI_DEVICE_ID 0x15d0
+#define XGBE_YC_PCI_DEVICE_ID 0x14b5
+#define XGBE_RN_PCI_DEVICE_ID 0x1630
+
+/* Generic low and high masks */
+#define XGBE_GEN_HI_MASK GENMASK(31, 16)
+#define XGBE_GEN_LO_MASK GENMASK(15, 0)
+
struct xgbe_prv_data;
struct xgbe_packet_data {
@@ -565,6 +465,7 @@ enum xgbe_speed {
enum xgbe_xpcs_access {
XGBE_XPCS_ACCESS_V1 = 0,
XGBE_XPCS_ACCESS_V2,
+ XGBE_XPCS_ACCESS_V3,
};
enum xgbe_an_mode {
@@ -865,6 +766,10 @@ struct xgbe_hw_if {
void (*enable_vxlan)(struct xgbe_prv_data *);
void (*disable_vxlan)(struct xgbe_prv_data *);
void (*set_vxlan_id)(struct xgbe_prv_data *);
+
+ /* For Split Header */
+ void (*enable_sph)(struct xgbe_prv_data *pdata);
+ void (*disable_sph)(struct xgbe_prv_data *pdata);
};
/* This structure represents implementation specific routines for an
@@ -1056,6 +961,7 @@ struct xgbe_prv_data {
struct device *dev;
struct platform_device *phy_platdev;
struct device *phy_dev;
+ unsigned int smn_base;
/* Version related data */
struct xgbe_version_data *vdata;
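
XGBE_GEN_HI_MASK/XGBE_GEN_LO_MASK are plain GENMASK() constants for the upper and lower 16 bits of a 32-bit word. A minimal sketch of pulling both halves out with FIELD_GET() from <linux/bitfield.h>; the helper name is illustrative, not from the patch:

	#include <linux/bitfield.h>

	/* Split a 32-bit register value into its high and low 16-bit halves. */
	static void xgbe_split_word(u32 val, u16 *hi, u16 *lo)
	{
		*hi = FIELD_GET(XGBE_GEN_HI_MASK, val);
		*lo = FIELD_GET(XGBE_GEN_LO_MASK, val);
	}
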
diff --git a/drivers/net/ethernet/apm/xgene-v2/main.c b/drivers/net/ethernet/apm/xgene-v2/main.c
index 2a91c84aebdb..d7ca847d44c7 100644
--- a/drivers/net/ethernet/apm/xgene-v2/main.c
+++ b/drivers/net/ethernet/apm/xgene-v2/main.c
@@ -9,8 +9,6 @@
#include "main.h"
-static const struct acpi_device_id xge_acpi_match[];
-
static int xge_get_resources(struct xge_pdata *pdata)
{
struct platform_device *pdev;
@@ -731,7 +729,7 @@ MODULE_DEVICE_TABLE(acpi, xge_acpi_match);
static struct platform_driver xge_driver = {
.driver = {
.name = "xgene-enet-v2",
- .acpi_match_table = ACPI_PTR(xge_acpi_match),
+ .acpi_match_table = xge_acpi_match,
},
.probe = xge_probe,
.remove = xge_remove,
diff --git a/drivers/net/ethernet/apm/xgene-v2/mdio.c b/drivers/net/ethernet/apm/xgene-v2/mdio.c
index eba06831aec2..6a17045a5f62 100644
--- a/drivers/net/ethernet/apm/xgene-v2/mdio.c
+++ b/drivers/net/ethernet/apm/xgene-v2/mdio.c
@@ -97,7 +97,6 @@ void xge_mdio_remove(struct net_device *ndev)
int xge_mdio_config(struct net_device *ndev)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
struct xge_pdata *pdata = netdev_priv(ndev);
struct device *dev = &pdata->pdev->dev;
struct mii_bus *mdio_bus;
@@ -137,17 +136,12 @@ int xge_mdio_config(struct net_device *ndev)
goto err;
}
- linkmode_set_bit_array(phy_10_100_features_array,
- ARRAY_SIZE(phy_10_100_features_array),
- mask);
- linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, mask);
- linkmode_set_bit(ETHTOOL_LINK_MODE_AUI_BIT, mask);
- linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask);
- linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mask);
- linkmode_set_bit(ETHTOOL_LINK_MODE_BNC_BIT, mask);
-
- linkmode_andnot(phydev->supported, phydev->supported, mask);
- linkmode_copy(phydev->advertising, phydev->supported);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Full_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
+
pdata->phy_speed = SPEED_UNKNOWN;
return 0;
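
phy_remove_link_mode() clears one mode from phydev->supported and re-advertises what remains, so the hand-built mask plus linkmode_andnot()/linkmode_copy() is no longer needed. A minimal sketch, assuming a valid attached phy_device (the helper name is illustrative):

	#include <linux/phy.h>

	/* Drop everything below 1000BASE-T full duplex from a PHY. */
	static void xge_restrict_modes(struct phy_device *phydev)
	{
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Full_BIT);
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
	}
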
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index e641dbbea1e2..b854b6b42d77 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -421,18 +421,12 @@ static void xgene_enet_configure_clock(struct xgene_enet_pdata *pdata)
if (dev->of_node) {
struct clk *parent = clk_get_parent(pdata->clk);
+ long rate = rgmii_clock(pdata->phy_speed);
- switch (pdata->phy_speed) {
- case SPEED_10:
- clk_set_rate(parent, 2500000);
- break;
- case SPEED_100:
- clk_set_rate(parent, 25000000);
- break;
- default:
- clk_set_rate(parent, 125000000);
- break;
- }
+ if (rate < 0)
+ rate = 125000000;
+
+ clk_set_rate(parent, rate);
}
#ifdef CONFIG_ACPI
else {
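
rgmii_clock() from <linux/clk.h> maps a link speed to the matching RGMII reference clock (2.5 MHz for 10 Mb/s, 25 MHz for 100 Mb/s, 125 MHz for 1 Gb/s) and returns a negative value for anything else, which the hunk above uses as the cue to fall back to 125 MHz. A minimal sketch of the same pattern, assuming the caller already holds the parent clock:

	#include <linux/clk.h>

	/* Set the RGMII reference clock for a link speed, defaulting to
	 * 125 MHz when the speed is unknown.
	 */
	static void set_rgmii_rate(struct clk *parent, int speed)
	{
		long rate = rgmii_clock(speed);

		if (rate < 0)
			rate = 125000000;

		clk_set_rate(parent, rate);
	}
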
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
index 86607b79c09f..cc3b1631c905 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
@@ -6,8 +6,14 @@
* Keyur Chudgar <kchudgar@apm.com>
*/
-#include <linux/of_gpio.h>
-#include <linux/gpio.h>
+#include <linux/acpi.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/io.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"
#include "xgene_enet_xgmac.h"
diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c
index 785f4b4ff758..b3bf8d6f88e8 100644
--- a/drivers/net/ethernet/apple/bmac.c
+++ b/drivers/net/ethernet/apple/bmac.c
@@ -20,7 +20,6 @@
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/crc32.h>
-#include <linux/crc32poly.h>
#include <linux/bitrev.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
@@ -461,7 +460,7 @@ static int bmac_suspend(struct macio_dev *mdev, pm_message_t state)
/* prolly should wait for dma to finish & turn off the chip */
spin_lock_irqsave(&bp->lock, flags);
if (bp->timeout_active) {
- del_timer(&bp->tx_timeout);
+ timer_delete(&bp->tx_timeout);
bp->timeout_active = 0;
}
disable_irq(dev->irq);
@@ -546,7 +545,7 @@ static inline void bmac_set_timeout(struct net_device *dev)
spin_lock_irqsave(&bp->lock, flags);
if (bp->timeout_active)
- del_timer(&bp->tx_timeout);
+ timer_delete(&bp->tx_timeout);
bp->tx_timeout.expires = jiffies + TX_TIMEOUT;
add_timer(&bp->tx_timeout);
bp->timeout_active = 1;
@@ -755,7 +754,7 @@ static irqreturn_t bmac_txdma_intr(int irq, void *dev_id)
XXDEBUG(("bmac_txdma_intr\n"));
}
- /* del_timer(&bp->tx_timeout); */
+ /* timer_delete(&bp->tx_timeout); */
/* bp->timeout_active = 0; */
while (1) {
@@ -796,59 +795,6 @@ static irqreturn_t bmac_txdma_intr(int irq, void *dev_id)
}
#ifndef SUNHME_MULTICAST
-/* Real fast bit-reversal algorithm, 6-bit values */
-static int reverse6[64] = {
- 0x0,0x20,0x10,0x30,0x8,0x28,0x18,0x38,
- 0x4,0x24,0x14,0x34,0xc,0x2c,0x1c,0x3c,
- 0x2,0x22,0x12,0x32,0xa,0x2a,0x1a,0x3a,
- 0x6,0x26,0x16,0x36,0xe,0x2e,0x1e,0x3e,
- 0x1,0x21,0x11,0x31,0x9,0x29,0x19,0x39,
- 0x5,0x25,0x15,0x35,0xd,0x2d,0x1d,0x3d,
- 0x3,0x23,0x13,0x33,0xb,0x2b,0x1b,0x3b,
- 0x7,0x27,0x17,0x37,0xf,0x2f,0x1f,0x3f
-};
-
-static unsigned int
-crc416(unsigned int curval, unsigned short nxtval)
-{
- unsigned int counter, cur = curval, next = nxtval;
- int high_crc_set, low_data_set;
-
- /* Swap bytes */
- next = ((next & 0x00FF) << 8) | (next >> 8);
-
- /* Compute bit-by-bit */
- for (counter = 0; counter < 16; ++counter) {
- /* is high CRC bit set? */
- if ((cur & 0x80000000) == 0) high_crc_set = 0;
- else high_crc_set = 1;
-
- cur = cur << 1;
-
- if ((next & 0x0001) == 0) low_data_set = 0;
- else low_data_set = 1;
-
- next = next >> 1;
-
- /* do the XOR */
- if (high_crc_set ^ low_data_set) cur = cur ^ CRC32_POLY_BE;
- }
- return cur;
-}
-
-static unsigned int
-bmac_crc(unsigned short *address)
-{
- unsigned int newcrc;
-
- XXDEBUG(("bmac_crc: addr=%#04x, %#04x, %#04x\n", *address, address[1], address[2]));
- newcrc = crc416(0xffffffff, *address); /* address bits 47 - 32 */
- newcrc = crc416(newcrc, address[1]); /* address bits 31 - 16 */
- newcrc = crc416(newcrc, address[2]); /* address bits 15 - 0 */
-
- return(newcrc);
-}
-
/*
* Add requested mcast addr to BMac's hash table filter.
*
@@ -861,8 +807,7 @@ bmac_addhash(struct bmac_data *bp, unsigned char *addr)
unsigned short mask;
if (!(*addr)) return;
- crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */
- crc = reverse6[crc]; /* Hyperfast bit-reversing algorithm */
+ crc = crc32(~0, addr, ETH_ALEN) >> 26;
if (bp->hash_use_count[crc]++) return; /* This bit is already set */
mask = crc % 16;
mask = (unsigned char)1 << mask;
@@ -876,8 +821,7 @@ bmac_removehash(struct bmac_data *bp, unsigned char *addr)
unsigned char mask;
/* Now, delete the address from the filter copy, as indicated */
- crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */
- crc = reverse6[crc]; /* Hyperfast bit-reversing algorithm */
+ crc = crc32(~0, addr, ETH_ALEN) >> 26;
if (bp->hash_use_count[crc] == 0) return; /* That bit wasn't in use! */
if (--bp->hash_use_count[crc]) return; /* That bit is still in use */
mask = crc % 16;
@@ -1466,7 +1410,7 @@ bmac_output(struct sk_buff *skb, struct net_device *dev)
static void bmac_tx_timeout(struct timer_list *t)
{
- struct bmac_data *bp = from_timer(bp, t, tx_timeout);
+ struct bmac_data *bp = timer_container_of(bp, t, tx_timeout);
struct net_device *dev = macio_get_drvdata(bp->mdev);
volatile struct dbdma_regs __iomem *td = bp->tx_dma;
volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
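
The multicast hashing change in bmac.c drops the hand-rolled crc416()/bmac_crc() routines and the 6-bit reversal table in favour of the kernel's crc32(): the top six bits of the CRC over the six-byte MAC address select one of 64 filter bits. A sketch of setting that bit in a 4 x 16-bit hash filter, assuming the same table layout the driver uses:

	#include <linux/crc32.h>
	#include <linux/if_ether.h>

	/* Set the hash-filter bit for one multicast address. */
	static void hash_set(u16 *hash_table, const u8 *addr)
	{
		unsigned int crc = crc32(~0, addr, ETH_ALEN) >> 26;

		hash_table[crc >> 4] |= 1 << (crc % 16);
	}
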
diff --git a/drivers/net/ethernet/apple/mace.c b/drivers/net/ethernet/apple/mace.c
index e6350971c707..af26905e44e3 100644
--- a/drivers/net/ethernet/apple/mace.c
+++ b/drivers/net/ethernet/apple/mace.c
@@ -523,7 +523,7 @@ static inline void mace_set_timeout(struct net_device *dev)
struct mace_data *mp = netdev_priv(dev);
if (mp->timeout_active)
- del_timer(&mp->tx_timeout);
+ timer_delete(&mp->tx_timeout);
mp->tx_timeout.expires = jiffies + TX_TIMEOUT;
add_timer(&mp->tx_timeout);
mp->timeout_active = 1;
@@ -676,7 +676,7 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id)
i = mp->tx_empty;
while (in_8(&mb->pr) & XMTSV) {
- del_timer(&mp->tx_timeout);
+ timer_delete(&mp->tx_timeout);
mp->timeout_active = 0;
/*
* Clear any interrupt indication associated with this status
@@ -805,7 +805,7 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id)
static void mace_tx_timeout(struct timer_list *t)
{
- struct mace_data *mp = from_timer(mp, t, tx_timeout);
+ struct mace_data *mp = timer_container_of(mp, t, tx_timeout);
struct net_device *dev = macio_get_drvdata(mp->mdev);
volatile struct mace __iomem *mb = mp->mace;
volatile struct dbdma_regs __iomem *td = mp->tx_dma;
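
The timer changes across these drivers are mechanical renames: from_timer() becomes timer_container_of() and del_timer()/del_timer_sync() become timer_delete()/timer_delete_sync(), with unchanged semantics. A minimal sketch of the callback pattern, assuming a driver-private structure that embeds the timer (all names illustrative):

	#include <linux/jiffies.h>
	#include <linux/timer.h>

	struct my_adapter {
		struct timer_list watchdog;
		/* ... */
	};

	static void my_watchdog(struct timer_list *t)
	{
		/* Recover the containing structure from the timer pointer. */
		struct my_adapter *adap = timer_container_of(adap, t, watchdog);

		/* ... poll hardware, then re-arm ... */
		mod_timer(&adap->watchdog, jiffies + HZ);
	}
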
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c b/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c
index 414b2e448d59..787ea91802e7 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c
@@ -113,19 +113,9 @@ static const struct hwmon_ops aq_hwmon_ops = {
.read_string = aq_hwmon_read_string,
};
-static u32 aq_hwmon_temp_config[] = {
- HWMON_T_INPUT | HWMON_T_LABEL,
- HWMON_T_INPUT | HWMON_T_LABEL,
- 0,
-};
-
-static const struct hwmon_channel_info aq_hwmon_temp = {
- .type = hwmon_temp,
- .config = aq_hwmon_temp_config,
-};
-
static const struct hwmon_channel_info * const aq_hwmon_info[] = {
- &aq_hwmon_temp,
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL),
NULL,
};
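
HWMON_CHANNEL_INFO() builds the per-type config array and its hwmon_channel_info wrapper in one macro, which is why the separate aq_hwmon_temp_config[]/aq_hwmon_temp definitions can go. A minimal sketch of an info table with two labelled temperature inputs (table name illustrative):

	#include <linux/hwmon.h>

	static const struct hwmon_channel_info * const my_hwmon_info[] = {
		HWMON_CHANNEL_INFO(temp,
				   HWMON_T_INPUT | HWMON_T_LABEL,
				   HWMON_T_INPUT | HWMON_T_LABEL),
		NULL,
	};
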
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index c1d1673c5749..b565189e5913 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -123,7 +123,6 @@ static netdev_tx_t aq_ndev_start_xmit(struct sk_buff *skb, struct net_device *nd
}
#endif
- skb_tx_timestamp(skb);
return aq_nic_xmit(aq_nic, skb);
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index fe0e3e2a8117..b24eaa5283fa 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -254,7 +254,7 @@ static void aq_nic_service_task(struct work_struct *work)
static void aq_nic_service_timer_cb(struct timer_list *t)
{
- struct aq_nic_s *self = from_timer(self, t, service_timer);
+ struct aq_nic_s *self = timer_container_of(self, t, service_timer);
mod_timer(&self->service_timer,
jiffies + AQ_CFG_SERVICE_TIMER_INTERVAL);
@@ -264,7 +264,7 @@ static void aq_nic_service_timer_cb(struct timer_list *t)
static void aq_nic_polling_timer_cb(struct timer_list *t)
{
- struct aq_nic_s *self = from_timer(self, t, polling_timer);
+ struct aq_nic_s *self = timer_container_of(self, t, polling_timer);
unsigned int i = 0U;
for (i = 0U; self->aq_vecs > i; ++i)
@@ -898,6 +898,8 @@ int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
frags = aq_nic_map_skb(self, skb, ring);
+ skb_tx_timestamp(skb);
+
if (likely(frags)) {
err = self->aq_hw_ops->hw_ring_tx_xmit(self->aq_hw,
ring, frags);
@@ -1389,13 +1391,13 @@ int aq_nic_stop(struct aq_nic_s *self)
netif_tx_disable(self->ndev);
netif_carrier_off(self->ndev);
- del_timer_sync(&self->service_timer);
+ timer_delete_sync(&self->service_timer);
cancel_work_sync(&self->service_task);
self->aq_hw_ops->hw_irq_disable(self->aq_hw, AQ_CFG_IRQ_MASK);
if (self->aq_nic_cfg.is_polling)
- del_timer_sync(&self->polling_timer);
+ timer_delete_sync(&self->polling_timer);
else
aq_pci_func_free_irqs(self);
@@ -1441,7 +1443,9 @@ void aq_nic_deinit(struct aq_nic_s *self, bool link_down)
aq_ptp_ring_free(self);
aq_ptp_free(self);
- if (likely(self->aq_fw_ops->deinit) && link_down) {
+ /* May be invoked during hot unplug. */
+ if (pci_device_is_present(self->pdev) &&
+ likely(self->aq_fw_ops->deinit) && link_down) {
mutex_lock(&self->fwreq_mutex);
self->aq_fw_ops->deinit(self->aq_hw);
mutex_unlock(&self->fwreq_mutex);
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
index f5901f8e3907..f6b990b7f5b4 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
@@ -226,7 +226,6 @@ struct __packed offload_info {
struct offload_port_info ports;
struct offload_ka_info kas;
struct offload_rr_info rrs;
- u8 buf[];
};
struct __packed hw_atl_utils_fw_rpc {
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
index 3d4c3d8698e2..d8e6f23e1432 100644
--- a/drivers/net/ethernet/atheros/ag71xx.c
+++ b/drivers/net/ethernet/atheros/ag71xx.c
@@ -1391,7 +1391,7 @@ static void ag71xx_hw_disable(struct ag71xx *ag)
ag71xx_dma_reset(ag);
napi_disable(&ag->napi);
- del_timer_sync(&ag->oom_timer);
+ timer_delete_sync(&ag->oom_timer);
ag71xx_rings_cleanup(ag);
}
@@ -1563,7 +1563,7 @@ err_drop:
static void ag71xx_oom_timer_handler(struct timer_list *t)
{
- struct ag71xx *ag = from_timer(ag, t, oom_timer);
+ struct ag71xx *ag = timer_container_of(ag, t, oom_timer);
napi_schedule(&ag->napi);
}
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index c571614b1d50..ef1a51347351 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -231,8 +231,8 @@ static u32 atl1c_wait_until_idle(struct atl1c_hw *hw, u32 modu_ctrl)
*/
static void atl1c_phy_config(struct timer_list *t)
{
- struct atl1c_adapter *adapter = from_timer(adapter, t,
- phy_config_timer);
+ struct atl1c_adapter *adapter = timer_container_of(adapter, t,
+ phy_config_timer);
struct atl1c_hw *hw = &adapter->hw;
unsigned long flags;
@@ -357,7 +357,7 @@ static void atl1c_common_task(struct work_struct *work)
static void atl1c_del_timer(struct atl1c_adapter *adapter)
{
- del_timer_sync(&adapter->phy_config_timer);
+ timer_delete_sync(&adapter->phy_config_timer);
}
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 9b778b34b67e..40290028580b 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -115,8 +115,8 @@ static inline void atl1e_irq_reset(struct atl1e_adapter *adapter)
*/
static void atl1e_phy_config(struct timer_list *t)
{
- struct atl1e_adapter *adapter = from_timer(adapter, t,
- phy_config_timer);
+ struct atl1e_adapter *adapter = timer_container_of(adapter, t,
+ phy_config_timer);
struct atl1e_hw *hw = &adapter->hw;
unsigned long flags;
@@ -232,7 +232,7 @@ static void atl1e_link_chg_event(struct atl1e_adapter *adapter)
static void atl1e_del_timer(struct atl1e_adapter *adapter)
{
- del_timer_sync(&adapter->phy_config_timer);
+ timer_delete_sync(&adapter->phy_config_timer);
}
static void atl1e_cancel_work(struct atl1e_adapter *adapter)
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index 3afd3627ce48..98a4d089270e 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -1861,14 +1861,21 @@ static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter)
break;
}
- buffer_info->alloced = 1;
- buffer_info->skb = skb;
- buffer_info->length = (u16) adapter->rx_buffer_len;
page = virt_to_page(skb->data);
offset = offset_in_page(skb->data);
buffer_info->dma = dma_map_page(&pdev->dev, page, offset,
adapter->rx_buffer_len,
DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
+ kfree_skb(skb);
+ adapter->soft_stats.rx_dropped++;
+ break;
+ }
+
+ buffer_info->alloced = 1;
+ buffer_info->skb = skb;
+ buffer_info->length = (u16)adapter->rx_buffer_len;
+
rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
rfd_desc->buf_len = cpu_to_le16(adapter->rx_buffer_len);
rfd_desc->coalese = 0;
@@ -2183,8 +2190,8 @@ static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb,
return 0;
}
-static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
- struct tx_packet_desc *ptpd)
+static bool atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
+ struct tx_packet_desc *ptpd)
{
struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
struct atl1_buffer *buffer_info;
@@ -2194,6 +2201,7 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
unsigned int nr_frags;
unsigned int f;
int retval;
+ u16 first_mapped;
u16 next_to_use;
u16 data_len;
u8 hdr_len;
@@ -2201,6 +2209,7 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
buf_len -= skb->data_len;
nr_frags = skb_shinfo(skb)->nr_frags;
next_to_use = atomic_read(&tpd_ring->next_to_use);
+ first_mapped = next_to_use;
buffer_info = &tpd_ring->buffer_info[next_to_use];
BUG_ON(buffer_info->skb);
/* put skb in last TPD */
@@ -2216,6 +2225,8 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
buffer_info->dma = dma_map_page(&adapter->pdev->dev, page,
offset, hdr_len,
DMA_TO_DEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma))
+ goto dma_err;
if (++next_to_use == tpd_ring->count)
next_to_use = 0;
@@ -2242,6 +2253,9 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
page, offset,
buffer_info->length,
DMA_TO_DEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev,
+ buffer_info->dma))
+ goto dma_err;
if (++next_to_use == tpd_ring->count)
next_to_use = 0;
}
@@ -2254,6 +2268,8 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
buffer_info->dma = dma_map_page(&adapter->pdev->dev, page,
offset, buf_len,
DMA_TO_DEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma))
+ goto dma_err;
if (++next_to_use == tpd_ring->count)
next_to_use = 0;
}
@@ -2277,6 +2293,9 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
buffer_info->dma = skb_frag_dma_map(&adapter->pdev->dev,
frag, i * ATL1_MAX_TX_BUF_LEN,
buffer_info->length, DMA_TO_DEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev,
+ buffer_info->dma))
+ goto dma_err;
if (++next_to_use == tpd_ring->count)
next_to_use = 0;
@@ -2285,6 +2304,22 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
/* last tpd's buffer-info */
buffer_info->skb = skb;
+
+ return true;
+
+ dma_err:
+ while (first_mapped != next_to_use) {
+ buffer_info = &tpd_ring->buffer_info[first_mapped];
+ dma_unmap_page(&adapter->pdev->dev,
+ buffer_info->dma,
+ buffer_info->length,
+ DMA_TO_DEVICE);
+ buffer_info->dma = 0;
+
+ if (++first_mapped == tpd_ring->count)
+ first_mapped = 0;
+ }
+ return false;
}
static void atl1_tx_queue(struct atl1_adapter *adapter, u16 count,
@@ -2355,10 +2390,8 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
len = skb_headlen(skb);
- if (unlikely(skb->len <= 0)) {
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
+ if (unlikely(skb->len <= 0))
+ goto drop_packet;
nr_frags = skb_shinfo(skb)->nr_frags;
for (f = 0; f < nr_frags; f++) {
@@ -2371,10 +2404,9 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
if (mss) {
if (skb->protocol == htons(ETH_P_IP)) {
proto_hdr_len = skb_tcp_all_headers(skb);
- if (unlikely(proto_hdr_len > len)) {
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
+ if (unlikely(proto_hdr_len > len))
+ goto drop_packet;
+
/* need additional TPD ? */
if (proto_hdr_len != len)
count += (len - proto_hdr_len +
@@ -2406,23 +2438,26 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
}
tso = atl1_tso(adapter, skb, ptpd);
- if (tso < 0) {
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
+ if (tso < 0)
+ goto drop_packet;
if (!tso) {
ret_val = atl1_tx_csum(adapter, skb, ptpd);
- if (ret_val < 0) {
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
+ if (ret_val < 0)
+ goto drop_packet;
}
- atl1_tx_map(adapter, skb, ptpd);
+ if (!atl1_tx_map(adapter, skb, ptpd))
+ goto drop_packet;
+
atl1_tx_queue(adapter, count, ptpd);
atl1_update_mailbox(adapter);
return NETDEV_TX_OK;
+
+drop_packet:
+ adapter->soft_stats.tx_errors++;
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
}
static int atl1_rings_clean(struct napi_struct *napi, int budget)
@@ -2556,8 +2591,8 @@ static irqreturn_t atl1_intr(int irq, void *data)
*/
static void atl1_phy_config(struct timer_list *t)
{
- struct atl1_adapter *adapter = from_timer(adapter, t,
- phy_config_timer);
+ struct atl1_adapter *adapter = timer_container_of(adapter, t,
+ phy_config_timer);
struct atl1_hw *hw = &adapter->hw;
unsigned long flags;
@@ -2641,7 +2676,7 @@ static void atl1_down(struct atl1_adapter *adapter)
napi_disable(&adapter->napi);
netif_stop_queue(netdev);
- del_timer_sync(&adapter->phy_config_timer);
+ timer_delete_sync(&adapter->phy_config_timer);
adapter->phy_timer_pending = false;
atlx_irq_disable(adapter);
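
The atl1 transmit path now checks every dma_map_page()/skb_frag_dma_map() result with dma_mapping_error() and, on failure, walks back from first_mapped, unmapping whatever was already mapped before the packet is dropped. A compact sketch of the map-with-unwind pattern, under simplified assumptions (flat descriptor array, one page per buffer, names illustrative):

	#include <linux/dma-mapping.h>

	struct buf_info {
		dma_addr_t dma;
		size_t len;
	};

	/* Map 'count' pages; on any failure unmap what was mapped so far. */
	static bool map_all(struct device *dev, struct buf_info *ring,
			    struct page **pages, unsigned int count, size_t len)
	{
		unsigned int i;

		for (i = 0; i < count; i++) {
			ring[i].dma = dma_map_page(dev, pages[i], 0, len,
						   DMA_TO_DEVICE);
			if (dma_mapping_error(dev, ring[i].dma))
				goto unwind;
			ring[i].len = len;
		}
		return true;

	unwind:
		while (i--)
			dma_unmap_page(dev, ring[i].dma, ring[i].len,
				       DMA_TO_DEVICE);
		return false;
	}
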
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index fa9a4919f25d..280e2f5f4aa5 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -752,8 +752,8 @@ static void atl2_down(struct atl2_adapter *adapter)
atl2_irq_disable(adapter);
- del_timer_sync(&adapter->watchdog_timer);
- del_timer_sync(&adapter->phy_config_timer);
+ timer_delete_sync(&adapter->watchdog_timer);
+ timer_delete_sync(&adapter->phy_config_timer);
clear_bit(0, &adapter->cfg_phy);
netif_carrier_off(netdev);
@@ -1010,7 +1010,8 @@ static void atl2_tx_timeout(struct net_device *netdev, unsigned int txqueue)
*/
static void atl2_watchdog(struct timer_list *t)
{
- struct atl2_adapter *adapter = from_timer(adapter, t, watchdog_timer);
+ struct atl2_adapter *adapter = timer_container_of(adapter, t,
+ watchdog_timer);
if (!test_bit(__ATL2_DOWN, &adapter->flags)) {
u32 drop_rxd, drop_rxs;
@@ -1035,8 +1036,8 @@ static void atl2_watchdog(struct timer_list *t)
*/
static void atl2_phy_config(struct timer_list *t)
{
- struct atl2_adapter *adapter = from_timer(adapter, t,
- phy_config_timer);
+ struct atl2_adapter *adapter = timer_container_of(adapter, t,
+ phy_config_timer);
struct atl2_hw *hw = &adapter->hw;
unsigned long flags;
@@ -1468,8 +1469,8 @@ static void atl2_remove(struct pci_dev *pdev)
* explicitly disable watchdog tasks from being rescheduled */
set_bit(__ATL2_DOWN, &adapter->flags);
- del_timer_sync(&adapter->watchdog_timer);
- del_timer_sync(&adapter->phy_config_timer);
+ timer_delete_sync(&adapter->watchdog_timer);
+ timer_delete_sync(&adapter->phy_config_timer);
cancel_work_sync(&adapter->reset_task);
cancel_work_sync(&adapter->link_chg_task);
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index eeec8bf17cf4..81a74e07464f 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -96,7 +96,6 @@ config BNX2
config CNIC
tristate "QLogic CNIC support"
depends on PCI && (IPV6 || IPV6=n)
- depends on MMU
select BNX2
select UIO
help
@@ -123,6 +122,7 @@ config TIGON3
tristate "Broadcom Tigon3 support"
depends on PCI
depends on PTP_1588_CLOCK_OPTIONAL
+ select CRC32
select PHYLIB
help
This driver supports Broadcom Tigon3 based gigabit Ethernet cards.
@@ -143,7 +143,7 @@ config BNX2X
depends on PTP_1588_CLOCK_OPTIONAL
select FW_LOADER
select ZLIB_INFLATE
- select LIBCRC32C
+ select CRC32
select MDIO
help
This driver supports Broadcom NetXtremeII 10 gigabit Ethernet cards.
@@ -207,7 +207,7 @@ config BNXT
depends on PCI
depends on PTP_1588_CLOCK_OPTIONAL
select FW_LOADER
- select LIBCRC32C
+ select CRC32
select NET_DEVLINK
select PAGE_POOL
select DIMLIB
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.c b/drivers/net/ethernet/broadcom/asp2/bcmasp.c
index a68fab1b05f0..fd35f4b4dc50 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.c
@@ -141,7 +141,7 @@ void bcmasp_flush_rx_port(struct bcmasp_intf *intf)
return;
}
- rx_ctrl_core_wl(priv, mask, priv->hw_info->rx_ctrl_flush);
+ rx_ctrl_core_wl(priv, mask, ASP_RX_CTRL_FLUSH);
}
static void bcmasp_netfilt_hw_en_wake(struct bcmasp_priv *priv,
@@ -518,7 +518,7 @@ void bcmasp_netfilt_suspend(struct bcmasp_intf *intf)
int ret, i;
/* Write all filters to HW */
- for (i = 0; i < NUM_NET_FILTERS; i++) {
+ for (i = 0; i < priv->num_net_filters; i++) {
/* If the filter does not match the port, skip programming. */
if (!priv->net_filters[i].claimed ||
priv->net_filters[i].port != intf->port)
@@ -551,7 +551,7 @@ int bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
struct bcmasp_priv *priv = intf->parent;
int j = 0, i;
- for (i = 0; i < NUM_NET_FILTERS; i++) {
+ for (i = 0; i < priv->num_net_filters; i++) {
if (!priv->net_filters[i].claimed ||
priv->net_filters[i].port != intf->port)
continue;
@@ -577,7 +577,7 @@ int bcmasp_netfilt_get_active(struct bcmasp_intf *intf)
struct bcmasp_priv *priv = intf->parent;
int cnt = 0, i;
- for (i = 0; i < NUM_NET_FILTERS; i++) {
+ for (i = 0; i < priv->num_net_filters; i++) {
if (!priv->net_filters[i].claimed ||
priv->net_filters[i].port != intf->port)
continue;
@@ -602,7 +602,7 @@ bool bcmasp_netfilt_check_dup(struct bcmasp_intf *intf,
size_t fs_size = 0;
int i;
- for (i = 0; i < NUM_NET_FILTERS; i++) {
+ for (i = 0; i < priv->num_net_filters; i++) {
if (!priv->net_filters[i].claimed ||
priv->net_filters[i].port != intf->port)
continue;
@@ -670,7 +670,7 @@ struct bcmasp_net_filter *bcmasp_netfilt_get_init(struct bcmasp_intf *intf,
int i, open_index = -1;
/* Check whether we exceed the filter table capacity */
- if (loc != RX_CLS_LOC_ANY && loc >= NUM_NET_FILTERS)
+ if (loc != RX_CLS_LOC_ANY && loc >= priv->num_net_filters)
return ERR_PTR(-EINVAL);
/* If the filter location is busy (already claimed) and we are initializing
@@ -686,7 +686,7 @@ struct bcmasp_net_filter *bcmasp_netfilt_get_init(struct bcmasp_intf *intf,
/* Initialize the loop index based on the desired location or from 0 */
i = loc == RX_CLS_LOC_ANY ? 0 : loc;
- for ( ; i < NUM_NET_FILTERS; i++) {
+ for ( ; i < priv->num_net_filters; i++) {
/* Found matching network filter */
if (!init &&
priv->net_filters[i].claimed &&
@@ -779,7 +779,7 @@ static void bcmasp_en_mda_filter(struct bcmasp_intf *intf, bool en,
priv->mda_filters[i].en = en;
priv->mda_filters[i].port = intf->port;
- rx_filter_core_wl(priv, ((intf->channel + 8) |
+ rx_filter_core_wl(priv, ((intf->channel + priv->tx_chan_offset) |
(en << ASP_RX_FILTER_MDA_CFG_EN_SHIFT) |
ASP_RX_FILTER_MDA_CFG_UMC_SEL(intf->port)),
ASP_RX_FILTER_MDA_CFG(i));
@@ -865,7 +865,7 @@ void bcmasp_disable_all_filters(struct bcmasp_intf *intf)
res_count = bcmasp_total_res_mda_cnt(intf->parent);
/* Disable all filters held by this port */
- for (i = res_count; i < NUM_MDA_FILTERS; i++) {
+ for (i = res_count; i < priv->num_mda_filters; i++) {
if (priv->mda_filters[i].en &&
priv->mda_filters[i].port == intf->port)
bcmasp_en_mda_filter(intf, 0, i);
@@ -909,7 +909,7 @@ int bcmasp_set_en_mda_filter(struct bcmasp_intf *intf, unsigned char *addr,
res_count = bcmasp_total_res_mda_cnt(intf->parent);
- for (i = res_count; i < NUM_MDA_FILTERS; i++) {
+ for (i = res_count; i < priv->num_mda_filters; i++) {
/* If filter not enabled or belongs to another port skip */
if (!priv->mda_filters[i].en ||
priv->mda_filters[i].port != intf->port)
@@ -924,7 +924,7 @@ int bcmasp_set_en_mda_filter(struct bcmasp_intf *intf, unsigned char *addr,
}
/* Create new filter if possible */
- for (i = res_count; i < NUM_MDA_FILTERS; i++) {
+ for (i = res_count; i < priv->num_mda_filters; i++) {
if (priv->mda_filters[i].en)
continue;
@@ -944,12 +944,12 @@ static void bcmasp_core_init_filters(struct bcmasp_priv *priv)
/* Disable all filters and reset software view since the HW
* can lose context while in deep sleep suspend states
*/
- for (i = 0; i < NUM_MDA_FILTERS; i++) {
+ for (i = 0; i < priv->num_mda_filters; i++) {
rx_filter_core_wl(priv, 0x0, ASP_RX_FILTER_MDA_CFG(i));
priv->mda_filters[i].en = 0;
}
- for (i = 0; i < NUM_NET_FILTERS; i++)
+ for (i = 0; i < priv->num_net_filters; i++)
rx_filter_core_wl(priv, 0x0, ASP_RX_FILTER_NET_CFG(i));
/* Top level filter enable bit should be enabled at all times, set
@@ -966,18 +966,8 @@ static void bcmasp_core_init_filters(struct bcmasp_priv *priv)
/* ASP core initialization */
static void bcmasp_core_init(struct bcmasp_priv *priv)
{
- tx_analytics_core_wl(priv, 0x0, ASP_TX_ANALYTICS_CTRL);
- rx_analytics_core_wl(priv, 0x4, ASP_RX_ANALYTICS_CTRL);
-
- rx_edpkt_core_wl(priv, (ASP_EDPKT_HDR_SZ_128 << ASP_EDPKT_HDR_SZ_SHIFT),
- ASP_EDPKT_HDR_CFG);
- rx_edpkt_core_wl(priv,
- (ASP_EDPKT_ENDI_BT_SWP_WD << ASP_EDPKT_ENDI_DESC_SHIFT),
- ASP_EDPKT_ENDI);
-
rx_edpkt_core_wl(priv, 0x1b, ASP_EDPKT_BURST_BUF_PSCAL_TOUT);
rx_edpkt_core_wl(priv, 0x3e8, ASP_EDPKT_BURST_BUF_WRITE_TOUT);
- rx_edpkt_core_wl(priv, 0x3e8, ASP_EDPKT_BURST_BUF_READ_TOUT);
rx_edpkt_core_wl(priv, ASP_EDPKT_ENABLE_EN, ASP_EDPKT_ENABLE);
@@ -1020,6 +1010,18 @@ static void bcmasp_core_clock_select_one(struct bcmasp_priv *priv, bool slow)
ctrl_core_wl(priv, reg, ASP_CTRL_CORE_CLOCK_SELECT);
}
+static void bcmasp_core_clock_select_one_ctrl2(struct bcmasp_priv *priv, bool slow)
+{
+ u32 reg;
+
+ reg = ctrl2_core_rl(priv, ASP_CTRL2_CORE_CLOCK_SELECT);
+ if (slow)
+ reg &= ~ASP_CTRL2_CORE_CLOCK_SELECT_MAIN;
+ else
+ reg |= ASP_CTRL2_CORE_CLOCK_SELECT_MAIN;
+ ctrl2_core_wl(priv, reg, ASP_CTRL2_CORE_CLOCK_SELECT);
+}
+
static void bcmasp_core_clock_set_ll(struct bcmasp_priv *priv, u32 clr, u32 set)
{
u32 reg;
@@ -1108,7 +1110,7 @@ static int bcmasp_get_and_request_irq(struct bcmasp_priv *priv, int i)
return irq;
}
-static void bcmasp_init_wol_shared(struct bcmasp_priv *priv)
+static void bcmasp_init_wol(struct bcmasp_priv *priv)
{
struct platform_device *pdev = priv->pdev;
struct device *dev = &pdev->dev;
@@ -1125,7 +1127,7 @@ static void bcmasp_init_wol_shared(struct bcmasp_priv *priv)
device_set_wakeup_capable(&pdev->dev, 1);
}
-static void bcmasp_enable_wol_shared(struct bcmasp_intf *intf, bool en)
+void bcmasp_enable_wol(struct bcmasp_intf *intf, bool en)
{
struct bcmasp_priv *priv = intf->parent;
struct device *dev = &priv->pdev->dev;
@@ -1154,54 +1156,12 @@ static void bcmasp_enable_wol_shared(struct bcmasp_intf *intf, bool en)
}
}
-static void bcmasp_wol_irq_destroy_shared(struct bcmasp_priv *priv)
+static void bcmasp_wol_irq_destroy(struct bcmasp_priv *priv)
{
if (priv->wol_irq > 0)
free_irq(priv->wol_irq, priv);
}
-static void bcmasp_init_wol_per_intf(struct bcmasp_priv *priv)
-{
- struct platform_device *pdev = priv->pdev;
- struct device *dev = &pdev->dev;
- struct bcmasp_intf *intf;
- int irq;
-
- list_for_each_entry(intf, &priv->intfs, list) {
- irq = bcmasp_get_and_request_irq(priv, intf->port + 1);
- if (irq < 0) {
- dev_warn(dev, "Failed to init WoL irq(port %d): %d\n",
- intf->port, irq);
- continue;
- }
-
- intf->wol_irq = irq;
- intf->wol_irq_enabled = false;
- device_set_wakeup_capable(&pdev->dev, 1);
- }
-}
-
-static void bcmasp_enable_wol_per_intf(struct bcmasp_intf *intf, bool en)
-{
- struct device *dev = &intf->parent->pdev->dev;
-
- if (en ^ intf->wol_irq_enabled)
- irq_set_irq_wake(intf->wol_irq, en);
-
- intf->wol_irq_enabled = en;
- device_set_wakeup_enable(dev, en);
-}
-
-static void bcmasp_wol_irq_destroy_per_intf(struct bcmasp_priv *priv)
-{
- struct bcmasp_intf *intf;
-
- list_for_each_entry(intf, &priv->intfs, list) {
- if (intf->wol_irq > 0)
- free_irq(intf->wol_irq, priv);
- }
-}
-
static void bcmasp_eee_fixup(struct bcmasp_intf *intf, bool en)
{
u32 reg, phy_lpi_overwrite;
@@ -1220,70 +1180,53 @@ static void bcmasp_eee_fixup(struct bcmasp_intf *intf, bool en)
usleep_range(50, 100);
}
-static struct bcmasp_hw_info v20_hw_info = {
- .rx_ctrl_flush = ASP_RX_CTRL_FLUSH,
- .umac2fb = UMAC2FB_OFFSET,
- .rx_ctrl_fb_out_frame_count = ASP_RX_CTRL_FB_OUT_FRAME_COUNT,
- .rx_ctrl_fb_filt_out_frame_count = ASP_RX_CTRL_FB_FILT_OUT_FRAME_COUNT,
- .rx_ctrl_fb_rx_fifo_depth = ASP_RX_CTRL_FB_RX_FIFO_DEPTH,
-};
-
-static const struct bcmasp_plat_data v20_plat_data = {
- .init_wol = bcmasp_init_wol_per_intf,
- .enable_wol = bcmasp_enable_wol_per_intf,
- .destroy_wol = bcmasp_wol_irq_destroy_per_intf,
- .core_clock_select = bcmasp_core_clock_select_one,
- .hw_info = &v20_hw_info,
-};
-
-static struct bcmasp_hw_info v21_hw_info = {
- .rx_ctrl_flush = ASP_RX_CTRL_FLUSH_2_1,
- .umac2fb = UMAC2FB_OFFSET_2_1,
- .rx_ctrl_fb_out_frame_count = ASP_RX_CTRL_FB_OUT_FRAME_COUNT_2_1,
- .rx_ctrl_fb_filt_out_frame_count =
- ASP_RX_CTRL_FB_FILT_OUT_FRAME_COUNT_2_1,
- .rx_ctrl_fb_rx_fifo_depth = ASP_RX_CTRL_FB_RX_FIFO_DEPTH_2_1,
-};
-
static const struct bcmasp_plat_data v21_plat_data = {
- .init_wol = bcmasp_init_wol_shared,
- .enable_wol = bcmasp_enable_wol_shared,
- .destroy_wol = bcmasp_wol_irq_destroy_shared,
.core_clock_select = bcmasp_core_clock_select_one,
- .hw_info = &v21_hw_info,
+ .num_mda_filters = 32,
+ .num_net_filters = 32,
+ .tx_chan_offset = 8,
+ .rx_ctrl_offset = 0x0,
};
static const struct bcmasp_plat_data v22_plat_data = {
- .init_wol = bcmasp_init_wol_shared,
- .enable_wol = bcmasp_enable_wol_shared,
- .destroy_wol = bcmasp_wol_irq_destroy_shared,
.core_clock_select = bcmasp_core_clock_select_many,
- .hw_info = &v21_hw_info,
.eee_fixup = bcmasp_eee_fixup,
+ .num_mda_filters = 32,
+ .num_net_filters = 32,
+ .tx_chan_offset = 8,
+ .rx_ctrl_offset = 0x0,
+};
+
+static const struct bcmasp_plat_data v30_plat_data = {
+ .core_clock_select = bcmasp_core_clock_select_one_ctrl2,
+ .num_mda_filters = 20,
+ .num_net_filters = 16,
+ .tx_chan_offset = 0,
+ .rx_ctrl_offset = 0x10000,
};
static void bcmasp_set_pdata(struct bcmasp_priv *priv, const struct bcmasp_plat_data *pdata)
{
- priv->init_wol = pdata->init_wol;
- priv->enable_wol = pdata->enable_wol;
- priv->destroy_wol = pdata->destroy_wol;
priv->core_clock_select = pdata->core_clock_select;
priv->eee_fixup = pdata->eee_fixup;
- priv->hw_info = pdata->hw_info;
+ priv->num_mda_filters = pdata->num_mda_filters;
+ priv->num_net_filters = pdata->num_net_filters;
+ priv->tx_chan_offset = pdata->tx_chan_offset;
+ priv->rx_ctrl_offset = pdata->rx_ctrl_offset;
}
static const struct of_device_id bcmasp_of_match[] = {
- { .compatible = "brcm,asp-v2.0", .data = &v20_plat_data },
{ .compatible = "brcm,asp-v2.1", .data = &v21_plat_data },
{ .compatible = "brcm,asp-v2.2", .data = &v22_plat_data },
+ { .compatible = "brcm,asp-v3.0", .data = &v30_plat_data },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, bcmasp_of_match);
static const struct of_device_id bcmasp_mdio_of_match[] = {
- { .compatible = "brcm,asp-v2.2-mdio", },
{ .compatible = "brcm,asp-v2.1-mdio", },
- { .compatible = "brcm,asp-v2.0-mdio", },
+ { .compatible = "brcm,asp-v2.2-mdio", },
+ { .compatible = "brcm,asp-v3.0-mdio", },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, bcmasp_mdio_of_match);
@@ -1365,6 +1308,17 @@ static int bcmasp_probe(struct platform_device *pdev)
* how many interfaces come up.
*/
bcmasp_core_init(priv);
+
+ priv->mda_filters = devm_kcalloc(dev, priv->num_mda_filters,
+ sizeof(*priv->mda_filters), GFP_KERNEL);
+ if (!priv->mda_filters)
+ return -ENOMEM;
+
+ priv->net_filters = devm_kcalloc(dev, priv->num_net_filters,
+ sizeof(*priv->net_filters), GFP_KERNEL);
+ if (!priv->net_filters)
+ return -ENOMEM;
+
bcmasp_core_init_filters(priv);
ports_node = of_find_node_by_name(dev->of_node, "ethernet-ports");
@@ -1387,7 +1341,7 @@ static int bcmasp_probe(struct platform_device *pdev)
}
/* Check and enable WoL */
- priv->init_wol(priv);
+ bcmasp_init_wol(priv);
/* Drop the clock reference count now and let ndo_open()/ndo_close()
* manage it for us from now on.
@@ -1404,7 +1358,7 @@ static int bcmasp_probe(struct platform_device *pdev)
if (ret) {
netdev_err(intf->ndev,
"failed to register net_device: %d\n", ret);
- priv->destroy_wol(priv);
+ bcmasp_wol_irq_destroy(priv);
bcmasp_remove_intfs(priv);
goto of_put_exit;
}
@@ -1425,7 +1379,7 @@ static void bcmasp_remove(struct platform_device *pdev)
if (!priv)
return;
- priv->destroy_wol(priv);
+ bcmasp_wol_irq_destroy(priv);
bcmasp_remove_intfs(priv);
}
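
The ASP driver now describes per-SoC differences (filter counts, channel and register offsets, clock-select hook) in bcmasp_plat_data attached to the of_device_id entries, and sizes the filter tables at probe time with devm_kcalloc() instead of fixed NUM_*_FILTERS arrays. A minimal sketch of the match-data pattern, assuming device_get_match_data() is used to fetch the per-compatible data; all names are illustrative:

	#include <linux/device.h>
	#include <linux/mod_devicetable.h>
	#include <linux/platform_device.h>
	#include <linux/property.h>

	struct my_plat_data {
		unsigned int num_filters;
	};

	static const struct my_plat_data v21_data = { .num_filters = 32 };
	static const struct my_plat_data v30_data = { .num_filters = 16 };

	static const struct of_device_id my_of_match[] = {
		{ .compatible = "vendor,block-v2.1", .data = &v21_data },
		{ .compatible = "vendor,block-v3.0", .data = &v30_data },
		{ /* sentinel */ },
	};

	static int my_probe(struct platform_device *pdev)
	{
		const struct my_plat_data *pdata;

		pdata = device_get_match_data(&pdev->dev);
		if (!pdata)
			return -EINVAL;

		/* Size runtime tables from pdata->num_filters, e.g. with
		 * devm_kcalloc(&pdev->dev, pdata->num_filters, ...).
		 */
		return 0;
	}
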
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.h b/drivers/net/ethernet/broadcom/asp2/bcmasp.h
index f93cb3da44b0..74adfdb50e11 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp.h
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.h
@@ -53,22 +53,15 @@
#define ASP_RX_CTRL_FB_0_FRAME_COUNT 0x14
#define ASP_RX_CTRL_FB_1_FRAME_COUNT 0x18
#define ASP_RX_CTRL_FB_8_FRAME_COUNT 0x1c
-/* asp2.1 diverges offsets here */
-/* ASP2.0 */
-#define ASP_RX_CTRL_FB_OUT_FRAME_COUNT 0x20
-#define ASP_RX_CTRL_FB_FILT_OUT_FRAME_COUNT 0x24
-#define ASP_RX_CTRL_FLUSH 0x28
-#define ASP_CTRL_UMAC0_FLUSH_MASK (BIT(0) | BIT(12))
-#define ASP_CTRL_UMAC1_FLUSH_MASK (BIT(1) | BIT(13))
-#define ASP_CTRL_SPB_FLUSH_MASK (BIT(8) | BIT(20))
-#define ASP_RX_CTRL_FB_RX_FIFO_DEPTH 0x30
-/* ASP2.1 */
-#define ASP_RX_CTRL_FB_9_FRAME_COUNT_2_1 0x20
-#define ASP_RX_CTRL_FB_10_FRAME_COUNT_2_1 0x24
-#define ASP_RX_CTRL_FB_OUT_FRAME_COUNT_2_1 0x28
-#define ASP_RX_CTRL_FB_FILT_OUT_FRAME_COUNT_2_1 0x2c
-#define ASP_RX_CTRL_FLUSH_2_1 0x30
-#define ASP_RX_CTRL_FB_RX_FIFO_DEPTH_2_1 0x38
+#define ASP_RX_CTRL_FB_9_FRAME_COUNT 0x20
+#define ASP_RX_CTRL_FB_10_FRAME_COUNT 0x24
+#define ASP_RX_CTRL_FB_OUT_FRAME_COUNT 0x28
+#define ASP_RX_CTRL_FB_FILT_OUT_FRAME_COUNT 0x2c
+#define ASP_RX_CTRL_FLUSH 0x30
+#define ASP_CTRL_UMAC0_FLUSH_MASK (BIT(0) | BIT(12))
+#define ASP_CTRL_UMAC1_FLUSH_MASK (BIT(1) | BIT(13))
+#define ASP_CTRL_SPB_FLUSH_MASK (BIT(8) | BIT(20))
+#define ASP_RX_CTRL_FB_RX_FIFO_DEPTH 0x38
#define ASP_RX_FILTER_OFFSET 0x80000
#define ASP_RX_FILTER_BLK_CTRL 0x0
@@ -345,11 +338,6 @@ struct bcmasp_intf {
u32 wolopts;
u8 sopass[SOPASS_MAX];
- /* Used if per intf wol irq */
- int wol_irq;
- unsigned int wol_irq_enabled:1;
-
- struct ethtool_keee eee;
};
#define NUM_NET_FILTERS 32
@@ -372,21 +360,13 @@ struct bcmasp_mda_filter {
u8 mask[ETH_ALEN];
};
-struct bcmasp_hw_info {
- u32 rx_ctrl_flush;
- u32 umac2fb;
- u32 rx_ctrl_fb_out_frame_count;
- u32 rx_ctrl_fb_filt_out_frame_count;
- u32 rx_ctrl_fb_rx_fifo_depth;
-};
-
struct bcmasp_plat_data {
- void (*init_wol)(struct bcmasp_priv *priv);
- void (*enable_wol)(struct bcmasp_intf *intf, bool en);
- void (*destroy_wol)(struct bcmasp_priv *priv);
void (*core_clock_select)(struct bcmasp_priv *priv, bool slow);
void (*eee_fixup)(struct bcmasp_intf *priv, bool en);
- struct bcmasp_hw_info *hw_info;
+ unsigned int num_mda_filters;
+ unsigned int num_net_filters;
+ unsigned int tx_chan_offset;
+ unsigned int rx_ctrl_offset;
};
struct bcmasp_priv {
@@ -401,18 +381,18 @@ struct bcmasp_priv {
int wol_irq;
unsigned long wol_irq_enabled_mask;
- void (*init_wol)(struct bcmasp_priv *priv);
- void (*enable_wol)(struct bcmasp_intf *intf, bool en);
- void (*destroy_wol)(struct bcmasp_priv *priv);
void (*core_clock_select)(struct bcmasp_priv *priv, bool slow);
void (*eee_fixup)(struct bcmasp_intf *intf, bool en);
+ unsigned int num_mda_filters;
+ unsigned int num_net_filters;
+ unsigned int tx_chan_offset;
+ unsigned int rx_ctrl_offset;
void __iomem *base;
- struct bcmasp_hw_info *hw_info;
struct list_head intfs;
- struct bcmasp_mda_filter mda_filters[NUM_MDA_FILTERS];
+ struct bcmasp_mda_filter *mda_filters;
/* MAC destination address filters lock */
spinlock_t mda_lock;
@@ -420,7 +400,7 @@ struct bcmasp_priv {
/* Protects accesses to ASP_CTRL_CLOCK_CTRL */
spinlock_t clk_lock;
- struct bcmasp_net_filter net_filters[NUM_NET_FILTERS];
+ struct bcmasp_net_filter *net_filters;
/* Network filter lock */
struct mutex net_lock;
@@ -510,8 +490,8 @@ BCMASP_FP_IO_MACRO_Q(rx_edpkt_cfg);
#define PKT_OFFLOAD_EPKT_IP(x) ((x) << 21)
#define PKT_OFFLOAD_EPKT_TP(x) ((x) << 19)
#define PKT_OFFLOAD_EPKT_LEN(x) ((x) << 16)
-#define PKT_OFFLOAD_EPKT_CSUM_L3 BIT(15)
-#define PKT_OFFLOAD_EPKT_CSUM_L2 BIT(14)
+#define PKT_OFFLOAD_EPKT_CSUM_L4 BIT(15)
+#define PKT_OFFLOAD_EPKT_CSUM_L3 BIT(14)
#define PKT_OFFLOAD_EPKT_ID(x) ((x) << 12)
#define PKT_OFFLOAD_EPKT_SEQ(x) ((x) << 10)
#define PKT_OFFLOAD_EPKT_TS(x) ((x) << 8)
@@ -543,12 +523,27 @@ BCMASP_CORE_IO_MACRO(intr2, ASP_INTR2_OFFSET);
BCMASP_CORE_IO_MACRO(wakeup_intr2, ASP_WAKEUP_INTR2_OFFSET);
BCMASP_CORE_IO_MACRO(tx_analytics, ASP_TX_ANALYTICS_OFFSET);
BCMASP_CORE_IO_MACRO(rx_analytics, ASP_RX_ANALYTICS_OFFSET);
-BCMASP_CORE_IO_MACRO(rx_ctrl, ASP_RX_CTRL_OFFSET);
BCMASP_CORE_IO_MACRO(rx_filter, ASP_RX_FILTER_OFFSET);
BCMASP_CORE_IO_MACRO(rx_edpkt, ASP_EDPKT_OFFSET);
BCMASP_CORE_IO_MACRO(ctrl, ASP_CTRL_OFFSET);
BCMASP_CORE_IO_MACRO(ctrl2, ASP_CTRL2_OFFSET);
+#define BCMASP_CORE_IO_MACRO_OFFSET(name, offset) \
+static inline u32 name##_core_rl(struct bcmasp_priv *priv, \
+ u32 off) \
+{ \
+ u32 reg = readl_relaxed(priv->base + priv->name##_offset + \
+ (offset) + off); \
+ return reg; \
+} \
+static inline void name##_core_wl(struct bcmasp_priv *priv, \
+ u32 val, u32 off) \
+{ \
+ writel_relaxed(val, priv->base + priv->name##_offset + \
+ (offset) + off); \
+}
+BCMASP_CORE_IO_MACRO_OFFSET(rx_ctrl, ASP_RX_CTRL_OFFSET);
+
struct bcmasp_intf *bcmasp_interface_create(struct bcmasp_priv *priv,
struct device_node *ndev_dn, int i);
@@ -601,5 +596,5 @@ int bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
void bcmasp_netfilt_suspend(struct bcmasp_intf *intf);
-void bcmasp_eee_enable_set(struct bcmasp_intf *intf, bool enable);
+void bcmasp_enable_wol(struct bcmasp_intf *intf, bool en);
#endif
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c
index 9da5ae29a105..4381a4cfd8c6 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c
@@ -10,7 +10,6 @@
#include "bcmasp_intf_defs.h"
enum bcmasp_stat_type {
- BCMASP_STAT_RX_EDPKT,
BCMASP_STAT_RX_CTRL,
BCMASP_STAT_RX_CTRL_PER_INTF,
BCMASP_STAT_SOFT,
@@ -33,8 +32,6 @@ struct bcmasp_stats {
.reg_offset = offset, \
}
-#define STAT_BCMASP_RX_EDPKT(str, offset) \
- STAT_BCMASP_OFFSET(str, BCMASP_STAT_RX_EDPKT, offset)
#define STAT_BCMASP_RX_CTRL(str, offset) \
STAT_BCMASP_OFFSET(str, BCMASP_STAT_RX_CTRL, offset)
#define STAT_BCMASP_RX_CTRL_PER_INTF(str, offset) \
@@ -42,11 +39,6 @@ struct bcmasp_stats {
/* Must match the order of struct bcmasp_mib_counters */
static const struct bcmasp_stats bcmasp_gstrings_stats[] = {
- /* EDPKT counters */
- STAT_BCMASP_RX_EDPKT("RX Time Stamp", ASP_EDPKT_RX_TS_COUNTER),
- STAT_BCMASP_RX_EDPKT("RX PKT Count", ASP_EDPKT_RX_PKT_CNT),
- STAT_BCMASP_RX_EDPKT("RX PKT Buffered", ASP_EDPKT_HDR_EXTR_CNT),
- STAT_BCMASP_RX_EDPKT("RX PKT Pushed to DRAM", ASP_EDPKT_HDR_OUT_CNT),
/* ASP RX control */
STAT_BCMASP_RX_CTRL_PER_INTF("Frames From Unimac",
ASP_RX_CTRL_UMAC_0_FRAME_COUNT),
@@ -71,23 +63,6 @@ static const struct bcmasp_stats bcmasp_gstrings_stats[] = {
#define BCMASP_STATS_LEN ARRAY_SIZE(bcmasp_gstrings_stats)
-static u16 bcmasp_stat_fixup_offset(struct bcmasp_intf *intf,
- const struct bcmasp_stats *s)
-{
- struct bcmasp_priv *priv = intf->parent;
-
- if (!strcmp("Frames Out(Buffer)", s->stat_string))
- return priv->hw_info->rx_ctrl_fb_out_frame_count;
-
- if (!strcmp("Frames Out(Filters)", s->stat_string))
- return priv->hw_info->rx_ctrl_fb_filt_out_frame_count;
-
- if (!strcmp("RX Buffer FIFO Depth", s->stat_string))
- return priv->hw_info->rx_ctrl_fb_rx_fifo_depth;
-
- return s->reg_offset;
-}
-
static int bcmasp_get_sset_count(struct net_device *dev, int string_set)
{
switch (string_set) {
@@ -126,13 +101,10 @@ static void bcmasp_update_mib_counters(struct bcmasp_intf *intf)
char *p;
s = &bcmasp_gstrings_stats[i];
- offset = bcmasp_stat_fixup_offset(intf, s);
+ offset = s->reg_offset;
switch (s->type) {
case BCMASP_STAT_SOFT:
continue;
- case BCMASP_STAT_RX_EDPKT:
- val = rx_edpkt_core_rl(intf->parent, offset);
- break;
case BCMASP_STAT_RX_CTRL:
val = rx_ctrl_core_rl(intf->parent, offset);
break;
@@ -215,7 +187,7 @@ static int bcmasp_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
memcpy(intf->sopass, wol->sopass, sizeof(wol->sopass));
mutex_lock(&priv->wol_lock);
- priv->enable_wol(intf, !!intf->wolopts);
+ bcmasp_enable_wol(intf, !!intf->wolopts);
mutex_unlock(&priv->wol_lock);
return 0;
@@ -289,7 +261,7 @@ static int bcmasp_flow_get(struct bcmasp_intf *intf, struct ethtool_rxnfc *cmd)
memcpy(&cmd->fs, &nfilter->fs, sizeof(nfilter->fs));
- cmd->data = NUM_NET_FILTERS;
+ cmd->data = intf->parent->num_net_filters;
return 0;
}
@@ -336,7 +308,7 @@ static int bcmasp_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
break;
case ETHTOOL_GRXCLSRLALL:
err = bcmasp_netfilt_get_all_active(intf, rule_locs, &cmd->rule_cnt);
- cmd->data = NUM_NET_FILTERS;
+ cmd->data = intf->parent->num_net_filters;
break;
default:
err = -EOPNOTSUPP;
@@ -348,58 +320,19 @@ static int bcmasp_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
return err;
}
-void bcmasp_eee_enable_set(struct bcmasp_intf *intf, bool enable)
-{
- u32 reg;
-
- reg = umac_rl(intf, UMC_EEE_CTRL);
- if (enable)
- reg |= EEE_EN;
- else
- reg &= ~EEE_EN;
- umac_wl(intf, reg, UMC_EEE_CTRL);
-
- intf->eee.eee_enabled = enable;
-}
-
static int bcmasp_get_eee(struct net_device *dev, struct ethtool_keee *e)
{
- struct bcmasp_intf *intf = netdev_priv(dev);
- struct ethtool_keee *p = &intf->eee;
-
if (!dev->phydev)
return -ENODEV;
- e->tx_lpi_enabled = p->tx_lpi_enabled;
- e->tx_lpi_timer = umac_rl(intf, UMC_EEE_LPI_TIMER);
-
return phy_ethtool_get_eee(dev->phydev, e);
}
static int bcmasp_set_eee(struct net_device *dev, struct ethtool_keee *e)
{
- struct bcmasp_intf *intf = netdev_priv(dev);
- struct ethtool_keee *p = &intf->eee;
- int ret;
-
if (!dev->phydev)
return -ENODEV;
- if (!p->eee_enabled) {
- bcmasp_eee_enable_set(intf, false);
- } else {
- ret = phy_init_eee(dev->phydev, 0);
- if (ret) {
- netif_err(intf, hw, dev,
- "EEE initialization failed: %d\n", ret);
- return ret;
- }
-
- umac_wl(intf, e->tx_lpi_timer, UMC_EEE_LPI_TIMER);
- intf->eee.tx_lpi_enabled = e->tx_lpi_enabled;
- bcmasp_eee_enable_set(intf, true);
- }
-
return phy_ethtool_set_eee(dev->phydev, e);
}
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
index cfd50efbdbc0..0d61b8580d72 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
@@ -180,14 +180,14 @@ static struct sk_buff *bcmasp_csum_offload(struct net_device *dev,
case htons(ETH_P_IP):
header |= PKT_OFFLOAD_HDR_SIZE_2((ip_hdrlen(skb) >> 8) & 0xf);
header2 |= PKT_OFFLOAD_HDR2_SIZE_2(ip_hdrlen(skb) & 0xff);
- epkt |= PKT_OFFLOAD_EPKT_IP(0) | PKT_OFFLOAD_EPKT_CSUM_L2;
+ epkt |= PKT_OFFLOAD_EPKT_IP(0);
ip_proto = ip_hdr(skb)->protocol;
header_cnt += 2;
break;
case htons(ETH_P_IPV6):
header |= PKT_OFFLOAD_HDR_SIZE_2((IP6_HLEN >> 8) & 0xf);
header2 |= PKT_OFFLOAD_HDR2_SIZE_2(IP6_HLEN & 0xff);
- epkt |= PKT_OFFLOAD_EPKT_IP(1) | PKT_OFFLOAD_EPKT_CSUM_L2;
+ epkt |= PKT_OFFLOAD_EPKT_IP(1);
ip_proto = ipv6_hdr(skb)->nexthdr;
header_cnt += 2;
break;
@@ -198,12 +198,12 @@ static struct sk_buff *bcmasp_csum_offload(struct net_device *dev,
switch (ip_proto) {
case IPPROTO_TCP:
header2 |= PKT_OFFLOAD_HDR2_SIZE_3(tcp_hdrlen(skb));
- epkt |= PKT_OFFLOAD_EPKT_TP(0) | PKT_OFFLOAD_EPKT_CSUM_L3;
+ epkt |= PKT_OFFLOAD_EPKT_TP(0) | PKT_OFFLOAD_EPKT_CSUM_L4;
header_cnt++;
break;
case IPPROTO_UDP:
header2 |= PKT_OFFLOAD_HDR2_SIZE_3(UDP_HLEN);
- epkt |= PKT_OFFLOAD_EPKT_TP(1) | PKT_OFFLOAD_EPKT_CSUM_L3;
+ epkt |= PKT_OFFLOAD_EPKT_TP(1) | PKT_OFFLOAD_EPKT_CSUM_L4;
header_cnt++;
break;
default:
@@ -619,7 +619,6 @@ static void bcmasp_adj_link(struct net_device *dev)
struct phy_device *phydev = dev->phydev;
u32 cmd_bits = 0, reg;
int changed = 0;
- bool active;
if (intf->old_link != phydev->link) {
changed = 1;
@@ -677,8 +676,13 @@ static void bcmasp_adj_link(struct net_device *dev)
}
umac_wl(intf, reg, UMC_CMD);
- active = phy_init_eee(phydev, 0) >= 0;
- bcmasp_eee_enable_set(intf, active);
+ umac_wl(intf, phydev->eee_cfg.tx_lpi_timer, UMC_EEE_LPI_TIMER);
+ reg = umac_rl(intf, UMC_EEE_CTRL);
+ if (phydev->enable_tx_lpi)
+ reg |= EEE_EN;
+ else
+ reg &= ~EEE_EN;
+ umac_wl(intf, reg, UMC_EEE_CTRL);
}
reg = rgmii_rl(intf, RGMII_OOB_CNTRL);
@@ -814,9 +818,7 @@ static void bcmasp_init_tx(struct bcmasp_intf *intf)
/* Tx SPB */
tx_spb_ctrl_wl(intf, ((intf->channel + 8) << TX_SPB_CTRL_XF_BID_SHIFT),
TX_SPB_CTRL_XF_CTRL2);
- tx_pause_ctrl_wl(intf, (1 << (intf->channel + 8)), TX_PAUSE_MAP_VECTOR);
tx_spb_top_wl(intf, 0x1e, TX_SPB_TOP_BLKOUT);
- tx_spb_top_wl(intf, 0x0, TX_SPB_TOP_SPRE_BW_CTRL);
tx_spb_dma_wq(intf, intf->tx_spb_dma_addr, TX_SPB_DMA_READ);
tx_spb_dma_wq(intf, intf->tx_spb_dma_addr, TX_SPB_DMA_BASE);
@@ -1055,6 +1057,9 @@ static int bcmasp_netif_init(struct net_device *dev, bool phy_connect)
/* Indicate that the MAC is responsible for PHY PM */
phydev->mac_managed_pm = true;
+
+ /* Set phylib's copy of the LPI timer */
+ phydev->eee_cfg.tx_lpi_timer = umac_rl(intf, UMC_EEE_LPI_TIMER);
}
umac_reset(intf);
@@ -1178,7 +1183,7 @@ static void bcmasp_map_res(struct bcmasp_priv *priv, struct bcmasp_intf *intf)
{
/* Per port */
intf->res.umac = priv->base + UMC_OFFSET(intf);
- intf->res.umac2fb = priv->base + (priv->hw_info->umac2fb +
+ intf->res.umac2fb = priv->base + (UMAC2FB_OFFSET + priv->rx_ctrl_offset +
(intf->port * 0x4));
intf->res.rgmii = priv->base + RGMII_OFFSET(intf);
@@ -1193,7 +1198,6 @@ static void bcmasp_map_res(struct bcmasp_priv *priv, struct bcmasp_intf *intf)
intf->rx_edpkt_cfg = priv->base + RX_EDPKT_CFG_OFFSET(intf);
}
-#define MAX_IRQ_STR_LEN 64
struct bcmasp_intf *bcmasp_interface_create(struct bcmasp_priv *priv,
struct device_node *ndev_dn, int i)
{
@@ -1331,7 +1335,8 @@ static void bcmasp_suspend_to_wol(struct bcmasp_intf *intf)
ASP_WAKEUP_INTR2_MASK_CLEAR);
}
- if (intf->eee.eee_enabled && intf->parent->eee_fixup)
+ if (ndev->phydev && ndev->phydev->eee_cfg.eee_enabled &&
+ intf->parent->eee_fixup)
intf->parent->eee_fixup(intf, true);
netif_dbg(intf, wol, ndev, "entered WOL mode\n");
@@ -1373,7 +1378,8 @@ static void bcmasp_resume_from_wol(struct bcmasp_intf *intf)
{
u32 reg;
- if (intf->eee.eee_enabled && intf->parent->eee_fixup)
+ if (intf->ndev->phydev && intf->ndev->phydev->eee_cfg.eee_enabled &&
+ intf->parent->eee_fixup)
intf->parent->eee_fixup(intf, false);
reg = umac_rl(intf, UMC_MPD_CTRL);
@@ -1404,9 +1410,6 @@ int bcmasp_interface_resume(struct bcmasp_intf *intf)
bcmasp_resume_from_wol(intf);
- if (intf->eee.eee_enabled)
- bcmasp_eee_enable_set(intf, true);
-
netif_device_attach(dev);
return 0;
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf_defs.h b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf_defs.h
index ad742612895f..af7418348e81 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf_defs.h
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf_defs.h
@@ -118,8 +118,7 @@
#define UMC_PSW_MS 0x624
#define UMC_PSW_LS 0x628
-#define UMAC2FB_OFFSET_2_1 0x9f044
-#define UMAC2FB_OFFSET 0x9f03c
+#define UMAC2FB_OFFSET 0x9f044
#define UMAC2FB_CFG 0x0
#define UMAC2FB_CFG_OPUT_EN BIT(0)
#define UMAC2FB_CFG_VLAN_EN BIT(1)
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index e5809ad5eb82..8267417b3750 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -575,7 +575,7 @@ static void b44_check_phy(struct b44 *bp)
static void b44_timer(struct timer_list *t)
{
- struct b44 *bp = from_timer(bp, t, timer);
+ struct b44 *bp = timer_container_of(bp, t, timer);
spin_lock_irq(&bp->lock);
@@ -1628,7 +1628,7 @@ static int b44_close(struct net_device *dev)
napi_disable(&bp->napi);
- del_timer_sync(&bp->timer);
+ timer_delete_sync(&bp->timer);
spin_lock_irq(&bp->lock);
@@ -2473,7 +2473,7 @@ static int b44_suspend(struct ssb_device *sdev, pm_message_t state)
if (!netif_running(dev))
return 0;
- del_timer_sync(&bp->timer);
+ timer_delete_sync(&bp->timer);
spin_lock_irq(&bp->lock);
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 65e3a0656a4c..92204fea1f08 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -286,7 +286,7 @@ static int bcm_enet_refill_rx(struct net_device *dev, bool napi_mode)
*/
static void bcm_enet_refill_rx_timer(struct timer_list *t)
{
- struct bcm_enet_priv *priv = from_timer(priv, t, rx_timeout);
+ struct bcm_enet_priv *priv = timer_container_of(priv, t, rx_timeout);
struct net_device *dev = priv->net_dev;
spin_lock(&priv->rx_lock);
@@ -1195,7 +1195,7 @@ static int bcm_enet_stop(struct net_device *dev)
napi_disable(&priv->napi);
if (priv->has_phy)
phy_stop(dev->phydev);
- del_timer_sync(&priv->rx_timeout);
+ timer_delete_sync(&priv->rx_timeout);
/* mask all interrupts */
enet_writel(priv, 0, ENET_IRMASK_REG);
@@ -2001,7 +2001,7 @@ static inline int bcm_enet_port_is_rgmii(int portid)
*/
static void swphy_poll_timer(struct timer_list *t)
{
- struct bcm_enet_priv *priv = from_timer(priv, t, swphy_poll);
+ struct bcm_enet_priv *priv = timer_container_of(priv, t, swphy_poll);
unsigned int i;
for (i = 0; i < priv->num_ports; i++) {
@@ -2346,10 +2346,10 @@ static int bcm_enetsw_stop(struct net_device *dev)
priv = netdev_priv(dev);
kdev = &priv->pdev->dev;
- del_timer_sync(&priv->swphy_poll);
+ timer_delete_sync(&priv->swphy_poll);
netif_stop_queue(dev);
napi_disable(&priv->napi);
- del_timer_sync(&priv->rx_timeout);
+ timer_delete_sync(&priv->rx_timeout);
/* mask all interrupts */
enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 42672c63f108..bc4e1f3b3752 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -1933,7 +1933,11 @@ static int bcm_sysport_open(struct net_device *dev)
unsigned int i;
int ret;
- clk_prepare_enable(priv->clk);
+ ret = clk_prepare_enable(priv->clk);
+ if (ret) {
+ netdev_err(dev, "could not enable priv clock\n");
+ return ret;
+ }
/* Reset UniMAC */
umac_reset(priv);
@@ -2591,7 +2595,11 @@ static int bcm_sysport_probe(struct platform_device *pdev)
goto err_deregister_notifier;
}
- clk_prepare_enable(priv->clk);
+ ret = clk_prepare_enable(priv->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "could not enable priv clock\n");
+ goto err_deregister_netdev;
+ }
priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
dev_info(&pdev->dev,
@@ -2605,6 +2613,8 @@ static int bcm_sysport_probe(struct platform_device *pdev)
return 0;
+err_deregister_netdev:
+ unregister_netdev(dev);
err_deregister_notifier:
unregister_netdevice_notifier(&priv->netdev_notifier);
err_deregister_fixed_link:
@@ -2774,7 +2784,12 @@ static int __maybe_unused bcm_sysport_resume(struct device *d)
if (!netif_running(dev))
return 0;
- clk_prepare_enable(priv->clk);
+ ret = clk_prepare_enable(priv->clk);
+ if (ret) {
+ netdev_err(dev, "could not enable priv clock\n");
+ return ret;
+ }
+
if (priv->wolopts)
clk_disable_unprepare(priv->wol_clk);
diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c
index ecce23cecbea..4e266ce41180 100644
--- a/drivers/net/ethernet/broadcom/bgmac-platform.c
+++ b/drivers/net/ethernet/broadcom/bgmac-platform.c
@@ -171,6 +171,7 @@ static int platform_phy_connect(struct bgmac *bgmac)
static int bgmac_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
+ struct device_node *phy_node;
struct bgmac *bgmac;
struct resource *regs;
int ret;
@@ -236,7 +237,9 @@ static int bgmac_probe(struct platform_device *pdev)
bgmac->cco_ctl_maskset = platform_bgmac_cco_ctl_maskset;
bgmac->get_bus_clock = platform_bgmac_get_bus_clock;
bgmac->cmn_maskset32 = platform_bgmac_cmn_maskset32;
- if (of_parse_phandle(np, "phy-handle", 0)) {
+ phy_node = of_parse_phandle(np, "phy-handle", 0);
+ if (phy_node) {
+ of_node_put(phy_node);
bgmac->phy_connect = platform_phy_connect;
} else {
bgmac->phy_connect = bgmac_phy_connect_direct;
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index a461ec612e95..3e9c57196a39 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -1446,7 +1446,7 @@ int bgmac_phy_connect_direct(struct bgmac *bgmac)
struct phy_device *phy_dev;
int err;
- phy_dev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
+ phy_dev = fixed_phy_register(&fphy_status, NULL);
if (IS_ERR(phy_dev)) {
dev_err(bgmac->dev, "Failed to register fixed PHY device\n");
return PTR_ERR(phy_dev);
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index d73ef262991d..6fee9a41839c 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -328,8 +328,7 @@
#define BGMAC_RX_FRAME_OFFSET 30 /* There are 2 unused bytes between header and real data */
#define BGMAC_RX_BUF_OFFSET (NET_SKB_PAD + NET_IP_ALIGN - \
BGMAC_RX_FRAME_OFFSET)
-/* Jumbo frame size with FCS */
-#define BGMAC_RX_MAX_FRAME_SIZE 9724
+#define BGMAC_RX_MAX_FRAME_SIZE 1536
#define BGMAC_RX_BUF_SIZE (BGMAC_RX_FRAME_OFFSET + BGMAC_RX_MAX_FRAME_SIZE)
#define BGMAC_RX_ALLOC_SIZE (SKB_DATA_ALIGN(BGMAC_RX_BUF_SIZE + BGMAC_RX_BUF_OFFSET) + \
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 6ec773e61182..cb1011f6fd30 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -6163,7 +6163,7 @@ bnx2_5708_serdes_timer(struct bnx2 *bp)
static void
bnx2_timer(struct timer_list *t)
{
- struct bnx2 *bp = from_timer(bp, t, timer);
+ struct bnx2 *bp = timer_container_of(bp, t, timer);
if (!netif_running(bp->dev))
return;
@@ -6400,7 +6400,7 @@ bnx2_open(struct net_device *dev)
rc = bnx2_request_irq(bp);
if (rc) {
- del_timer_sync(&bp->timer);
+ timer_delete_sync(&bp->timer);
goto open_err;
}
bnx2_enable_int(bp);
@@ -6752,7 +6752,7 @@ bnx2_close(struct net_device *dev)
bnx2_disable_int_sync(bp);
bnx2_napi_disable(bp);
netif_tx_disable(dev);
- del_timer_sync(&bp->timer);
+ timer_delete_sync(&bp->timer);
bnx2_shutdown_chip(bp);
bnx2_free_irq(bp);
bnx2_free_skbs(bp);
@@ -8602,7 +8602,7 @@ bnx2_remove_one(struct pci_dev *pdev)
unregister_netdev(dev);
- del_timer_sync(&bp->timer);
+ timer_delete_sync(&bp->timer);
cancel_work_sync(&bp->reset_task);
pci_iounmap(bp->pdev, bp->regview);
@@ -8629,7 +8629,7 @@ bnx2_suspend(struct device *device)
cancel_work_sync(&bp->reset_task);
bnx2_netif_stop(bp, true);
netif_device_detach(dev);
- del_timer_sync(&bp->timer);
+ timer_delete_sync(&bp->timer);
bnx2_shutdown_chip(bp);
__bnx2_free_irq(bp);
bnx2_free_skbs(bp);
@@ -8687,7 +8687,7 @@ static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
if (netif_running(dev)) {
bnx2_netif_stop(bp, true);
- del_timer_sync(&bp->timer);
+ timer_delete_sync(&bp->timer);
bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index a8e07e51418f..e59530357e2c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -3059,7 +3059,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
bp->rx_mode = BNX2X_RX_MODE_NONE;
- del_timer_sync(&bp->timer);
+ timer_delete_sync(&bp->timer);
if (IS_PF(bp) && !BP_NOMCP(bp)) {
/* Set ALWAYS_ALIVE bit in shmem */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 678829646cec..c9a1a1d504c0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -5783,7 +5783,7 @@ void bnx2x_drv_pulse(struct bnx2x *bp)
static void bnx2x_timer(struct timer_list *t)
{
- struct bnx2x *bp = from_timer(bp, t, timer);
+ struct bnx2x *bp = timer_container_of(bp, t, timer);
if (!netif_running(bp->dev))
return;
@@ -14140,7 +14140,7 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
bnx2x_tx_disable(bp);
netdev_reset_tc(bp->dev);
- del_timer_sync(&bp->timer);
+ timer_delete_sync(&bp->timer);
cancel_delayed_work_sync(&bp->sp_task);
cancel_delayed_work_sync(&bp->period_task);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 8e04552d2216..02c8213915a5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -2593,7 +2593,7 @@ void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
/********************* Multicast verbs: SET, CLEAR ****************************/
static inline u8 bnx2x_mcast_bin_from_mac(u8 *mac)
{
- return (crc32c_le(0, mac, ETH_ALEN) >> 24) & 0xff;
+ return (crc32c(0, mac, ETH_ALEN) >> 24) & 0xff;
}
struct bnx2x_mcast_mac_elem {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 4ec4934a4edd..ae89a981e052 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -54,7 +54,10 @@
#include <net/pkt_cls.h>
#include <net/page_pool/helpers.h>
#include <linux/align.h>
+#include <net/netdev_lock.h>
#include <net/netdev_queues.h>
+#include <net/netdev_rx_queue.h>
+#include <linux/pci-tph.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
@@ -76,12 +79,12 @@
#define BNXT_DEF_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_HW | \
NETIF_MSG_TX_ERR)
+MODULE_IMPORT_NS("NETDEV_INTERNAL");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom NetXtreme network driver");
#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
-#define BNXT_RX_COPY_THRESH 256
#define BNXT_TX_PUSH_THRESH 164
@@ -486,6 +489,17 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
txr = &bp->tx_ring[bp->tx_ring_map[i]];
prod = txr->tx_prod;
+#if (MAX_SKB_FRAGS > TX_MAX_FRAGS)
+ if (skb_shinfo(skb)->nr_frags > TX_MAX_FRAGS) {
+ netdev_warn_once(dev, "SKB has too many (%d) fragments, max supported is %d. SKB will be linearized.\n",
+ skb_shinfo(skb)->nr_frags, TX_MAX_FRAGS);
+ if (skb_linearize(skb)) {
+ dev_kfree_skb_any(skb);
+ dev_core_stats_tx_dropped_inc(dev);
+ return NETDEV_TX_OK;
+ }
+ }
+#endif
free_size = bnxt_tx_avail(bp, txr);
if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
/* We must have raced with NAPI cleanup */
@@ -565,7 +579,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
TX_BD_FLAGS_LHINT_512_AND_SMALLER |
TX_BD_FLAGS_COAL_NOW |
TX_BD_FLAGS_PACKET_END |
- (2 << TX_BD_FLAGS_BD_CNT_SHIFT));
+ TX_BD_CNT(2));
if (skb->ip_summed == CHECKSUM_PARTIAL)
tx_push1->tx_bd_hsize_lflags =
@@ -640,7 +654,7 @@ normal_tx:
dma_unmap_addr_set(tx_buf, mapping, mapping);
flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
- ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);
+ TX_BD_CNT(last_frag + 2);
txbd->tx_bd_haddr = cpu_to_le64(mapping);
txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2 + last_frag);
@@ -773,7 +787,7 @@ tx_free:
dev_kfree_skb_any(skb);
tx_kick_pending:
if (BNXT_TX_PTP_IS_SET(lflags)) {
- txr->tx_buf_ring[txr->tx_prod].is_ts_pkt = 0;
+ txr->tx_buf_ring[RING_TX(bp, txr->tx_prod)].is_ts_pkt = 0;
atomic64_inc(&bp->ptp_cfg->stats.ts_err);
if (!(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP))
/* set SKB to err so PTP worker will clean up */
@@ -781,7 +795,7 @@ tx_kick_pending:
}
if (txr->kick_pending)
bnxt_txr_db_kick(bp, txr, txr->tx_prod);
- txr->tx_buf_ring[txr->tx_prod].skb = NULL;
+ txr->tx_buf_ring[RING_TX(bp, txr->tx_prod)].skb = NULL;
dev_core_stats_tx_dropped_inc(dev);
return NETDEV_TX_OK;
}
@@ -879,9 +893,9 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
bnapi->events &= ~BNXT_TX_CMP_EVENT;
}
-static bool bnxt_separate_head_pool(void)
+static bool bnxt_separate_head_pool(struct bnxt_rx_ring_info *rxr)
{
- return PAGE_SIZE > BNXT_RX_PAGE_SIZE;
+ return rxr->need_head_pool || PAGE_SIZE > BNXT_RX_PAGE_SIZE;
}
static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
@@ -905,6 +919,20 @@ static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
return page;
}
+static netmem_ref __bnxt_alloc_rx_netmem(struct bnxt *bp, dma_addr_t *mapping,
+ struct bnxt_rx_ring_info *rxr,
+ gfp_t gfp)
+{
+ netmem_ref netmem;
+
+ netmem = page_pool_alloc_netmems(rxr->page_pool, gfp);
+ if (!netmem)
+ return 0;
+
+ *mapping = page_pool_get_dma_addr_netmem(netmem);
+ return netmem;
+}
+
static inline u8 *__bnxt_alloc_rx_frag(struct bnxt *bp, dma_addr_t *mapping,
struct bnxt_rx_ring_info *rxr,
gfp_t gfp)
@@ -985,21 +1013,19 @@ static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
return next;
}
-static inline int bnxt_alloc_rx_page(struct bnxt *bp,
- struct bnxt_rx_ring_info *rxr,
- u16 prod, gfp_t gfp)
+static int bnxt_alloc_rx_netmem(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
+ u16 prod, gfp_t gfp)
{
struct rx_bd *rxbd =
&rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)];
struct bnxt_sw_rx_agg_bd *rx_agg_buf;
- struct page *page;
- dma_addr_t mapping;
u16 sw_prod = rxr->rx_sw_agg_prod;
unsigned int offset = 0;
+ dma_addr_t mapping;
+ netmem_ref netmem;
- page = __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);
-
- if (!page)
+ netmem = __bnxt_alloc_rx_netmem(bp, &mapping, rxr, gfp);
+ if (!netmem)
return -ENOMEM;
if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
@@ -1009,7 +1035,7 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp,
rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
rxr->rx_sw_agg_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod));
- rx_agg_buf->page = page;
+ rx_agg_buf->netmem = netmem;
rx_agg_buf->offset = offset;
rx_agg_buf->mapping = mapping;
rxbd->rx_bd_haddr = cpu_to_le64(mapping);
@@ -1053,11 +1079,11 @@ static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
p5_tpa = true;
for (i = 0; i < agg_bufs; i++) {
- u16 cons;
- struct rx_agg_cmp *agg;
struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
+ struct rx_agg_cmp *agg;
struct rx_bd *prod_bd;
- struct page *page;
+ netmem_ref netmem;
+ u16 cons;
if (p5_tpa)
agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
@@ -1074,11 +1100,11 @@ static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
cons_rx_buf = &rxr->rx_agg_ring[cons];
/* It is possible for sw_prod to be equal to cons, so
- * set cons_rx_buf->page to NULL first.
+ * set cons_rx_buf->netmem to 0 first.
*/
- page = cons_rx_buf->page;
- cons_rx_buf->page = NULL;
- prod_rx_buf->page = page;
+ netmem = cons_rx_buf->netmem;
+ cons_rx_buf->netmem = 0;
+ prod_rx_buf->netmem = netmem;
prod_rx_buf->offset = cons_rx_buf->offset;
prod_rx_buf->mapping = cons_rx_buf->mapping;
@@ -1204,29 +1230,35 @@ static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
return skb;
}
-static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
- struct bnxt_cp_ring_info *cpr,
- struct skb_shared_info *shinfo,
- u16 idx, u32 agg_bufs, bool tpa,
- struct xdp_buff *xdp)
+static u32 __bnxt_rx_agg_netmems(struct bnxt *bp,
+ struct bnxt_cp_ring_info *cpr,
+ u16 idx, u32 agg_bufs, bool tpa,
+ struct sk_buff *skb,
+ struct xdp_buff *xdp)
{
struct bnxt_napi *bnapi = cpr->bnapi;
- struct pci_dev *pdev = bp->pdev;
- struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
- u16 prod = rxr->rx_agg_prod;
+ struct skb_shared_info *shinfo;
+ struct bnxt_rx_ring_info *rxr;
u32 i, total_frag_len = 0;
bool p5_tpa = false;
+ u16 prod;
+
+ rxr = bnapi->rx_ring;
+ prod = rxr->rx_agg_prod;
if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa)
p5_tpa = true;
+ if (skb)
+ shinfo = skb_shinfo(skb);
+ else
+ shinfo = xdp_get_shared_info_from_buff(xdp);
+
for (i = 0; i < agg_bufs; i++) {
- skb_frag_t *frag = &shinfo->frags[i];
- u16 cons, frag_len;
- struct rx_agg_cmp *agg;
struct bnxt_sw_rx_agg_bd *cons_rx_buf;
- struct page *page;
- dma_addr_t mapping;
+ struct rx_agg_cmp *agg;
+ u16 cons, frag_len;
+ netmem_ref netmem;
if (p5_tpa)
agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
@@ -1237,27 +1269,41 @@ static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
cons_rx_buf = &rxr->rx_agg_ring[cons];
- skb_frag_fill_page_desc(frag, cons_rx_buf->page,
- cons_rx_buf->offset, frag_len);
- shinfo->nr_frags = i + 1;
+ if (skb) {
+ skb_add_rx_frag_netmem(skb, i, cons_rx_buf->netmem,
+ cons_rx_buf->offset,
+ frag_len, BNXT_RX_PAGE_SIZE);
+ } else {
+ skb_frag_t *frag = &shinfo->frags[i];
+
+ skb_frag_fill_netmem_desc(frag, cons_rx_buf->netmem,
+ cons_rx_buf->offset,
+ frag_len);
+ shinfo->nr_frags = i + 1;
+ }
__clear_bit(cons, rxr->rx_agg_bmap);
- /* It is possible for bnxt_alloc_rx_page() to allocate
+ /* It is possible for bnxt_alloc_rx_netmem() to allocate
* a sw_prod index that equals the cons index, so we
* need to clear the cons entry now.
*/
- mapping = cons_rx_buf->mapping;
- page = cons_rx_buf->page;
- cons_rx_buf->page = NULL;
+ netmem = cons_rx_buf->netmem;
+ cons_rx_buf->netmem = 0;
- if (xdp && page_is_pfmemalloc(page))
+ if (xdp && netmem_is_pfmemalloc(netmem))
xdp_buff_set_frag_pfmemalloc(xdp);
- if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
+ if (bnxt_alloc_rx_netmem(bp, rxr, prod, GFP_ATOMIC) != 0) {
+ if (skb) {
+ skb->len -= frag_len;
+ skb->data_len -= frag_len;
+ skb->truesize -= BNXT_RX_PAGE_SIZE;
+ }
+
--shinfo->nr_frags;
- cons_rx_buf->page = page;
+ cons_rx_buf->netmem = netmem;
- /* Update prod since possibly some pages have been
+ /* Update prod since possibly some netmems have been
* allocated already.
*/
rxr->rx_agg_prod = prod;
@@ -1265,8 +1311,8 @@ static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
return 0;
}
- dma_sync_single_for_cpu(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
- bp->rx_dir);
+ page_pool_dma_sync_netmem_for_cpu(rxr->page_pool, netmem, 0,
+ BNXT_RX_PAGE_SIZE);
total_frag_len += frag_len;
prod = NEXT_RX_AGG(prod);
@@ -1275,32 +1321,28 @@ static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
return total_frag_len;
}
-static struct sk_buff *bnxt_rx_agg_pages_skb(struct bnxt *bp,
- struct bnxt_cp_ring_info *cpr,
- struct sk_buff *skb, u16 idx,
- u32 agg_bufs, bool tpa)
+static struct sk_buff *bnxt_rx_agg_netmems_skb(struct bnxt *bp,
+ struct bnxt_cp_ring_info *cpr,
+ struct sk_buff *skb, u16 idx,
+ u32 agg_bufs, bool tpa)
{
- struct skb_shared_info *shinfo = skb_shinfo(skb);
u32 total_frag_len = 0;
- total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo, idx,
- agg_bufs, tpa, NULL);
+ total_frag_len = __bnxt_rx_agg_netmems(bp, cpr, idx, agg_bufs, tpa,
+ skb, NULL);
if (!total_frag_len) {
skb_mark_for_recycle(skb);
dev_kfree_skb(skb);
return NULL;
}
- skb->data_len += total_frag_len;
- skb->len += total_frag_len;
- skb->truesize += BNXT_RX_PAGE_SIZE * agg_bufs;
return skb;
}
-static u32 bnxt_rx_agg_pages_xdp(struct bnxt *bp,
- struct bnxt_cp_ring_info *cpr,
- struct xdp_buff *xdp, u16 idx,
- u32 agg_bufs, bool tpa)
+static u32 bnxt_rx_agg_netmems_xdp(struct bnxt *bp,
+ struct bnxt_cp_ring_info *cpr,
+ struct xdp_buff *xdp, u16 idx,
+ u32 agg_bufs, bool tpa)
{
struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp);
u32 total_frag_len = 0;
@@ -1308,8 +1350,8 @@ static u32 bnxt_rx_agg_pages_xdp(struct bnxt *bp,
if (!xdp_buff_has_frags(xdp))
shinfo->nr_frags = 0;
- total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo,
- idx, agg_bufs, tpa, xdp);
+ total_frag_len = __bnxt_rx_agg_netmems(bp, cpr, idx, agg_bufs, tpa,
+ NULL, xdp);
if (total_frag_len) {
xdp_buff_set_frags_flag(xdp);
shinfo->nr_frags = agg_bufs;
@@ -1343,13 +1385,13 @@ static struct sk_buff *bnxt_copy_data(struct bnxt_napi *bnapi, u8 *data,
if (!skb)
return NULL;
- dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
+ dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copybreak,
bp->rx_dir);
memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
len + NET_IP_ALIGN);
- dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
+ dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copybreak,
bp->rx_dir);
skb_put(skb, len);
@@ -1531,7 +1573,7 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
if (TPA_START_IS_IPV6(tpa_start1))
tpa_info->gso_type = SKB_GSO_TCPV6;
/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
- else if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP &&
+ else if (!BNXT_CHIP_P4_PLUS(bp) &&
TPA_START_HASH_TYPE(tpa_start) == 3)
tpa_info->gso_type = SKB_GSO_TCPV6;
tpa_info->rss_hash =
@@ -1842,7 +1884,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
return NULL;
}
- if (len <= bp->rx_copy_thresh) {
+ if (len <= bp->rx_copybreak) {
skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
if (!skb) {
bnxt_abort_tpa(cpr, idx, agg_bufs);
@@ -1881,7 +1923,8 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
}
if (agg_bufs) {
- skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, idx, agg_bufs, true);
+ skb = bnxt_rx_agg_netmems_skb(bp, cpr, skb, idx, agg_bufs,
+ true);
if (!skb) {
/* Page reuse already handled by bnxt_rx_pages(). */
cpr->sw_stats->rx.rx_oom_discards += 1;
@@ -2001,6 +2044,7 @@ static struct sk_buff *bnxt_rx_vlan(struct sk_buff *skb, u8 cmp_type,
}
return skb;
vlan_err:
+ skb_mark_for_recycle(skb);
dev_kfree_skb(skb);
return NULL;
}
@@ -2039,6 +2083,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
struct rx_cmp_ext *rxcmp1;
u32 tmp_raw_cons = *raw_cons;
u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
+ struct skb_shared_info *sinfo;
struct bnxt_sw_rx_bd *rx_buf;
unsigned int len;
u8 *data_ptr, agg_bufs, cmp_type;
@@ -2160,11 +2205,13 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
if (bnxt_xdp_attached(bp, rxr)) {
bnxt_xdp_buff_init(bp, rxr, cons, data_ptr, len, &xdp);
if (agg_bufs) {
- u32 frag_len = bnxt_rx_agg_pages_xdp(bp, cpr, &xdp,
- cp_cons, agg_bufs,
- false);
+ u32 frag_len = bnxt_rx_agg_netmems_xdp(bp, cpr, &xdp,
+ cp_cons,
+ agg_bufs,
+ false);
if (!frag_len)
goto oom_next_rx;
+
}
xdp_active = true;
}
@@ -2174,9 +2221,15 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
rc = 1;
goto next_rx;
}
+ if (xdp_buff_has_frags(&xdp)) {
+ sinfo = xdp_get_shared_info_from_buff(&xdp);
+ agg_bufs = sinfo->nr_frags;
+ } else {
+ agg_bufs = 0;
+ }
}
- if (len <= bp->rx_copy_thresh) {
+ if (len <= bp->rx_copybreak) {
if (!xdp_active)
skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
else
@@ -2207,11 +2260,13 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
if (agg_bufs) {
if (!xdp_active) {
- skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, cp_cons, agg_bufs, false);
+ skb = bnxt_rx_agg_netmems_skb(bp, cpr, skb, cp_cons,
+ agg_bufs, false);
if (!skb)
goto oom_next_rx;
} else {
- skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, rxr->page_pool, &xdp, rxcmp1);
+ skb = bnxt_xdp_build_skb(bp, skb, agg_bufs,
+ rxr->page_pool, &xdp);
if (!skb) {
/* we should be able to free the old skb here */
bnxt_xdp_buff_frags_free(rxr, &xdp);
@@ -2226,15 +2281,13 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
type = bnxt_rss_ext_op(bp, rxcmp);
} else {
- u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
+ u32 itypes = RX_CMP_ITYPES(rxcmp);
- /* RSS profiles 1 and 3 with extract code 0 for inner
- * 4-tuple
- */
- if (hash_type != 1 && hash_type != 3)
- type = PKT_HASH_TYPE_L3;
- else
+ if (itypes == RX_CMP_FLAGS_ITYPE_TCP ||
+ itypes == RX_CMP_FLAGS_ITYPE_UDP)
type = PKT_HASH_TYPE_L4;
+ else
+ type = PKT_HASH_TYPE_L3;
}
skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
}
@@ -2857,6 +2910,7 @@ static int bnxt_async_event_process(struct bnxt *bp,
}
__bnxt_queue_sp_work(bp);
async_event_process_exit:
+ bnxt_ulp_async_events(bp, cmpl);
return 0;
}
@@ -2899,6 +2953,13 @@ static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
return 0;
}
+static bool bnxt_vnic_is_active(struct bnxt *bp)
+{
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+
+ return vnic->fw_vnic_id != INVALID_HW_RING_ID && vnic->mru > 0;
+}
+
static irqreturn_t bnxt_msix(int irq, void *dev_instance)
{
struct bnxt_napi *bnapi = dev_instance;
@@ -2928,6 +2989,7 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
{
struct bnxt_napi *bnapi = cpr->bnapi;
u32 raw_cons = cpr->cp_raw_cons;
+ bool flush_xdp = false;
u32 cons;
int rx_pkts = 0;
u8 event = 0;
@@ -2981,6 +3043,8 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
else
rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
&event);
+ if (event & BNXT_REDIRECT_EVENT)
+ flush_xdp = true;
if (likely(rc >= 0))
rx_pkts += rc;
/* Increment rx_pkts when rc is -ENOMEM to count towards
@@ -3005,7 +3069,7 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
}
}
- if (event & BNXT_REDIRECT_EVENT) {
+ if (flush_xdp) {
xdp_do_flush();
event &= ~BNXT_REDIRECT_EVENT;
}
@@ -3166,7 +3230,7 @@ static int bnxt_poll(struct napi_struct *napi, int budget)
break;
}
}
- if (bp->flags & BNXT_FLAG_DIM) {
+ if ((bp->flags & BNXT_FLAG_DIM) && bnxt_vnic_is_active(bp)) {
struct dim_sample dim_sample = {};
dim_update_sample(cpr->event_ctr,
@@ -3297,7 +3361,7 @@ static int bnxt_poll_p5(struct napi_struct *napi, int budget)
poll_done:
cpr_rx = &cpr->cp_ring_arr[0];
if (cpr_rx->cp_ring_type == BNXT_NQ_HDL_TYPE_RX &&
- (bp->flags & BNXT_FLAG_DIM)) {
+ (bp->flags & BNXT_FLAG_DIM) && bnxt_vnic_is_active(bp)) {
struct dim_sample dim_sample = {};
dim_update_sample(cpr->event_ctr,
@@ -3309,75 +3373,85 @@ poll_done:
return work_done;
}
-static void bnxt_free_tx_skbs(struct bnxt *bp)
+static void bnxt_free_one_tx_ring_skbs(struct bnxt *bp,
+ struct bnxt_tx_ring_info *txr, int idx)
{
int i, max_idx;
struct pci_dev *pdev = bp->pdev;
- if (!bp->tx_ring)
- return;
-
max_idx = bp->tx_nr_pages * TX_DESC_CNT;
- for (i = 0; i < bp->tx_nr_rings; i++) {
- struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
- int j;
- if (!txr->tx_buf_ring)
+ for (i = 0; i < max_idx;) {
+ struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[i];
+ struct sk_buff *skb;
+ int j, last;
+
+ if (idx < bp->tx_nr_rings_xdp &&
+ tx_buf->action == XDP_REDIRECT) {
+ dma_unmap_single(&pdev->dev,
+ dma_unmap_addr(tx_buf, mapping),
+ dma_unmap_len(tx_buf, len),
+ DMA_TO_DEVICE);
+ xdp_return_frame(tx_buf->xdpf);
+ tx_buf->action = 0;
+ tx_buf->xdpf = NULL;
+ i++;
continue;
+ }
- for (j = 0; j < max_idx;) {
- struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
- struct sk_buff *skb;
- int k, last;
-
- if (i < bp->tx_nr_rings_xdp &&
- tx_buf->action == XDP_REDIRECT) {
- dma_unmap_single(&pdev->dev,
- dma_unmap_addr(tx_buf, mapping),
- dma_unmap_len(tx_buf, len),
- DMA_TO_DEVICE);
- xdp_return_frame(tx_buf->xdpf);
- tx_buf->action = 0;
- tx_buf->xdpf = NULL;
- j++;
- continue;
- }
+ skb = tx_buf->skb;
+ if (!skb) {
+ i++;
+ continue;
+ }
- skb = tx_buf->skb;
- if (!skb) {
- j++;
- continue;
- }
+ tx_buf->skb = NULL;
- tx_buf->skb = NULL;
+ if (tx_buf->is_push) {
+ dev_kfree_skb(skb);
+ i += 2;
+ continue;
+ }
- if (tx_buf->is_push) {
- dev_kfree_skb(skb);
- j += 2;
- continue;
- }
+ dma_unmap_single(&pdev->dev,
+ dma_unmap_addr(tx_buf, mapping),
+ skb_headlen(skb),
+ DMA_TO_DEVICE);
- dma_unmap_single(&pdev->dev,
- dma_unmap_addr(tx_buf, mapping),
- skb_headlen(skb),
- DMA_TO_DEVICE);
+ last = tx_buf->nr_frags;
+ i += 2;
+ for (j = 0; j < last; j++, i++) {
+ int ring_idx = i & bp->tx_ring_mask;
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
- last = tx_buf->nr_frags;
- j += 2;
- for (k = 0; k < last; k++, j++) {
- int ring_idx = j & bp->tx_ring_mask;
- skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
-
- tx_buf = &txr->tx_buf_ring[ring_idx];
- dma_unmap_page(
- &pdev->dev,
- dma_unmap_addr(tx_buf, mapping),
- skb_frag_size(frag), DMA_TO_DEVICE);
- }
- dev_kfree_skb(skb);
+ tx_buf = &txr->tx_buf_ring[ring_idx];
+ dma_unmap_page(&pdev->dev,
+ dma_unmap_addr(tx_buf, mapping),
+ skb_frag_size(frag), DMA_TO_DEVICE);
}
- netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
+ dev_kfree_skb(skb);
}
+ netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, idx));
+}
+
+static void bnxt_free_tx_skbs(struct bnxt *bp)
+{
+ int i;
+
+ if (!bp->tx_ring)
+ return;
+
+ for (i = 0; i < bp->tx_nr_rings; i++) {
+ struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
+
+ if (!txr->tx_buf_ring)
+ continue;
+
+ bnxt_free_one_tx_ring_skbs(bp, txr, i);
+ }
+
+ if (bp->ptp_cfg && !(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP))
+ bnxt_ptp_free_txts_skbs(bp->ptp_cfg);
}
static void bnxt_free_one_rx_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
@@ -3409,27 +3483,23 @@ static void bnxt_free_one_rx_agg_ring(struct bnxt *bp, struct bnxt_rx_ring_info
for (i = 0; i < max_idx; i++) {
struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
- struct page *page = rx_agg_buf->page;
+ netmem_ref netmem = rx_agg_buf->netmem;
- if (!page)
+ if (!netmem)
continue;
- rx_agg_buf->page = NULL;
+ rx_agg_buf->netmem = 0;
__clear_bit(i, rxr->rx_agg_bmap);
- page_pool_recycle_direct(rxr->page_pool, page);
+ page_pool_recycle_direct_netmem(rxr->page_pool, netmem);
}
}
-static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
+static void bnxt_free_one_tpa_info_data(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr)
{
- struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
- struct bnxt_tpa_idx_map *map;
int i;
- if (!rxr->rx_tpa)
- goto skip_rx_tpa_free;
-
for (i = 0; i < bp->max_tpa; i++) {
struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
u8 *data = tpa_info->data;
@@ -3440,6 +3510,17 @@ static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
tpa_info->data = NULL;
page_pool_free_va(rxr->head_pool, data, false);
}
+}
+
+static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr)
+{
+ struct bnxt_tpa_idx_map *map;
+
+ if (!rxr->rx_tpa)
+ goto skip_rx_tpa_free;
+
+ bnxt_free_one_tpa_info_data(bp, rxr);
skip_rx_tpa_free:
if (!rxr->rx_buf_ring)
@@ -3467,7 +3548,7 @@ static void bnxt_free_rx_skbs(struct bnxt *bp)
return;
for (i = 0; i < bp->rx_nr_rings; i++)
- bnxt_free_one_rx_ring_skbs(bp, i);
+ bnxt_free_one_rx_ring_skbs(bp, &bp->rx_ring[i]);
}
static void bnxt_free_skbs(struct bnxt *bp)
@@ -3608,29 +3689,64 @@ static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
return 0;
}
+static void bnxt_free_one_tpa_info(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr)
+{
+ int i;
+
+ kfree(rxr->rx_tpa_idx_map);
+ rxr->rx_tpa_idx_map = NULL;
+ if (rxr->rx_tpa) {
+ for (i = 0; i < bp->max_tpa; i++) {
+ kfree(rxr->rx_tpa[i].agg_arr);
+ rxr->rx_tpa[i].agg_arr = NULL;
+ }
+ }
+ kfree(rxr->rx_tpa);
+ rxr->rx_tpa = NULL;
+}
+
static void bnxt_free_tpa_info(struct bnxt *bp)
{
- int i, j;
+ int i;
for (i = 0; i < bp->rx_nr_rings; i++) {
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
- kfree(rxr->rx_tpa_idx_map);
- rxr->rx_tpa_idx_map = NULL;
- if (rxr->rx_tpa) {
- for (j = 0; j < bp->max_tpa; j++) {
- kfree(rxr->rx_tpa[j].agg_arr);
- rxr->rx_tpa[j].agg_arr = NULL;
- }
- }
- kfree(rxr->rx_tpa);
- rxr->rx_tpa = NULL;
+ bnxt_free_one_tpa_info(bp, rxr);
}
}
+static int bnxt_alloc_one_tpa_info(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr)
+{
+ struct rx_agg_cmp *agg;
+ int i;
+
+ rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
+ GFP_KERNEL);
+ if (!rxr->rx_tpa)
+ return -ENOMEM;
+
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
+ return 0;
+ for (i = 0; i < bp->max_tpa; i++) {
+ agg = kcalloc(MAX_SKB_FRAGS, sizeof(*agg), GFP_KERNEL);
+ if (!agg)
+ return -ENOMEM;
+ rxr->rx_tpa[i].agg_arr = agg;
+ }
+ rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
+ GFP_KERNEL);
+ if (!rxr->rx_tpa_idx_map)
+ return -ENOMEM;
+
+ return 0;
+}
+
static int bnxt_alloc_tpa_info(struct bnxt *bp)
{
- int i, j;
+ int i, rc;
bp->max_tpa = MAX_TPA;
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
@@ -3641,25 +3757,10 @@ static int bnxt_alloc_tpa_info(struct bnxt *bp)
for (i = 0; i < bp->rx_nr_rings; i++) {
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
- struct rx_agg_cmp *agg;
- rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
- GFP_KERNEL);
- if (!rxr->rx_tpa)
- return -ENOMEM;
-
- if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
- continue;
- for (j = 0; j < bp->max_tpa; j++) {
- agg = kcalloc(MAX_SKB_FRAGS, sizeof(*agg), GFP_KERNEL);
- if (!agg)
- return -ENOMEM;
- rxr->rx_tpa[j].agg_arr = agg;
- }
- rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
- GFP_KERNEL);
- if (!rxr->rx_tpa_idx_map)
- return -ENOMEM;
+ rc = bnxt_alloc_one_tpa_info(bp, rxr);
+ if (rc)
+ return rc;
}
return 0;
}
@@ -3683,7 +3784,7 @@ static void bnxt_free_rx_rings(struct bnxt *bp)
xdp_rxq_info_unreg(&rxr->xdp_rxq);
page_pool_destroy(rxr->page_pool);
- if (rxr->page_pool != rxr->head_pool)
+ if (bnxt_separate_head_pool(rxr))
page_pool_destroy(rxr->head_pool);
rxr->page_pool = rxr->head_pool = NULL;
@@ -3714,15 +3815,19 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
pp.dev = &bp->pdev->dev;
pp.dma_dir = bp->rx_dir;
pp.max_len = PAGE_SIZE;
- pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
+ pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV |
+ PP_FLAG_ALLOW_UNREADABLE_NETMEM;
+ pp.queue_idx = rxr->bnapi->index;
pool = page_pool_create(&pp);
if (IS_ERR(pool))
return PTR_ERR(pool);
rxr->page_pool = pool;
- if (bnxt_separate_head_pool()) {
+ rxr->need_head_pool = page_pool_is_unreadable(pool);
+ if (bnxt_separate_head_pool(rxr)) {
pp.pool_size = max(bp->rx_ring_size, 1024);
+ pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
pool = page_pool_create(&pp);
if (IS_ERR(pool))
goto err_destroy_pp;
@@ -3737,6 +3842,19 @@ err_destroy_pp:
return PTR_ERR(pool);
}
+static int bnxt_alloc_rx_agg_bmap(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
+{
+ u16 mem_size;
+
+ rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
+ mem_size = rxr->rx_agg_bmap_size / 8;
+ rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
+ if (!rxr->rx_agg_bmap)
+ return -ENOMEM;
+
+ return 0;
+}
+
static int bnxt_alloc_rx_rings(struct bnxt *bp)
{
int numa_node = dev_to_node(&bp->pdev->dev);
@@ -3781,19 +3899,15 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp)
ring->grp_idx = i;
if (agg_rings) {
- u16 mem_size;
-
ring = &rxr->rx_agg_ring_struct;
rc = bnxt_alloc_ring(bp, &ring->ring_mem);
if (rc)
return rc;
ring->grp_idx = i;
- rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
- mem_size = rxr->rx_agg_bmap_size / 8;
- rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
- if (!rxr->rx_agg_bmap)
- return -ENOMEM;
+ rc = bnxt_alloc_rx_agg_bmap(bp, rxr);
+ if (rc)
+ return rc;
}
}
if (bp->flags & BNXT_FLAG_TPA)
@@ -4125,6 +4239,8 @@ static void bnxt_reset_rx_ring_struct(struct bnxt *bp,
rxr->page_pool->p.napi = NULL;
rxr->page_pool = NULL;
+ rxr->head_pool->p.napi = NULL;
+ rxr->head_pool = NULL;
memset(&rxr->xdp_rxq, 0, sizeof(struct xdp_rxq_info));
ring = &rxr->rx_ring_struct;
@@ -4249,16 +4365,16 @@ static void bnxt_alloc_one_rx_ring_skb(struct bnxt *bp,
rxr->rx_prod = prod;
}
-static void bnxt_alloc_one_rx_ring_page(struct bnxt *bp,
- struct bnxt_rx_ring_info *rxr,
- int ring_nr)
+static void bnxt_alloc_one_rx_ring_netmem(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr,
+ int ring_nr)
{
u32 prod;
int i;
prod = rxr->rx_agg_prod;
for (i = 0; i < bp->rx_agg_ring_size; i++) {
- if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) {
+ if (bnxt_alloc_rx_netmem(bp, rxr, prod, GFP_KERNEL)) {
netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d pages only\n",
ring_nr, i, bp->rx_ring_size);
break;
@@ -4268,32 +4384,43 @@ static void bnxt_alloc_one_rx_ring_page(struct bnxt *bp,
rxr->rx_agg_prod = prod;
}
+static int bnxt_alloc_one_tpa_info_data(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr)
+{
+ dma_addr_t mapping;
+ u8 *data;
+ int i;
+
+ for (i = 0; i < bp->max_tpa; i++) {
+ data = __bnxt_alloc_rx_frag(bp, &mapping, rxr,
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ rxr->rx_tpa[i].data = data;
+ rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
+ rxr->rx_tpa[i].mapping = mapping;
+ }
+
+ return 0;
+}
+
static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
{
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
- int i;
+ int rc;
bnxt_alloc_one_rx_ring_skb(bp, rxr, ring_nr);
if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
return 0;
- bnxt_alloc_one_rx_ring_page(bp, rxr, ring_nr);
+ bnxt_alloc_one_rx_ring_netmem(bp, rxr, ring_nr);
if (rxr->rx_tpa) {
- dma_addr_t mapping;
- u8 *data;
-
- for (i = 0; i < bp->max_tpa; i++) {
- data = __bnxt_alloc_rx_frag(bp, &mapping, rxr,
- GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- rxr->rx_tpa[i].data = data;
- rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
- rxr->rx_tpa[i].mapping = mapping;
- }
+ rc = bnxt_alloc_one_tpa_info_data(bp, rxr);
+ if (rc)
+ return rc;
}
return 0;
}
@@ -4556,6 +4683,17 @@ void bnxt_set_tpa_flags(struct bnxt *bp)
bp->flags |= BNXT_FLAG_GRO;
}
+static void bnxt_init_ring_params(struct bnxt *bp)
+{
+ unsigned int rx_size;
+
+ bp->rx_copybreak = BNXT_DEFAULT_RX_COPYBREAK;
+ /* Try to fit 4 chunks into a 4k page */
+ rx_size = SZ_1K -
+ NET_SKB_PAD - SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ bp->dev->cfg->hds_thresh = max(BNXT_DEFAULT_RX_COPYBREAK, rx_size);
+}
+
/* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
* be set on entry.
*/
@@ -4570,12 +4708,11 @@ void bnxt_set_ring_params(struct bnxt *bp)
rx_space = rx_size + ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
ring_size = bp->rx_ring_size;
bp->rx_agg_ring_size = 0;
bp->rx_agg_nr_pages = 0;
- if (bp->flags & BNXT_FLAG_TPA)
+ if (bp->flags & BNXT_FLAG_TPA || bp->flags & BNXT_FLAG_HDS)
agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
bp->flags &= ~BNXT_FLAG_JUMBO;
@@ -4615,7 +4752,10 @@ void bnxt_set_ring_params(struct bnxt *bp)
ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) -
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
} else {
- rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
+ rx_size = max3(BNXT_DEFAULT_RX_COPYBREAK,
+ bp->rx_copybreak,
+ bp->dev->cfg_pending->hds_thresh);
+ rx_size = SKB_DATA_ALIGN(rx_size + NET_IP_ALIGN);
rx_space = rx_size + NET_SKB_PAD +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}
@@ -4656,7 +4796,7 @@ void bnxt_set_ring_params(struct bnxt *bp)
/* Changing allocation mode of RX rings.
* TODO: Update when extending xdp_rxq_info to support allocation modes.
*/
-int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
+static void __bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
{
struct net_device *dev = bp->dev;
@@ -4677,15 +4817,30 @@ int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
bp->rx_skb_func = bnxt_rx_page_skb;
}
bp->rx_dir = DMA_BIDIRECTIONAL;
- /* Disable LRO or GRO_HW */
- netdev_update_features(dev);
} else {
dev->max_mtu = bp->max_mtu;
bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
bp->rx_dir = DMA_FROM_DEVICE;
bp->rx_skb_func = bnxt_rx_skb;
}
- return 0;
+}
+
+void bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
+{
+ __bnxt_set_rx_skb_mode(bp, page_mode);
+
+ if (!page_mode) {
+ int rx, tx;
+
+ bnxt_get_max_rings(bp, &rx, &tx, true);
+ if (rx > 1) {
+ bp->flags &= ~BNXT_FLAG_NO_AGG_RINGS;
+ bp->dev->hw_features |= NETIF_F_LRO;
+ }
+ }
+
+ /* Update LRO and GRO_HW availability */
+ netdev_update_features(bp->dev);
}
static void bnxt_free_vnic_attributes(struct bnxt *bp)
@@ -5156,8 +5311,10 @@ static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool all)
{
int i;
- /* Under rtnl_lock and all our NAPIs have been disabled. It's
- * safe to delete the hash table.
+ netdev_assert_locked(bp->dev);
+
+ /* Under netdev instance lock and all our NAPIs have been disabled.
+ * It's safe to delete the hash table.
*/
for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
struct hlist_head *head;
@@ -5485,6 +5642,8 @@ int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT |
FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT;
+ if (bp->fw_cap & BNXT_FW_CAP_NPAR_1_2)
+ flags |= FUNC_DRV_RGTR_REQ_FLAGS_NPAR_1_2_SUPPORT;
req->flags = cpu_to_le32(flags);
req->ver_maj_8b = DRV_VER_MAJ;
req->ver_min_8b = DRV_VER_MIN;
@@ -6512,6 +6671,7 @@ static void bnxt_hwrm_update_rss_hash_cfg(struct bnxt *bp)
static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
+ u16 hds_thresh = (u16)bp->dev->cfg_pending->hds_thresh;
struct hwrm_vnic_plcmodes_cfg_input *req;
int rc;
@@ -6521,16 +6681,14 @@ static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, struct bnxt_vnic_info *vnic)
req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT);
req->enables = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID);
+ req->jumbo_thresh = cpu_to_le16(bp->rx_buf_use_size);
- if (BNXT_RX_PAGE_MODE(bp)) {
- req->jumbo_thresh = cpu_to_le16(bp->rx_buf_use_size);
- } else {
+ if (!BNXT_RX_PAGE_MODE(bp) && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
req->flags |= cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
req->enables |=
cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
- req->jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
- req->hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
+ req->hds_threshold = cpu_to_le16(hds_thresh);
}
req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
return hwrm_req_send(bp, req);
@@ -6856,6 +7014,30 @@ static void bnxt_hwrm_ring_grp_free(struct bnxt *bp)
hwrm_req_drop(bp, req);
}
+static void bnxt_set_rx_ring_params_p5(struct bnxt *bp, u32 ring_type,
+ struct hwrm_ring_alloc_input *req,
+ struct bnxt_ring_struct *ring)
+{
+ struct bnxt_ring_grp_info *grp_info = &bp->grp_info[ring->grp_idx];
+ u32 enables = RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID |
+ RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID;
+
+ if (ring_type == HWRM_RING_ALLOC_AGG) {
+ req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
+ req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
+ req->rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
+ enables |= RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID;
+ } else {
+ req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
+ if (NET_IP_ALIGN == 2)
+ req->flags =
+ cpu_to_le16(RING_ALLOC_REQ_FLAGS_RX_SOP_PAD);
+ }
+ req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
+ req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
+ req->enables |= cpu_to_le32(enables);
+}
+
static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
struct bnxt_ring_struct *ring,
u32 ring_type, u32 map_index)
@@ -6907,37 +7089,13 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
break;
}
case HWRM_RING_ALLOC_RX:
- req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
- req->length = cpu_to_le32(bp->rx_ring_mask + 1);
- if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
- u16 flags = 0;
-
- /* Association of rx ring with stats context */
- grp_info = &bp->grp_info[ring->grp_idx];
- req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
- req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
- req->enables |= cpu_to_le32(
- RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
- if (NET_IP_ALIGN == 2)
- flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
- req->flags = cpu_to_le16(flags);
- }
- break;
case HWRM_RING_ALLOC_AGG:
- if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
- req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
- /* Association of agg ring with rx ring */
- grp_info = &bp->grp_info[ring->grp_idx];
- req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
- req->rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
- req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
- req->enables |= cpu_to_le32(
- RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
- RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
- } else {
- req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
- }
- req->length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
+ req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
+ req->length = (ring_type == HWRM_RING_ALLOC_RX) ?
+ cpu_to_le32(bp->rx_ring_mask + 1) :
+ cpu_to_le32(bp->rx_agg_ring_mask + 1);
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
+ bnxt_set_rx_ring_params_p5(bp, ring_type, req, ring);
break;
case HWRM_RING_ALLOC_CMPL:
req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
@@ -7118,6 +7276,39 @@ static int bnxt_hwrm_rx_agg_ring_alloc(struct bnxt *bp,
return 0;
}
+static int bnxt_hwrm_cp_ring_alloc_p5(struct bnxt *bp,
+ struct bnxt_cp_ring_info *cpr)
+{
+ const u32 type = HWRM_RING_ALLOC_CMPL;
+ struct bnxt_napi *bnapi = cpr->bnapi;
+ struct bnxt_ring_struct *ring;
+ u32 map_idx = bnapi->index;
+ int rc;
+
+ ring = &cpr->cp_ring_struct;
+ ring->handle = BNXT_SET_NQ_HDL(cpr);
+ rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
+ if (rc)
+ return rc;
+ bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
+ bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
+ return 0;
+}
+
+static int bnxt_hwrm_tx_ring_alloc(struct bnxt *bp,
+ struct bnxt_tx_ring_info *txr, u32 tx_idx)
+{
+ struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
+ const u32 type = HWRM_RING_ALLOC_TX;
+ int rc;
+
+ rc = hwrm_ring_alloc_send_msg(bp, ring, type, tx_idx);
+ if (rc)
+ return rc;
+ bnxt_set_db(bp, &txr->tx_db, type, tx_idx, ring->fw_ring_id);
+ return 0;
+}
+
static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
{
bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
@@ -7154,33 +7345,17 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
}
}
- type = HWRM_RING_ALLOC_TX;
for (i = 0; i < bp->tx_nr_rings; i++) {
struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
- struct bnxt_ring_struct *ring;
- u32 map_idx;
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
- struct bnxt_cp_ring_info *cpr2 = txr->tx_cpr;
- struct bnxt_napi *bnapi = txr->bnapi;
- u32 type2 = HWRM_RING_ALLOC_CMPL;
-
- ring = &cpr2->cp_ring_struct;
- ring->handle = BNXT_SET_NQ_HDL(cpr2);
- map_idx = bnapi->index;
- rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
+ rc = bnxt_hwrm_cp_ring_alloc_p5(bp, txr->tx_cpr);
if (rc)
goto err_out;
- bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
- ring->fw_ring_id);
- bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
}
- ring = &txr->tx_ring_struct;
- map_idx = i;
- rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
+ rc = bnxt_hwrm_tx_ring_alloc(bp, txr, i);
if (rc)
goto err_out;
- bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
}
for (i = 0; i < bp->rx_nr_rings; i++) {
@@ -7193,20 +7368,9 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
if (!agg_rings)
bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
- struct bnxt_cp_ring_info *cpr2 = rxr->rx_cpr;
- struct bnxt_napi *bnapi = rxr->bnapi;
- u32 type2 = HWRM_RING_ALLOC_CMPL;
- struct bnxt_ring_struct *ring;
- u32 map_idx = bnapi->index;
-
- ring = &cpr2->cp_ring_struct;
- ring->handle = BNXT_SET_NQ_HDL(cpr2);
- rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
+ rc = bnxt_hwrm_cp_ring_alloc_p5(bp, rxr->rx_cpr);
if (rc)
goto err_out;
- bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
- ring->fw_ring_id);
- bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
}
}
@@ -7221,6 +7385,26 @@ err_out:
return rc;
}
+static void bnxt_cancel_dim(struct bnxt *bp)
+{
+ int i;
+
+ /* DIM work is initialized in bnxt_enable_napi(). Proceed only
+ * if NAPI is enabled.
+ */
+ if (!bp->bnapi || test_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
+ return;
+
+ /* Make sure NAPI sees that the VNIC is disabled */
+ synchronize_net();
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
+ struct bnxt_napi *bnapi = rxr->bnapi;
+
+ cancel_work_sync(&bnapi->cp_ring.dim.work);
+ }
+}
+
static int hwrm_ring_free_send_msg(struct bnxt *bp,
struct bnxt_ring_struct *ring,
u32 ring_type, int cmpl_ring_id)
@@ -7254,6 +7438,23 @@ exit:
return 0;
}
+static void bnxt_hwrm_tx_ring_free(struct bnxt *bp,
+ struct bnxt_tx_ring_info *txr,
+ bool close_path)
+{
+ struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
+ u32 cmpl_ring_id;
+
+ if (ring->fw_ring_id == INVALID_HW_RING_ID)
+ return;
+
+ cmpl_ring_id = close_path ? bnxt_cp_ring_for_tx(bp, txr) :
+ INVALID_HW_RING_ID;
+ hwrm_ring_free_send_msg(bp, ring, RING_FREE_REQ_RING_TYPE_TX,
+ cmpl_ring_id);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+}
+
static void bnxt_hwrm_rx_ring_free(struct bnxt *bp,
struct bnxt_rx_ring_info *rxr,
bool close_path)
@@ -7298,6 +7499,33 @@ static void bnxt_hwrm_rx_agg_ring_free(struct bnxt *bp,
bp->grp_info[grp_idx].agg_fw_ring_id = INVALID_HW_RING_ID;
}
+static void bnxt_hwrm_cp_ring_free(struct bnxt *bp,
+ struct bnxt_cp_ring_info *cpr)
+{
+ struct bnxt_ring_struct *ring;
+
+ ring = &cpr->cp_ring_struct;
+ if (ring->fw_ring_id == INVALID_HW_RING_ID)
+ return;
+
+ hwrm_ring_free_send_msg(bp, ring, RING_FREE_REQ_RING_TYPE_L2_CMPL,
+ INVALID_HW_RING_ID);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+}
+
+static void bnxt_clear_one_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
+{
+ struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
+ int i, size = ring->ring_mem.page_size;
+
+ cpr->cp_raw_cons = 0;
+ cpr->toggle = 0;
+
+ for (i = 0; i < bp->cp_nr_pages; i++)
+ if (cpr->cp_desc_ring[i])
+ memset(cpr->cp_desc_ring[i], 0, size);
+}
+
static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
{
u32 type;
@@ -7306,21 +7534,10 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
if (!bp->bnapi)
return;
- for (i = 0; i < bp->tx_nr_rings; i++) {
- struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
- struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
-
- if (ring->fw_ring_id != INVALID_HW_RING_ID) {
- u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
-
- hwrm_ring_free_send_msg(bp, ring,
- RING_FREE_REQ_RING_TYPE_TX,
- close_path ? cmpl_ring_id :
- INVALID_HW_RING_ID);
- ring->fw_ring_id = INVALID_HW_RING_ID;
- }
- }
+ for (i = 0; i < bp->tx_nr_rings; i++)
+ bnxt_hwrm_tx_ring_free(bp, &bp->tx_ring[i], close_path);
+ bnxt_cancel_dim(bp);
for (i = 0; i < bp->rx_nr_rings; i++) {
bnxt_hwrm_rx_ring_free(bp, &bp->rx_ring[i], close_path);
bnxt_hwrm_rx_agg_ring_free(bp, &bp->rx_ring[i], close_path);
@@ -7342,17 +7559,9 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
struct bnxt_ring_struct *ring;
int j;
- for (j = 0; j < cpr->cp_ring_count && cpr->cp_ring_arr; j++) {
- struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
+ for (j = 0; j < cpr->cp_ring_count && cpr->cp_ring_arr; j++)
+ bnxt_hwrm_cp_ring_free(bp, &cpr->cp_ring_arr[j]);
- ring = &cpr2->cp_ring_struct;
- if (ring->fw_ring_id == INVALID_HW_RING_ID)
- continue;
- hwrm_ring_free_send_msg(bp, ring,
- RING_FREE_REQ_RING_TYPE_L2_CMPL,
- INVALID_HW_RING_ID);
- ring->fw_ring_id = INVALID_HW_RING_ID;
- }
ring = &cpr->cp_ring_struct;
if (ring->fw_ring_id != INVALID_HW_RING_ID) {
hwrm_ring_free_send_msg(bp, ring, type,
@@ -8234,16 +8443,20 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
if (rc)
goto func_qcfg_exit;
+ flags = le16_to_cpu(resp->flags);
#ifdef CONFIG_BNXT_SRIOV
if (BNXT_VF(bp)) {
struct bnxt_vf_info *vf = &bp->vf;
vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
+ if (flags & FUNC_QCFG_RESP_FLAGS_TRUSTED_VF)
+ vf->flags |= BNXT_VF_TRUST;
+ else
+ vf->flags &= ~BNXT_VF_TRUST;
} else {
bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs);
}
#endif
- flags = le16_to_cpu(resp->flags);
if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
@@ -8261,6 +8474,7 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
switch (resp->port_partition_type) {
case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
+ case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_2:
case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
bp->port_partition_type = resp->port_partition_type;
@@ -8320,7 +8534,7 @@ static int bnxt_alloc_all_ctx_pg_info(struct bnxt *bp, int ctx_max)
struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
int n = 1;
- if (!ctxm->max_entries)
+ if (!ctxm->max_entries || ctxm->pg_info)
continue;
if (ctxm->instance_bmap)
@@ -8924,8 +9138,8 @@ static int bnxt_backing_store_cfg_v2(struct bnxt *bp, u32 ena)
continue;
}
bnxt_bs_trace_init(bp, ctxm);
- last_type = type;
}
+ last_type = type;
}
if (last_type == BNXT_CTX_INV) {
@@ -9072,10 +9286,18 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
ena = 0;
if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
pg_lvl = 2;
- extra_qps = min_t(u32, 65536, max_qps - l2_qps - qp1_qps);
- /* allocate extra qps if fw supports RoCE fast qp destroy feature */
- extra_qps += fast_qpmd_qps;
- extra_srqs = min_t(u32, 8192, max_srqs - srqs);
+ if (BNXT_SW_RES_LMT(bp)) {
+ extra_qps = max_qps - l2_qps - qp1_qps;
+ extra_srqs = max_srqs - srqs;
+ } else {
+ extra_qps = min_t(u32, 65536,
+ max_qps - l2_qps - qp1_qps);
+ /* allocate extra qps if fw supports RoCE fast qp
+ * destroy feature
+ */
+ extra_qps += fast_qpmd_qps;
+ extra_srqs = min_t(u32, 8192, max_srqs - srqs);
+ }
if (fast_qpmd_qps)
ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD;
}
@@ -9111,14 +9333,20 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
goto skip_rdma;
ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
- /* 128K extra is needed to accommodate static AH context
- * allocation by f/w.
- */
- num_mr = min_t(u32, ctxm->max_entries / 2, 1024 * 256);
- num_ah = min_t(u32, num_mr, 1024 * 128);
- ctxm->split_entry_cnt = BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1;
- if (!ctxm->mrav_av_entries || ctxm->mrav_av_entries > num_ah)
- ctxm->mrav_av_entries = num_ah;
+ if (BNXT_SW_RES_LMT(bp) &&
+ ctxm->split_entry_cnt == BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1) {
+ num_ah = ctxm->mrav_av_entries;
+ num_mr = ctxm->max_entries - num_ah;
+ } else {
+ /* 128K extra is needed to accommodate static AH context
+ * allocation by f/w.
+ */
+ num_mr = min_t(u32, ctxm->max_entries / 2, 1024 * 256);
+ num_ah = min_t(u32, num_mr, 1024 * 128);
+ ctxm->split_entry_cnt = BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1;
+ if (!ctxm->mrav_av_entries || ctxm->mrav_av_entries > num_ah)
+ ctxm->mrav_av_entries = num_ah;
+ }
rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, num_mr + num_ah, 2);
if (rc)
@@ -9411,6 +9639,8 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
bp->fw_cap |= BNXT_FW_CAP_HOT_RESET_IF;
if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED))
bp->fw_cap |= BNXT_FW_CAP_LIVEPATCH;
+ if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_NPAR_1_2_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_NPAR_1_2;
if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_DFLT_VLAN_TPID_PCP_SUPPORTED))
bp->fw_cap |= BNXT_FW_CAP_DFLT_VLAN_TPID_PCP;
if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_SUPPORTED)
@@ -9425,6 +9655,9 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
bp->flags |= BNXT_FLAG_UDP_GSO_CAP;
if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_TX_PKT_TS_CMPL_SUPPORTED)
bp->fw_cap |= BNXT_FW_CAP_TX_TS_CMP;
+ if (flags_ext2 &
+ FUNC_QCAPS_RESP_FLAGS_EXT2_SW_MAX_RESOURCE_LIMITS_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_SW_MAX_RESOURCE_LIMITS;
if (BNXT_PF(bp) &&
(flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_ROCE_VF_RESOURCE_MGMT_SUPPORTED))
bp->fw_cap |= BNXT_FW_CAP_ROCE_VF_RESC_MGMT_SUPPORTED;
@@ -9884,7 +10117,7 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
struct hwrm_ver_get_input *req;
u16 fw_maj, fw_min, fw_bld, fw_rsv;
u32 dev_caps_cfg, hwrm_ver;
- int rc, len;
+ int rc, len, max_tmo_secs;
rc = hwrm_req_init(bp, req, HWRM_VER_GET);
if (rc)
@@ -9957,9 +10190,14 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
bp->hwrm_cmd_max_timeout = le16_to_cpu(resp->max_req_timeout) * 1000;
if (!bp->hwrm_cmd_max_timeout)
bp->hwrm_cmd_max_timeout = HWRM_CMD_MAX_TIMEOUT;
- else if (bp->hwrm_cmd_max_timeout > HWRM_CMD_MAX_TIMEOUT)
- netdev_warn(bp->dev, "Device requests max timeout of %d seconds, may trigger hung task watchdog\n",
- bp->hwrm_cmd_max_timeout / 1000);
+ max_tmo_secs = bp->hwrm_cmd_max_timeout / 1000;
+#ifdef CONFIG_DETECT_HUNG_TASK
+ if (bp->hwrm_cmd_max_timeout > HWRM_CMD_MAX_TIMEOUT ||
+ max_tmo_secs > CONFIG_DEFAULT_HUNG_TASK_TIMEOUT) {
+ netdev_warn(bp->dev, "Device requests max timeout of %d seconds, may trigger hung task watchdog (kernel default %ds)\n",
+ max_tmo_secs, CONFIG_DEFAULT_HUNG_TASK_TIMEOUT);
+ }
+#endif
if (resp->hwrm_intf_maj_8b >= 1) {
bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
@@ -10545,6 +10783,72 @@ void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx,
bp->num_rss_ctx--;
}
+static bool bnxt_vnic_has_rx_ring(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+ int rxr_id)
+{
+ u16 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
+ int i, vnic_rx;
+
+ /* The ntuple VNIC always includes all the rx rings. Any rx ring id
+ * change must be reflected here because a future filter may use it.
+ */
+ if (vnic->flags & BNXT_VNIC_NTUPLE_FLAG)
+ return true;
+
+ for (i = 0; i < tbl_size; i++) {
+ if (vnic->flags & BNXT_VNIC_RSSCTX_FLAG)
+ vnic_rx = ethtool_rxfh_context_indir(vnic->rss_ctx)[i];
+ else
+ vnic_rx = bp->rss_indir_tbl[i];
+
+ if (rxr_id == vnic_rx)
+ return true;
+ }
+
+ return false;
+}
+
+static int bnxt_set_vnic_mru_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+ u16 mru, int rxr_id)
+{
+ int rc;
+
+ if (!bnxt_vnic_has_rx_ring(bp, vnic, rxr_id))
+ return 0;
+
+ if (mru) {
+ rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
+ vnic->vnic_id, rc);
+ return rc;
+ }
+ }
+ vnic->mru = mru;
+ bnxt_hwrm_vnic_update(bp, vnic,
+ VNIC_UPDATE_REQ_ENABLES_MRU_VALID);
+
+ return 0;
+}
+
+static int bnxt_set_rss_ctx_vnic_mru(struct bnxt *bp, u16 mru, int rxr_id)
+{
+ struct ethtool_rxfh_context *ctx;
+ unsigned long context;
+ int rc;
+
+ xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) {
+ struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx);
+ struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
+
+ rc = bnxt_set_vnic_mru_p5(bp, vnic, mru, rxr_id);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
static void bnxt_hwrm_realloc_rss_ctx_vnic(struct bnxt *bp)
{
bool set_tpa = !!(bp->flags & BNXT_FLAG_TPA);
@@ -11116,6 +11420,155 @@ int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
return 0;
}
+static void bnxt_tx_queue_stop(struct bnxt *bp, int idx)
+{
+ struct bnxt_tx_ring_info *txr;
+ struct netdev_queue *txq;
+ struct bnxt_napi *bnapi;
+ int i;
+
+ bnapi = bp->bnapi[idx];
+ bnxt_for_each_napi_tx(i, bnapi, txr) {
+ WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
+ synchronize_net();
+
+ if (!(bnapi->flags & BNXT_NAPI_FLAG_XDP)) {
+ txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
+ if (txq) {
+ __netif_tx_lock_bh(txq);
+ netif_tx_stop_queue(txq);
+ __netif_tx_unlock_bh(txq);
+ }
+ }
+
+ if (!bp->tph_mode)
+ continue;
+
+ bnxt_hwrm_tx_ring_free(bp, txr, true);
+ bnxt_hwrm_cp_ring_free(bp, txr->tx_cpr);
+ bnxt_free_one_tx_ring_skbs(bp, txr, txr->txq_index);
+ bnxt_clear_one_cp_ring(bp, txr->tx_cpr);
+ }
+}
+
+static int bnxt_tx_queue_start(struct bnxt *bp, int idx)
+{
+ struct bnxt_tx_ring_info *txr;
+ struct netdev_queue *txq;
+ struct bnxt_napi *bnapi;
+ int rc, i;
+
+ bnapi = bp->bnapi[idx];
+ /* All rings have been reserved and previously allocated.
+ * Reallocating with the same parameters should never fail.
+ */
+ bnxt_for_each_napi_tx(i, bnapi, txr) {
+ if (!bp->tph_mode)
+ goto start_tx;
+
+ rc = bnxt_hwrm_cp_ring_alloc_p5(bp, txr->tx_cpr);
+ if (rc)
+ return rc;
+
+ rc = bnxt_hwrm_tx_ring_alloc(bp, txr, false);
+ if (rc)
+ return rc;
+
+ txr->tx_prod = 0;
+ txr->tx_cons = 0;
+ txr->tx_hw_cons = 0;
+start_tx:
+ WRITE_ONCE(txr->dev_state, 0);
+ synchronize_net();
+
+ if (bnapi->flags & BNXT_NAPI_FLAG_XDP)
+ continue;
+
+ txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
+ if (txq)
+ netif_tx_start_queue(txq);
+ }
+
+ return 0;
+}
+
+static void bnxt_irq_affinity_notify(struct irq_affinity_notify *notify,
+ const cpumask_t *mask)
+{
+ struct bnxt_irq *irq;
+ u16 tag;
+ int err;
+
+ irq = container_of(notify, struct bnxt_irq, affinity_notify);
+
+ if (!irq->bp->tph_mode)
+ return;
+
+ cpumask_copy(irq->cpu_mask, mask);
+
+ if (irq->ring_nr >= irq->bp->rx_nr_rings)
+ return;
+
+ if (pcie_tph_get_cpu_st(irq->bp->pdev, TPH_MEM_TYPE_VM,
+ cpumask_first(irq->cpu_mask), &tag))
+ return;
+
+ if (pcie_tph_set_st_entry(irq->bp->pdev, irq->msix_nr, tag))
+ return;
+
+ netdev_lock(irq->bp->dev);
+ if (netif_running(irq->bp->dev)) {
+ err = netdev_rx_queue_restart(irq->bp->dev, irq->ring_nr);
+ if (err)
+ netdev_err(irq->bp->dev,
+ "RX queue restart failed: err=%d\n", err);
+ }
+ netdev_unlock(irq->bp->dev);
+}
+
+static void bnxt_irq_affinity_release(struct kref *ref)
+{
+ struct irq_affinity_notify *notify =
+ container_of(ref, struct irq_affinity_notify, kref);
+ struct bnxt_irq *irq;
+
+ irq = container_of(notify, struct bnxt_irq, affinity_notify);
+
+ if (!irq->bp->tph_mode)
+ return;
+
+ if (pcie_tph_set_st_entry(irq->bp->pdev, irq->msix_nr, 0)) {
+ netdev_err(irq->bp->dev,
+ "Setting ST=0 for MSIX entry %d failed\n",
+ irq->msix_nr);
+ return;
+ }
+}
+
+static void bnxt_release_irq_notifier(struct bnxt_irq *irq)
+{
+ irq_set_affinity_notifier(irq->vector, NULL);
+}
+
+static void bnxt_register_irq_notifier(struct bnxt *bp, struct bnxt_irq *irq)
+{
+ struct irq_affinity_notify *notify;
+
+ irq->bp = bp;
+
+ /* Nothing to do if TPH is not enabled */
+ if (!bp->tph_mode)
+ return;
+
+ /* Register IRQ affinity notifier */
+ notify = &irq->affinity_notify;
+ notify->irq = irq->vector;
+ notify->notify = bnxt_irq_affinity_notify;
+ notify->release = bnxt_irq_affinity_release;
+
+ irq_set_affinity_notifier(irq->vector, notify);
+}
+
static void bnxt_free_irq(struct bnxt *bp)
{
struct bnxt_irq *irq;
@@ -11138,11 +11591,18 @@ static void bnxt_free_irq(struct bnxt *bp)
free_cpumask_var(irq->cpu_mask);
irq->have_cpumask = 0;
}
+
+ bnxt_release_irq_notifier(irq);
+
free_irq(irq->vector, bp->bnapi[i]);
}
irq->requested = 0;
}
+
+ /* Disable TPH support */
+ pcie_disable_tph(bp->pdev);
+ bp->tph_mode = 0;
}
static int bnxt_request_irq(struct bnxt *bp)
@@ -11162,6 +11622,12 @@ static int bnxt_request_irq(struct bnxt *bp)
#ifdef CONFIG_RFS_ACCEL
rmap = bp->dev->rx_cpu_rmap;
#endif
+
+ /* Enable TPH support as part of IRQ request */
+ rc = pcie_enable_tph(bp->pdev, PCI_TPH_ST_IV_MODE);
+ if (!rc)
+ bp->tph_mode = PCI_TPH_ST_IV_MODE;
+
for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
int map_idx = bnxt_cp_num_to_irq_num(bp, i);
struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
@@ -11180,13 +11646,16 @@ static int bnxt_request_irq(struct bnxt *bp)
if (rc)
break;
- netif_napi_set_irq(&bp->bnapi[i]->napi, irq->vector);
+ netif_napi_set_irq_locked(&bp->bnapi[i]->napi, irq->vector);
irq->requested = 1;
if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
int numa_node = dev_to_node(&bp->pdev->dev);
+ u16 tag;
irq->have_cpumask = 1;
+ irq->msix_nr = map_idx;
+ irq->ring_nr = i;
cpumask_set_cpu(cpumask_local_spread(i, numa_node),
irq->cpu_mask);
rc = irq_update_affinity_hint(irq->vector, irq->cpu_mask);
@@ -11196,6 +11665,16 @@ static int bnxt_request_irq(struct bnxt *bp)
irq->vector);
break;
}
+
+ bnxt_register_irq_notifier(bp, irq);
+
+ /* Init ST table entry */
+ if (pcie_tph_get_cpu_st(irq->bp->pdev, TPH_MEM_TYPE_VM,
+ cpumask_first(irq->cpu_mask),
+ &tag))
+ continue;
+
+ pcie_tph_set_st_entry(irq->bp->pdev, irq->msix_nr, tag);
}
}
return rc;
@@ -11216,9 +11695,9 @@ static void bnxt_del_napi(struct bnxt *bp)
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
- __netif_napi_del(&bnapi->napi);
+ __netif_napi_del_locked(&bnapi->napi);
}
- /* We called __netif_napi_del(), we need
+ /* We called __netif_napi_del_locked(), we need
* to respect an RCU grace period before freeing napi structures.
*/
synchronize_net();
@@ -11235,14 +11714,17 @@ static void bnxt_init_napi(struct bnxt *bp)
poll_fn = bnxt_poll_p5;
else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
cp_nr_rings--;
+
+ set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state);
+
for (i = 0; i < cp_nr_rings; i++) {
bnapi = bp->bnapi[i];
- netif_napi_add_config(bp->dev, &bnapi->napi, poll_fn,
- bnapi->index);
+ netif_napi_add_config_locked(bp->dev, &bnapi->napi, poll_fn,
+ bnapi->index);
}
if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
bnapi = bp->bnapi[cp_nr_rings];
- netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll_nitroa0);
+ netif_napi_add_locked(bp->dev, &bnapi->napi, bnxt_poll_nitroa0);
}
}
@@ -11263,9 +11745,7 @@ static void bnxt_disable_napi(struct bnxt *bp)
cpr->sw_stats->tx.tx_resets++;
if (bnapi->in_reset)
cpr->sw_stats->rx.rx_resets++;
- napi_disable(&bnapi->napi);
- if (bnapi->rx_ring)
- cancel_work_sync(&cpr->dim.work);
+ napi_disable_locked(&bnapi->napi);
}
}
@@ -11287,7 +11767,7 @@ static void bnxt_enable_napi(struct bnxt *bp)
INIT_WORK(&cpr->dim.work, bnxt_dim_work);
cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
}
- napi_enable(&bnapi->napi);
+ napi_enable_locked(&bnapi->napi);
}
}
@@ -11485,6 +11965,26 @@ hwrm_phy_qcaps_exit:
return rc;
}
+static void bnxt_hwrm_mac_qcaps(struct bnxt *bp)
+{
+ struct hwrm_port_mac_qcaps_output *resp;
+ struct hwrm_port_mac_qcaps_input *req;
+ int rc;
+
+ if (bp->hwrm_spec_code < 0x10a03)
+ return;
+
+ rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_QCAPS);
+ if (rc)
+ return;
+
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send_silent(bp, req);
+ if (!rc)
+ bp->mac_flags = resp->flags;
+ hwrm_req_drop(bp, req);
+}
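A hedged sketch of a likely consumer of the new mac_flags field; the helper name is hypothetical and the actual user is outside the quoted hunks:

static bool example_mac_loopback_supported(struct bnxt *bp)
{
	/* BNXT_MAC_FL_NO_MAC_LPBK is defined in bnxt.h later in this
	 * patch; when set, the MAC loopback self-test should be skipped.
	 */
	return !(bp->mac_flags & BNXT_MAC_FL_NO_MAC_LPBK);
}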
+
static bool bnxt_support_dropped(u16 advertising, u16 supported)
{
u16 diff = advertising ^ supported;
@@ -11936,11 +12436,15 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
{
struct hwrm_func_drv_if_change_output *resp;
struct hwrm_func_drv_if_change_input *req;
- bool fw_reset = !bp->irq_tbl;
bool resc_reinit = false;
+ bool caps_change = false;
int rc, retry = 0;
+ bool fw_reset;
u32 flags = 0;
+ fw_reset = (bp->fw_reset_state == BNXT_FW_RESET_STATE_ABORT);
+ bp->fw_reset_state = 0;
+
if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
return 0;
@@ -11993,8 +12497,11 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
return -ENODEV;
}
- if (resc_reinit || fw_reset) {
- if (fw_reset) {
+ if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_CAPS_CHANGE)
+ caps_change = true;
+
+ if (resc_reinit || fw_reset || caps_change) {
+ if (fw_reset || caps_change) {
set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
bnxt_ulp_irq_stop(bp);
@@ -12006,13 +12513,8 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
return rc;
}
+ /* IRQ will be initialized later in bnxt_request_irq() */
bnxt_clear_int_mode(bp);
- rc = bnxt_init_int_mode(bp);
- if (rc) {
- clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
- netdev_err(bp->dev, "init int mode failed\n");
- return rc;
- }
}
rc = bnxt_cancel_reservations(bp, fw_reset);
}
@@ -12411,8 +12913,6 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
/* VF-reps may need to be re-opened after the PF is re-opened */
if (BNXT_PF(bp))
bnxt_vf_reps_open(bp);
- if (bp->ptp_cfg && !(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP))
- WRITE_ONCE(bp->ptp_cfg->tx_avail, BNXT_MAX_TX_TS);
bnxt_ptp_init_rtc(bp, true);
bnxt_ptp_cfg_tstamp_filters(bp);
if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
@@ -12430,7 +12930,6 @@ open_err_free_mem:
return rc;
}
-/* rtnl_lock held */
int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
{
int rc = 0;
@@ -12441,14 +12940,14 @@ int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
if (rc) {
netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
- dev_close(bp->dev);
+ netif_close(bp->dev);
}
return rc;
}
-/* rtnl_lock held, open the NIC half way by allocating all resources, but
- * NAPI, IRQ, and TX are not enabled. This is mainly used for offline
- * self tests.
+/* netdev instance lock held, open the NIC half way by allocating all
+ * resources, but NAPI, IRQ, and TX are not enabled. This is mainly used
+ * for offline self tests.
*/
int bnxt_half_open_nic(struct bnxt *bp)
{
@@ -12479,12 +12978,12 @@ int bnxt_half_open_nic(struct bnxt *bp)
half_open_err:
bnxt_free_skbs(bp);
bnxt_free_mem(bp, true);
- dev_close(bp->dev);
+ netif_close(bp->dev);
return rc;
}
-/* rtnl_lock held, this call can only be made after a previous successful
- * call to bnxt_half_open_nic().
+/* netdev instance lock held, this call can only be made after a previous
+ * successful call to bnxt_half_open_nic().
*/
void bnxt_half_close_nic(struct bnxt *bp)
{
@@ -12573,7 +13072,7 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
bnxt_debug_dev_exit(bp);
bnxt_disable_napi(bp);
- del_timer_sync(&bp->timer);
+ timer_delete_sync(&bp->timer);
bnxt_free_skbs(bp);
/* Save ring stats before shutdown */
@@ -12593,10 +13092,11 @@ void bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
/* If we get here, it means firmware reset is in progress
* while we are trying to close. We can safely proceed with
- * the close because we are holding rtnl_lock(). Some firmware
- * messages may fail as we proceed to close. We set the
- * ABORT_ERR flag here so that the FW reset thread will later
- * abort when it gets the rtnl_lock() and sees the flag.
+ * the close because we are holding netdev instance lock.
+ * Some firmware messages may fail as we proceed to close.
+ * We set the ABORT_ERR flag here so that the FW reset thread
+ * will later abort when it gets the netdev instance lock
+ * and sees the flag.
*/
netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n");
set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
@@ -12687,7 +13187,7 @@ static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
return hwrm_req_send(bp, req);
}
-/* rtnl_lock held */
+/* netdev instance lock held */
static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct mii_ioctl_data *mdio = if_mii(ifr);
@@ -13564,7 +14064,7 @@ fw_reset:
static void bnxt_timer(struct timer_list *t)
{
- struct bnxt *bp = from_timer(bp, t, timer);
+ struct bnxt *bp = timer_container_of(bp, t, timer);
struct net_device *dev = bp->dev;
if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state))
@@ -13606,20 +14106,36 @@ bnxt_restart_timer:
mod_timer(&bp->timer, jiffies + bp->current_interval);
}
-static void bnxt_rtnl_lock_sp(struct bnxt *bp)
+static void bnxt_lock_sp(struct bnxt *bp)
{
/* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
* set. If the device is being closed, bnxt_close() may be holding
- * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we
- * must clear BNXT_STATE_IN_SP_TASK before holding rtnl().
+ * netdev instance lock and waiting for BNXT_STATE_IN_SP_TASK to clear.
+ * So we must clear BNXT_STATE_IN_SP_TASK before holding netdev
+ * instance lock.
*/
clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
+ netdev_lock(bp->dev);
+}
+
+static void bnxt_unlock_sp(struct bnxt *bp)
+{
+ set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
+ netdev_unlock(bp->dev);
+}
+
+/* Same as bnxt_lock_sp() with additional rtnl_lock */
+static void bnxt_rtnl_lock_sp(struct bnxt *bp)
+{
+ clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
rtnl_lock();
+ netdev_lock(bp->dev);
}
static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
{
set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
+ netdev_unlock(bp->dev);
rtnl_unlock();
}
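A minimal sketch of how a slow-path handler might pair the new helpers; the handler body is hypothetical and bnxt_reset_task() is only an example of work done under the lock:

static void example_sp_handler(struct bnxt *bp)
{
	/* Drop BNXT_STATE_IN_SP_TASK before taking the instance lock,
	 * then restore it on the way out, as the helpers above do.
	 */
	bnxt_lock_sp(bp);
	if (test_bit(BNXT_STATE_OPEN, &bp->state))
		bnxt_reset_task(bp, true);	/* example work item */
	bnxt_unlock_sp(bp);
}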
@@ -13663,7 +14179,7 @@ static void bnxt_rx_ring_reset(struct bnxt *bp)
bnxt_reset_task(bp, true);
break;
}
- bnxt_free_one_rx_ring_skbs(bp, i);
+ bnxt_free_one_rx_ring_skbs(bp, rxr);
rxr->rx_prod = 0;
rxr->rx_agg_prod = 0;
rxr->rx_sw_agg_prod = 0;
@@ -13734,7 +14250,7 @@ static bool is_bnxt_fw_ok(struct bnxt *bp)
return false;
}
-/* rtnl_lock is acquired before calling this function */
+/* netdev instance lock is acquired before calling this function */
static void bnxt_force_fw_reset(struct bnxt *bp)
{
struct bnxt_fw_health *fw_health = bp->fw_health;
@@ -13777,9 +14293,9 @@ void bnxt_fw_exception(struct bnxt *bp)
netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n");
set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
bnxt_ulp_stop(bp);
- bnxt_rtnl_lock_sp(bp);
+ bnxt_lock_sp(bp);
bnxt_force_fw_reset(bp);
- bnxt_rtnl_unlock_sp(bp);
+ bnxt_unlock_sp(bp);
}
/* Returns the number of registered VFs, or 1 if VF configuration is pending, or
@@ -13809,7 +14325,7 @@ static int bnxt_get_registered_vfs(struct bnxt *bp)
void bnxt_fw_reset(struct bnxt *bp)
{
bnxt_ulp_stop(bp);
- bnxt_rtnl_lock_sp(bp);
+ bnxt_lock_sp(bp);
if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
@@ -13832,7 +14348,7 @@ void bnxt_fw_reset(struct bnxt *bp)
netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n",
n);
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
- dev_close(bp->dev);
+ netif_close(bp->dev);
goto fw_reset_exit;
} else if (n > 0) {
u16 vf_tmo_dsecs = n * 10;
@@ -13855,7 +14371,7 @@ void bnxt_fw_reset(struct bnxt *bp)
bnxt_queue_fw_reset_work(bp, tmo);
}
fw_reset_exit:
- bnxt_rtnl_unlock_sp(bp);
+ bnxt_unlock_sp(bp);
}
static void bnxt_chk_missed_irq(struct bnxt *bp)
@@ -14054,7 +14570,7 @@ static void bnxt_sp_task(struct work_struct *work)
static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
int *max_cp);
-/* Under rtnl_lock */
+/* Under netdev instance lock */
int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
int tx_xdp)
{
@@ -14446,8 +14962,8 @@ static void bnxt_fw_reset_abort(struct bnxt *bp, int rc)
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF)
bnxt_dl_health_fw_status_update(bp, false);
- bp->fw_reset_state = 0;
- dev_close(bp->dev);
+ bp->fw_reset_state = BNXT_FW_RESET_STATE_ABORT;
+ netif_close(bp->dev);
}
static void bnxt_fw_reset_task(struct work_struct *work)
@@ -14482,10 +14998,10 @@ static void bnxt_fw_reset_task(struct work_struct *work)
return;
}
bp->fw_reset_timestamp = jiffies;
- rtnl_lock();
+ netdev_lock(bp->dev);
if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
bnxt_fw_reset_abort(bp, rc);
- rtnl_unlock();
+ netdev_unlock(bp->dev);
goto ulp_start;
}
bnxt_fw_reset_close(bp);
@@ -14496,7 +15012,7 @@ static void bnxt_fw_reset_task(struct work_struct *work)
bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
tmo = bp->fw_reset_min_dsecs * HZ / 10;
}
- rtnl_unlock();
+ netdev_unlock(bp->dev);
bnxt_queue_fw_reset_work(bp, tmo);
return;
}
@@ -14574,10 +15090,12 @@ static void bnxt_fw_reset_task(struct work_struct *work)
bnxt_queue_fw_reset_work(bp, HZ / 10);
return;
}
+ netdev_lock(bp->dev);
rc = bnxt_open(bp->dev);
if (rc) {
netdev_err(bp->dev, "bnxt_open() failed during FW reset\n");
bnxt_fw_reset_abort(bp, rc);
+ netdev_unlock(bp->dev);
rtnl_unlock();
goto ulp_start;
}
@@ -14597,13 +15115,14 @@ static void bnxt_fw_reset_task(struct work_struct *work)
bnxt_dl_health_fw_recovery_done(bp);
bnxt_dl_health_fw_status_update(bp, true);
}
+ netdev_unlock(bp->dev);
rtnl_unlock();
bnxt_ulp_start(bp, 0);
bnxt_reenable_sriov(bp);
- rtnl_lock();
+ netdev_lock(bp->dev);
bnxt_vf_reps_alloc(bp);
bnxt_vf_reps_open(bp);
- rtnl_unlock();
+ netdev_unlock(bp->dev);
break;
}
return;
@@ -14616,9 +15135,9 @@ fw_reset_abort_status:
netdev_err(bp->dev, "fw_health_status 0x%x\n", sts);
}
fw_reset_abort:
- rtnl_lock();
+ netdev_lock(bp->dev);
bnxt_fw_reset_abort(bp, rc);
- rtnl_unlock();
+ netdev_unlock(bp->dev);
ulp_start:
bnxt_ulp_start(bp, rc);
}
@@ -14710,13 +15229,14 @@ init_err:
return rc;
}
-/* rtnl_lock held */
static int bnxt_change_mac_addr(struct net_device *dev, void *p)
{
struct sockaddr *addr = p;
struct bnxt *bp = netdev_priv(dev);
int rc = 0;
+ netdev_assert_locked(dev);
+
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
@@ -14737,11 +15257,12 @@ static int bnxt_change_mac_addr(struct net_device *dev, void *p)
return rc;
}
-/* rtnl_lock held */
static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
{
struct bnxt *bp = netdev_priv(dev);
+ netdev_assert_locked(dev);
+
if (netif_running(dev))
bnxt_close_nic(bp, true, false);
@@ -15236,6 +15757,9 @@ static void bnxt_get_queue_stats_rx(struct net_device *dev, int i,
struct bnxt_cp_ring_info *cpr;
u64 *sw;
+ if (!bp->bnapi)
+ return;
+
cpr = &bp->bnapi[i]->cp_ring;
sw = cpr->stats.sw_stats;
@@ -15259,6 +15783,9 @@ static void bnxt_get_queue_stats_tx(struct net_device *dev, int i,
struct bnxt_napi *bnapi;
u64 *sw;
+ if (!bp->tx_ring)
+ return;
+
bnapi = bp->tx_ring[bp->tx_ring_map[i]].bnapi;
sw = bnapi->cp_ring.stats.sw_stats;
@@ -15293,19 +15820,6 @@ static const struct netdev_stat_ops bnxt_stat_ops = {
.get_base_stats = bnxt_get_base_stats,
};
-static int bnxt_alloc_rx_agg_bmap(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
-{
- u16 mem_size;
-
- rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
- mem_size = rxr->rx_agg_bmap_size / 8;
- rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
- if (!rxr->rx_agg_bmap)
- return -ENOMEM;
-
- return 0;
-}
-
static int bnxt_queue_mem_alloc(struct net_device *dev, void *qmem, int idx)
{
struct bnxt_rx_ring_info *rxr, *clone;
@@ -15313,6 +15827,9 @@ static int bnxt_queue_mem_alloc(struct net_device *dev, void *qmem, int idx)
struct bnxt_ring_struct *ring;
int rc;
+ if (!bp->rx_ring)
+ return -ENETDOWN;
+
rxr = &bp->rx_ring[idx];
clone = qmem;
memcpy(clone, rxr, sizeof(*rxr));
@@ -15323,6 +15840,7 @@ static int bnxt_queue_mem_alloc(struct net_device *dev, void *qmem, int idx)
clone->rx_agg_prod = 0;
clone->rx_sw_agg_prod = 0;
clone->rx_next_cons = 0;
+ clone->need_head_pool = false;
rc = bnxt_alloc_rx_page_pool(bp, clone, rxr->page_pool->p.nid);
if (rc)
@@ -15354,15 +15872,25 @@ static int bnxt_queue_mem_alloc(struct net_device *dev, void *qmem, int idx)
goto err_free_rx_agg_ring;
}
+ if (bp->flags & BNXT_FLAG_TPA) {
+ rc = bnxt_alloc_one_tpa_info(bp, clone);
+ if (rc)
+ goto err_free_tpa_info;
+ }
+
bnxt_init_one_rx_ring_rxbd(bp, clone);
bnxt_init_one_rx_agg_ring_rxbd(bp, clone);
bnxt_alloc_one_rx_ring_skb(bp, clone, idx);
if (bp->flags & BNXT_FLAG_AGG_RINGS)
- bnxt_alloc_one_rx_ring_page(bp, clone, idx);
+ bnxt_alloc_one_rx_ring_netmem(bp, clone, idx);
+ if (bp->flags & BNXT_FLAG_TPA)
+ bnxt_alloc_one_tpa_info_data(bp, clone);
return 0;
+err_free_tpa_info:
+ bnxt_free_one_tpa_info(bp, clone);
err_free_rx_agg_ring:
bnxt_free_ring(bp, &clone->rx_agg_ring_struct.ring_mem);
err_free_rx_ring:
@@ -15370,9 +15898,11 @@ err_free_rx_ring:
err_rxq_info_unreg:
xdp_rxq_info_unreg(&clone->xdp_rxq);
err_page_pool_destroy:
- clone->page_pool->p.napi = NULL;
page_pool_destroy(clone->page_pool);
+ if (bnxt_separate_head_pool(clone))
+ page_pool_destroy(clone->head_pool);
clone->page_pool = NULL;
+ clone->head_pool = NULL;
return rc;
}
@@ -15382,13 +15912,16 @@ static void bnxt_queue_mem_free(struct net_device *dev, void *qmem)
struct bnxt *bp = netdev_priv(dev);
struct bnxt_ring_struct *ring;
- bnxt_free_one_rx_ring(bp, rxr);
- bnxt_free_one_rx_agg_ring(bp, rxr);
+ bnxt_free_one_rx_ring_skbs(bp, rxr);
+ bnxt_free_one_tpa_info(bp, rxr);
xdp_rxq_info_unreg(&rxr->xdp_rxq);
page_pool_destroy(rxr->page_pool);
+ if (bnxt_separate_head_pool(rxr))
+ page_pool_destroy(rxr->head_pool);
rxr->page_pool = NULL;
+ rxr->head_pool = NULL;
ring = &rxr->rx_ring_struct;
bnxt_free_ring(bp, &ring->ring_mem);
@@ -15461,7 +15994,9 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
struct bnxt_rx_ring_info *rxr, *clone;
struct bnxt_cp_ring_info *cpr;
struct bnxt_vnic_info *vnic;
+ struct bnxt_napi *bnapi;
int i, rc;
+ u16 mru;
rxr = &bp->rx_ring[idx];
clone = qmem;
@@ -15470,43 +16005,64 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
rxr->rx_agg_prod = clone->rx_agg_prod;
rxr->rx_sw_agg_prod = clone->rx_sw_agg_prod;
rxr->rx_next_cons = clone->rx_next_cons;
+ rxr->rx_tpa = clone->rx_tpa;
+ rxr->rx_tpa_idx_map = clone->rx_tpa_idx_map;
rxr->page_pool = clone->page_pool;
+ rxr->head_pool = clone->head_pool;
rxr->xdp_rxq = clone->xdp_rxq;
+ rxr->need_head_pool = clone->need_head_pool;
bnxt_copy_rx_ring(bp, rxr, clone);
+ bnapi = rxr->bnapi;
+ cpr = &bnapi->cp_ring;
+
+ /* All rings have been reserved and previously allocated.
+ * Reallocating with the same parameters should never fail.
+ */
rc = bnxt_hwrm_rx_ring_alloc(bp, rxr);
if (rc)
- return rc;
+ goto err_reset;
+
+ if (bp->tph_mode) {
+ rc = bnxt_hwrm_cp_ring_alloc_p5(bp, rxr->rx_cpr);
+ if (rc)
+ goto err_reset;
+ }
+
rc = bnxt_hwrm_rx_agg_ring_alloc(bp, rxr);
if (rc)
- goto err_free_hwrm_rx_ring;
+ goto err_reset;
bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
if (bp->flags & BNXT_FLAG_AGG_RINGS)
bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
- cpr = &rxr->bnapi->cp_ring;
- cpr->sw_stats->rx.rx_resets++;
+ if (bp->flags & BNXT_FLAG_SHARED_RINGS) {
+ rc = bnxt_tx_queue_start(bp, idx);
+ if (rc)
+ goto err_reset;
+ }
+
+ napi_enable_locked(&bnapi->napi);
+ bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
- for (i = 0; i <= BNXT_VNIC_NTUPLE; i++) {
+ mru = bp->dev->mtu + ETH_HLEN + VLAN_HLEN;
+ for (i = 0; i < bp->nr_vnics; i++) {
vnic = &bp->vnic_info[i];
- rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true);
- if (rc) {
- netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
- vnic->vnic_id, rc);
+ rc = bnxt_set_vnic_mru_p5(bp, vnic, mru, idx);
+ if (rc)
return rc;
- }
- vnic->mru = bp->dev->mtu + ETH_HLEN + VLAN_HLEN;
- bnxt_hwrm_vnic_update(bp, vnic,
- VNIC_UPDATE_REQ_ENABLES_MRU_VALID);
}
+ return bnxt_set_rss_ctx_vnic_mru(bp, mru, idx);
- return 0;
-
-err_free_hwrm_rx_ring:
- bnxt_hwrm_rx_ring_free(bp, rxr, false);
+err_reset:
+ netdev_err(bp->dev, "Unexpected HWRM error during queue start rc: %d\n",
+ rc);
+ napi_enable_locked(&bnapi->napi);
+ bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
+ netif_close(dev);
return rc;
}
@@ -15514,21 +16070,43 @@ static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)
{
struct bnxt *bp = netdev_priv(dev);
struct bnxt_rx_ring_info *rxr;
+ struct bnxt_cp_ring_info *cpr;
struct bnxt_vnic_info *vnic;
+ struct bnxt_napi *bnapi;
int i;
- for (i = 0; i <= BNXT_VNIC_NTUPLE; i++) {
+ for (i = 0; i < bp->nr_vnics; i++) {
vnic = &bp->vnic_info[i];
- vnic->mru = 0;
- bnxt_hwrm_vnic_update(bp, vnic,
- VNIC_UPDATE_REQ_ENABLES_MRU_VALID);
- }
+ bnxt_set_vnic_mru_p5(bp, vnic, 0, idx);
+ }
+ bnxt_set_rss_ctx_vnic_mru(bp, 0, idx);
+ /* Make sure NAPI sees that the VNIC is disabled */
+ synchronize_net();
rxr = &bp->rx_ring[idx];
+ bnapi = rxr->bnapi;
+ cpr = &bnapi->cp_ring;
+ cancel_work_sync(&cpr->dim.work);
bnxt_hwrm_rx_ring_free(bp, rxr, false);
bnxt_hwrm_rx_agg_ring_free(bp, rxr, false);
- rxr->rx_next_cons = 0;
page_pool_disable_direct_recycling(rxr->page_pool);
+ if (bnxt_separate_head_pool(rxr))
+ page_pool_disable_direct_recycling(rxr->head_pool);
+
+ if (bp->flags & BNXT_FLAG_SHARED_RINGS)
+ bnxt_tx_queue_stop(bp, idx);
+
+ /* Disable NAPI now after freeing the rings because HWRM_RING_FREE
+ * completion is handled in NAPI to guarantee no more DMA on that ring
+ * after seeing the completion.
+ */
+ napi_disable_locked(&bnapi->napi);
+
+ if (bp->tph_mode) {
+ bnxt_hwrm_cp_ring_free(bp, rxr->rx_cpr);
+ bnxt_clear_one_cp_ring(bp, rxr->rx_cpr);
+ }
+ bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
memcpy(qmem, rxr, sizeof(*rxr));
bnxt_init_rx_ring_struct(bp, qmem);
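For orientation, a sketch of how these callbacks are typically wired into struct netdev_queue_mgmt_ops; the field names follow my understanding of the core API, and the actual bnxt_queue_mgmt_ops definition lies outside the quoted hunks:

static const struct netdev_queue_mgmt_ops example_queue_mgmt_ops = {
	.ndo_queue_mem_size	= sizeof(struct bnxt_rx_ring_info),
	.ndo_queue_mem_alloc	= bnxt_queue_mem_alloc,
	.ndo_queue_mem_free	= bnxt_queue_mem_free,
	.ndo_queue_start	= bnxt_queue_start,
	.ndo_queue_stop		= bnxt_queue_stop,
};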
@@ -15554,8 +16132,8 @@ static void bnxt_remove_one(struct pci_dev *pdev)
bnxt_rdma_aux_device_del(bp);
- bnxt_ptp_clear(bp);
unregister_netdev(dev);
+ bnxt_ptp_clear(bp);
bnxt_rdma_aux_device_uninit(bp);
@@ -15607,6 +16185,10 @@ static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
bp->dev->priv_flags |= IFF_SUPP_NOFCS;
else
bp->dev->priv_flags &= ~IFF_SUPP_NOFCS;
+
+ bp->mac_flags = 0;
+ bnxt_hwrm_mac_qcaps(bp);
+
if (!fw_dflt)
return 0;
@@ -15844,7 +16426,7 @@ int bnxt_restore_pf_fw_resources(struct bnxt *bp)
{
int rc;
- ASSERT_RTNL();
+ netdev_ops_assert_locked(bp->dev);
bnxt_hwrm_func_qcaps(bp);
if (netif_running(bp->dev))
@@ -15857,7 +16439,7 @@ int bnxt_restore_pf_fw_resources(struct bnxt *bp)
if (netif_running(bp->dev)) {
if (rc)
- dev_close(bp->dev);
+ netif_close(bp->dev);
else
rc = bnxt_open_nic(bp, true, false);
}
@@ -16135,8 +16717,9 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (bp->max_fltr < BNXT_MAX_FLTR)
bp->max_fltr = BNXT_MAX_FLTR;
bnxt_init_l2_fltr_tbl(bp);
- bnxt_set_rx_skb_mode(bp, false);
+ __bnxt_set_rx_skb_mode(bp, false);
bnxt_set_tpa_flags(bp);
+ bnxt_init_ring_params(bp);
bnxt_set_ring_params(bp);
bnxt_rdma_aux_device_init(bp);
rc = bnxt_set_dflt_rings(bp, true);
@@ -16193,6 +16776,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
bp->rss_cap |= BNXT_RSS_CAP_MULTI_RSS_CTX;
if (BNXT_SUPPORTS_QUEUE_API(bp))
dev->queue_mgmt_ops = &bnxt_queue_mgmt_ops;
+ dev->request_ops_lock = true;
rc = register_netdev(dev);
if (rc)
@@ -16244,12 +16828,13 @@ static void bnxt_shutdown(struct pci_dev *pdev)
return;
rtnl_lock();
+ netdev_lock(dev);
bp = netdev_priv(dev);
if (!bp)
goto shutdown_exit;
if (netif_running(dev))
- dev_close(dev);
+ netif_close(dev);
bnxt_ptp_clear(bp);
bnxt_clear_int_mode(bp);
@@ -16261,6 +16846,7 @@ static void bnxt_shutdown(struct pci_dev *pdev)
}
shutdown_exit:
+ netdev_unlock(dev);
rtnl_unlock();
}
@@ -16273,7 +16859,7 @@ static int bnxt_suspend(struct device *device)
bnxt_ulp_stop(bp);
- rtnl_lock();
+ netdev_lock(dev);
if (netif_running(dev)) {
netif_device_detach(dev);
rc = bnxt_close(dev);
@@ -16282,7 +16868,7 @@ static int bnxt_suspend(struct device *device)
bnxt_ptp_clear(bp);
pci_disable_device(bp->pdev);
bnxt_free_ctx_mem(bp, false);
- rtnl_unlock();
+ netdev_unlock(dev);
return rc;
}
@@ -16293,6 +16879,7 @@ static int bnxt_resume(struct device *device)
int rc = 0;
rtnl_lock();
+ netdev_lock(dev);
rc = pci_enable_device(bp->pdev);
if (rc) {
netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n",
@@ -16335,6 +16922,7 @@ static int bnxt_resume(struct device *device)
}
resume_exit:
+ netdev_unlock(bp->dev);
rtnl_unlock();
bnxt_ulp_start(bp, rc);
if (!rc)
@@ -16370,7 +16958,7 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
bnxt_ulp_stop(bp);
- rtnl_lock();
+ netdev_lock(netdev);
netif_device_detach(netdev);
if (test_and_set_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
@@ -16379,7 +16967,7 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
}
if (abort || state == pci_channel_io_perm_failure) {
- rtnl_unlock();
+ netdev_unlock(netdev);
return PCI_ERS_RESULT_DISCONNECT;
}
@@ -16398,7 +16986,7 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
if (pci_is_enabled(pdev))
pci_disable_device(pdev);
bnxt_free_ctx_mem(bp, false);
- rtnl_unlock();
+ netdev_unlock(netdev);
/* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
@@ -16428,7 +17016,7 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
test_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state))
msleep(900);
- rtnl_lock();
+ netdev_lock(netdev);
if (pci_enable_device(pdev)) {
dev_err(&pdev->dev,
@@ -16474,16 +17062,15 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
if (!err)
result = PCI_ERS_RESULT_RECOVERED;
+ /* IRQ will be initialized later in bnxt_io_resume */
bnxt_ulp_irq_stop(bp);
bnxt_clear_int_mode(bp);
- err = bnxt_init_int_mode(bp);
- bnxt_ulp_irq_restart(bp, err);
}
reset_exit:
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
bnxt_clear_reservations(bp, true);
- rtnl_unlock();
+ netdev_unlock(netdev);
return result;
}
@@ -16503,18 +17090,23 @@ static void bnxt_io_resume(struct pci_dev *pdev)
netdev_info(bp->dev, "PCI Slot Resume\n");
rtnl_lock();
+ netdev_lock(netdev);
err = bnxt_hwrm_func_qcaps(bp);
if (!err) {
- if (netif_running(netdev))
+ if (netif_running(netdev)) {
err = bnxt_open(netdev);
- else
+ } else {
err = bnxt_reserve_rings(bp, true);
+ if (!err)
+ err = bnxt_init_int_mode(bp);
+ }
}
if (!err)
netif_device_attach(netdev);
+ netdev_unlock(netdev);
rtnl_unlock();
bnxt_ulp_start(bp, err);
if (!err)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 23f1aff214b4..fda0d3cc6227 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -34,6 +34,9 @@
#include <linux/firmware/broadcom/tee_bnxt_fw.h>
#endif
+#define BNXT_DEFAULT_RX_COPYBREAK 256
+#define BNXT_MAX_RX_COPYBREAK 1024
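A short sketch of how these bounds are presumably applied when the copybreak value is set (an assumption; the ethtool plumbing is outside the quoted hunks):

static int example_set_rx_copybreak(struct bnxt *bp, u32 val)
{
	/* Packets up to rx_copybreak bytes are copied into a small skb
	 * instead of being built around the ring buffer.
	 */
	if (val > BNXT_MAX_RX_COPYBREAK)
		return -ERANGE;
	bp->rx_copybreak = val;
	return 0;
}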
+
extern struct list_head bnxt_block_cb_list;
struct page_pool;
@@ -79,6 +82,12 @@ struct tx_bd {
#define TX_OPAQUE_PROD(bp, opq) ((TX_OPAQUE_IDX(opq) + TX_OPAQUE_BDS(opq)) &\
(bp)->tx_ring_mask)
+#define TX_BD_CNT(n) (((n) << TX_BD_FLAGS_BD_CNT_SHIFT) & TX_BD_FLAGS_BD_CNT)
+
+#define TX_MAX_BD_CNT 32
+
+#define TX_MAX_FRAGS (TX_MAX_BD_CNT - 2)
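The arithmetic behind the new limit, stated as an assumption about the TX BD layout (one BD for the first buffer plus one long/extended BD per packet):

/* bds_per_packet = 2 + nr_frags, so with TX_MAX_BD_CNT == 32 a packet
 * may carry at most TX_MAX_FRAGS == 30 page fragments.
 */
#define EXAMPLE_BDS_PER_PKT(nr_frags)	((nr_frags) + 2)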
+
struct tx_bd_ext {
__le32 tx_bd_hsize_lflags;
#define TX_BD_FLAGS_TCP_UDP_CHKSUM (1 << 0)
@@ -267,6 +276,9 @@ struct rx_cmp {
(((le32_to_cpu((rxcmp)->rx_cmp_misc_v1) & RX_CMP_RSS_HASH_TYPE) >>\
RX_CMP_RSS_HASH_TYPE_SHIFT) & RSS_PROFILE_ID_MASK)
+#define RX_CMP_ITYPES(rxcmp) \
+ (le32_to_cpu((rxcmp)->rx_cmp_len_flags_type) & RX_CMP_FLAGS_ITYPES_MASK)
+
#define RX_CMP_V3_HASH_TYPE_LEGACY(rxcmp) \
((le32_to_cpu((rxcmp)->rx_cmp_misc_v1) & RX_CMP_V3_RSS_EXT_OP_LEGACY) >>\
RX_CMP_V3_RSS_EXT_OP_LEGACY_SHIFT)
@@ -378,7 +390,7 @@ struct rx_agg_cmp {
u32 rx_agg_cmp_opaque;
__le32 rx_agg_cmp_v;
#define RX_AGG_CMP_V (1 << 0)
- #define RX_AGG_CMP_AGG_ID (0xffff << 16)
+ #define RX_AGG_CMP_AGG_ID (0x0fff << 16)
#define RX_AGG_CMP_AGG_ID_SHIFT 16
__le32 rx_agg_cmp_unused;
};
@@ -416,7 +428,7 @@ struct rx_tpa_start_cmp {
#define RX_TPA_START_CMP_V3_RSS_HASH_TYPE_SHIFT 7
#define RX_TPA_START_CMP_AGG_ID (0x7f << 25)
#define RX_TPA_START_CMP_AGG_ID_SHIFT 25
- #define RX_TPA_START_CMP_AGG_ID_P5 (0xffff << 16)
+ #define RX_TPA_START_CMP_AGG_ID_P5 (0x0fff << 16)
#define RX_TPA_START_CMP_AGG_ID_SHIFT_P5 16
#define RX_TPA_START_CMP_METADATA1 (0xf << 28)
#define RX_TPA_START_CMP_METADATA1_SHIFT 28
@@ -540,7 +552,7 @@ struct rx_tpa_end_cmp {
#define RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT 16
#define RX_TPA_END_CMP_AGG_ID (0x7f << 25)
#define RX_TPA_END_CMP_AGG_ID_SHIFT 25
- #define RX_TPA_END_CMP_AGG_ID_P5 (0xffff << 16)
+ #define RX_TPA_END_CMP_AGG_ID_P5 (0x0fff << 16)
#define RX_TPA_END_CMP_AGG_ID_SHIFT_P5 16
__le32 rx_tpa_end_cmp_tsdelta;
@@ -891,7 +903,7 @@ struct bnxt_sw_rx_bd {
};
struct bnxt_sw_rx_agg_bd {
- struct page *page;
+ netmem_ref netmem;
unsigned int offset;
dma_addr_t mapping;
};
@@ -1094,6 +1106,7 @@ struct bnxt_rx_ring_info {
unsigned long *rx_agg_bmap;
u16 rx_agg_bmap_size;
+ bool need_head_pool;
dma_addr_t rx_desc_mapping[MAX_RX_PAGES];
dma_addr_t rx_agg_desc_mapping[MAX_RX_AGG_PAGES];
@@ -1228,6 +1241,11 @@ struct bnxt_irq {
u8 have_cpumask:1;
char name[IFNAMSIZ + BNXT_IRQ_NAME_EXTRA];
cpumask_var_t cpu_mask;
+
+ struct bnxt *bp;
+ int msix_nr;
+ int ring_nr;
+ struct irq_affinity_notify affinity_notify;
};
#define HWRM_RING_ALLOC_TX 0x1
@@ -2238,8 +2256,6 @@ struct bnxt {
#define BNXT_FLAG_TPA (BNXT_FLAG_LRO | BNXT_FLAG_GRO)
#define BNXT_FLAG_JUMBO 0x10
#define BNXT_FLAG_STRIP_VLAN 0x20
- #define BNXT_FLAG_AGG_RINGS (BNXT_FLAG_JUMBO | BNXT_FLAG_GRO | \
- BNXT_FLAG_LRO)
#define BNXT_FLAG_RFS 0x100
#define BNXT_FLAG_SHARED_RINGS 0x200
#define BNXT_FLAG_PORT_STATS 0x400
@@ -2260,6 +2276,9 @@ struct bnxt {
#define BNXT_FLAG_ROCE_MIRROR_CAP 0x4000000
#define BNXT_FLAG_TX_COAL_CMPL 0x8000000
#define BNXT_FLAG_PORT_STATS_EXT 0x10000000
+ #define BNXT_FLAG_HDS 0x20000000
+ #define BNXT_FLAG_AGG_RINGS (BNXT_FLAG_JUMBO | BNXT_FLAG_GRO | \
+ BNXT_FLAG_LRO | BNXT_FLAG_HDS)
#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \
BNXT_FLAG_RFS | \
@@ -2267,6 +2286,11 @@ struct bnxt {
#define BNXT_PF(bp) (!((bp)->flags & BNXT_FLAG_VF))
#define BNXT_VF(bp) ((bp)->flags & BNXT_FLAG_VF)
+#ifdef CONFIG_BNXT_SRIOV
+#define BNXT_VF_IS_TRUSTED(bp) ((bp)->vf.flags & BNXT_VF_TRUST)
+#else
+#define BNXT_VF_IS_TRUSTED(bp) 0
+#endif
#define BNXT_NPAR(bp) ((bp)->port_partition_type)
#define BNXT_MH(bp) ((bp)->flags & BNXT_FLAG_MULTI_HOST)
#define BNXT_SINGLE_PF(bp) (BNXT_PF(bp) && !BNXT_NPAR(bp) && !BNXT_MH(bp))
@@ -2339,7 +2363,7 @@ struct bnxt {
enum dma_data_direction rx_dir;
u32 rx_ring_size;
u32 rx_agg_ring_size;
- u32 rx_copy_thresh;
+ u32 rx_copybreak;
u32 rx_ring_mask;
u32 rx_agg_ring_mask;
int rx_nr_pages;
@@ -2398,6 +2422,8 @@ struct bnxt {
u8 max_q;
u8 num_tc;
+ u8 tph_mode;
+
unsigned int current_interval;
#define BNXT_TIMER_INTERVAL HZ
@@ -2479,6 +2505,8 @@ struct bnxt {
#define BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO BIT_ULL(38)
#define BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3 BIT_ULL(39)
#define BNXT_FW_CAP_VNIC_RE_FLUSH BIT_ULL(40)
+ #define BNXT_FW_CAP_SW_MAX_RESOURCE_LIMITS BIT_ULL(41)
+ #define BNXT_FW_CAP_NPAR_1_2 BIT_ULL(42)
u32 fw_dbg_cap;
@@ -2498,6 +2526,8 @@ struct bnxt {
((bp)->fw_cap & BNXT_FW_CAP_ENABLE_RDMA_SRIOV)
#define BNXT_ROCE_VF_RESC_CAP(bp) \
((bp)->fw_cap & BNXT_FW_CAP_ROCE_VF_RESC_MGMT_SUPPORTED)
+#define BNXT_SW_RES_LMT(bp) \
+ ((bp)->fw_cap & BNXT_FW_CAP_SW_MAX_RESOURCE_LIMITS)
u32 hwrm_spec_code;
u16 hwrm_cmd_seq;
@@ -2585,6 +2615,7 @@ struct bnxt {
#define BNXT_FW_RESET_STATE_POLL_FW 4
#define BNXT_FW_RESET_STATE_OPENING 5
#define BNXT_FW_RESET_STATE_POLL_FW_DOWN 6
+#define BNXT_FW_RESET_STATE_ABORT 7
u16 fw_reset_min_dsecs;
#define BNXT_DFLT_FW_RST_MIN_DSECS 20
@@ -2657,6 +2688,11 @@ struct bnxt {
#define BNXT_PHY_FL_BANK_SEL (PORT_PHY_QCAPS_RESP_FLAGS2_BANK_ADDR_SUPPORTED << 8)
#define BNXT_PHY_FL_SPEEDS2 (PORT_PHY_QCAPS_RESP_FLAGS2_SPEEDS2_SUPPORTED << 8)
+ /* copied from flags in hwrm_port_mac_qcaps_output */
+ u8 mac_flags;
+#define BNXT_MAC_FL_NO_MAC_LPBK \
+ PORT_MAC_QCAPS_RESP_FLAGS_LOCAL_LPBK_NOT_SUPPORTED
+
u8 num_tests;
struct bnxt_test_info *test_info;
@@ -2669,6 +2705,7 @@ struct bnxt {
#define BNXT_DUMP_LIVE 0
#define BNXT_DUMP_CRASH 1
#define BNXT_DUMP_DRIVER 2
+#define BNXT_DUMP_LIVE_WITH_CTX_L1_CACHE 3
struct bpf_prog *xdp_prog;
@@ -2759,6 +2796,8 @@ struct bnxt {
#define SFF_MODULE_ID_QSFP28 0x11
#define BNXT_MAX_PHY_I2C_RESP_SIZE 64
+#define BNXT_HDS_THRESHOLD_MAX 1023
+
static inline u32 bnxt_tx_avail(struct bnxt *bp,
const struct bnxt_tx_ring_info *txr)
{
@@ -2843,7 +2882,7 @@ u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx);
bool bnxt_bs_trace_avail(struct bnxt *bp, u16 type);
void bnxt_set_tpa_flags(struct bnxt *bp);
void bnxt_set_ring_params(struct bnxt *);
-int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode);
+void bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode);
void bnxt_insert_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr);
void bnxt_del_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr);
int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
index 7236d8e548ab..ce97befd3cb3 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
@@ -110,20 +110,30 @@ static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg,
}
}
+ if (cmn_req->req_type ==
+ cpu_to_le16(HWRM_DBG_COREDUMP_RETRIEVE))
+ info->dest_buf_size += len;
+
if (info->dest_buf) {
if ((info->seg_start + off + len) <=
BNXT_COREDUMP_BUF_LEN(info->buf_len)) {
- memcpy(info->dest_buf + off, dma_buf, len);
+ u16 copylen = min_t(u16, len,
+ info->dest_buf_size - off);
+
+ memcpy(info->dest_buf + off, dma_buf, copylen);
+ if (copylen < len)
+ break;
} else {
rc = -ENOBUFS;
+ if (cmn_req->req_type ==
+ cpu_to_le16(HWRM_DBG_COREDUMP_LIST)) {
+ kfree(info->dest_buf);
+ info->dest_buf = NULL;
+ }
break;
}
}
- if (cmn_req->req_type ==
- cpu_to_le16(HWRM_DBG_COREDUMP_RETRIEVE))
- info->dest_buf_size += len;
-
if (!(cmn_resp->flags & HWRM_DBG_CMN_FLAGS_MORE))
break;
@@ -159,8 +169,8 @@ static int bnxt_hwrm_dbg_coredump_list(struct bnxt *bp,
return rc;
}
-static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 component_id,
- u16 segment_id)
+static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 dump_type,
+ u16 component_id, u16 segment_id)
{
struct hwrm_dbg_coredump_initiate_input *req;
int rc;
@@ -172,6 +182,8 @@ static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 component_id,
hwrm_req_timeout(bp, req, bp->hwrm_cmd_max_timeout);
req->component_id = cpu_to_le16(component_id);
req->segment_id = cpu_to_le16(segment_id);
+ if (dump_type == BNXT_DUMP_LIVE_WITH_CTX_L1_CACHE)
+ req->seg_flags = DBG_COREDUMP_INITIATE_REQ_SEG_FLAGS_COLLECT_CTX_L1_CACHE;
return hwrm_req_send(bp, req);
}
@@ -450,7 +462,8 @@ static int __bnxt_get_coredump(struct bnxt *bp, u16 dump_type, void *buf,
start = jiffies;
- rc = bnxt_hwrm_dbg_coredump_initiate(bp, comp_id, seg_id);
+ rc = bnxt_hwrm_dbg_coredump_initiate(bp, dump_type, comp_id,
+ seg_id);
if (rc) {
netdev_err(bp->dev,
"Failed to initiate coredump for seg = %d\n",
@@ -493,9 +506,16 @@ err:
start_utc, coredump.total_segs + 1,
rc);
kfree(coredump.data);
- *dump_len += sizeof(struct bnxt_coredump_record);
- if (rc == -ENOBUFS)
+ if (!rc) {
+ *dump_len += sizeof(struct bnxt_coredump_record);
+ /* The actual coredump length can be smaller than the FW
+ * reported length earlier. Use the ethtool provided length.
+ */
+ if (buf_len)
+ *dump_len = buf_len;
+ } else if (rc == -ENOBUFS) {
netdev_err(bp->dev, "Firmware returned large coredump buffer\n");
+ }
return rc;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index ef8288fd68f4..777880594a04 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -11,6 +11,7 @@
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <net/devlink.h>
+#include <net/netdev_lock.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
@@ -439,14 +440,17 @@ static int bnxt_dl_reload_down(struct devlink *dl, bool netns_change,
case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: {
bnxt_ulp_stop(bp);
rtnl_lock();
+ netdev_lock(bp->dev);
if (bnxt_sriov_cfg(bp)) {
NL_SET_ERR_MSG_MOD(extack,
"reload is unsupported while VFs are allocated or being configured");
+ netdev_unlock(bp->dev);
rtnl_unlock();
bnxt_ulp_start(bp, 0);
return -EOPNOTSUPP;
}
if (bp->dev->reg_state == NETREG_UNREGISTERED) {
+ netdev_unlock(bp->dev);
rtnl_unlock();
bnxt_ulp_start(bp, 0);
return -ENODEV;
@@ -458,7 +462,8 @@ static int bnxt_dl_reload_down(struct devlink *dl, bool netns_change,
if (rc) {
NL_SET_ERR_MSG_MOD(extack, "Failed to deregister");
if (netif_running(bp->dev))
- dev_close(bp->dev);
+ netif_close(bp->dev);
+ netdev_unlock(bp->dev);
rtnl_unlock();
break;
}
@@ -479,7 +484,9 @@ static int bnxt_dl_reload_down(struct devlink *dl, bool netns_change,
return -EPERM;
}
rtnl_lock();
+ netdev_lock(bp->dev);
if (bp->dev->reg_state == NETREG_UNREGISTERED) {
+ netdev_unlock(bp->dev);
rtnl_unlock();
return -ENODEV;
}
@@ -493,6 +500,7 @@ static int bnxt_dl_reload_down(struct devlink *dl, bool netns_change,
if (rc) {
NL_SET_ERR_MSG_MOD(extack, "Failed to activate firmware");
clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
+ netdev_unlock(bp->dev);
rtnl_unlock();
}
break;
@@ -511,6 +519,8 @@ static int bnxt_dl_reload_up(struct devlink *dl, enum devlink_reload_action acti
struct bnxt *bp = bnxt_get_bp_from_dl(dl);
int rc = 0;
+ netdev_assert_locked(bp->dev);
+
*actions_performed = 0;
switch (action) {
case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: {
@@ -535,6 +545,7 @@ static int bnxt_dl_reload_up(struct devlink *dl, enum devlink_reload_action acti
if (!netif_running(bp->dev))
NL_SET_ERR_MSG_MOD(extack,
"Device is closed, not waiting for reset notice that will never come");
+ netdev_unlock(bp->dev);
rtnl_unlock();
while (test_bit(BNXT_STATE_FW_ACTIVATE, &bp->state)) {
if (time_after(jiffies, timeout)) {
@@ -550,6 +561,7 @@ static int bnxt_dl_reload_up(struct devlink *dl, enum devlink_reload_action acti
msleep(50);
}
rtnl_lock();
+ netdev_lock(bp->dev);
if (!rc)
*actions_performed |= BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
@@ -568,8 +580,9 @@ static int bnxt_dl_reload_up(struct devlink *dl, enum devlink_reload_action acti
}
*actions_performed |= BIT(action);
} else if (netif_running(bp->dev)) {
- dev_close(bp->dev);
+ netif_close(bp->dev);
}
+ netdev_unlock(bp->dev);
rtnl_unlock();
if (action == DEVLINK_RELOAD_ACTION_DRIVER_REINIT)
bnxt_ulp_start(bp, rc);
@@ -666,6 +679,8 @@ static const struct bnxt_dl_nvm_param nvm_params[] = {
NVM_OFF_MSIX_VEC_PER_PF_MAX, BNXT_NVM_SHARED_CFG, 10, 4},
{DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN,
NVM_OFF_MSIX_VEC_PER_PF_MIN, BNXT_NVM_SHARED_CFG, 7, 4},
+ {DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE, NVM_OFF_SUPPORT_RDMA,
+ BNXT_NVM_FUNC_CFG, 1, 1},
{BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK, NVM_OFF_DIS_GRE_VER_CHECK,
BNXT_NVM_SHARED_CFG, 1, 1},
};
@@ -1010,37 +1025,19 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
}
-static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
- union devlink_param_value *val)
+static int __bnxt_hwrm_nvm_req(struct bnxt *bp,
+ const struct bnxt_dl_nvm_param *nvm, void *msg,
+ union devlink_param_value *val)
{
struct hwrm_nvm_get_variable_input *req = msg;
- struct bnxt_dl_nvm_param nvm_param;
struct hwrm_err_output *resp;
union bnxt_nvm_data *data;
dma_addr_t data_dma_addr;
- int idx = 0, rc, i;
-
- /* Get/Set NVM CFG parameter is supported only on PFs */
- if (BNXT_VF(bp)) {
- hwrm_req_drop(bp, req);
- return -EPERM;
- }
-
- for (i = 0; i < ARRAY_SIZE(nvm_params); i++) {
- if (nvm_params[i].id == param_id) {
- nvm_param = nvm_params[i];
- break;
- }
- }
+ int idx = 0, rc;
- if (i == ARRAY_SIZE(nvm_params)) {
- hwrm_req_drop(bp, req);
- return -EOPNOTSUPP;
- }
-
- if (nvm_param.dir_type == BNXT_NVM_PORT_CFG)
+ if (nvm->dir_type == BNXT_NVM_PORT_CFG)
idx = bp->pf.port_id;
- else if (nvm_param.dir_type == BNXT_NVM_FUNC_CFG)
+ else if (nvm->dir_type == BNXT_NVM_FUNC_CFG)
idx = bp->pf.fw_fid - BNXT_FIRST_PF_FID;
data = hwrm_req_dma_slice(bp, req, sizeof(*data), &data_dma_addr);
@@ -1051,23 +1048,23 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
}
req->dest_data_addr = cpu_to_le64(data_dma_addr);
- req->data_len = cpu_to_le16(nvm_param.nvm_num_bits);
- req->option_num = cpu_to_le16(nvm_param.offset);
+ req->data_len = cpu_to_le16(nvm->nvm_num_bits);
+ req->option_num = cpu_to_le16(nvm->offset);
req->index_0 = cpu_to_le16(idx);
if (idx)
req->dimensions = cpu_to_le16(1);
resp = hwrm_req_hold(bp, req);
if (req->req_type == cpu_to_le16(HWRM_NVM_SET_VARIABLE)) {
- bnxt_copy_to_nvm_data(data, val, nvm_param.nvm_num_bits,
- nvm_param.dl_num_bytes);
+ bnxt_copy_to_nvm_data(data, val, nvm->nvm_num_bits,
+ nvm->dl_num_bytes);
rc = hwrm_req_send(bp, msg);
} else {
rc = hwrm_req_send_silent(bp, msg);
if (!rc) {
bnxt_copy_from_nvm_data(val, data,
- nvm_param.nvm_num_bits,
- nvm_param.dl_num_bytes);
+ nvm->nvm_num_bits,
+ nvm->dl_num_bytes);
} else {
if (resp->cmd_err ==
NVM_GET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST)
@@ -1080,6 +1077,27 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
return rc;
}
+static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
+ union devlink_param_value *val)
+{
+ struct hwrm_nvm_get_variable_input *req = msg;
+ const struct bnxt_dl_nvm_param *nvm_param;
+ int i;
+
+ /* Get/Set NVM CFG parameter is supported only on PFs */
+ if (BNXT_VF(bp)) {
+ hwrm_req_drop(bp, req);
+ return -EPERM;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(nvm_params); i++) {
+ nvm_param = &nvm_params[i];
+ if (nvm_param->id == param_id)
+ return __bnxt_hwrm_nvm_req(bp, nvm_param, msg, val);
+ }
+ return -EOPNOTSUPP;
+}
+
static int bnxt_dl_nvm_param_get(struct devlink *dl, u32 id,
struct devlink_param_gset_ctx *ctx)
{
@@ -1116,6 +1134,32 @@ static int bnxt_dl_nvm_param_set(struct devlink *dl, u32 id,
return bnxt_hwrm_nvm_req(bp, id, req, &ctx->val);
}
+static int bnxt_dl_roce_validate(struct devlink *dl, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ const struct bnxt_dl_nvm_param nvm_roce_cap = {0, NVM_OFF_RDMA_CAPABLE,
+ BNXT_NVM_SHARED_CFG, 1, 1};
+ struct bnxt *bp = bnxt_get_bp_from_dl(dl);
+ struct hwrm_nvm_get_variable_input *req;
+ union devlink_param_value roce_cap;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_NVM_GET_VARIABLE);
+ if (rc)
+ return rc;
+
+ if (__bnxt_hwrm_nvm_req(bp, &nvm_roce_cap, req, &roce_cap)) {
+ NL_SET_ERR_MSG_MOD(extack, "Unable to verify if device is RDMA Capable");
+ return -EINVAL;
+ }
+ if (!roce_cap.vbool) {
+ NL_SET_ERR_MSG_MOD(extack, "Device does not support RDMA");
+ return -EINVAL;
+ }
+ return 0;
+}
+
static int bnxt_dl_msix_validate(struct devlink *dl, u32 id,
union devlink_param_value val,
struct netlink_ext_ack *extack)
@@ -1180,6 +1224,10 @@ static const struct devlink_param bnxt_dl_params[] = {
BIT(DEVLINK_PARAM_CMODE_PERMANENT),
bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set,
bnxt_dl_msix_validate),
+ DEVLINK_PARAM_GENERIC(ENABLE_ROCE,
+ BIT(DEVLINK_PARAM_CMODE_PERMANENT),
+ bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set,
+ bnxt_dl_roce_validate),
DEVLINK_PARAM_DRIVER(BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK,
"gre_ver_check", DEVLINK_PARAM_TYPE_BOOL,
BIT(DEVLINK_PARAM_CMODE_PERMANENT),
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
index b8105065367b..7f45dcd7b287 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
@@ -41,8 +41,10 @@ static inline void bnxt_dl_set_remote_reset(struct devlink *dl, bool value)
#define NVM_OFF_MSIX_VEC_PER_PF_MAX 108
#define NVM_OFF_MSIX_VEC_PER_PF_MIN 114
#define NVM_OFF_IGNORE_ARI 164
+#define NVM_OFF_RDMA_CAPABLE 161
#define NVM_OFF_DIS_GRE_VER_CHECK 171
#define NVM_OFF_ENABLE_SRIOV 401
+#define NVM_OFF_SUPPORT_RDMA 506
#define NVM_OFF_NVM_CFG_VER 602
#define BNXT_NVM_CFG_VER_BITS 8
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index f1f6bb328a55..f5d490bf997e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -24,6 +24,7 @@
#include <linux/ptp_clock_kernel.h>
#include <linux/net_tstamp.h>
#include <linux/timecounter.h>
+#include <net/netdev_queues.h>
#include <net/netlink.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
@@ -833,6 +834,8 @@ static void bnxt_get_ringparam(struct net_device *dev,
ering->rx_pending = bp->rx_ring_size;
ering->rx_jumbo_pending = bp->rx_agg_ring_size;
ering->tx_pending = bp->tx_ring_size;
+
+ kernel_ering->hds_thresh_max = BNXT_HDS_THRESHOLD_MAX;
}
static int bnxt_set_ringparam(struct net_device *dev,
@@ -840,16 +843,35 @@ static int bnxt_set_ringparam(struct net_device *dev,
struct kernel_ethtool_ringparam *kernel_ering,
struct netlink_ext_ack *extack)
{
+ u8 tcp_data_split = kernel_ering->tcp_data_split;
struct bnxt *bp = netdev_priv(dev);
+ u8 hds_config_mod;
if ((ering->rx_pending > BNXT_MAX_RX_DESC_CNT) ||
(ering->tx_pending > BNXT_MAX_TX_DESC_CNT) ||
(ering->tx_pending < BNXT_MIN_TX_DESC_CNT))
return -EINVAL;
+ hds_config_mod = tcp_data_split != dev->cfg->hds_config;
+ if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_DISABLED && hds_config_mod)
+ return -EINVAL;
+
+ if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_ENABLED &&
+ hds_config_mod && BNXT_RX_PAGE_MODE(bp)) {
+ NL_SET_ERR_MSG_MOD(extack, "tcp-data-split is disallowed when XDP is attached");
+ return -EINVAL;
+ }
+
if (netif_running(dev))
bnxt_close_nic(bp, false, false);
+ if (hds_config_mod) {
+ if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_ENABLED)
+ bp->flags |= BNXT_FLAG_HDS;
+ else if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_UNKNOWN)
+ bp->flags &= ~BNXT_FLAG_HDS;
+ }
+
bp->rx_ring_size = ering->rx_pending;
bp->tx_ring_size = ering->tx_pending;
bnxt_set_ring_params(bp);
@@ -1187,10 +1209,14 @@ static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
}
}
- if (fltr->base.flags & BNXT_ACT_DROP)
+ if (fltr->base.flags & BNXT_ACT_DROP) {
fs->ring_cookie = RX_CLS_FLOW_DISC;
- else
+ } else if (fltr->base.flags & BNXT_ACT_RSS_CTX) {
+ fs->flow_type |= FLOW_RSS;
+ cmd->rss_context = fltr->base.fw_vnic_id;
+ } else {
fs->ring_cookie = fltr->base.rxq;
+ }
rc = 0;
fltr_err:
@@ -2036,6 +2062,17 @@ static int bnxt_get_regs_len(struct net_device *dev)
return reg_len;
}
+#define BNXT_PCIE_32B_ENTRY(start, end) \
+ { offsetof(struct pcie_ctx_hw_stats, start), \
+ offsetof(struct pcie_ctx_hw_stats, end) }
+
+static const struct {
+ u16 start;
+ u16 end;
+} bnxt_pcie_32b_entries[] = {
+ BNXT_PCIE_32B_ENTRY(pcie_ltssm_histogram[0], pcie_ltssm_histogram[3]),
+};
+
static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs,
void *_p)
{
@@ -2046,7 +2083,8 @@ static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs,
int rc;
regs->version = 0;
- bnxt_dbg_hwrm_rd_reg(bp, 0, BNXT_PXP_REG_LEN / 4, _p);
+ if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_REG_ACCESS_RESTRICTED))
+ bnxt_dbg_hwrm_rd_reg(bp, 0, BNXT_PXP_REG_LEN / 4, _p);
if (!(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED))
return;
@@ -2067,12 +2105,27 @@ static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs,
req->pcie_stat_host_addr = cpu_to_le64(hw_pcie_stats_addr);
rc = hwrm_req_send(bp, req);
if (!rc) {
- __le64 *src = (__le64 *)hw_pcie_stats;
- u64 *dst = (u64 *)(_p + BNXT_PXP_REG_LEN);
- int i;
-
- for (i = 0; i < sizeof(*hw_pcie_stats) / sizeof(__le64); i++)
- dst[i] = le64_to_cpu(src[i]);
+ u8 *dst = (u8 *)(_p + BNXT_PXP_REG_LEN);
+ u8 *src = (u8 *)hw_pcie_stats;
+ int i, j;
+
+ for (i = 0, j = 0; i < sizeof(*hw_pcie_stats); ) {
+ if (i >= bnxt_pcie_32b_entries[j].start &&
+ i <= bnxt_pcie_32b_entries[j].end) {
+ u32 *dst32 = (u32 *)(dst + i);
+
+ *dst32 = le32_to_cpu(*(__le32 *)(src + i));
+ i += 4;
+ if (i > bnxt_pcie_32b_entries[j].end &&
+ j < ARRAY_SIZE(bnxt_pcie_32b_entries) - 1)
+ j++;
+ } else {
+ u64 *dst64 = (u64 *)(dst + i);
+
+ *dst64 = le64_to_cpu(*(__le64 *)(src + i));
+ i += 8;
+ }
+ }
}
hwrm_req_drop(bp, req);
}
@@ -4157,7 +4210,7 @@ err:
static void bnxt_get_pkgver(struct net_device *dev)
{
struct bnxt *bp = netdev_priv(dev);
- char buf[FW_VER_STR_LEN];
+ char buf[FW_VER_STR_LEN - 5];
int len;
if (!bnxt_get_pkginfo(dev, buf, sizeof(buf))) {
@@ -4323,6 +4376,45 @@ static int bnxt_get_eee(struct net_device *dev, struct ethtool_keee *edata)
return 0;
}
+static int bnxt_set_tunable(struct net_device *dev,
+ const struct ethtool_tunable *tuna,
+ const void *data)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ u32 rx_copybreak;
+
+ switch (tuna->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ rx_copybreak = *(u32 *)data;
+ if (rx_copybreak > BNXT_MAX_RX_COPYBREAK)
+ return -ERANGE;
+ if (rx_copybreak != bp->rx_copybreak) {
+ if (netif_running(dev))
+ return -EBUSY;
+ bp->rx_copybreak = rx_copybreak;
+ }
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int bnxt_get_tunable(struct net_device *dev,
+ const struct ethtool_tunable *tuna, void *data)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ switch (tuna->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ *(u32 *)data = bp->rx_copybreak;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
static int bnxt_read_sfp_module_eeprom_info(struct bnxt *bp, u16 i2c_addr,
u16 page_number, u8 bank,
u16 start_addr, u16 data_length,
@@ -4371,6 +4463,9 @@ static int bnxt_get_module_info(struct net_device *dev,
struct bnxt *bp = netdev_priv(dev);
int rc;
+ if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
+ return -EPERM;
+
/* No point in going further if phy status indicates
* module is not inserted or if it is powered down or
* if it is of type 10GBase-T
@@ -4422,6 +4517,9 @@ static int bnxt_get_module_eeprom(struct net_device *dev,
u16 start = eeprom->offset, length = eeprom->len;
int rc = 0;
+ if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
+ return -EPERM;
+
memset(data, 0, eeprom->len);
/* Read A0 portion of the EEPROM */
@@ -4469,13 +4567,19 @@ static int bnxt_get_module_status(struct bnxt *bp, struct netlink_ext_ack *extac
return -EINVAL;
}
-static int bnxt_get_module_eeprom_by_page(struct net_device *dev,
- const struct ethtool_module_eeprom *page_data,
- struct netlink_ext_ack *extack)
+static int
+bnxt_mod_eeprom_by_page_precheck(struct bnxt *bp,
+ const struct ethtool_module_eeprom *page_data,
+ struct netlink_ext_ack *extack)
{
- struct bnxt *bp = netdev_priv(dev);
int rc;
+ if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Module read/write not permitted on untrusted VF");
+ return -EPERM;
+ }
+
rc = bnxt_get_module_status(bp, extack);
if (rc)
return rc;
@@ -4489,6 +4593,19 @@ static int bnxt_get_module_eeprom_by_page(struct net_device *dev,
NL_SET_ERR_MSG_MOD(extack, "Firmware not capable for bank selection");
return -EINVAL;
}
+ return 0;
+}
+
+static int bnxt_get_module_eeprom_by_page(struct net_device *dev,
+ const struct ethtool_module_eeprom *page_data,
+ struct netlink_ext_ack *extack)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int rc;
+
+ rc = bnxt_mod_eeprom_by_page_precheck(bp, page_data, extack);
+ if (rc)
+ return rc;
rc = bnxt_read_sfp_module_eeprom_info(bp, page_data->i2c_address << 1,
page_data->page, page_data->bank,
@@ -4502,6 +4619,62 @@ static int bnxt_get_module_eeprom_by_page(struct net_device *dev,
return page_data->length;
}
+static int bnxt_write_sfp_module_eeprom_info(struct bnxt *bp,
+ const struct ethtool_module_eeprom *page)
+{
+ struct hwrm_port_phy_i2c_write_input *req;
+ int bytes_written = 0;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_I2C_WRITE);
+ if (rc)
+ return rc;
+
+ hwrm_req_hold(bp, req);
+ req->i2c_slave_addr = page->i2c_address << 1;
+ req->page_number = cpu_to_le16(page->page);
+ req->bank_number = page->bank;
+ req->port_id = cpu_to_le16(bp->pf.port_id);
+ req->enables = cpu_to_le32(PORT_PHY_I2C_WRITE_REQ_ENABLES_PAGE_OFFSET |
+ PORT_PHY_I2C_WRITE_REQ_ENABLES_BANK_NUMBER);
+
+ while (bytes_written < page->length) {
+ u16 xfer_size;
+
+ xfer_size = min_t(u16, page->length - bytes_written,
+ BNXT_MAX_PHY_I2C_RESP_SIZE);
+ req->page_offset = cpu_to_le16(page->offset + bytes_written);
+ req->data_length = xfer_size;
+ memcpy(req->data, page->data + bytes_written, xfer_size);
+ rc = hwrm_req_send(bp, req);
+ if (rc)
+ break;
+ bytes_written += xfer_size;
+ }
+
+ hwrm_req_drop(bp, req);
+ return rc;
+}
+
+static int bnxt_set_module_eeprom_by_page(struct net_device *dev,
+ const struct ethtool_module_eeprom *page_data,
+ struct netlink_ext_ack *extack)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int rc;
+
+ rc = bnxt_mod_eeprom_by_page_precheck(bp, page_data, extack);
+ if (rc)
+ return rc;
+
+ rc = bnxt_write_sfp_module_eeprom_info(bp, page_data);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Module's eeprom write failed");
+ return rc;
+ }
+ return page_data->length;
+}
+
static int bnxt_nway_reset(struct net_device *dev)
{
int rc = 0;
@@ -4773,7 +4946,8 @@ static int bnxt_run_loopback(struct bnxt *bp)
cpr = &rxr->bnapi->cp_ring;
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
cpr = rxr->rx_cpr;
- pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_copy_thresh);
+ pkt_size = min(bp->dev->mtu + ETH_HLEN, max(BNXT_DEFAULT_RX_COPYBREAK,
+ bp->rx_copybreak));
skb = netdev_alloc_skb(bp->dev, pkt_size);
if (!skb)
return -ENOMEM;
@@ -4843,6 +5017,7 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
if (!bp->num_tests || !BNXT_PF(bp))
return;
+ memset(buf, 0, sizeof(u64) * bp->num_tests);
if (etest->flags & ETH_TEST_FL_OFFLINE &&
bnxt_ulp_registered(bp->edev)) {
etest->flags |= ETH_TEST_FL_FAILED;
@@ -4850,7 +5025,6 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
return;
}
- memset(buf, 0, sizeof(u64) * bp->num_tests);
if (!netif_running(dev)) {
etest->flags |= ETH_TEST_FL_FAILED;
return;
@@ -4883,35 +5057,44 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
bnxt_close_nic(bp, true, false);
bnxt_run_fw_tests(bp, test_mask, &test_results);
- buf[BNXT_MACLPBK_TEST_IDX] = 1;
- bnxt_hwrm_mac_loopback(bp, true);
- msleep(250);
rc = bnxt_half_open_nic(bp);
if (rc) {
- bnxt_hwrm_mac_loopback(bp, false);
etest->flags |= ETH_TEST_FL_FAILED;
return;
}
+ buf[BNXT_MACLPBK_TEST_IDX] = 1;
+ if (bp->mac_flags & BNXT_MAC_FL_NO_MAC_LPBK)
+ goto skip_mac_loopback;
+
+ bnxt_hwrm_mac_loopback(bp, true);
+ msleep(250);
if (bnxt_run_loopback(bp))
etest->flags |= ETH_TEST_FL_FAILED;
else
buf[BNXT_MACLPBK_TEST_IDX] = 0;
bnxt_hwrm_mac_loopback(bp, false);
+skip_mac_loopback:
+ buf[BNXT_PHYLPBK_TEST_IDX] = 1;
+ if (bp->phy_flags & BNXT_PHY_FL_NO_PHY_LPBK)
+ goto skip_phy_loopback;
+
bnxt_hwrm_phy_loopback(bp, true, false);
msleep(1000);
- if (bnxt_run_loopback(bp)) {
- buf[BNXT_PHYLPBK_TEST_IDX] = 1;
+ if (bnxt_run_loopback(bp))
etest->flags |= ETH_TEST_FL_FAILED;
- }
+ else
+ buf[BNXT_PHYLPBK_TEST_IDX] = 0;
+skip_phy_loopback:
+ buf[BNXT_EXTLPBK_TEST_IDX] = 1;
if (do_ext_lpbk) {
etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
bnxt_hwrm_phy_loopback(bp, true, true);
msleep(1000);
- if (bnxt_run_loopback(bp)) {
- buf[BNXT_EXTLPBK_TEST_IDX] = 1;
+ if (bnxt_run_loopback(bp))
etest->flags |= ETH_TEST_FL_FAILED;
- }
+ else
+ buf[BNXT_EXTLPBK_TEST_IDX] = 0;
}
bnxt_hwrm_phy_loopback(bp, false, false);
bnxt_half_close_nic(bp);
@@ -4989,8 +5172,9 @@ static int bnxt_set_dump(struct net_device *dev, struct ethtool_dump *dump)
{
struct bnxt *bp = netdev_priv(dev);
- if (dump->flag > BNXT_DUMP_DRIVER) {
- netdev_info(dev, "Supports only Live(0), Crash(1), Driver(2) dumps.\n");
+ if (dump->flag > BNXT_DUMP_LIVE_WITH_CTX_L1_CACHE) {
+ netdev_info(dev,
+ "Supports only Live(0), Crash(1), Driver(2), Live with cached context(3) dumps.\n");
return -EINVAL;
}
@@ -5305,6 +5489,8 @@ const struct ethtool_ops bnxt_ethtool_ops = {
ETHTOOL_COALESCE_STATS_BLOCK_USECS |
ETHTOOL_COALESCE_USE_ADAPTIVE_RX |
ETHTOOL_COALESCE_USE_CQE,
+ .supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT |
+ ETHTOOL_RING_USE_HDS_THRS,
.get_link_ksettings = bnxt_get_link_ksettings,
.set_link_ksettings = bnxt_set_link_ksettings,
.get_fec_stats = bnxt_get_fec_stats,
@@ -5346,9 +5532,12 @@ const struct ethtool_ops bnxt_ethtool_ops = {
.get_link_ext_stats = bnxt_get_link_ext_stats,
.get_eee = bnxt_get_eee,
.set_eee = bnxt_set_eee,
+ .get_tunable = bnxt_get_tunable,
+ .set_tunable = bnxt_set_tunable,
.get_module_info = bnxt_get_module_info,
.get_module_eeprom = bnxt_get_module_eeprom,
.get_module_eeprom_by_page = bnxt_get_module_eeprom_by_page,
+ .set_module_eeprom_by_page = bnxt_set_module_eeprom_by_page,
.nway_reset = bnxt_nway_reset,
.set_phys_id = bnxt_set_phys_id,
.self_test = bnxt_self_test,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index 5f8de1634378..549231703bce 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -2,7 +2,7 @@
*
* Copyright (c) 2014-2016 Broadcom Corporation
* Copyright (c) 2014-2018 Broadcom Limited
- * Copyright (c) 2018-2024 Broadcom Inc.
+ * Copyright (c) 2018-2025 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -438,6 +438,7 @@ struct cmd_nums {
#define HWRM_MFG_PRVSN_EXPORT_CERT 0x219UL
#define HWRM_STAT_DB_ERROR_QSTATS 0x21aUL
#define HWRM_MFG_TESTS 0x21bUL
+ #define HWRM_MFG_WRITE_CERT_NVM 0x21cUL
#define HWRM_PORT_POE_CFG 0x230UL
#define HWRM_PORT_POE_QCFG 0x231UL
#define HWRM_UDCC_QCAPS 0x258UL
@@ -514,6 +515,8 @@ struct cmd_nums {
#define HWRM_TFC_TBL_SCOPE_CONFIG_GET 0x39aUL
#define HWRM_TFC_RESC_USAGE_QUERY 0x39bUL
#define HWRM_TFC_GLOBAL_ID_FREE 0x39cUL
+ #define HWRM_TFC_TCAM_PRI_UPDATE 0x39dUL
+ #define HWRM_TFC_HOT_UPGRADE_PROCESS 0x3a0UL
#define HWRM_SV 0x400UL
#define HWRM_DBG_SERDES_TEST 0xff0eUL
#define HWRM_DBG_LOG_BUFFER_FLUSH 0xff0fUL
@@ -629,8 +632,8 @@ struct hwrm_err_output {
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 10
#define HWRM_VERSION_UPDATE 3
-#define HWRM_VERSION_RSVD 85
-#define HWRM_VERSION_STR "1.10.3.85"
+#define HWRM_VERSION_RSVD 97
+#define HWRM_VERSION_STR "1.10.3.97"
/* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input {
@@ -1905,11 +1908,15 @@ struct hwrm_func_qcaps_output {
__le32 roce_vf_max_srq;
__le32 roce_vf_max_gid;
__le32 flags_ext3;
- #define FUNC_QCAPS_RESP_FLAGS_EXT3_RM_RSV_WHILE_ALLOC_CAP 0x1UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT3_REQUIRE_L2_FILTER 0x2UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT3_MAX_ROCE_VFS_SUPPORTED 0x4UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_RM_RSV_WHILE_ALLOC_CAP 0x1UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_REQUIRE_L2_FILTER 0x2UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_MAX_ROCE_VFS_SUPPORTED 0x4UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_RX_RATE_PROFILE_SEL_SUPPORTED 0x8UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_BIDI_OPT_SUPPORTED 0x10UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_MIRROR_ON_ROCE_SUPPORTED 0x20UL
__le16 max_roce_vfs;
- u8 unused_3[5];
+ __le16 max_crypto_rx_flow_filters;
+ u8 unused_3[3];
u8 valid;
};
@@ -1924,7 +1931,7 @@ struct hwrm_func_qcfg_input {
u8 unused_0[6];
};
-/* hwrm_func_qcfg_output (size:1280b/160B) */
+/* hwrm_func_qcfg_output (size:1344b/168B) */
struct hwrm_func_qcfg_output {
__le16 error_code;
__le16 req_type;
@@ -2087,14 +2094,18 @@ struct hwrm_func_qcfg_output {
__le16 host_mtu;
__le16 flags2;
#define FUNC_QCFG_RESP_FLAGS2_SRIOV_DSCP_INSERT_ENABLED 0x1UL
- u8 unused_4[2];
+ __le16 stag_vid;
u8 port_kdnet_mode;
#define FUNC_QCFG_RESP_PORT_KDNET_MODE_DISABLED 0x0UL
#define FUNC_QCFG_RESP_PORT_KDNET_MODE_ENABLED 0x1UL
#define FUNC_QCFG_RESP_PORT_KDNET_MODE_LAST FUNC_QCFG_RESP_PORT_KDNET_MODE_ENABLED
u8 kdnet_pcie_function;
__le16 port_kdnet_fid;
- u8 unused_5[2];
+ u8 unused_5;
+ u8 roce_bidi_opt_mode;
+ #define FUNC_QCFG_RESP_ROCE_BIDI_OPT_MODE_DISABLED 0x1UL
+ #define FUNC_QCFG_RESP_ROCE_BIDI_OPT_MODE_DEDICATED 0x2UL
+ #define FUNC_QCFG_RESP_ROCE_BIDI_OPT_MODE_SHARED 0x4UL
__le32 num_ktls_tx_key_ctxs;
__le32 num_ktls_rx_key_ctxs;
u8 lag_id;
@@ -2112,7 +2123,8 @@ struct hwrm_func_qcfg_output {
__le16 xid_partition_cfg;
#define FUNC_QCFG_RESP_XID_PARTITION_CFG_TX_CK 0x1UL
#define FUNC_QCFG_RESP_XID_PARTITION_CFG_RX_CK 0x2UL
- u8 unused_7;
+ __le16 mirror_vnic_id;
+ u8 unused_7[7];
u8 valid;
};
@@ -3965,7 +3977,7 @@ struct ts_split_entries {
__le32 region_num_entries;
u8 tsid;
u8 lkup_static_bkt_cnt_exp[2];
- u8 rsvd;
+ u8 locked;
__le32 rsvd2[2];
};
@@ -5483,6 +5495,37 @@ struct hwrm_port_phy_qcaps_output {
u8 valid;
};
+/* hwrm_port_phy_i2c_write_input (size:832b/104B) */
+struct hwrm_port_phy_i2c_write_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ __le32 enables;
+ #define PORT_PHY_I2C_WRITE_REQ_ENABLES_PAGE_OFFSET 0x1UL
+ #define PORT_PHY_I2C_WRITE_REQ_ENABLES_BANK_NUMBER 0x2UL
+ __le16 port_id;
+ u8 i2c_slave_addr;
+ u8 bank_number;
+ __le16 page_number;
+ __le16 page_offset;
+ u8 data_length;
+ u8 unused_1[7];
+ __le32 data[16];
+};
+
+/* hwrm_port_phy_i2c_write_output (size:128b/16B) */
+struct hwrm_port_phy_i2c_write_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
/* hwrm_port_phy_i2c_read_input (size:320b/40B) */
struct hwrm_port_phy_i2c_read_input {
__le16 req_type;
@@ -6610,8 +6653,9 @@ struct hwrm_vnic_alloc_input {
__le32 flags;
#define VNIC_ALLOC_REQ_FLAGS_DEFAULT 0x1UL
#define VNIC_ALLOC_REQ_FLAGS_VIRTIO_NET_FID_VALID 0x2UL
+ #define VNIC_ALLOC_REQ_FLAGS_VNIC_ID_VALID 0x4UL
__le16 virtio_net_fid;
- u8 unused_0[2];
+ __le16 vnic_id;
};
/* hwrm_vnic_alloc_output (size:128b/16B) */
@@ -6710,6 +6754,7 @@ struct hwrm_vnic_cfg_input {
#define VNIC_CFG_REQ_ENABLES_QUEUE_ID 0x80UL
#define VNIC_CFG_REQ_ENABLES_RX_CSUM_V2_MODE 0x100UL
#define VNIC_CFG_REQ_ENABLES_L2_CQE_MODE 0x200UL
+ #define VNIC_CFG_REQ_ENABLES_RAW_QP_ID 0x400UL
__le16 vnic_id;
__le16 dflt_ring_grp;
__le16 rss_rule;
@@ -6729,7 +6774,7 @@ struct hwrm_vnic_cfg_input {
#define VNIC_CFG_REQ_L2_CQE_MODE_COMPRESSED 0x1UL
#define VNIC_CFG_REQ_L2_CQE_MODE_MIXED 0x2UL
#define VNIC_CFG_REQ_L2_CQE_MODE_LAST VNIC_CFG_REQ_L2_CQE_MODE_MIXED
- u8 unused0[4];
+ __le32 raw_qp_id;
};
/* hwrm_vnic_cfg_output (size:128b/16B) */
@@ -7082,6 +7127,15 @@ struct hwrm_vnic_plcmodes_cfg_output {
u8 valid;
};
+/* hwrm_vnic_plcmodes_cfg_cmd_err (size:64b/8B) */
+struct hwrm_vnic_plcmodes_cfg_cmd_err {
+ u8 code;
+ #define VNIC_PLCMODES_CFG_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define VNIC_PLCMODES_CFG_CMD_ERR_CODE_INVALID_HDS_THRESHOLD 0x1UL
+ #define VNIC_PLCMODES_CFG_CMD_ERR_CODE_LAST VNIC_PLCMODES_CFG_CMD_ERR_CODE_INVALID_HDS_THRESHOLD
+ u8 unused_0[7];
+};
+
/* hwrm_vnic_rss_cos_lb_ctx_alloc_input (size:128b/16B) */
struct hwrm_vnic_rss_cos_lb_ctx_alloc_input {
__le16 req_type;
@@ -7131,15 +7185,16 @@ struct hwrm_ring_alloc_input {
__le16 target_id;
__le64 resp_addr;
__le32 enables;
- #define RING_ALLOC_REQ_ENABLES_RING_ARB_CFG 0x2UL
- #define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID 0x8UL
- #define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID 0x20UL
- #define RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID 0x40UL
- #define RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID 0x80UL
- #define RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID 0x100UL
- #define RING_ALLOC_REQ_ENABLES_SCHQ_ID 0x200UL
- #define RING_ALLOC_REQ_ENABLES_MPC_CHNLS_TYPE 0x400UL
- #define RING_ALLOC_REQ_ENABLES_STEERING_TAG_VALID 0x800UL
+ #define RING_ALLOC_REQ_ENABLES_RING_ARB_CFG 0x2UL
+ #define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID 0x8UL
+ #define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID 0x20UL
+ #define RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID 0x40UL
+ #define RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID 0x80UL
+ #define RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID 0x100UL
+ #define RING_ALLOC_REQ_ENABLES_SCHQ_ID 0x200UL
+ #define RING_ALLOC_REQ_ENABLES_MPC_CHNLS_TYPE 0x400UL
+ #define RING_ALLOC_REQ_ENABLES_STEERING_TAG_VALID 0x800UL
+ #define RING_ALLOC_REQ_ENABLES_RX_RATE_PROFILE_VALID 0x1000UL
u8 ring_type;
#define RING_ALLOC_REQ_RING_TYPE_L2_CMPL 0x0UL
#define RING_ALLOC_REQ_RING_TYPE_TX 0x1UL
@@ -7226,7 +7281,11 @@ struct hwrm_ring_alloc_input {
#define RING_ALLOC_REQ_MPC_CHNLS_TYPE_RE_CFA 0x3UL
#define RING_ALLOC_REQ_MPC_CHNLS_TYPE_PRIMATE 0x4UL
#define RING_ALLOC_REQ_MPC_CHNLS_TYPE_LAST RING_ALLOC_REQ_MPC_CHNLS_TYPE_PRIMATE
- u8 unused_4[2];
+ u8 rx_rate_profile_sel;
+ #define RING_ALLOC_REQ_RX_RATE_PROFILE_SEL_DEFAULT 0x0UL
+ #define RING_ALLOC_REQ_RX_RATE_PROFILE_SEL_POLL_MODE 0x1UL
+ #define RING_ALLOC_REQ_RX_RATE_PROFILE_SEL_LAST RING_ALLOC_REQ_RX_RATE_PROFILE_SEL_POLL_MODE
+ u8 unused_4;
__le64 cq_handle;
};
@@ -9122,6 +9181,39 @@ struct pcie_ctx_hw_stats {
__le64 pcie_recovery_histogram;
};
+/* pcie_ctx_hw_stats_v2 (size:4096b/512B) */
+struct pcie_ctx_hw_stats_v2 {
+ __le64 pcie_pl_signal_integrity;
+ __le64 pcie_dl_signal_integrity;
+ __le64 pcie_tl_signal_integrity;
+ __le64 pcie_link_integrity;
+ __le64 pcie_tx_traffic_rate;
+ __le64 pcie_rx_traffic_rate;
+ __le64 pcie_tx_dllp_statistics;
+ __le64 pcie_rx_dllp_statistics;
+ __le64 pcie_equalization_time;
+ __le32 pcie_ltssm_histogram[4];
+ __le64 pcie_recovery_histogram;
+ __le32 pcie_tl_credit_nph_histogram[8];
+ __le32 pcie_tl_credit_ph_histogram[8];
+ __le32 pcie_tl_credit_pd_histogram[8];
+ __le32 pcie_cmpl_latest_times[4];
+ __le32 pcie_cmpl_longest_time;
+ __le32 pcie_cmpl_shortest_time;
+ __le32 unused_0[2];
+ __le32 pcie_cmpl_latest_headers[4][4];
+ __le32 pcie_cmpl_longest_headers[4][4];
+ __le32 pcie_cmpl_shortest_headers[4][4];
+ __le32 pcie_wr_latency_histogram[12];
+ __le32 pcie_wr_latency_all_normal_count;
+ __le32 unused_1;
+ __le64 pcie_posted_packet_count;
+ __le64 pcie_non_posted_packet_count;
+ __le64 pcie_other_packet_count;
+ __le64 pcie_blocked_packet_count;
+ __le64 pcie_cmpl_packet_count;
+};
+
/* hwrm_stat_generic_qstats_input (size:256b/32B) */
struct hwrm_stat_generic_qstats_input {
__le16 req_type;
@@ -9317,6 +9409,9 @@ struct hwrm_struct_hdr {
#define STRUCT_HDR_STRUCT_ID_LAST STRUCT_HDR_STRUCT_ID_UDCC_RTT_BUCKET_BOUND
__le16 len;
u8 version;
+ #define STRUCT_HDR_VERSION_0 0x0UL
+ #define STRUCT_HDR_VERSION_1 0x1UL
+ #define STRUCT_HDR_VERSION_LAST STRUCT_HDR_VERSION_1
u8 count;
__le16 subtype;
__le16 next_offset;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h
index 15ca51b5d204..fb5f5b063c3d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h
@@ -58,7 +58,7 @@ void hwrm_update_token(struct bnxt *bp, u16 seq, enum bnxt_hwrm_wait_state s);
#define BNXT_HWRM_MAX_REQ_LEN (bp->hwrm_max_req_len)
#define BNXT_HWRM_SHORT_REQ_LEN sizeof(struct hwrm_short_input)
-#define HWRM_CMD_MAX_TIMEOUT 40000U
+#define HWRM_CMD_MAX_TIMEOUT 60000U
#define SHORT_HWRM_CMD_TIMEOUT 20
#define HWRM_CMD_TIMEOUT (bp->hwrm_cmd_timeout)
#define HWRM_RESET_TIMEOUT ((HWRM_CMD_TIMEOUT) * 4)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
index 2d4e19b96ee7..0669d43472f5 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
@@ -794,6 +794,27 @@ next_slot:
return HZ;
}
+void bnxt_ptp_free_txts_skbs(struct bnxt_ptp_cfg *ptp)
+{
+ struct bnxt_ptp_tx_req *txts_req;
+ u16 cons = ptp->txts_cons;
+
+ /* make sure the ptp aux worker has finished with
+ * possible BNXT_STATE_OPEN set
+ */
+ ptp_cancel_worker_sync(ptp->ptp_clock);
+
+ ptp->tx_avail = BNXT_MAX_TX_TS;
+ while (cons != ptp->txts_prod) {
+ txts_req = &ptp->txts_req[cons];
+ if (!IS_ERR_OR_NULL(txts_req->tx_skb))
+ dev_kfree_skb_any(txts_req->tx_skb);
+ cons = NEXT_TXTS(cons);
+ }
+ ptp->txts_cons = cons;
+ ptp_schedule_worker(ptp->ptp_clock, 0);
+}
+
int bnxt_ptp_get_txts_prod(struct bnxt_ptp_cfg *ptp, u16 *prod)
{
spin_lock_bh(&ptp->ptp_tx_lock);
@@ -1105,7 +1126,6 @@ out:
void bnxt_ptp_clear(struct bnxt *bp)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
- int i;
if (!ptp)
return;
@@ -1117,12 +1137,5 @@ void bnxt_ptp_clear(struct bnxt *bp)
kfree(ptp->ptp_info.pin_config);
ptp->ptp_info.pin_config = NULL;
- for (i = 0; i < BNXT_MAX_TX_TS; i++) {
- if (ptp->txts_req[i].tx_skb) {
- dev_kfree_skb_any(ptp->txts_req[i].tx_skb);
- ptp->txts_req[i].tx_skb = NULL;
- }
- }
-
bnxt_unmap_ptp_regs(bp);
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
index a95f05e9c579..0481161d26ef 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
@@ -162,6 +162,7 @@ int bnxt_ptp_cfg_tstamp_filters(struct bnxt *bp);
void bnxt_ptp_reapply_pps(struct bnxt *bp);
int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr);
int bnxt_hwtstamp_get(struct net_device *dev, struct ifreq *ifr);
+void bnxt_ptp_free_txts_skbs(struct bnxt_ptp_cfg *ptp);
int bnxt_ptp_get_txts_prod(struct bnxt_ptp_cfg *ptp, u16 *prod);
void bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb, u16 prod);
int bnxt_get_rx_ts_p5(struct bnxt *bp, u64 *ts, u32 pkt_ts);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 12b6ed51fd88..5ddddd89052f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -946,7 +946,9 @@ void bnxt_sriov_disable(struct bnxt *bp)
/* Reclaim all resources for the PF. */
rtnl_lock();
+ netdev_lock(bp->dev);
bnxt_restore_pf_fw_resources(bp);
+ netdev_unlock(bp->dev);
rtnl_unlock();
}
@@ -956,17 +958,21 @@ int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
struct bnxt *bp = netdev_priv(dev);
rtnl_lock();
+ netdev_lock(dev);
if (!netif_running(dev)) {
netdev_warn(dev, "Reject SRIOV config request since if is down!\n");
+ netdev_unlock(dev);
rtnl_unlock();
return 0;
}
if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
netdev_warn(dev, "Reject SRIOV config request when FW reset is in progress\n");
+ netdev_unlock(dev);
rtnl_unlock();
return 0;
}
bp->sriov_cfg = true;
+ netdev_unlock(dev);
rtnl_unlock();
if (pci_vfs_assigned(bp->pdev)) {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index b771c84cdd89..2450a369b792 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -20,6 +20,7 @@
#include <asm/byteorder.h>
#include <linux/bitmap.h>
#include <linux/auxiliary_bus.h>
+#include <net/netdev_lock.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
@@ -112,7 +113,7 @@ int bnxt_register_dev(struct bnxt_en_dev *edev,
struct bnxt_ulp *ulp;
int rc = 0;
- rtnl_lock();
+ netdev_lock(dev);
mutex_lock(&edev->en_dev_lock);
if (!bp->irq_tbl) {
rc = -ENODEV;
@@ -138,7 +139,7 @@ int bnxt_register_dev(struct bnxt_en_dev *edev,
edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED;
exit:
mutex_unlock(&edev->en_dev_lock);
- rtnl_unlock();
+ netdev_unlock(dev);
return rc;
}
EXPORT_SYMBOL(bnxt_register_dev);
@@ -148,10 +149,9 @@ void bnxt_unregister_dev(struct bnxt_en_dev *edev)
struct net_device *dev = edev->net;
struct bnxt *bp = netdev_priv(dev);
struct bnxt_ulp *ulp;
- int i = 0;
ulp = edev->ulp_tbl;
- rtnl_lock();
+ netdev_lock(dev);
mutex_lock(&edev->en_dev_lock);
if (ulp->msix_requested)
edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED;
@@ -164,12 +164,8 @@ void bnxt_unregister_dev(struct bnxt_en_dev *edev)
synchronize_rcu();
ulp->max_async_event_id = 0;
ulp->async_events_bmap = NULL;
- while (atomic_read(&ulp->ref_count) != 0 && i < 10) {
- msleep(100);
- i++;
- }
mutex_unlock(&edev->en_dev_lock);
- rtnl_unlock();
+ netdev_unlock(dev);
return;
}
EXPORT_SYMBOL(bnxt_unregister_dev);
@@ -208,7 +204,7 @@ int bnxt_send_msg(struct bnxt_en_dev *edev,
rc = hwrm_req_replace(bp, req, fw_msg->msg, fw_msg->msg_len);
if (rc)
- return rc;
+ goto drop_req;
hwrm_req_timeout(bp, req, fw_msg->timeout);
resp = hwrm_req_hold(bp, req);
@@ -220,6 +216,7 @@ int bnxt_send_msg(struct bnxt_en_dev *edev,
memcpy(fw_msg->resp, resp, resp_len);
}
+drop_req:
hwrm_req_drop(bp, req);
return rc;
}
@@ -234,10 +231,9 @@ void bnxt_ulp_stop(struct bnxt *bp)
return;
mutex_lock(&edev->en_dev_lock);
- if (!bnxt_ulp_registered(edev)) {
- mutex_unlock(&edev->en_dev_lock);
- return;
- }
+ if (!bnxt_ulp_registered(edev) ||
+ (edev->flags & BNXT_EN_FLAG_ULP_STOPPED))
+ goto ulp_stop_exit;
edev->flags |= BNXT_EN_FLAG_ULP_STOPPED;
if (aux_priv) {
@@ -253,6 +249,7 @@ void bnxt_ulp_stop(struct bnxt *bp)
adrv->suspend(adev, pm);
}
}
+ulp_stop_exit:
mutex_unlock(&edev->en_dev_lock);
}
@@ -261,19 +258,13 @@ void bnxt_ulp_start(struct bnxt *bp, int err)
struct bnxt_aux_priv *aux_priv = bp->aux_priv;
struct bnxt_en_dev *edev = bp->edev;
- if (!edev)
- return;
-
- edev->flags &= ~BNXT_EN_FLAG_ULP_STOPPED;
-
- if (err)
+ if (!edev || err)
return;
mutex_lock(&edev->en_dev_lock);
- if (!bnxt_ulp_registered(edev)) {
- mutex_unlock(&edev->en_dev_lock);
- return;
- }
+ if (!bnxt_ulp_registered(edev) ||
+ !(edev->flags & BNXT_EN_FLAG_ULP_STOPPED))
+ goto ulp_start_exit;
if (edev->ulp_tbl->msix_requested)
bnxt_fill_msix_vecs(bp, edev->msix_entries);
@@ -290,6 +281,8 @@ void bnxt_ulp_start(struct bnxt *bp, int err)
adrv->resume(adev);
}
}
+ulp_start_exit:
+ edev->flags &= ~BNXT_EN_FLAG_ULP_STOPPED;
mutex_unlock(&edev->en_dev_lock);
}
@@ -297,6 +290,7 @@ void bnxt_ulp_irq_stop(struct bnxt *bp)
{
struct bnxt_en_dev *edev = bp->edev;
struct bnxt_ulp_ops *ops;
+ bool reset = false;
if (!edev || !(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
return;
@@ -307,10 +301,12 @@ void bnxt_ulp_irq_stop(struct bnxt *bp)
if (!ulp->msix_requested)
return;
- ops = rtnl_dereference(ulp->ulp_ops);
+ ops = netdev_lock_dereference(ulp->ulp_ops, bp->dev);
if (!ops || !ops->ulp_irq_stop)
return;
- ops->ulp_irq_stop(ulp->handle);
+ if (test_bit(BNXT_STATE_FW_RESET_DET, &bp->state))
+ reset = true;
+ ops->ulp_irq_stop(ulp->handle, reset);
}
}
@@ -329,7 +325,7 @@ void bnxt_ulp_irq_restart(struct bnxt *bp, int err)
if (!ulp->msix_requested)
return;
- ops = rtnl_dereference(ulp->ulp_ops);
+ ops = netdev_lock_dereference(ulp->ulp_ops, bp->dev);
if (!ops || !ops->ulp_irq_restart)
return;
@@ -345,9 +341,36 @@ void bnxt_ulp_irq_restart(struct bnxt *bp, int err)
}
}
-int bnxt_register_async_events(struct bnxt_en_dev *edev,
- unsigned long *events_bmap,
- u16 max_id)
+void bnxt_ulp_async_events(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl)
+{
+ u16 event_id = le16_to_cpu(cmpl->event_id);
+ struct bnxt_en_dev *edev = bp->edev;
+ struct bnxt_ulp_ops *ops;
+ struct bnxt_ulp *ulp;
+
+ if (!bnxt_ulp_registered(edev))
+ return;
+ ulp = edev->ulp_tbl;
+
+ rcu_read_lock();
+
+ ops = rcu_dereference(ulp->ulp_ops);
+ if (!ops || !ops->ulp_async_notifier)
+ goto exit_unlock_rcu;
+ if (!ulp->async_events_bmap || event_id > ulp->max_async_event_id)
+ goto exit_unlock_rcu;
+
+ /* Read max_async_event_id first before testing the bitmap. */
+ smp_rmb();
+
+ if (test_bit(event_id, ulp->async_events_bmap))
+ ops->ulp_async_notifier(ulp->handle, cmpl);
+exit_unlock_rcu:
+ rcu_read_unlock();
+}
+
+void bnxt_register_async_events(struct bnxt_en_dev *edev,
+ unsigned long *events_bmap, u16 max_id)
{
struct net_device *dev = edev->net;
struct bnxt *bp = netdev_priv(dev);
@@ -359,7 +382,6 @@ int bnxt_register_async_events(struct bnxt_en_dev *edev,
smp_wmb();
ulp->max_async_event_id = max_id;
bnxt_hwrm_func_drv_rgtr(bp, events_bmap, max_id + 1, true);
- return 0;
}
EXPORT_SYMBOL(bnxt_register_async_events);
@@ -416,6 +438,8 @@ static void bnxt_set_edev_info(struct bnxt_en_dev *edev, struct bnxt *bp)
edev->flags |= BNXT_EN_FLAG_VF;
if (BNXT_ROCE_VF_RESC_CAP(bp))
edev->flags |= BNXT_EN_FLAG_ROCE_VF_RES_MGMT;
+ if (BNXT_SW_RES_LMT(bp))
+ edev->flags |= BNXT_EN_FLAG_SW_RES_LMT;
edev->chip_num = bp->chip_num;
edev->hw_ring_stats_size = bp->hw_ring_stats_size;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
index 5d6aac60f236..7b9dd8ebe4bc 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
@@ -10,9 +10,6 @@
#ifndef BNXT_ULP_H
#define BNXT_ULP_H
-#define BNXT_ROCE_ULP 0
-#define BNXT_MAX_ULP 1
-
#define BNXT_MIN_ROCE_CP_RINGS 2
#define BNXT_MIN_ROCE_STAT_CTXS 1
@@ -30,7 +27,9 @@ struct bnxt_msix_entry {
};
struct bnxt_ulp_ops {
- void (*ulp_irq_stop)(void *);
+ /* async_notifier() cannot sleep (in BH context) */
+ void (*ulp_async_notifier)(void *, struct hwrm_async_event_cmpl *);
+ void (*ulp_irq_stop)(void *, bool);
void (*ulp_irq_restart)(void *, struct bnxt_msix_entry *);
};
@@ -48,7 +47,6 @@ struct bnxt_ulp {
unsigned long *async_events_bmap;
u16 max_async_event_id;
u16 msix_requested;
- atomic_t ref_count;
};
struct bnxt_en_dev {
@@ -65,6 +63,8 @@ struct bnxt_en_dev {
#define BNXT_EN_FLAG_VF 0x10
#define BNXT_EN_VF(edev) ((edev)->flags & BNXT_EN_FLAG_VF)
#define BNXT_EN_FLAG_ROCE_VF_RES_MGMT 0x20
+ #define BNXT_EN_FLAG_SW_RES_LMT 0x40
+#define BNXT_EN_SW_RES_LMT(edev) ((edev)->flags & BNXT_EN_FLAG_SW_RES_LMT)
struct bnxt_ulp *ulp_tbl;
int l2_db_size; /* Doorbell BAR size in
@@ -124,6 +124,6 @@ int bnxt_register_dev(struct bnxt_en_dev *edev, struct bnxt_ulp_ops *ulp_ops,
void *handle);
void bnxt_unregister_dev(struct bnxt_en_dev *edev);
int bnxt_send_msg(struct bnxt_en_dev *edev, struct bnxt_fw_msg *fw_msg);
-int bnxt_register_async_events(struct bnxt_en_dev *edev,
- unsigned long *events_bmap, u16 max_id);
+void bnxt_register_async_events(struct bnxt_en_dev *edev,
+ unsigned long *events_bmap, u16 max_id);
#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
index 1467b94a6427..619f0844e778 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
@@ -257,8 +257,7 @@ bool bnxt_dev_is_vf_rep(struct net_device *dev)
/* Called when the parent PF interface is closed:
* As the mode transition from SWITCHDEV to LEGACY
- * happens under the rtnl_lock() this routine is safe
- * under the rtnl_lock()
+ * happens under the netdev instance lock, this routine is safe
*/
void bnxt_vf_reps_close(struct bnxt *bp)
{
@@ -278,8 +277,7 @@ void bnxt_vf_reps_close(struct bnxt *bp)
/* Called when the parent PF interface is opened (re-opened):
* As the mode transition from SWITCHDEV to LEGACY
- * happen under the rtnl_lock() this routine is safe
- * under the rtnl_lock()
+ * happens under the netdev instance lock, this routine is safe
*/
void bnxt_vf_reps_open(struct bnxt *bp)
{
@@ -348,7 +346,7 @@ void bnxt_vf_reps_destroy(struct bnxt *bp)
/* Ensure that parent PF's and VF-reps' RX/TX has been quiesced
* before proceeding with VF-rep cleanup.
*/
- rtnl_lock();
+ netdev_lock(bp->dev);
if (netif_running(bp->dev)) {
bnxt_close_nic(bp, false, false);
closed = true;
@@ -365,10 +363,10 @@ void bnxt_vf_reps_destroy(struct bnxt *bp)
bnxt_open_nic(bp, false, false);
bp->eswitch_mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
}
- rtnl_unlock();
+ netdev_unlock(bp->dev);
- /* Need to call vf_reps_destroy() outside of rntl_lock
- * as unregister_netdev takes rtnl_lock
+ /* Need to call vf_reps_destroy() outside of netdev instance lock
+ * as unregister_netdev takes it
*/
__bnxt_vf_reps_destroy(bp);
}
@@ -376,7 +374,7 @@ void bnxt_vf_reps_destroy(struct bnxt *bp)
/* Free the VF-Reps in firmware, during firmware hot-reset processing.
* Note that the VF-Rep netdevs are still active (not unregistered) during
* this process. As the mode transition from SWITCHDEV to LEGACY happens
- * under the rtnl_lock() this routine is safe under the rtnl_lock().
+ * under the netdev instance lock, this routine is safe.
*/
void bnxt_vf_reps_free(struct bnxt *bp)
{
@@ -413,7 +411,7 @@ static int bnxt_alloc_vf_rep(struct bnxt *bp, struct bnxt_vf_rep *vf_rep,
/* Allocate the VF-Reps in firmware, during firmware hot-reset processing.
* Note that the VF-Rep netdevs are still active (not unregistered) during
* this process. As the mode transition from SWITCHDEV to LEGACY happens
- * under the rtnl_lock() this routine is safe under the rtnl_lock().
+ * under the netdev instance lock, this routine is safe.
*/
int bnxt_vf_reps_alloc(struct bnxt *bp)
{
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index f88b641533fc..4a6d8cb9f970 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -15,6 +15,7 @@
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/filter.h>
+#include <net/netdev_lock.h>
#include <net/page_pool/helpers.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
@@ -48,8 +49,7 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
tx_buf->page = virt_to_head_page(xdp->data);
txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
- flags = (len << TX_BD_LEN_SHIFT) |
- ((num_frags + 1) << TX_BD_FLAGS_BD_CNT_SHIFT) |
+ flags = (len << TX_BD_LEN_SHIFT) | TX_BD_CNT(num_frags + 1) |
bnxt_lhint_arr[len >> 9];
txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 1 + num_frags);
@@ -382,19 +382,24 @@ int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
return nxmit;
}
-/* Under rtnl_lock */
static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
{
struct net_device *dev = bp->dev;
int tx_xdp = 0, tx_cp, rc, tc;
struct bpf_prog *old;
+ netdev_assert_locked(dev);
+
if (prog && !prog->aux->xdp_has_frags &&
bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
netdev_warn(dev, "MTU %d larger than %d without XDP frag support.\n",
bp->dev->mtu, BNXT_MAX_PAGE_MODE_MTU);
return -EOPNOTSUPP;
}
+ if (prog && bp->flags & BNXT_FLAG_HDS) {
+ netdev_warn(dev, "XDP is disallowed when HDS is enabled.\n");
+ return -EOPNOTSUPP;
+ }
if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) {
netdev_warn(dev, "ethtool rx/tx channels must be combined to support XDP.\n");
return -EOPNOTSUPP;
@@ -420,17 +425,10 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
if (prog) {
bnxt_set_rx_skb_mode(bp, true);
- xdp_features_set_redirect_target(dev, true);
+ xdp_features_set_redirect_target_locked(dev, true);
} else {
- int rx, tx;
-
- xdp_features_clear_redirect_target(dev);
+ xdp_features_clear_redirect_target_locked(dev);
bnxt_set_rx_skb_mode(bp, false);
- bnxt_get_max_rings(bp, &rx, &tx, true);
- if (rx > 1) {
- bp->flags &= ~BNXT_FLAG_NO_AGG_RINGS;
- bp->dev->hw_features |= NETIF_F_LRO;
- }
}
bp->tx_nr_rings_xdp = tx_xdp;
bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc + tx_xdp;
@@ -463,23 +461,16 @@ int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp)
struct sk_buff *
bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb, u8 num_frags,
- struct page_pool *pool, struct xdp_buff *xdp,
- struct rx_cmp_ext *rxcmp1)
+ struct page_pool *pool, struct xdp_buff *xdp)
{
struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
if (!skb)
return NULL;
- skb_checksum_none_assert(skb);
- if (RX_CMP_L4_CS_OK(rxcmp1)) {
- if (bp->dev->features & NETIF_F_RXCSUM) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- skb->csum_level = RX_CMP_ENCAP(rxcmp1);
- }
- }
+
xdp_update_skb_shared_info(skb, num_frags,
sinfo->xdp_frags_size,
- BNXT_RX_PAGE_SIZE * sinfo->nr_frags,
+ BNXT_RX_PAGE_SIZE * num_frags,
xdp_buff_is_frag_pfmemalloc(xdp));
return skb;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
index 0122782400b8..220285e190fc 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
@@ -33,6 +33,5 @@ void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr,
struct xdp_buff *xdp);
struct sk_buff *bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb,
u8 num_frags, struct page_pool *pool,
- struct xdp_buff *xdp,
- struct rx_cmp_ext *rxcmp1);
+ struct xdp_buff *xdp);
#endif
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 3e93f957430b..fa0077bc67b7 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2,7 +2,7 @@
/*
* Broadcom GENET (Gigabit Ethernet) controller driver
*
- * Copyright (c) 2014-2024 Broadcom
+ * Copyright (c) 2014-2025 Broadcom
*/
#define pr_fmt(fmt) "bcmgenet: " fmt
@@ -41,15 +41,13 @@
#include "bcmgenet.h"
-/* Maximum number of hardware queues, downsized if needed */
-#define GENET_MAX_MQ_CNT 4
-
/* Default highest priority queue for multi queue support */
-#define GENET_Q0_PRIORITY 0
+#define GENET_Q1_PRIORITY 0
+#define GENET_Q0_PRIORITY 1
-#define GENET_Q16_RX_BD_CNT \
+#define GENET_Q0_RX_BD_CNT \
(TOTAL_DESC - priv->hw_params->rx_queues * priv->hw_params->rx_bds_per_q)
-#define GENET_Q16_TX_BD_CNT \
+#define GENET_Q0_TX_BD_CNT \
(TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->tx_bds_per_q)
#define RX_BUF_LENGTH 2048
@@ -104,7 +102,7 @@ static inline void dmadesc_set_addr(struct bcmgenet_priv *priv,
* the platform is explicitly configured for 64-bits/LPAE.
*/
#ifdef CONFIG_PHYS_ADDR_T_64BIT
- if (priv->hw_params->flags & GENET_HAS_40BITS)
+ if (bcmgenet_has_40bits(priv))
bcmgenet_writel(upper_32_bits(addr), d + DMA_DESC_ADDRESS_HI);
#endif
}
@@ -446,33 +444,48 @@ static void bcmgenet_hfb_enable_filter(struct bcmgenet_priv *priv, u32 f_index)
u32 offset;
u32 reg;
- offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32);
- reg = bcmgenet_hfb_reg_readl(priv, offset);
- reg |= (1 << (f_index % 32));
- bcmgenet_hfb_reg_writel(priv, reg, offset);
- reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
- reg |= RBUF_HFB_EN;
- bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
+ if (GENET_IS_V1(priv) || GENET_IS_V2(priv)) {
+ reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
+ reg |= (1 << ((f_index % 32) + RBUF_HFB_FILTER_EN_SHIFT)) |
+ RBUF_HFB_EN;
+ bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
+ } else {
+ offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32);
+ reg = bcmgenet_hfb_reg_readl(priv, offset);
+ reg |= (1 << (f_index % 32));
+ bcmgenet_hfb_reg_writel(priv, reg, offset);
+ reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
+ reg |= RBUF_HFB_EN;
+ bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
+ }
}
static void bcmgenet_hfb_disable_filter(struct bcmgenet_priv *priv, u32 f_index)
{
u32 offset, reg, reg1;
- offset = HFB_FLT_ENABLE_V3PLUS;
- reg = bcmgenet_hfb_reg_readl(priv, offset);
- reg1 = bcmgenet_hfb_reg_readl(priv, offset + sizeof(u32));
- if (f_index < 32) {
- reg1 &= ~(1 << (f_index % 32));
- bcmgenet_hfb_reg_writel(priv, reg1, offset + sizeof(u32));
- } else {
- reg &= ~(1 << (f_index % 32));
- bcmgenet_hfb_reg_writel(priv, reg, offset);
- }
- if (!reg && !reg1) {
+ if (GENET_IS_V1(priv) || GENET_IS_V2(priv)) {
reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
- reg &= ~RBUF_HFB_EN;
+ reg &= ~(1 << ((f_index % 32) + RBUF_HFB_FILTER_EN_SHIFT));
+ if (!(reg & RBUF_HFB_FILTER_EN_MASK))
+ reg &= ~RBUF_HFB_EN;
bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
+ } else {
+ offset = HFB_FLT_ENABLE_V3PLUS;
+ reg = bcmgenet_hfb_reg_readl(priv, offset);
+ reg1 = bcmgenet_hfb_reg_readl(priv, offset + sizeof(u32));
+ if (f_index < 32) {
+ reg1 &= ~(1 << (f_index % 32));
+ bcmgenet_hfb_reg_writel(priv, reg1, offset + sizeof(u32));
+ } else {
+ reg &= ~(1 << (f_index % 32));
+ bcmgenet_hfb_reg_writel(priv, reg, offset);
+ }
+ if (!reg && !reg1) {
+ reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
+ reg &= ~RBUF_HFB_EN;
+ bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
+ }
}
}
@@ -482,6 +495,9 @@ static void bcmgenet_hfb_set_filter_rx_queue_mapping(struct bcmgenet_priv *priv,
u32 offset;
u32 reg;
+ if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
+ return;
+
offset = f_index / 8;
reg = bcmgenet_rdma_readl(priv, DMA_INDEX2RING_0 + offset);
reg &= ~(0xF << (4 * (f_index % 8)));
@@ -495,9 +511,13 @@ static void bcmgenet_hfb_set_filter_length(struct bcmgenet_priv *priv,
u32 offset;
u32 reg;
- offset = HFB_FLT_LEN_V3PLUS +
- ((priv->hw_params->hfb_filter_cnt - 1 - f_index) / 4) *
- sizeof(u32);
+ if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
+ offset = HFB_FLT_LEN_V2;
+ else
+ offset = HFB_FLT_LEN_V3PLUS;
+
+ offset += sizeof(u32) *
+ ((priv->hw_params->hfb_filter_cnt - 1 - f_index) / 4);
reg = bcmgenet_hfb_reg_readl(priv, offset);
reg &= ~(0xFF << (8 * (f_index % 4)));
reg |= ((f_length & 0xFF) << (8 * (f_index % 4)));
@@ -579,13 +599,13 @@ static void bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv,
struct bcmgenet_rxnfc_rule *rule)
{
struct ethtool_rx_flow_spec *fs = &rule->fs;
- u32 offset = 0, f_length = 0, f;
+ u32 offset = 0, f_length = 0, f, q;
u8 val_8, mask_8;
__be16 val_16;
u16 mask_16;
size_t size;
- f = fs->location;
+ f = fs->location + 1;
if (fs->flow_type & FLOW_MAC_EXT) {
bcmgenet_hfb_insert_data(priv, f, 0,
&fs->h_ext.h_dest, &fs->m_ext.h_dest,
@@ -667,19 +687,16 @@ static void bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv,
}
bcmgenet_hfb_set_filter_length(priv, f, 2 * f_length);
- if (!fs->ring_cookie || fs->ring_cookie == RX_CLS_FLOW_WAKE) {
- /* Ring 0 flows can be handled by the default Descriptor Ring
- * We'll map them to ring 0, but don't enable the filter
- */
- bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f, 0);
- rule->state = BCMGENET_RXNFC_STATE_DISABLED;
- } else {
+ if (fs->ring_cookie == RX_CLS_FLOW_WAKE)
+ q = 0;
+ else if (fs->ring_cookie == RX_CLS_FLOW_DISC)
+ q = priv->hw_params->rx_queues + 1;
+ else
/* Other Rx rings are direct mapped here */
- bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f,
- fs->ring_cookie);
- bcmgenet_hfb_enable_filter(priv, f);
- rule->state = BCMGENET_RXNFC_STATE_ENABLED;
- }
+ q = fs->ring_cookie;
+ bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f, q);
+ bcmgenet_hfb_enable_filter(priv, f);
+ rule->state = BCMGENET_RXNFC_STATE_ENABLED;
}
/* bcmgenet_hfb_clear
@@ -690,6 +707,7 @@ static void bcmgenet_hfb_clear_filter(struct bcmgenet_priv *priv, u32 f_index)
{
u32 base, i;
+ bcmgenet_hfb_set_filter_length(priv, f_index, 0);
base = f_index * priv->hw_params->hfb_filter_size;
for (i = 0; i < priv->hw_params->hfb_filter_size; i++)
bcmgenet_hfb_writel(priv, 0x0, (base + i) * sizeof(u32));
@@ -699,22 +717,23 @@ static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv)
{
u32 i;
- if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
- return;
-
- bcmgenet_hfb_reg_writel(priv, 0x0, HFB_CTRL);
- bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS);
- bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS + 4);
-
- for (i = DMA_INDEX2RING_0; i <= DMA_INDEX2RING_7; i++)
- bcmgenet_rdma_writel(priv, 0x0, i);
+ bcmgenet_hfb_reg_writel(priv, 0, HFB_CTRL);
- for (i = 0; i < (priv->hw_params->hfb_filter_cnt / 4); i++)
- bcmgenet_hfb_reg_writel(priv, 0x0,
- HFB_FLT_LEN_V3PLUS + i * sizeof(u32));
+ if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv)) {
+ bcmgenet_hfb_reg_writel(priv, 0,
+ HFB_FLT_ENABLE_V3PLUS);
+ bcmgenet_hfb_reg_writel(priv, 0,
+ HFB_FLT_ENABLE_V3PLUS + 4);
+ for (i = DMA_INDEX2RING_0; i <= DMA_INDEX2RING_7; i++)
+ bcmgenet_rdma_writel(priv, 0, i);
+ }
for (i = 0; i < priv->hw_params->hfb_filter_cnt; i++)
bcmgenet_hfb_clear_filter(priv, i);
+
+ /* Enable filter 0 to send default flow to ring 0 */
+ bcmgenet_hfb_set_filter_length(priv, 0, 4);
+ bcmgenet_hfb_enable_filter(priv, 0);
}
static void bcmgenet_hfb_init(struct bcmgenet_priv *priv)
@@ -722,9 +741,6 @@ static void bcmgenet_hfb_init(struct bcmgenet_priv *priv)
int i;
INIT_LIST_HEAD(&priv->rxnfc_list);
- if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
- return;
-
for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) {
INIT_LIST_HEAD(&priv->rxnfc_rules[i].list);
priv->rxnfc_rules[i].state = BCMGENET_RXNFC_STATE_UNUSED;
@@ -819,20 +835,16 @@ static int bcmgenet_get_coalesce(struct net_device *dev,
unsigned int i;
ec->tx_max_coalesced_frames =
- bcmgenet_tdma_ring_readl(priv, DESC_INDEX,
- DMA_MBUF_DONE_THRESH);
+ bcmgenet_tdma_ring_readl(priv, 0, DMA_MBUF_DONE_THRESH);
ec->rx_max_coalesced_frames =
- bcmgenet_rdma_ring_readl(priv, DESC_INDEX,
- DMA_MBUF_DONE_THRESH);
+ bcmgenet_rdma_ring_readl(priv, 0, DMA_MBUF_DONE_THRESH);
ec->rx_coalesce_usecs =
- bcmgenet_rdma_readl(priv, DMA_RING16_TIMEOUT) * 8192 / 1000;
+ bcmgenet_rdma_readl(priv, DMA_RING0_TIMEOUT) * 8192 / 1000;
- for (i = 0; i < priv->hw_params->rx_queues; i++) {
+ for (i = 0; i <= priv->hw_params->rx_queues; i++) {
ring = &priv->rx_rings[i];
ec->use_adaptive_rx_coalesce |= ring->dim.use_dim;
}
- ring = &priv->rx_rings[DESC_INDEX];
- ec->use_adaptive_rx_coalesce |= ring->dim.use_dim;
return 0;
}
@@ -902,17 +914,13 @@ static int bcmgenet_set_coalesce(struct net_device *dev,
/* Program all TX queues with the same values, as there is no
* ethtool knob to do coalescing on a per-queue basis
*/
- for (i = 0; i < priv->hw_params->tx_queues; i++)
+ for (i = 0; i <= priv->hw_params->tx_queues; i++)
bcmgenet_tdma_ring_writel(priv, i,
ec->tx_max_coalesced_frames,
DMA_MBUF_DONE_THRESH);
- bcmgenet_tdma_ring_writel(priv, DESC_INDEX,
- ec->tx_max_coalesced_frames,
- DMA_MBUF_DONE_THRESH);
- for (i = 0; i < priv->hw_params->rx_queues; i++)
+ for (i = 0; i <= priv->hw_params->rx_queues; i++)
bcmgenet_set_ring_rx_coalesce(&priv->rx_rings[i], ec);
- bcmgenet_set_ring_rx_coalesce(&priv->rx_rings[DESC_INDEX], ec);
return 0;
}
@@ -961,12 +969,13 @@ static int bcmgenet_set_pauseparam(struct net_device *dev,
/* standard ethtool support functions. */
enum bcmgenet_stat_type {
- BCMGENET_STAT_NETDEV = -1,
+ BCMGENET_STAT_RTNL = -1,
BCMGENET_STAT_MIB_RX,
BCMGENET_STAT_MIB_TX,
BCMGENET_STAT_RUNT,
BCMGENET_STAT_MISC,
BCMGENET_STAT_SOFT,
+ BCMGENET_STAT_SOFT64,
};
struct bcmgenet_stats {
@@ -976,13 +985,15 @@ struct bcmgenet_stats {
enum bcmgenet_stat_type type;
/* reg offset from UMAC base for misc counters */
u16 reg_offset;
+ /* sync for u64 stats counters */
+ int syncp_offset;
};
-#define STAT_NETDEV(m) { \
+#define STAT_RTNL(m) { \
.stat_string = __stringify(m), \
- .stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \
- .stat_offset = offsetof(struct net_device_stats, m), \
- .type = BCMGENET_STAT_NETDEV, \
+ .stat_sizeof = sizeof(((struct rtnl_link_stats64 *)0)->m), \
+ .stat_offset = offsetof(struct rtnl_link_stats64, m), \
+ .type = BCMGENET_STAT_RTNL, \
}
#define STAT_GENET_MIB(str, m, _type) { \
@@ -992,6 +1003,14 @@ struct bcmgenet_stats {
.type = _type, \
}
+#define STAT_GENET_SOFT_MIB64(str, s, m) { \
+ .stat_string = str, \
+ .stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->s.m), \
+ .stat_offset = offsetof(struct bcmgenet_priv, s.m), \
+ .type = BCMGENET_STAT_SOFT64, \
+ .syncp_offset = offsetof(struct bcmgenet_priv, s.syncp), \
+}
+
#define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX)
#define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX)
#define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT)
@@ -1006,18 +1025,38 @@ struct bcmgenet_stats {
}
#define STAT_GENET_Q(num) \
- STAT_GENET_SOFT_MIB("txq" __stringify(num) "_packets", \
- tx_rings[num].packets), \
- STAT_GENET_SOFT_MIB("txq" __stringify(num) "_bytes", \
- tx_rings[num].bytes), \
- STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_bytes", \
- rx_rings[num].bytes), \
- STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_packets", \
- rx_rings[num].packets), \
- STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_errors", \
- rx_rings[num].errors), \
- STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_dropped", \
- rx_rings[num].dropped)
+ STAT_GENET_SOFT_MIB64("txq" __stringify(num) "_packets", \
+ tx_rings[num].stats64, packets), \
+ STAT_GENET_SOFT_MIB64("txq" __stringify(num) "_bytes", \
+ tx_rings[num].stats64, bytes), \
+ STAT_GENET_SOFT_MIB64("txq" __stringify(num) "_errors", \
+ tx_rings[num].stats64, errors), \
+ STAT_GENET_SOFT_MIB64("txq" __stringify(num) "_dropped", \
+ tx_rings[num].stats64, dropped), \
+ STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_bytes", \
+ rx_rings[num].stats64, bytes), \
+ STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_packets", \
+ rx_rings[num].stats64, packets), \
+ STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_errors", \
+ rx_rings[num].stats64, errors), \
+ STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_dropped", \
+ rx_rings[num].stats64, dropped), \
+ STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_multicast", \
+ rx_rings[num].stats64, multicast), \
+ STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_missed", \
+ rx_rings[num].stats64, missed), \
+ STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_length_errors", \
+ rx_rings[num].stats64, length_errors), \
+ STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_over_errors", \
+ rx_rings[num].stats64, over_errors), \
+ STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_crc_errors", \
+ rx_rings[num].stats64, crc_errors), \
+ STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_frame_errors", \
+ rx_rings[num].stats64, frame_errors), \
+ STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_fragmented_errors", \
+ rx_rings[num].stats64, fragmented_errors), \
+ STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_broadcast", \
+ rx_rings[num].stats64, broadcast)
/* There is a 0xC gap between the end of RX and beginning of TX stats and then
* between the end of TX stats and the beginning of the RX RUNT
@@ -1029,15 +1068,20 @@ struct bcmgenet_stats {
*/
static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
/* general stats */
- STAT_NETDEV(rx_packets),
- STAT_NETDEV(tx_packets),
- STAT_NETDEV(rx_bytes),
- STAT_NETDEV(tx_bytes),
- STAT_NETDEV(rx_errors),
- STAT_NETDEV(tx_errors),
- STAT_NETDEV(rx_dropped),
- STAT_NETDEV(tx_dropped),
- STAT_NETDEV(multicast),
+ STAT_RTNL(rx_packets),
+ STAT_RTNL(tx_packets),
+ STAT_RTNL(rx_bytes),
+ STAT_RTNL(tx_bytes),
+ STAT_RTNL(rx_errors),
+ STAT_RTNL(tx_errors),
+ STAT_RTNL(rx_dropped),
+ STAT_RTNL(tx_dropped),
+ STAT_RTNL(multicast),
+ STAT_RTNL(rx_missed_errors),
+ STAT_RTNL(rx_length_errors),
+ STAT_RTNL(rx_over_errors),
+ STAT_RTNL(rx_crc_errors),
+ STAT_RTNL(rx_frame_errors),
/* UniMAC RSV counters */
STAT_GENET_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
STAT_GENET_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
@@ -1120,11 +1164,25 @@ static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
STAT_GENET_Q(1),
STAT_GENET_Q(2),
STAT_GENET_Q(3),
- STAT_GENET_Q(16),
+ STAT_GENET_Q(4),
};
#define BCMGENET_STATS_LEN ARRAY_SIZE(bcmgenet_gstrings_stats)
+#define BCMGENET_STATS64_ADD(stats, m, v) \
+ do { \
+ u64_stats_update_begin(&stats->syncp); \
+ u64_stats_add(&stats->m, v); \
+ u64_stats_update_end(&stats->syncp); \
+ } while (0)
+
+#define BCMGENET_STATS64_INC(stats, m) \
+ do { \
+ u64_stats_update_begin(&stats->syncp); \
+ u64_stats_inc(&stats->m); \
+ u64_stats_update_end(&stats->syncp); \
+ } while (0)
+
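A minimal sketch of how these writer-side macros pair with the reader loop used later in the patch, assuming the bcmgenet_rx_stats64 layout this series introduces (a u64_stats_sync named syncp plus u64_stats_t counters); the helper names are hypothetical:

	static void example_rx_account(struct bcmgenet_rx_ring *ring,
				       unsigned int len)
	{
		struct bcmgenet_rx_stats64 *stats = &ring->stats64;

		/* Writer side: hot path, serialized per ring */
		BCMGENET_STATS64_INC(stats, packets);
		BCMGENET_STATS64_ADD(stats, bytes, len);
	}

	static u64 example_read_rx_bytes(struct bcmgenet_rx_ring *ring)
	{
		struct bcmgenet_rx_stats64 *stats = &ring->stats64;
		unsigned int start;
		u64 bytes;

		/* Reader side: retry the snapshot if an update raced with it */
		do {
			start = u64_stats_fetch_begin(&stats->syncp);
			bytes = u64_stats_read(&stats->bytes);
		} while (u64_stats_fetch_retry(&stats->syncp, start));

		return bytes;
	}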
static void bcmgenet_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
@@ -1208,8 +1266,9 @@ static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
s = &bcmgenet_gstrings_stats[i];
switch (s->type) {
- case BCMGENET_STAT_NETDEV:
+ case BCMGENET_STAT_RTNL:
case BCMGENET_STAT_SOFT:
+ case BCMGENET_STAT_SOFT64:
continue;
case BCMGENET_STAT_RUNT:
offset += BCMGENET_STAT_OFFSET;
@@ -1247,28 +1306,40 @@ static void bcmgenet_get_ethtool_stats(struct net_device *dev,
u64 *data)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct rtnl_link_stats64 stats64;
+ struct u64_stats_sync *syncp;
+ unsigned int start;
int i;
if (netif_running(dev))
bcmgenet_update_mib_counters(priv);
- dev->netdev_ops->ndo_get_stats(dev);
+ dev_get_stats(dev, &stats64);
for (i = 0; i < BCMGENET_STATS_LEN; i++) {
const struct bcmgenet_stats *s;
char *p;
s = &bcmgenet_gstrings_stats[i];
- if (s->type == BCMGENET_STAT_NETDEV)
- p = (char *)&dev->stats;
- else
- p = (char *)priv;
- p += s->stat_offset;
- if (sizeof(unsigned long) != sizeof(u32) &&
- s->stat_sizeof == sizeof(unsigned long))
- data[i] = *(unsigned long *)p;
- else
- data[i] = *(u32 *)p;
+ p = (char *)priv;
+
+ if (s->type == BCMGENET_STAT_SOFT64) {
+ syncp = (struct u64_stats_sync *)(p + s->syncp_offset);
+ do {
+ start = u64_stats_fetch_begin(syncp);
+ data[i] = u64_stats_read((u64_stats_t *)(p + s->stat_offset));
+ } while (u64_stats_fetch_retry(syncp, start));
+ } else {
+ if (s->type == BCMGENET_STAT_RTNL)
+ p = (char *)&stats64;
+
+ p += s->stat_offset;
+ if (sizeof(unsigned long) != sizeof(u32) &&
+ s->stat_sizeof == sizeof(unsigned long))
+ data[i] = *(unsigned long *)p;
+ else
+ data[i] = *(u32 *)p;
+ }
}
}
@@ -1438,7 +1509,8 @@ static int bcmgenet_insert_flow(struct net_device *dev,
}
if (cmd->fs.ring_cookie > priv->hw_params->rx_queues &&
- cmd->fs.ring_cookie != RX_CLS_FLOW_WAKE) {
+ cmd->fs.ring_cookie != RX_CLS_FLOW_WAKE &&
+ cmd->fs.ring_cookie != RX_CLS_FLOW_DISC) {
netdev_err(dev, "rxnfc: Unsupported action (%llu)\n",
cmd->fs.ring_cookie);
return -EINVAL;
@@ -1472,10 +1544,10 @@ static int bcmgenet_insert_flow(struct net_device *dev,
loc_rule = &priv->rxnfc_rules[cmd->fs.location];
}
if (loc_rule->state == BCMGENET_RXNFC_STATE_ENABLED)
- bcmgenet_hfb_disable_filter(priv, cmd->fs.location);
+ bcmgenet_hfb_disable_filter(priv, cmd->fs.location + 1);
if (loc_rule->state != BCMGENET_RXNFC_STATE_UNUSED) {
list_del(&loc_rule->list);
- bcmgenet_hfb_clear_filter(priv, cmd->fs.location);
+ bcmgenet_hfb_clear_filter(priv, cmd->fs.location + 1);
}
loc_rule->state = BCMGENET_RXNFC_STATE_UNUSED;
memcpy(&loc_rule->fs, &cmd->fs,
@@ -1505,10 +1577,10 @@ static int bcmgenet_delete_flow(struct net_device *dev,
}
if (rule->state == BCMGENET_RXNFC_STATE_ENABLED)
- bcmgenet_hfb_disable_filter(priv, cmd->fs.location);
+ bcmgenet_hfb_disable_filter(priv, cmd->fs.location + 1);
if (rule->state != BCMGENET_RXNFC_STATE_UNUSED) {
list_del(&rule->list);
- bcmgenet_hfb_clear_filter(priv, cmd->fs.location);
+ bcmgenet_hfb_clear_filter(priv, cmd->fs.location + 1);
}
rule->state = BCMGENET_RXNFC_STATE_UNUSED;
memset(&rule->fs, 0, sizeof(struct ethtool_rx_flow_spec));
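With HFB filter 0 now reserved for the default flow (enabled in bcmgenet_hfb_reset_filters() earlier in this diff), every rxnfc rule location is shifted by one when it reaches the hardware, which is why both hunks above add "+ 1". A hypothetical helper just to make the offset explicit:

	static inline unsigned int example_rxnfc_loc_to_hfb_filter(u32 location)
	{
		/* rxnfc location N occupies HFB filter N + 1;
		 * filter 0 steers unmatched traffic to ring 0.
		 */
		return location + 1;
	}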
@@ -1651,9 +1723,9 @@ static int bcmgenet_power_down(struct bcmgenet_priv *priv,
case GENET_POWER_PASSIVE:
/* Power down LED */
- if (priv->hw_params->flags & GENET_HAS_EXT) {
+ if (bcmgenet_has_ext(priv)) {
reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
- if (GENET_IS_V5(priv) && !priv->ephy_16nm)
+ if (GENET_IS_V5(priv) && !bcmgenet_has_ephy_16nm(priv))
reg |= EXT_PWR_DOWN_PHY_EN |
EXT_PWR_DOWN_PHY_RD |
EXT_PWR_DOWN_PHY_SD |
@@ -1676,13 +1748,14 @@ static int bcmgenet_power_down(struct bcmgenet_priv *priv,
return ret;
}
-static void bcmgenet_power_up(struct bcmgenet_priv *priv,
- enum bcmgenet_power_mode mode)
+static int bcmgenet_power_up(struct bcmgenet_priv *priv,
+ enum bcmgenet_power_mode mode)
{
+ int ret = 0;
u32 reg;
- if (!(priv->hw_params->flags & GENET_HAS_EXT))
- return;
+ if (!bcmgenet_has_ext(priv))
+ return ret;
reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
@@ -1690,7 +1763,7 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv,
case GENET_POWER_PASSIVE:
reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS |
EXT_ENERGY_DET_MASK);
- if (GENET_IS_V5(priv) && !priv->ephy_16nm) {
+ if (GENET_IS_V5(priv) && !bcmgenet_has_ephy_16nm(priv)) {
reg &= ~(EXT_PWR_DOWN_PHY_EN |
EXT_PWR_DOWN_PHY_RD |
EXT_PWR_DOWN_PHY_SD |
@@ -1718,11 +1791,13 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv,
}
break;
case GENET_POWER_WOL_MAGIC:
- bcmgenet_wol_power_up_cfg(priv, mode);
- return;
+ ret = bcmgenet_wol_power_up_cfg(priv, mode);
+ break;
default:
break;
}
+
+ return ret;
}
static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
@@ -1759,18 +1834,6 @@ static struct enet_cb *bcmgenet_put_txcb(struct bcmgenet_priv *priv,
return tx_cb_ptr;
}
-static inline void bcmgenet_rx_ring16_int_disable(struct bcmgenet_rx_ring *ring)
-{
- bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
- INTRL2_CPU_MASK_SET);
-}
-
-static inline void bcmgenet_rx_ring16_int_enable(struct bcmgenet_rx_ring *ring)
-{
- bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
- INTRL2_CPU_MASK_CLEAR);
-}
-
static inline void bcmgenet_rx_ring_int_disable(struct bcmgenet_rx_ring *ring)
{
bcmgenet_intrl2_1_writel(ring->priv,
@@ -1785,18 +1848,6 @@ static inline void bcmgenet_rx_ring_int_enable(struct bcmgenet_rx_ring *ring)
INTRL2_CPU_MASK_CLEAR);
}
-static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_tx_ring *ring)
-{
- bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
- INTRL2_CPU_MASK_SET);
-}
-
-static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_tx_ring *ring)
-{
- bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
- INTRL2_CPU_MASK_CLEAR);
-}
-
static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_tx_ring *ring)
{
bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
@@ -1868,6 +1919,7 @@ static struct sk_buff *bcmgenet_free_rx_cb(struct device *dev,
static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
struct bcmgenet_tx_ring *ring)
{
+ struct bcmgenet_tx_stats64 *stats = &ring->stats64;
struct bcmgenet_priv *priv = netdev_priv(dev);
unsigned int txbds_processed = 0;
unsigned int bytes_compl = 0;
@@ -1877,12 +1929,7 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
struct sk_buff *skb;
/* Clear status before servicing to reduce spurious interrupts */
- if (ring->index == DESC_INDEX)
- bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_TXDMA_DONE,
- INTRL2_CPU_CLEAR);
- else
- bcmgenet_intrl2_1_writel(priv, (1 << ring->index),
- INTRL2_CPU_CLEAR);
+ bcmgenet_intrl2_1_writel(priv, (1 << ring->index), INTRL2_CPU_CLEAR);
/* Compute how many buffers are transmitted since last xmit call */
c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX)
@@ -1913,22 +1960,51 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
ring->free_bds += txbds_processed;
ring->c_index = c_index;
- ring->packets += pkts_compl;
- ring->bytes += bytes_compl;
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_add(&stats->packets, pkts_compl);
+ u64_stats_add(&stats->bytes, bytes_compl);
+ u64_stats_update_end(&stats->syncp);
- netdev_tx_completed_queue(netdev_get_tx_queue(dev, ring->queue),
+ netdev_tx_completed_queue(netdev_get_tx_queue(dev, ring->index),
pkts_compl, bytes_compl);
return txbds_processed;
}
static unsigned int bcmgenet_tx_reclaim(struct net_device *dev,
- struct bcmgenet_tx_ring *ring)
+ struct bcmgenet_tx_ring *ring,
+ bool all)
{
- unsigned int released;
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct device *kdev = &priv->pdev->dev;
+ unsigned int released, drop, wr_ptr;
+ struct enet_cb *cb_ptr;
+ struct sk_buff *skb;
spin_lock_bh(&ring->lock);
released = __bcmgenet_tx_reclaim(dev, ring);
+ if (all) {
+ skb = NULL;
+ drop = (ring->prod_index - ring->c_index) & DMA_C_INDEX_MASK;
+ released += drop;
+ ring->prod_index = ring->c_index & DMA_C_INDEX_MASK;
+ while (drop--) {
+ cb_ptr = bcmgenet_put_txcb(priv, ring);
+ skb = cb_ptr->skb;
+ bcmgenet_free_tx_cb(kdev, cb_ptr);
+ if (skb && cb_ptr == GENET_CB(skb)->first_cb) {
+ dev_consume_skb_any(skb);
+ skb = NULL;
+ }
+ }
+ if (skb)
+ dev_consume_skb_any(skb);
+ bcmgenet_tdma_ring_writel(priv, ring->index,
+ ring->prod_index, TDMA_PROD_INDEX);
+ wr_ptr = ring->write_ptr * WORDS_PER_BD(priv);
+ bcmgenet_tdma_ring_writel(priv, ring->index, wr_ptr,
+ TDMA_WRITE_PTR);
+ }
spin_unlock_bh(&ring->lock);
return released;
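The drop count computed above relies on 16-bit modular arithmetic so it stays correct across index wraparound; a worked example, assuming DMA_C_INDEX_MASK is 0xffff as defined in bcmgenet.h:

	/* Producer index wrapped past the consumer index:
	 *   prod_index = 0x0003, c_index = 0xfffe
	 *   drop = (0x0003 - 0xfffe) & 0xffff = 0x0005
	 * i.e. five descriptors were still in flight and get reclaimed.
	 */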
@@ -1944,14 +2020,14 @@ static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
spin_lock(&ring->lock);
work_done = __bcmgenet_tx_reclaim(ring->priv->dev, ring);
if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
- txq = netdev_get_tx_queue(ring->priv->dev, ring->queue);
+ txq = netdev_get_tx_queue(ring->priv->dev, ring->index);
netif_tx_wake_queue(txq);
}
spin_unlock(&ring->lock);
if (work_done == 0) {
napi_complete(napi);
- ring->int_enable(ring);
+ bcmgenet_tx_ring_int_enable(ring);
return 0;
}
@@ -1962,22 +2038,21 @@ static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
static void bcmgenet_tx_reclaim_all(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- int i;
-
- if (netif_is_multiqueue(dev)) {
- for (i = 0; i < priv->hw_params->tx_queues; i++)
- bcmgenet_tx_reclaim(dev, &priv->tx_rings[i]);
- }
+ int i = 0;
- bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]);
+ do {
+ bcmgenet_tx_reclaim(dev, &priv->tx_rings[i++], true);
+ } while (i <= priv->hw_params->tx_queues && netif_is_multiqueue(dev));
}
/* Reallocate the SKB to put enough headroom in front of it and insert
* the transmit checksum offsets in the descriptors
*/
static struct sk_buff *bcmgenet_add_tsb(struct net_device *dev,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ struct bcmgenet_tx_ring *ring)
{
+ struct bcmgenet_tx_stats64 *stats = &ring->stats64;
struct bcmgenet_priv *priv = netdev_priv(dev);
struct status_64 *status = NULL;
struct sk_buff *new_skb;
@@ -1994,7 +2069,7 @@ static struct sk_buff *bcmgenet_add_tsb(struct net_device *dev,
if (!new_skb) {
dev_kfree_skb_any(skb);
priv->mib.tx_realloc_tsb_failed++;
- dev->stats.tx_dropped++;
+ BCMGENET_STATS64_INC(stats, dropped);
return NULL;
}
dev_consume_skb_any(skb);
@@ -2057,19 +2132,14 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
index = skb_get_queue_mapping(skb);
/* Mapping strategy:
- * queue_mapping = 0, unclassified, packet xmited through ring16
- * queue_mapping = 1, goes to ring 0. (highest priority queue
- * queue_mapping = 2, goes to ring 1.
- * queue_mapping = 3, goes to ring 2.
- * queue_mapping = 4, goes to ring 3.
+ * queue_mapping = 0, unclassified, packet xmited through ring 0
+ * queue_mapping = 1, goes to ring 1. (highest priority queue)
+ * queue_mapping = 2, goes to ring 2.
+ * queue_mapping = 3, goes to ring 3.
+ * queue_mapping = 4, goes to ring 4.
*/
- if (index == 0)
- index = DESC_INDEX;
- else
- index -= 1;
-
ring = &priv->tx_rings[index];
- txq = netdev_get_tx_queue(dev, ring->queue);
+ txq = netdev_get_tx_queue(dev, index);
nr_frags = skb_shinfo(skb)->nr_frags;
@@ -2087,7 +2157,7 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
GENET_CB(skb)->bytes_sent = skb->len;
/* add the Transmit Status Block */
- skb = bcmgenet_add_tsb(dev, skb);
+ skb = bcmgenet_add_tsb(dev, skb, ring);
if (!skb) {
ret = NETDEV_TX_OK;
goto out;
@@ -2229,6 +2299,7 @@ static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv,
static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
unsigned int budget)
{
+ struct bcmgenet_rx_stats64 *stats = &ring->stats64;
struct bcmgenet_priv *priv = ring->priv;
struct net_device *dev = priv->dev;
struct enet_cb *cb;
@@ -2242,15 +2313,8 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
unsigned int discards;
/* Clear status before servicing to reduce spurious interrupts */
- if (ring->index == DESC_INDEX) {
- bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_DONE,
- INTRL2_CPU_CLEAR);
- } else {
- mask = 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index);
- bcmgenet_intrl2_1_writel(priv,
- mask,
- INTRL2_CPU_CLEAR);
- }
+ mask = 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index);
+ bcmgenet_intrl2_1_writel(priv, mask, INTRL2_CPU_CLEAR);
p_index = bcmgenet_rdma_ring_readl(priv, ring->index, RDMA_PROD_INDEX);
@@ -2258,7 +2322,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
DMA_P_INDEX_DISCARD_CNT_MASK;
if (discards > ring->old_discards) {
discards = discards - ring->old_discards;
- ring->errors += discards;
+ BCMGENET_STATS64_ADD(stats, missed, discards);
ring->old_discards += discards;
/* Clear HW register when we reach 75% of maximum 0xFFFF */
@@ -2284,7 +2348,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
skb = bcmgenet_rx_refill(priv, cb);
if (unlikely(!skb)) {
- ring->dropped++;
+ BCMGENET_STATS64_INC(stats, dropped);
goto next;
}
@@ -2311,8 +2375,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
if (unlikely(len > RX_BUF_LENGTH)) {
netif_err(priv, rx_status, dev, "oversized packet\n");
- dev->stats.rx_length_errors++;
- dev->stats.rx_errors++;
+ BCMGENET_STATS64_INC(stats, length_errors);
dev_kfree_skb_any(skb);
goto next;
}
@@ -2320,7 +2383,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
netif_err(priv, rx_status, dev,
"dropping fragmented packet!\n");
- ring->errors++;
+ BCMGENET_STATS64_INC(stats, fragmented_errors);
dev_kfree_skb_any(skb);
goto next;
}
@@ -2333,15 +2396,22 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
DMA_RX_RXER))) {
netif_err(priv, rx_status, dev, "dma_flag=0x%x\n",
(unsigned int)dma_flag);
+ u64_stats_update_begin(&stats->syncp);
if (dma_flag & DMA_RX_CRC_ERROR)
- dev->stats.rx_crc_errors++;
+ u64_stats_inc(&stats->crc_errors);
if (dma_flag & DMA_RX_OV)
- dev->stats.rx_over_errors++;
+ u64_stats_inc(&stats->over_errors);
if (dma_flag & DMA_RX_NO)
- dev->stats.rx_frame_errors++;
+ u64_stats_inc(&stats->frame_errors);
if (dma_flag & DMA_RX_LG)
- dev->stats.rx_length_errors++;
- dev->stats.rx_errors++;
+ u64_stats_inc(&stats->length_errors);
+ if ((dma_flag & (DMA_RX_CRC_ERROR |
+ DMA_RX_OV |
+ DMA_RX_NO |
+ DMA_RX_LG |
+ DMA_RX_RXER)) == DMA_RX_RXER)
+ u64_stats_inc(&stats->errors);
+ u64_stats_update_end(&stats->syncp);
dev_kfree_skb_any(skb);
goto next;
} /* error packet */
@@ -2361,10 +2431,15 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
/*Finish setting up the received SKB and send it to the kernel*/
skb->protocol = eth_type_trans(skb, priv->dev);
- ring->packets++;
- ring->bytes += len;
+
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_inc(&stats->packets);
+ u64_stats_add(&stats->bytes, len);
if (dma_flag & DMA_RX_MULT)
- dev->stats.multicast++;
+ u64_stats_inc(&stats->multicast);
+ else if (dma_flag & DMA_RX_BRDCAST)
+ u64_stats_inc(&stats->broadcast);
+ u64_stats_update_end(&stats->syncp);
/* Notify kernel */
napi_gro_receive(&ring->napi, skb);
@@ -2399,7 +2474,7 @@ static int bcmgenet_rx_poll(struct napi_struct *napi, int budget)
if (work_done < budget) {
napi_complete_done(napi, work_done);
- ring->int_enable(ring);
+ bcmgenet_rx_ring_int_enable(ring);
}
if (ring->dim.use_dim) {
@@ -2523,7 +2598,7 @@ static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv)
} else if (priv->ext_phy) {
int0_enable |= UMAC_IRQ_LINK_EVENT;
} else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
- if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
+ if (bcmgenet_has_moca_link_det(priv))
int0_enable |= UMAC_IRQ_LINK_EVENT;
}
bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
@@ -2588,8 +2663,8 @@ static void init_umac(struct bcmgenet_priv *priv)
}
/* Enable MDIO interrupts on GENET v3+ */
- if (priv->hw_params->flags & GENET_HAS_MDIO_INTR)
- int0_enable |= (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
+ if (bcmgenet_has_mdio_intr(priv))
+ int0_enable |= UMAC_IRQ_MDIO_EVENT;
bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
@@ -2639,15 +2714,6 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
spin_lock_init(&ring->lock);
ring->priv = priv;
ring->index = index;
- if (index == DESC_INDEX) {
- ring->queue = 0;
- ring->int_enable = bcmgenet_tx_ring16_int_enable;
- ring->int_disable = bcmgenet_tx_ring16_int_disable;
- } else {
- ring->queue = index + 1;
- ring->int_enable = bcmgenet_tx_ring_int_enable;
- ring->int_disable = bcmgenet_tx_ring_int_disable;
- }
ring->cbs = priv->tx_cbs + start_ptr;
ring->size = size;
ring->clean_ptr = start_ptr;
@@ -2658,8 +2724,8 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
ring->end_ptr = end_ptr - 1;
ring->prod_index = 0;
- /* Set flow period for ring != 16 */
- if (index != DESC_INDEX)
+ /* Set flow period for ring != 0 */
+ if (index)
flow_period_val = ENET_MAX_MTU_SIZE << 16;
bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_PROD_INDEX);
@@ -2697,13 +2763,6 @@ static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv,
ring->priv = priv;
ring->index = index;
- if (index == DESC_INDEX) {
- ring->int_enable = bcmgenet_rx_ring16_int_enable;
- ring->int_disable = bcmgenet_rx_ring16_int_disable;
- } else {
- ring->int_enable = bcmgenet_rx_ring_int_enable;
- ring->int_disable = bcmgenet_rx_ring_int_disable;
- }
ring->cbs = priv->rx_cbs + start_ptr;
ring->size = size;
ring->c_index = 0;
@@ -2749,15 +2808,11 @@ static void bcmgenet_enable_tx_napi(struct bcmgenet_priv *priv)
unsigned int i;
struct bcmgenet_tx_ring *ring;
- for (i = 0; i < priv->hw_params->tx_queues; ++i) {
+ for (i = 0; i <= priv->hw_params->tx_queues; ++i) {
ring = &priv->tx_rings[i];
napi_enable(&ring->napi);
- ring->int_enable(ring);
+ bcmgenet_tx_ring_int_enable(ring);
}
-
- ring = &priv->tx_rings[DESC_INDEX];
- napi_enable(&ring->napi);
- ring->int_enable(ring);
}
static void bcmgenet_disable_tx_napi(struct bcmgenet_priv *priv)
@@ -2765,13 +2820,10 @@ static void bcmgenet_disable_tx_napi(struct bcmgenet_priv *priv)
unsigned int i;
struct bcmgenet_tx_ring *ring;
- for (i = 0; i < priv->hw_params->tx_queues; ++i) {
+ for (i = 0; i <= priv->hw_params->tx_queues; ++i) {
ring = &priv->tx_rings[i];
napi_disable(&ring->napi);
}
-
- ring = &priv->tx_rings[DESC_INDEX];
- napi_disable(&ring->napi);
}
static void bcmgenet_fini_tx_napi(struct bcmgenet_priv *priv)
@@ -2779,82 +2831,104 @@ static void bcmgenet_fini_tx_napi(struct bcmgenet_priv *priv)
unsigned int i;
struct bcmgenet_tx_ring *ring;
- for (i = 0; i < priv->hw_params->tx_queues; ++i) {
+ for (i = 0; i <= priv->hw_params->tx_queues; ++i) {
ring = &priv->tx_rings[i];
netif_napi_del(&ring->napi);
}
+}
- ring = &priv->tx_rings[DESC_INDEX];
- netif_napi_del(&ring->napi);
+static int bcmgenet_tdma_disable(struct bcmgenet_priv *priv)
+{
+ int timeout = 0;
+ u32 reg, mask;
+
+ reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
+ mask = (1 << (priv->hw_params->tx_queues + 1)) - 1;
+ mask = (mask << DMA_RING_BUF_EN_SHIFT) | DMA_EN;
+ reg &= ~mask;
+ bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
+
+ /* Check DMA status register to confirm DMA is disabled */
+ while (timeout++ < DMA_TIMEOUT_VAL) {
+ reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
+ if ((reg & mask) == mask)
+ return 0;
+
+ udelay(1);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int bcmgenet_rdma_disable(struct bcmgenet_priv *priv)
+{
+ int timeout = 0;
+ u32 reg, mask;
+
+ reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
+ mask = (1 << (priv->hw_params->rx_queues + 1)) - 1;
+ mask = (mask << DMA_RING_BUF_EN_SHIFT) | DMA_EN;
+ reg &= ~mask;
+ bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
+
+ /* Check DMA status register to confirm DMA is disabled */
+ while (timeout++ < DMA_TIMEOUT_VAL) {
+ reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
+ if ((reg & mask) == mask)
+ return 0;
+
+ udelay(1);
+ }
+
+ return -ETIMEDOUT;
}
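Both new helpers build the same per-ring mask before clearing it in DMA_CTRL; a worked example of the bit layout, assuming DMA_EN is bit 0 and DMA_RING_BUF_EN_SHIFT is 1 (the values in bcmgenet.h):

	/* tx_queues = 4 (rings 0-4):
	 *   mask = (1 << (4 + 1)) - 1      = 0x1f    ring-enable bits
	 *   mask = (mask << 1) | DMA_EN    = 0x3f    rings 0-4 plus DMA_EN
	 * DMA_STATUS is then polled until the same bits read back as set,
	 * i.e. until the hardware reports every ring and the engine halted.
	 */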
/* Initialize Tx queues
*
- * Queues 0-3 are priority-based, each one has 32 descriptors,
- * with queue 0 being the highest priority queue.
+ * Queues 1-4 are priority-based, each one has 32 descriptors,
+ * with queue 1 being the highest priority queue.
*
- * Queue 16 is the default Tx queue with
- * GENET_Q16_TX_BD_CNT = 256 - 4 * 32 = 128 descriptors.
+ * Queue 0 is the default Tx queue with
+ * GENET_Q0_TX_BD_CNT = 256 - 4 * 32 = 128 descriptors.
*
* The transmit control block pool is then partitioned as follows:
- * - Tx queue 0 uses tx_cbs[0..31]
- * - Tx queue 1 uses tx_cbs[32..63]
- * - Tx queue 2 uses tx_cbs[64..95]
- * - Tx queue 3 uses tx_cbs[96..127]
- * - Tx queue 16 uses tx_cbs[128..255]
+ * - Tx queue 0 uses tx_cbs[0..127]
+ * - Tx queue 1 uses tx_cbs[128..159]
+ * - Tx queue 2 uses tx_cbs[160..191]
+ * - Tx queue 3 uses tx_cbs[192..223]
+ * - Tx queue 4 uses tx_cbs[224..255]
*/
static void bcmgenet_init_tx_queues(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- u32 i, dma_enable;
- u32 dma_ctrl, ring_cfg;
- u32 dma_priority[3] = {0, 0, 0};
-
- dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL);
- dma_enable = dma_ctrl & DMA_EN;
- dma_ctrl &= ~DMA_EN;
- bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);
-
- dma_ctrl = 0;
- ring_cfg = 0;
+ unsigned int start = 0, end = GENET_Q0_TX_BD_CNT;
+ u32 i, ring_mask, dma_priority[3] = {0, 0, 0};
/* Enable strict priority arbiter mode */
bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL);
/* Initialize Tx priority queues */
- for (i = 0; i < priv->hw_params->tx_queues; i++) {
- bcmgenet_init_tx_ring(priv, i, priv->hw_params->tx_bds_per_q,
- i * priv->hw_params->tx_bds_per_q,
- (i + 1) * priv->hw_params->tx_bds_per_q);
- ring_cfg |= (1 << i);
- dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
+ for (i = 0; i <= priv->hw_params->tx_queues; i++) {
+ bcmgenet_init_tx_ring(priv, i, end - start, start, end);
+ start = end;
+ end += priv->hw_params->tx_bds_per_q;
dma_priority[DMA_PRIO_REG_INDEX(i)] |=
- ((GENET_Q0_PRIORITY + i) << DMA_PRIO_REG_SHIFT(i));
+ (i ? GENET_Q1_PRIORITY : GENET_Q0_PRIORITY)
+ << DMA_PRIO_REG_SHIFT(i);
}
- /* Initialize Tx default queue 16 */
- bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_Q16_TX_BD_CNT,
- priv->hw_params->tx_queues *
- priv->hw_params->tx_bds_per_q,
- TOTAL_DESC);
- ring_cfg |= (1 << DESC_INDEX);
- dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT));
- dma_priority[DMA_PRIO_REG_INDEX(DESC_INDEX)] |=
- ((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) <<
- DMA_PRIO_REG_SHIFT(DESC_INDEX));
-
/* Set Tx queue priorities */
bcmgenet_tdma_writel(priv, dma_priority[0], DMA_PRIORITY_0);
bcmgenet_tdma_writel(priv, dma_priority[1], DMA_PRIORITY_1);
bcmgenet_tdma_writel(priv, dma_priority[2], DMA_PRIORITY_2);
- /* Enable Tx queues */
- bcmgenet_tdma_writel(priv, ring_cfg, DMA_RING_CFG);
+ /* Configure Tx queues as descriptor rings */
+ ring_mask = (1 << (priv->hw_params->tx_queues + 1)) - 1;
+ bcmgenet_tdma_writel(priv, ring_mask, DMA_RING_CFG);
- /* Enable Tx DMA */
- if (dma_enable)
- dma_ctrl |= DMA_EN;
- bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);
+ /* Enable Tx rings */
+ ring_mask <<= DMA_RING_BUF_EN_SHIFT;
+ bcmgenet_tdma_writel(priv, ring_mask, DMA_CTRL);
}
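For reference, the descriptor partition produced by the start/end walk above, assuming GENET_Q0_TX_BD_CNT = 128 and tx_bds_per_q = 32 as the updated comment states:

	/* ring 0: tx_cbs[  0..127]  default queue, 128 BDs
	 * ring 1: tx_cbs[128..159]  32 BDs, highest priority
	 * ring 2: tx_cbs[160..191]
	 * ring 3: tx_cbs[192..223]
	 * ring 4: tx_cbs[224..255]
	 */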
static void bcmgenet_enable_rx_napi(struct bcmgenet_priv *priv)
@@ -2862,15 +2936,11 @@ static void bcmgenet_enable_rx_napi(struct bcmgenet_priv *priv)
unsigned int i;
struct bcmgenet_rx_ring *ring;
- for (i = 0; i < priv->hw_params->rx_queues; ++i) {
+ for (i = 0; i <= priv->hw_params->rx_queues; ++i) {
ring = &priv->rx_rings[i];
napi_enable(&ring->napi);
- ring->int_enable(ring);
+ bcmgenet_rx_ring_int_enable(ring);
}
-
- ring = &priv->rx_rings[DESC_INDEX];
- napi_enable(&ring->napi);
- ring->int_enable(ring);
}
static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv)
@@ -2878,15 +2948,11 @@ static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv)
unsigned int i;
struct bcmgenet_rx_ring *ring;
- for (i = 0; i < priv->hw_params->rx_queues; ++i) {
+ for (i = 0; i <= priv->hw_params->rx_queues; ++i) {
ring = &priv->rx_rings[i];
napi_disable(&ring->napi);
cancel_work_sync(&ring->dim.dim.work);
}
-
- ring = &priv->rx_rings[DESC_INDEX];
- napi_disable(&ring->napi);
- cancel_work_sync(&ring->dim.dim.work);
}
static void bcmgenet_fini_rx_napi(struct bcmgenet_priv *priv)
@@ -2894,13 +2960,10 @@ static void bcmgenet_fini_rx_napi(struct bcmgenet_priv *priv)
unsigned int i;
struct bcmgenet_rx_ring *ring;
- for (i = 0; i < priv->hw_params->rx_queues; ++i) {
+ for (i = 0; i <= priv->hw_params->rx_queues; ++i) {
ring = &priv->rx_rings[i];
netif_napi_del(&ring->napi);
}
-
- ring = &priv->rx_rings[DESC_INDEX];
- netif_napi_del(&ring->napi);
}
/* Initialize Rx queues
@@ -2908,57 +2971,32 @@ static void bcmgenet_fini_rx_napi(struct bcmgenet_priv *priv)
* Queues 0-15 are priority queues. Hardware Filtering Block (HFB) can be
* used to direct traffic to these queues.
*
- * Queue 16 is the default Rx queue with GENET_Q16_RX_BD_CNT descriptors.
+ * Queue 0 is also the default Rx queue with GENET_Q0_RX_BD_CNT descriptors.
*/
static int bcmgenet_init_rx_queues(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- u32 i;
- u32 dma_enable;
- u32 dma_ctrl;
- u32 ring_cfg;
+ unsigned int start = 0, end = GENET_Q0_RX_BD_CNT;
+ u32 i, ring_mask;
int ret;
- dma_ctrl = bcmgenet_rdma_readl(priv, DMA_CTRL);
- dma_enable = dma_ctrl & DMA_EN;
- dma_ctrl &= ~DMA_EN;
- bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL);
-
- dma_ctrl = 0;
- ring_cfg = 0;
-
/* Initialize Rx priority queues */
- for (i = 0; i < priv->hw_params->rx_queues; i++) {
- ret = bcmgenet_init_rx_ring(priv, i,
- priv->hw_params->rx_bds_per_q,
- i * priv->hw_params->rx_bds_per_q,
- (i + 1) *
- priv->hw_params->rx_bds_per_q);
+ for (i = 0; i <= priv->hw_params->rx_queues; i++) {
+ ret = bcmgenet_init_rx_ring(priv, i, end - start, start, end);
if (ret)
return ret;
- ring_cfg |= (1 << i);
- dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
+ start = end;
+ end += priv->hw_params->rx_bds_per_q;
}
- /* Initialize Rx default queue 16 */
- ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, GENET_Q16_RX_BD_CNT,
- priv->hw_params->rx_queues *
- priv->hw_params->rx_bds_per_q,
- TOTAL_DESC);
- if (ret)
- return ret;
-
- ring_cfg |= (1 << DESC_INDEX);
- dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT));
-
- /* Enable rings */
- bcmgenet_rdma_writel(priv, ring_cfg, DMA_RING_CFG);
+ /* Configure Rx queues as descriptor rings */
+ ring_mask = (1 << (priv->hw_params->rx_queues + 1)) - 1;
+ bcmgenet_rdma_writel(priv, ring_mask, DMA_RING_CFG);
- /* Configure ring as descriptor ring and re-enable DMA if enabled */
- if (dma_enable)
- dma_ctrl |= DMA_EN;
- bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL);
+ /* Enable Rx rings */
+ ring_mask <<= DMA_RING_BUF_EN_SHIFT;
+ bcmgenet_rdma_writel(priv, ring_mask, DMA_CTRL);
return 0;
}
@@ -2966,26 +3004,9 @@ static int bcmgenet_init_rx_queues(struct net_device *dev)
static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
{
int ret = 0;
- int timeout = 0;
- u32 reg;
- u32 dma_ctrl;
- int i;
/* Disable TDMA to stop add more frames in TX DMA */
- reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
- reg &= ~DMA_EN;
- bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
-
- /* Check TDMA status register to confirm TDMA is disabled */
- while (timeout++ < DMA_TIMEOUT_VAL) {
- reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
- if (reg & DMA_DISABLED)
- break;
-
- udelay(1);
- }
-
- if (timeout == DMA_TIMEOUT_VAL) {
+ if (-ETIMEDOUT == bcmgenet_tdma_disable(priv)) {
netdev_warn(priv->dev, "Timed out while disabling TX DMA\n");
ret = -ETIMEDOUT;
}
@@ -2994,39 +3015,11 @@ static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
usleep_range(10000, 20000);
/* Disable RDMA */
- reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
- reg &= ~DMA_EN;
- bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
-
- timeout = 0;
- /* Check RDMA status register to confirm RDMA is disabled */
- while (timeout++ < DMA_TIMEOUT_VAL) {
- reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
- if (reg & DMA_DISABLED)
- break;
-
- udelay(1);
- }
-
- if (timeout == DMA_TIMEOUT_VAL) {
+ if (-ETIMEDOUT == bcmgenet_rdma_disable(priv)) {
netdev_warn(priv->dev, "Timed out while disabling RX DMA\n");
ret = -ETIMEDOUT;
}
- dma_ctrl = 0;
- for (i = 0; i < priv->hw_params->rx_queues; i++)
- dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
- reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
- reg &= ~dma_ctrl;
- bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
-
- dma_ctrl = 0;
- for (i = 0; i < priv->hw_params->tx_queues; i++)
- dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
- reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
- reg &= ~dma_ctrl;
- bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
-
return ret;
}
@@ -3038,32 +3031,53 @@ static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
bcmgenet_fini_rx_napi(priv);
bcmgenet_fini_tx_napi(priv);
- for (i = 0; i < priv->num_tx_bds; i++)
- dev_kfree_skb(bcmgenet_free_tx_cb(&priv->pdev->dev,
- priv->tx_cbs + i));
-
- for (i = 0; i < priv->hw_params->tx_queues; i++) {
- txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[i].queue);
+ for (i = 0; i <= priv->hw_params->tx_queues; i++) {
+ txq = netdev_get_tx_queue(priv->dev, i);
netdev_tx_reset_queue(txq);
}
- txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[DESC_INDEX].queue);
- netdev_tx_reset_queue(txq);
-
bcmgenet_free_rx_buffers(priv);
kfree(priv->rx_cbs);
kfree(priv->tx_cbs);
}
/* init_edma: Initialize DMA control register */
-static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
+static int bcmgenet_init_dma(struct bcmgenet_priv *priv, bool flush_rx)
{
- int ret;
- unsigned int i;
struct enet_cb *cb;
+ unsigned int i;
+ int ret;
+ u32 reg;
netif_dbg(priv, hw, priv->dev, "%s\n", __func__);
+ /* Disable TX DMA */
+ ret = bcmgenet_tdma_disable(priv);
+ if (ret) {
+ netdev_err(priv->dev, "failed to halt Tx DMA\n");
+ return ret;
+ }
+
+ /* Disable RX DMA */
+ ret = bcmgenet_rdma_disable(priv);
+ if (ret) {
+ netdev_err(priv->dev, "failed to halt Rx DMA\n");
+ return ret;
+ }
+
+ /* Flush TX queues */
+ bcmgenet_umac_writel(priv, 1, UMAC_TX_FLUSH);
+ udelay(10);
+ bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH);
+
+ if (flush_rx) {
+ reg = bcmgenet_rbuf_ctrl_get(priv);
+ bcmgenet_rbuf_ctrl_set(priv, reg | BIT(0));
+ udelay(10);
+ bcmgenet_rbuf_ctrl_set(priv, reg);
+ udelay(10);
+ }
+
/* Initialize common Rx ring structures */
priv->rx_bds = priv->base + priv->hw_params->rdma_offset;
priv->num_rx_bds = TOTAL_DESC;
@@ -3113,6 +3127,15 @@ static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
/* Initialize Tx queues */
bcmgenet_init_tx_queues(priv->dev);
+ /* Enable RX/TX DMA */
+ reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
+ reg |= DMA_EN;
+ bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
+
+ reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
+ reg |= DMA_EN;
+ bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
+
return 0;
}
@@ -3142,7 +3165,7 @@ static void bcmgenet_irq_task(struct work_struct *work)
}
-/* bcmgenet_isr1: handle Rx and Tx priority queues */
+/* bcmgenet_isr1: handle Rx and Tx queues */
static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
{
struct bcmgenet_priv *priv = dev_id;
@@ -3161,7 +3184,7 @@ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
"%s: IRQ=0x%x\n", __func__, status);
/* Check Rx priority queue interrupts */
- for (index = 0; index < priv->hw_params->rx_queues; index++) {
+ for (index = 0; index <= priv->hw_params->rx_queues; index++) {
if (!(status & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index)))
continue;
@@ -3169,20 +3192,20 @@ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
rx_ring->dim.event_ctr++;
if (likely(napi_schedule_prep(&rx_ring->napi))) {
- rx_ring->int_disable(rx_ring);
+ bcmgenet_rx_ring_int_disable(rx_ring);
__napi_schedule_irqoff(&rx_ring->napi);
}
}
/* Check Tx priority queue interrupts */
- for (index = 0; index < priv->hw_params->tx_queues; index++) {
+ for (index = 0; index <= priv->hw_params->tx_queues; index++) {
if (!(status & BIT(index)))
continue;
tx_ring = &priv->tx_rings[index];
if (likely(napi_schedule_prep(&tx_ring->napi))) {
- tx_ring->int_disable(tx_ring);
+ bcmgenet_tx_ring_int_disable(tx_ring);
__napi_schedule_irqoff(&tx_ring->napi);
}
}
@@ -3190,12 +3213,10 @@ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
return IRQ_HANDLED;
}
-/* bcmgenet_isr0: handle Rx and Tx default queues + other stuff */
+/* bcmgenet_isr0: handle link, MDIO, wake and other non-ring events */
static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
{
struct bcmgenet_priv *priv = dev_id;
- struct bcmgenet_rx_ring *rx_ring;
- struct bcmgenet_tx_ring *tx_ring;
unsigned int status;
unsigned long flags;
@@ -3209,29 +3230,8 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
netif_dbg(priv, intr, priv->dev,
"IRQ=0x%x\n", status);
- if (status & UMAC_IRQ_RXDMA_DONE) {
- rx_ring = &priv->rx_rings[DESC_INDEX];
- rx_ring->dim.event_ctr++;
-
- if (likely(napi_schedule_prep(&rx_ring->napi))) {
- rx_ring->int_disable(rx_ring);
- __napi_schedule_irqoff(&rx_ring->napi);
- }
- }
-
- if (status & UMAC_IRQ_TXDMA_DONE) {
- tx_ring = &priv->tx_rings[DESC_INDEX];
-
- if (likely(napi_schedule_prep(&tx_ring->napi))) {
- tx_ring->int_disable(tx_ring);
- __napi_schedule_irqoff(&tx_ring->napi);
- }
- }
-
- if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
- status & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
+ if (bcmgenet_has_mdio_intr(priv) && status & UMAC_IRQ_MDIO_EVENT)
wake_up(&priv->wq);
- }
/* all other interested interrupts handled in bottom half */
status &= (UMAC_IRQ_LINK_EVENT | UMAC_IRQ_PHY_DET_R);
@@ -3285,56 +3285,6 @@ static void bcmgenet_get_hw_addr(struct bcmgenet_priv *priv,
put_unaligned_be16(addr_tmp, &addr[4]);
}
-/* Returns a reusable dma control register value */
-static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv, bool flush_rx)
-{
- unsigned int i;
- u32 reg;
- u32 dma_ctrl;
-
- /* disable DMA */
- dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
- for (i = 0; i < priv->hw_params->tx_queues; i++)
- dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
- reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
- reg &= ~dma_ctrl;
- bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
-
- dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
- for (i = 0; i < priv->hw_params->rx_queues; i++)
- dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
- reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
- reg &= ~dma_ctrl;
- bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
-
- bcmgenet_umac_writel(priv, 1, UMAC_TX_FLUSH);
- udelay(10);
- bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH);
-
- if (flush_rx) {
- reg = bcmgenet_rbuf_ctrl_get(priv);
- bcmgenet_rbuf_ctrl_set(priv, reg | BIT(0));
- udelay(10);
- bcmgenet_rbuf_ctrl_set(priv, reg);
- udelay(10);
- }
-
- return dma_ctrl;
-}
-
-static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl)
-{
- u32 reg;
-
- reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
- reg |= dma_ctrl;
- bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
-
- reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
- reg |= dma_ctrl;
- bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
-}
-
static void bcmgenet_netif_start(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
@@ -3358,7 +3308,6 @@ static void bcmgenet_netif_start(struct net_device *dev)
static int bcmgenet_open(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- unsigned long dma_ctrl;
int ret;
netif_dbg(priv, ifup, dev, "bcmgenet_open\n");
@@ -3384,22 +3333,16 @@ static int bcmgenet_open(struct net_device *dev)
bcmgenet_set_hw_addr(priv, dev->dev_addr);
- /* Disable RX/TX DMA and flush TX and RX queues */
- dma_ctrl = bcmgenet_dma_disable(priv, true);
+ /* HFB init */
+ bcmgenet_hfb_init(priv);
/* Reinitialize TDMA and RDMA and SW housekeeping */
- ret = bcmgenet_init_dma(priv);
+ ret = bcmgenet_init_dma(priv, true);
if (ret) {
netdev_err(dev, "failed to initialize DMA\n");
goto err_clk_disable;
}
- /* Always enable ring 16 - descriptor ring */
- bcmgenet_enable_dma(priv, dma_ctrl);
-
- /* HFB init */
- bcmgenet_hfb_init(priv);
-
ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED,
dev->name, priv);
if (ret < 0) {
@@ -3446,19 +3389,21 @@ static void bcmgenet_netif_stop(struct net_device *dev, bool stop_phy)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- bcmgenet_disable_tx_napi(priv);
netif_tx_disable(dev);
/* Disable MAC receive */
+ bcmgenet_hfb_reg_writel(priv, 0, HFB_CTRL);
umac_enable_set(priv, CMD_RX_EN, false);
+ if (stop_phy)
+ phy_stop(dev->phydev);
+
bcmgenet_dma_teardown(priv);
/* Disable MAC transmit. TX DMA disabled must be done before this */
umac_enable_set(priv, CMD_TX_EN, false);
- if (stop_phy)
- phy_stop(dev->phydev);
+ bcmgenet_disable_tx_napi(priv);
bcmgenet_disable_rx_napi(priv);
bcmgenet_intr_disable(priv);
@@ -3506,16 +3451,11 @@ static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring)
if (!netif_msg_tx_err(priv))
return;
- txq = netdev_get_tx_queue(priv->dev, ring->queue);
+ txq = netdev_get_tx_queue(priv->dev, ring->index);
spin_lock(&ring->lock);
- if (ring->index == DESC_INDEX) {
- intsts = ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
- intmsk = UMAC_IRQ_TXDMA_DONE | UMAC_IRQ_TXDMA_MBDONE;
- } else {
- intsts = ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
- intmsk = 1 << ring->index;
- }
+ intsts = ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
+ intmsk = 1 << ring->index;
c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
p_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_PROD_INDEX);
txq_stopped = netif_tx_queue_stopped(txq);
@@ -3529,7 +3469,7 @@ static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring)
"(sw)c_index: %d (hw)c_index: %d\n"
"(sw)clean_p: %d (sw)write_p: %d\n"
"(sw)cb_ptr: %d (sw)end_ptr: %d\n",
- ring->index, ring->queue,
+ ring->index, ring->index,
txq_stopped ? "stopped" : "active",
intsts & intmsk ? "enabled" : "disabled",
free_bds, ring->size,
@@ -3542,30 +3482,25 @@ static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring)
static void bcmgenet_timeout(struct net_device *dev, unsigned int txqueue)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- u32 int0_enable = 0;
u32 int1_enable = 0;
unsigned int q;
netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n");
- for (q = 0; q < priv->hw_params->tx_queues; q++)
+ for (q = 0; q <= priv->hw_params->tx_queues; q++)
bcmgenet_dump_tx_queue(&priv->tx_rings[q]);
- bcmgenet_dump_tx_queue(&priv->tx_rings[DESC_INDEX]);
bcmgenet_tx_reclaim_all(dev);
- for (q = 0; q < priv->hw_params->tx_queues; q++)
+ for (q = 0; q <= priv->hw_params->tx_queues; q++)
int1_enable |= (1 << q);
- int0_enable = UMAC_IRQ_TXDMA_DONE;
-
/* Re-enable TX interrupts if disabled */
- bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
netif_trans_update(dev);
- dev->stats.tx_errors++;
+ BCMGENET_STATS64_INC((&priv->tx_rings[txqueue].stats64), errors);
netif_tx_wake_all_queues(dev);
}
@@ -3654,47 +3589,72 @@ static int bcmgenet_set_mac_addr(struct net_device *dev, void *p)
return 0;
}
-static struct net_device_stats *bcmgenet_get_stats(struct net_device *dev)
+static void bcmgenet_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- unsigned long tx_bytes = 0, tx_packets = 0;
- unsigned long rx_bytes = 0, rx_packets = 0;
- unsigned long rx_errors = 0, rx_dropped = 0;
- struct bcmgenet_tx_ring *tx_ring;
- struct bcmgenet_rx_ring *rx_ring;
+ struct bcmgenet_tx_stats64 *tx_stats;
+ struct bcmgenet_rx_stats64 *rx_stats;
+ u64 rx_length_errors, rx_over_errors;
+ u64 rx_missed, rx_fragmented_errors;
+ u64 rx_crc_errors, rx_frame_errors;
+ u64 tx_errors, tx_dropped;
+ u64 rx_errors, rx_dropped;
+ u64 tx_bytes, tx_packets;
+ u64 rx_bytes, rx_packets;
+ unsigned int start;
unsigned int q;
-
- for (q = 0; q < priv->hw_params->tx_queues; q++) {
- tx_ring = &priv->tx_rings[q];
- tx_bytes += tx_ring->bytes;
- tx_packets += tx_ring->packets;
+ u64 multicast;
+
+ for (q = 0; q <= priv->hw_params->tx_queues; q++) {
+ tx_stats = &priv->tx_rings[q].stats64;
+ do {
+ start = u64_stats_fetch_begin(&tx_stats->syncp);
+ tx_bytes = u64_stats_read(&tx_stats->bytes);
+ tx_packets = u64_stats_read(&tx_stats->packets);
+ tx_errors = u64_stats_read(&tx_stats->errors);
+ tx_dropped = u64_stats_read(&tx_stats->dropped);
+ } while (u64_stats_fetch_retry(&tx_stats->syncp, start));
+
+ stats->tx_bytes += tx_bytes;
+ stats->tx_packets += tx_packets;
+ stats->tx_errors += tx_errors;
+ stats->tx_dropped += tx_dropped;
}
- tx_ring = &priv->tx_rings[DESC_INDEX];
- tx_bytes += tx_ring->bytes;
- tx_packets += tx_ring->packets;
-
- for (q = 0; q < priv->hw_params->rx_queues; q++) {
- rx_ring = &priv->rx_rings[q];
- rx_bytes += rx_ring->bytes;
- rx_packets += rx_ring->packets;
- rx_errors += rx_ring->errors;
- rx_dropped += rx_ring->dropped;
+ for (q = 0; q <= priv->hw_params->rx_queues; q++) {
+ rx_stats = &priv->rx_rings[q].stats64;
+ do {
+ start = u64_stats_fetch_begin(&rx_stats->syncp);
+ rx_bytes = u64_stats_read(&rx_stats->bytes);
+ rx_packets = u64_stats_read(&rx_stats->packets);
+ rx_errors = u64_stats_read(&rx_stats->errors);
+ rx_dropped = u64_stats_read(&rx_stats->dropped);
+ rx_missed = u64_stats_read(&rx_stats->missed);
+ rx_length_errors = u64_stats_read(&rx_stats->length_errors);
+ rx_over_errors = u64_stats_read(&rx_stats->over_errors);
+ rx_crc_errors = u64_stats_read(&rx_stats->crc_errors);
+ rx_frame_errors = u64_stats_read(&rx_stats->frame_errors);
+ rx_fragmented_errors = u64_stats_read(&rx_stats->fragmented_errors);
+ multicast = u64_stats_read(&rx_stats->multicast);
+ } while (u64_stats_fetch_retry(&rx_stats->syncp, start));
+
+ rx_errors += rx_length_errors;
+ rx_errors += rx_crc_errors;
+ rx_errors += rx_frame_errors;
+ rx_errors += rx_fragmented_errors;
+
+ stats->rx_bytes += rx_bytes;
+ stats->rx_packets += rx_packets;
+ stats->rx_errors += rx_errors;
+ stats->rx_dropped += rx_dropped;
+ stats->rx_missed_errors += rx_missed;
+ stats->rx_length_errors += rx_length_errors;
+ stats->rx_over_errors += rx_over_errors;
+ stats->rx_crc_errors += rx_crc_errors;
+ stats->rx_frame_errors += rx_frame_errors;
+ stats->multicast += multicast;
}
- rx_ring = &priv->rx_rings[DESC_INDEX];
- rx_bytes += rx_ring->bytes;
- rx_packets += rx_ring->packets;
- rx_errors += rx_ring->errors;
- rx_dropped += rx_ring->dropped;
-
- dev->stats.tx_bytes = tx_bytes;
- dev->stats.tx_packets = tx_packets;
- dev->stats.rx_bytes = rx_bytes;
- dev->stats.rx_packets = rx_packets;
- dev->stats.rx_errors = rx_errors;
- dev->stats.rx_missed_errors = rx_errors;
- dev->stats.rx_dropped = rx_dropped;
- return &dev->stats;
}
static int bcmgenet_change_carrier(struct net_device *dev, bool new_carrier)
@@ -3722,132 +3682,113 @@ static const struct net_device_ops bcmgenet_netdev_ops = {
.ndo_set_mac_address = bcmgenet_set_mac_addr,
.ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_set_features = bcmgenet_set_features,
- .ndo_get_stats = bcmgenet_get_stats,
+ .ndo_get_stats64 = bcmgenet_get_stats64,
.ndo_change_carrier = bcmgenet_change_carrier,
};
-/* Array of GENET hardware parameters/characteristics */
-static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
- [GENET_V1] = {
- .tx_queues = 0,
- .tx_bds_per_q = 0,
- .rx_queues = 0,
- .rx_bds_per_q = 0,
- .bp_in_en_shift = 16,
- .bp_in_mask = 0xffff,
- .hfb_filter_cnt = 16,
- .qtag_mask = 0x1F,
- .hfb_offset = 0x1000,
- .rdma_offset = 0x2000,
- .tdma_offset = 0x3000,
- .words_per_bd = 2,
- },
- [GENET_V2] = {
- .tx_queues = 4,
- .tx_bds_per_q = 32,
- .rx_queues = 0,
- .rx_bds_per_q = 0,
- .bp_in_en_shift = 16,
- .bp_in_mask = 0xffff,
- .hfb_filter_cnt = 16,
- .qtag_mask = 0x1F,
- .tbuf_offset = 0x0600,
- .hfb_offset = 0x1000,
- .hfb_reg_offset = 0x2000,
- .rdma_offset = 0x3000,
- .tdma_offset = 0x4000,
- .words_per_bd = 2,
- .flags = GENET_HAS_EXT,
- },
- [GENET_V3] = {
- .tx_queues = 4,
- .tx_bds_per_q = 32,
- .rx_queues = 0,
- .rx_bds_per_q = 0,
- .bp_in_en_shift = 17,
- .bp_in_mask = 0x1ffff,
- .hfb_filter_cnt = 48,
- .hfb_filter_size = 128,
- .qtag_mask = 0x3F,
- .tbuf_offset = 0x0600,
- .hfb_offset = 0x8000,
- .hfb_reg_offset = 0xfc00,
- .rdma_offset = 0x10000,
- .tdma_offset = 0x11000,
- .words_per_bd = 2,
- .flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR |
- GENET_HAS_MOCA_LINK_DET,
- },
- [GENET_V4] = {
- .tx_queues = 4,
- .tx_bds_per_q = 32,
- .rx_queues = 0,
- .rx_bds_per_q = 0,
- .bp_in_en_shift = 17,
- .bp_in_mask = 0x1ffff,
- .hfb_filter_cnt = 48,
- .hfb_filter_size = 128,
- .qtag_mask = 0x3F,
- .tbuf_offset = 0x0600,
- .hfb_offset = 0x8000,
- .hfb_reg_offset = 0xfc00,
- .rdma_offset = 0x2000,
- .tdma_offset = 0x4000,
- .words_per_bd = 3,
- .flags = GENET_HAS_40BITS | GENET_HAS_EXT |
- GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
- },
- [GENET_V5] = {
- .tx_queues = 4,
- .tx_bds_per_q = 32,
- .rx_queues = 0,
- .rx_bds_per_q = 0,
- .bp_in_en_shift = 17,
- .bp_in_mask = 0x1ffff,
- .hfb_filter_cnt = 48,
- .hfb_filter_size = 128,
- .qtag_mask = 0x3F,
- .tbuf_offset = 0x0600,
- .hfb_offset = 0x8000,
- .hfb_reg_offset = 0xfc00,
- .rdma_offset = 0x2000,
- .tdma_offset = 0x4000,
- .words_per_bd = 3,
- .flags = GENET_HAS_40BITS | GENET_HAS_EXT |
- GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
- },
+/* GENET hardware parameters/characteristics */
+static const struct bcmgenet_hw_params bcmgenet_hw_params_v1 = {
+ .tx_queues = 0,
+ .tx_bds_per_q = 0,
+ .rx_queues = 0,
+ .rx_bds_per_q = 0,
+ .bp_in_en_shift = 16,
+ .bp_in_mask = 0xffff,
+ .hfb_filter_cnt = 16,
+ .hfb_filter_size = 64,
+ .qtag_mask = 0x1F,
+ .hfb_offset = 0x1000,
+ .hfb_reg_offset = GENET_RBUF_OFF + RBUF_HFB_CTRL_V1,
+ .rdma_offset = 0x2000,
+ .tdma_offset = 0x3000,
+ .words_per_bd = 2,
+};
+
+static const struct bcmgenet_hw_params bcmgenet_hw_params_v2 = {
+ .tx_queues = 4,
+ .tx_bds_per_q = 32,
+ .rx_queues = 0,
+ .rx_bds_per_q = 0,
+ .bp_in_en_shift = 16,
+ .bp_in_mask = 0xffff,
+ .hfb_filter_cnt = 16,
+ .hfb_filter_size = 64,
+ .qtag_mask = 0x1F,
+ .tbuf_offset = 0x0600,
+ .hfb_offset = 0x1000,
+ .hfb_reg_offset = 0x2000,
+ .rdma_offset = 0x3000,
+ .tdma_offset = 0x4000,
+ .words_per_bd = 2,
+};
+
+static const struct bcmgenet_hw_params bcmgenet_hw_params_v3 = {
+ .tx_queues = 4,
+ .tx_bds_per_q = 32,
+ .rx_queues = 0,
+ .rx_bds_per_q = 0,
+ .bp_in_en_shift = 17,
+ .bp_in_mask = 0x1ffff,
+ .hfb_filter_cnt = 48,
+ .hfb_filter_size = 128,
+ .qtag_mask = 0x3F,
+ .tbuf_offset = 0x0600,
+ .hfb_offset = 0x8000,
+ .hfb_reg_offset = 0xfc00,
+ .rdma_offset = 0x10000,
+ .tdma_offset = 0x11000,
+ .words_per_bd = 2,
+};
+
+static const struct bcmgenet_hw_params bcmgenet_hw_params_v4 = {
+ .tx_queues = 4,
+ .tx_bds_per_q = 32,
+ .rx_queues = 0,
+ .rx_bds_per_q = 0,
+ .bp_in_en_shift = 17,
+ .bp_in_mask = 0x1ffff,
+ .hfb_filter_cnt = 48,
+ .hfb_filter_size = 128,
+ .qtag_mask = 0x3F,
+ .tbuf_offset = 0x0600,
+ .hfb_offset = 0x8000,
+ .hfb_reg_offset = 0xfc00,
+ .rdma_offset = 0x2000,
+ .tdma_offset = 0x4000,
+ .words_per_bd = 3,
};
/* Infer hardware parameters from the detected GENET version */
static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
{
- struct bcmgenet_hw_params *params;
+ const struct bcmgenet_hw_params *params;
u32 reg;
u8 major;
u16 gphy_rev;
- if (GENET_IS_V5(priv) || GENET_IS_V4(priv)) {
- bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
- genet_dma_ring_regs = genet_dma_ring_regs_v4;
- } else if (GENET_IS_V3(priv)) {
+ /* default to latest values */
+ params = &bcmgenet_hw_params_v4;
+ bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
+ genet_dma_ring_regs = genet_dma_ring_regs_v4;
+ if (GENET_IS_V3(priv)) {
+ params = &bcmgenet_hw_params_v3;
bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
genet_dma_ring_regs = genet_dma_ring_regs_v123;
} else if (GENET_IS_V2(priv)) {
+ params = &bcmgenet_hw_params_v2;
bcmgenet_dma_regs = bcmgenet_dma_regs_v2;
genet_dma_ring_regs = genet_dma_ring_regs_v123;
} else if (GENET_IS_V1(priv)) {
+ params = &bcmgenet_hw_params_v1;
bcmgenet_dma_regs = bcmgenet_dma_regs_v1;
genet_dma_ring_regs = genet_dma_ring_regs_v123;
}
-
- /* enum genet_version starts at 1 */
- priv->hw_params = &bcmgenet_hw_params[priv->version];
- params = priv->hw_params;
+ priv->hw_params = params;
/* Read GENET HW version */
reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL);
major = (reg >> 24 & 0x0f);
- if (major == 6)
+ if (major == 6 || major == 7)
major = 5;
else if (major == 5)
major = 4;
@@ -3898,7 +3839,7 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
}
#ifdef CONFIG_PHYS_ADDR_T_64BIT
- if (!(params->flags & GENET_HAS_40BITS))
+ if (!bcmgenet_has_40bits(priv))
pr_warn("GENET does not support 40-bits PA\n");
#endif
@@ -3923,7 +3864,7 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
struct bcmgenet_plat_data {
enum bcmgenet_version version;
u32 dma_max_burst_length;
- bool ephy_16nm;
+ u32 flags;
};
static const struct bcmgenet_plat_data v1_plat_data = {
@@ -3934,32 +3875,43 @@ static const struct bcmgenet_plat_data v1_plat_data = {
static const struct bcmgenet_plat_data v2_plat_data = {
.version = GENET_V2,
.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
+ .flags = GENET_HAS_EXT,
};
static const struct bcmgenet_plat_data v3_plat_data = {
.version = GENET_V3,
.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
+ .flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR |
+ GENET_HAS_MOCA_LINK_DET,
};
static const struct bcmgenet_plat_data v4_plat_data = {
.version = GENET_V4,
.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
+ .flags = GENET_HAS_40BITS | GENET_HAS_EXT |
+ GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
};
static const struct bcmgenet_plat_data v5_plat_data = {
.version = GENET_V5,
.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
+ .flags = GENET_HAS_40BITS | GENET_HAS_EXT |
+ GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
};
static const struct bcmgenet_plat_data bcm2711_plat_data = {
.version = GENET_V5,
.dma_max_burst_length = 0x08,
+ .flags = GENET_HAS_40BITS | GENET_HAS_EXT |
+ GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
};
static const struct bcmgenet_plat_data bcm7712_plat_data = {
.version = GENET_V5,
.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
- .ephy_16nm = true,
+ .flags = GENET_HAS_40BITS | GENET_HAS_EXT |
+ GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET |
+ GENET_HAS_EPHY_16NM,
};
static const struct of_device_id bcmgenet_match[] = {
@@ -4057,7 +4009,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
if (pdata) {
priv->version = pdata->version;
priv->dma_max_burst_length = pdata->dma_max_burst_length;
- priv->ephy_16nm = pdata->ephy_16nm;
+ priv->flags = pdata->flags;
} else {
priv->version = pd->genet_version;
priv->dma_max_burst_length = DMA_MAX_BURST_LENGTH;
@@ -4077,7 +4029,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
bcmgenet_set_hw_params(priv);
err = -EIO;
- if (priv->hw_params->flags & GENET_HAS_40BITS)
+ if (bcmgenet_has_40bits(priv))
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
if (err)
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
@@ -4132,16 +4084,13 @@ static int bcmgenet_probe(struct platform_device *pdev)
if (err)
goto err_clk_disable;
- /* setup number of real queues + 1 (GENET_V1 has 0 hardware queues
- * just the ring 16 descriptor based TX
- */
+ /* set up the number of real queues + 1 */
netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1);
netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1);
/* Set default coalescing parameters */
- for (i = 0; i < priv->hw_params->rx_queues; i++)
+ for (i = 0; i <= priv->hw_params->rx_queues; i++)
priv->rx_rings[i].rx_max_coalesced_frames = 1;
- priv->rx_rings[DESC_INDEX].rx_max_coalesced_frames = 1;
/* libphy will determine the link state */
netif_carrier_off(dev);
@@ -4205,9 +4154,22 @@ static int bcmgenet_resume_noirq(struct device *d)
reg = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT);
if (reg & UMAC_IRQ_WAKE_EVENT)
pm_wakeup_event(&priv->pdev->dev, 0);
+
+ /* From WOL-enabled suspend, switch to regular clock */
+ if (!bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC))
+ return 0;
+
+ /* Failed so fall through to reset MAC */
}
- bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_WAKE_EVENT, INTRL2_CPU_CLEAR);
+ /* If this is an internal GPHY, power it back on now, before UniMAC is
+ * brought out of reset as absolutely no UniMAC activity is allowed
+ */
+ if (priv->internal_phy)
+ bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
+
+ /* take MAC out of reset */
+ bcmgenet_umac_reset(priv);
return 0;
}
@@ -4217,23 +4179,46 @@ static int bcmgenet_resume(struct device *d)
struct net_device *dev = dev_get_drvdata(d);
struct bcmgenet_priv *priv = netdev_priv(dev);
struct bcmgenet_rxnfc_rule *rule;
- unsigned long dma_ctrl;
int ret;
+ u32 reg;
if (!netif_running(dev))
return 0;
- /* From WOL-enabled suspend, switch to regular clock */
- if (device_may_wakeup(d) && priv->wolopts)
- bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);
-
- /* If this is an internal GPHY, power it back on now, before UniMAC is
- * brought out of reset as absolutely no UniMAC activity is allowed
- */
- if (priv->internal_phy)
- bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
-
- bcmgenet_umac_reset(priv);
+ if (device_may_wakeup(d) && priv->wolopts) {
+ reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+ if (reg & CMD_RX_EN) {
+ /* Successfully exited WoL, just resume data flows */
+ list_for_each_entry(rule, &priv->rxnfc_list, list)
+ if (rule->state == BCMGENET_RXNFC_STATE_ENABLED)
+ bcmgenet_hfb_enable_filter(priv,
+ rule->fs.location + 1);
+ bcmgenet_hfb_enable_filter(priv, 0);
+ bcmgenet_set_rx_mode(dev);
+ bcmgenet_enable_rx_napi(priv);
+
+ /* Reinitialize Tx flows */
+ bcmgenet_tdma_disable(priv);
+ bcmgenet_init_tx_queues(priv->dev);
+ reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
+ reg |= DMA_EN;
+ bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
+ bcmgenet_enable_tx_napi(priv);
+
+ bcmgenet_link_intr_enable(priv);
+ phy_start_machine(dev->phydev);
+
+ netif_device_attach(dev);
+ enable_irq(priv->irq1);
+ return 0;
+ }
+ /* MAC was reset so complete bcmgenet_netif_stop() */
+ umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, false);
+ bcmgenet_rdma_disable(priv);
+ bcmgenet_intr_disable(priv);
+ bcmgenet_fini_dma(priv);
+ enable_irq(priv->irq1);
+ }
init_umac(priv);
@@ -4254,19 +4239,13 @@ static int bcmgenet_resume(struct device *d)
if (rule->state != BCMGENET_RXNFC_STATE_UNUSED)
bcmgenet_hfb_create_rxnfc_filter(priv, rule);
- /* Disable RX/TX DMA and flush TX queues */
- dma_ctrl = bcmgenet_dma_disable(priv, false);
-
/* Reinitialize TDMA and RDMA and SW housekeeping */
- ret = bcmgenet_init_dma(priv);
+ ret = bcmgenet_init_dma(priv, false);
if (ret) {
netdev_err(dev, "failed to initialize DMA\n");
goto out_clk_disable;
}
- /* Always enable ring 16 - descriptor ring */
- bcmgenet_enable_dma(priv, dma_ctrl);
-
if (!device_may_wakeup(d))
phy_resume(dev->phydev);
@@ -4287,19 +4266,52 @@ static int bcmgenet_suspend(struct device *d)
{
struct net_device *dev = dev_get_drvdata(d);
struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct bcmgenet_rxnfc_rule *rule;
+ u32 reg, hfb_enable = 0;
if (!netif_running(dev))
return 0;
netif_device_detach(dev);
- bcmgenet_netif_stop(dev, true);
+ if (device_may_wakeup(d) && priv->wolopts) {
+ netif_tx_disable(dev);
+
+ /* Suspend non-wake Rx data flows */
+ if (priv->wolopts & WAKE_FILTER)
+ list_for_each_entry(rule, &priv->rxnfc_list, list)
+ if (rule->fs.ring_cookie == RX_CLS_FLOW_WAKE &&
+ rule->state == BCMGENET_RXNFC_STATE_ENABLED)
+ hfb_enable |= 1 << rule->fs.location;
+ reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
+ if (GENET_IS_V1(priv) || GENET_IS_V2(priv)) {
+ reg &= ~RBUF_HFB_FILTER_EN_MASK;
+ reg |= hfb_enable << (RBUF_HFB_FILTER_EN_SHIFT + 1);
+ } else {
+ bcmgenet_hfb_reg_writel(priv, hfb_enable << 1,
+ HFB_FLT_ENABLE_V3PLUS + 4);
+ }
+ if (!hfb_enable)
+ reg &= ~RBUF_HFB_EN;
+ bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
- if (!device_may_wakeup(d))
- phy_suspend(dev->phydev);
+ /* Clear any old filter matches so that only new matches can wake the system */
+ bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
+ bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
- /* Disable filtering */
- bcmgenet_hfb_reg_writel(priv, 0, HFB_CTRL);
+ if (-ETIMEDOUT == bcmgenet_tdma_disable(priv))
+ netdev_warn(priv->dev,
+ "Timed out while disabling TX DMA\n");
+
+ bcmgenet_disable_tx_napi(priv);
+ bcmgenet_disable_rx_napi(priv);
+ disable_irq(priv->irq1);
+ bcmgenet_tx_reclaim_all(dev);
+ bcmgenet_fini_tx_napi(priv);
+ } else {
+ /* Teardown the interface */
+ bcmgenet_netif_stop(dev, true);
+ }
return 0;
}
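
The reworked bcmgenet_suspend() above arms only the wake-capable RXNFC rules before entering WoL: a rule contributes a bit to the enable bitmap only if it is enabled and routed to the wake ring. A minimal userspace sketch of that bitmap construction, using illustrative stand-ins rather than the driver's types:

/* RX_CLS_FLOW_WAKE_DEMO and struct demo_rule are stand-ins for the
 * ethtool RX_CLS_FLOW_WAKE cookie and struct bcmgenet_rxnfc_rule.
 */
#include <stdint.h>
#include <stdio.h>

#define RX_CLS_FLOW_WAKE_DEMO	(~0ULL - 1)

struct demo_rule {
	uint64_t ring_cookie;
	unsigned int location;	/* HFB filter index */
	int enabled;
};

int main(void)
{
	const struct demo_rule rules[] = {
		{ RX_CLS_FLOW_WAKE_DEMO, 0, 1 },	/* wake rule, armed */
		{ 5, 1, 1 },				/* ordinary Rx rule */
		{ RX_CLS_FLOW_WAKE_DEMO, 2, 0 },	/* wake rule, disabled */
	};
	uint32_t hfb_enable = 0;

	for (size_t i = 0; i < sizeof(rules) / sizeof(rules[0]); i++)
		if (rules[i].ring_cookie == RX_CLS_FLOW_WAKE_DEMO &&
		    rules[i].enabled)
			hfb_enable |= 1u << rules[i].location;

	printf("hfb_enable = 0x%x\n", hfb_enable);	/* bit 0 only */
	return 0;
}
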
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 43b923c48b14..5ec3979779ec 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2014-2024 Broadcom
+ * Copyright (c) 2014-2025 Broadcom
*/
#ifndef __BCMGENET_H__
@@ -18,6 +18,9 @@
#include "../unimac.h"
+/* Maximum number of hardware queues, downsized if needed */
+#define GENET_MAX_MQ_CNT 4
+
/* total number of Buffer Descriptors, same for Rx/Tx */
#define TOTAL_DESC 256
@@ -152,6 +155,30 @@ struct bcmgenet_mib_counters {
u32 tx_realloc_tsb_failed;
};
+struct bcmgenet_tx_stats64 {
+ struct u64_stats_sync syncp;
+ u64_stats_t packets;
+ u64_stats_t bytes;
+ u64_stats_t errors;
+ u64_stats_t dropped;
+};
+
+struct bcmgenet_rx_stats64 {
+ struct u64_stats_sync syncp;
+ u64_stats_t bytes;
+ u64_stats_t packets;
+ u64_stats_t errors;
+ u64_stats_t dropped;
+ u64_stats_t multicast;
+ u64_stats_t broadcast;
+ u64_stats_t missed;
+ u64_stats_t length_errors;
+ u64_stats_t over_errors;
+ u64_stats_t crc_errors;
+ u64_stats_t frame_errors;
+ u64_stats_t fragmented_errors;
+};
+
#define UMAC_MIB_START 0x400
#define UMAC_MDIO_CMD 0x614
@@ -271,6 +298,8 @@ struct bcmgenet_mib_counters {
/* Only valid for GENETv3+ */
#define UMAC_IRQ_MDIO_DONE (1 << 23)
#define UMAC_IRQ_MDIO_ERROR (1 << 24)
+#define UMAC_IRQ_MDIO_EVENT (UMAC_IRQ_MDIO_DONE | \
+ UMAC_IRQ_MDIO_ERROR)
/* INTRL2 instance 1 definitions */
#define UMAC_IRQ1_TX_INTR_MASK 0xFFFF
@@ -476,6 +505,7 @@ enum bcmgenet_version {
#define GENET_HAS_EXT (1 << 1)
#define GENET_HAS_MDIO_INTR (1 << 2)
#define GENET_HAS_MOCA_LINK_DET (1 << 3)
+#define GENET_HAS_EPHY_16NM (1 << 4)
/* BCMGENET hardware parameters, keep this structure nicely aligned
* since it is going to be used in hot paths
@@ -496,7 +526,6 @@ struct bcmgenet_hw_params {
u32 rdma_offset;
u32 tdma_offset;
u32 words_per_bd;
- u32 flags;
};
struct bcmgenet_skb_cb {
@@ -510,10 +539,8 @@ struct bcmgenet_skb_cb {
struct bcmgenet_tx_ring {
spinlock_t lock; /* ring lock */
struct napi_struct napi; /* NAPI per tx queue */
- unsigned long packets;
- unsigned long bytes;
+ struct bcmgenet_tx_stats64 stats64;
unsigned int index; /* ring index */
- unsigned int queue; /* queue index */
struct enet_cb *cbs; /* tx ring buffer control block*/
unsigned int size; /* size of each tx ring */
unsigned int clean_ptr; /* Tx ring clean pointer */
@@ -523,8 +550,6 @@ struct bcmgenet_tx_ring {
unsigned int prod_index; /* Tx ring producer index SW copy */
unsigned int cb_ptr; /* Tx ring initial CB ptr */
unsigned int end_ptr; /* Tx ring end CB ptr */
- void (*int_enable)(struct bcmgenet_tx_ring *);
- void (*int_disable)(struct bcmgenet_tx_ring *);
struct bcmgenet_priv *priv;
};
@@ -538,10 +563,7 @@ struct bcmgenet_net_dim {
struct bcmgenet_rx_ring {
struct napi_struct napi; /* Rx NAPI struct */
- unsigned long bytes;
- unsigned long packets;
- unsigned long errors;
- unsigned long dropped;
+ struct bcmgenet_rx_stats64 stats64;
unsigned int index; /* Rx ring index */
struct enet_cb *cbs; /* Rx ring buffer control block */
unsigned int size; /* Rx ring size */
@@ -553,8 +575,6 @@ struct bcmgenet_rx_ring {
struct bcmgenet_net_dim dim;
u32 rx_max_coalesced_frames;
u32 rx_coalesce_usecs;
- void (*int_enable)(struct bcmgenet_rx_ring *);
- void (*int_disable)(struct bcmgenet_rx_ring *);
struct bcmgenet_priv *priv;
};
@@ -583,7 +603,7 @@ struct bcmgenet_priv {
struct enet_cb *tx_cbs;
unsigned int num_tx_bds;
- struct bcmgenet_tx_ring tx_rings[DESC_INDEX + 1];
+ struct bcmgenet_tx_ring tx_rings[GENET_MAX_MQ_CNT + 1];
/* receive variables */
void __iomem *rx_bds;
@@ -593,10 +613,11 @@ struct bcmgenet_priv {
struct bcmgenet_rxnfc_rule rxnfc_rules[MAX_NUM_OF_FS_RULES];
struct list_head rxnfc_list;
- struct bcmgenet_rx_ring rx_rings[DESC_INDEX + 1];
+ struct bcmgenet_rx_ring rx_rings[GENET_MAX_MQ_CNT + 1];
/* other misc variables */
- struct bcmgenet_hw_params *hw_params;
+ const struct bcmgenet_hw_params *hw_params;
+ u32 flags;
unsigned autoneg_pause:1;
unsigned tx_pause:1;
unsigned rx_pause:1;
@@ -615,7 +636,6 @@ struct bcmgenet_priv {
phy_interface_t phy_interface;
int phy_addr;
int ext_phy;
- bool ephy_16nm;
/* Interrupt variables */
struct work_struct bcmgenet_irq_work;
@@ -643,13 +663,37 @@ struct bcmgenet_priv {
struct clk *clk_wol;
u32 wolopts;
u8 sopass[SOPASS_MAX];
- bool wol_active;
struct bcmgenet_mib_counters mib;
struct ethtool_keee eee;
};
+static inline bool bcmgenet_has_40bits(struct bcmgenet_priv *priv)
+{
+ return !!(priv->flags & GENET_HAS_40BITS);
+}
+
+static inline bool bcmgenet_has_ext(struct bcmgenet_priv *priv)
+{
+ return !!(priv->flags & GENET_HAS_EXT);
+}
+
+static inline bool bcmgenet_has_mdio_intr(struct bcmgenet_priv *priv)
+{
+ return !!(priv->flags & GENET_HAS_MDIO_INTR);
+}
+
+static inline bool bcmgenet_has_moca_link_det(struct bcmgenet_priv *priv)
+{
+ return !!(priv->flags & GENET_HAS_MOCA_LINK_DET);
+}
+
+static inline bool bcmgenet_has_ephy_16nm(struct bcmgenet_priv *priv)
+{
+ return !!(priv->flags & GENET_HAS_EPHY_16NM);
+}
+
#define GENET_IO_MACRO(name, offset) \
static inline u32 bcmgenet_##name##_readl(struct bcmgenet_priv *priv, \
u32 off) \
@@ -702,8 +746,8 @@ void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol);
int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol);
int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
enum bcmgenet_power_mode mode);
-void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
- enum bcmgenet_power_mode mode);
+int bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
+ enum bcmgenet_power_mode mode);
void bcmgenet_eee_enable_set(struct net_device *dev, bool enable,
bool tx_lpi_enabled);
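
The new GENET_HAS_* bits and bcmgenet_has_*() helpers replace per-feature booleans (such as ephy_16nm) with a single capability word copied from platform data at probe time. A small userspace sketch of the same pattern, with illustrative names:

/* demo_priv and the DEMO_HAS_* bits are illustrative stand-ins for
 * struct bcmgenet_priv and the GENET_HAS_* flags.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_HAS_40BITS		(1 << 0)
#define DEMO_HAS_EXT		(1 << 1)
#define DEMO_HAS_MDIO_INTR	(1 << 2)

struct demo_priv {
	uint32_t flags;		/* copied once from per-SoC platform data */
};

static bool demo_has_40bits(const struct demo_priv *p)
{
	return !!(p->flags & DEMO_HAS_40BITS);
}

int main(void)
{
	struct demo_priv v4 = {
		.flags = DEMO_HAS_40BITS | DEMO_HAS_EXT | DEMO_HAS_MDIO_INTR,
	};
	struct demo_priv v1 = { .flags = 0 };

	printf("v4 40-bit DMA: %d, v1 40-bit DMA: %d\n",
	       demo_has_40bits(&v4), demo_has_40bits(&v1));
	return 0;
}
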
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
index 0715ea5bf13e..8fb551288298 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
@@ -2,7 +2,7 @@
/*
* Broadcom GENET (Gigabit Ethernet) Wake-on-LAN support
*
- * Copyright (c) 2014-2024 Broadcom
+ * Copyright (c) 2014-2025 Broadcom
*/
#define pr_fmt(fmt) "bcmgenet_wol: " fmt
@@ -41,9 +41,12 @@ void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
struct device *kdev = &priv->pdev->dev;
+ u32 phy_wolopts = 0;
- if (dev->phydev)
+ if (dev->phydev) {
phy_ethtool_get_wol(dev->phydev, wol);
+ phy_wolopts = wol->wolopts;
+ }
/* MAC is not wake-up capable, return what the PHY does */
if (!device_can_wakeup(kdev))
@@ -51,9 +54,14 @@ void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
/* Overlay MAC capabilities with that of the PHY queried before */
wol->supported |= WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;
- wol->wolopts = priv->wolopts;
- memset(wol->sopass, 0, sizeof(wol->sopass));
+ wol->wolopts |= priv->wolopts;
+
+ /* Return the PHY configured magic password */
+ if (phy_wolopts & WAKE_MAGICSECURE)
+ return;
+ /* Otherwise the MAC one */
+ memset(wol->sopass, 0, sizeof(wol->sopass));
if (wol->wolopts & WAKE_MAGICSECURE)
memcpy(wol->sopass, priv->sopass, sizeof(priv->sopass));
}
@@ -70,7 +78,7 @@ int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
/* Try Wake-on-LAN from the PHY first */
if (dev->phydev) {
ret = phy_ethtool_set_wol(dev->phydev, wol);
- if (ret != -EOPNOTSUPP)
+ if (ret != -EOPNOTSUPP && wol->wolopts)
return ret;
}
@@ -137,8 +145,7 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
enum bcmgenet_power_mode mode)
{
struct net_device *dev = priv->dev;
- struct bcmgenet_rxnfc_rule *rule;
- u32 reg, hfb_ctrl_reg, hfb_enable = 0;
+ u32 reg, hfb_ctrl_reg;
int retries = 0;
if (mode != GENET_POWER_WOL_MAGIC) {
@@ -146,18 +153,6 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
return -EINVAL;
}
- /* Can't suspend with WoL if MAC is still in reset */
- spin_lock_bh(&priv->reg_lock);
- reg = bcmgenet_umac_readl(priv, UMAC_CMD);
- if (reg & CMD_SW_RESET)
- reg &= ~CMD_SW_RESET;
-
- /* disable RX */
- reg &= ~CMD_RX_EN;
- bcmgenet_umac_writel(priv, reg, UMAC_CMD);
- spin_unlock_bh(&priv->reg_lock);
- mdelay(10);
-
if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) {
reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
reg |= MPD_EN;
@@ -169,13 +164,8 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
}
hfb_ctrl_reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
- if (priv->wolopts & WAKE_FILTER) {
- list_for_each_entry(rule, &priv->rxnfc_list, list)
- if (rule->fs.ring_cookie == RX_CLS_FLOW_WAKE)
- hfb_enable |= (1 << rule->fs.location);
- reg = (hfb_ctrl_reg & ~RBUF_HFB_EN) | RBUF_ACPI_EN;
- bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
- }
+ reg = hfb_ctrl_reg | RBUF_ACPI_EN;
+ bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
/* Do not leave UniMAC in MPD mode only */
retries = bcmgenet_poll_wol_status(priv);
@@ -190,15 +180,12 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
netif_dbg(priv, wol, dev, "MPD WOL-ready status set after %d msec\n",
retries);
- clk_prepare_enable(priv->clk_wol);
- priv->wol_active = 1;
+ /* Disable phy status updates while suspending */
+ mutex_lock(&dev->phydev->lock);
+ dev->phydev->state = PHY_READY;
+ mutex_unlock(&dev->phydev->lock);
- if (hfb_enable) {
- bcmgenet_hfb_reg_writel(priv, hfb_enable,
- HFB_FLT_ENABLE_V3PLUS + 4);
- hfb_ctrl_reg = RBUF_HFB_EN | RBUF_ACPI_EN;
- bcmgenet_hfb_reg_writel(priv, hfb_ctrl_reg, HFB_CTRL);
- }
+ clk_prepare_enable(priv->clk_wol);
/* Enable CRC forward */
spin_lock_bh(&priv->reg_lock);
@@ -206,13 +193,17 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
priv->crc_fwd_en = 1;
reg |= CMD_CRC_FWD;
+ /* Can't suspend with WoL if MAC is still in reset */
+ if (reg & CMD_SW_RESET)
+ reg &= ~CMD_SW_RESET;
+
/* Receiver must be enabled for WOL MP detection */
reg |= CMD_RX_EN;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
spin_unlock_bh(&priv->reg_lock);
reg = UMAC_IRQ_MPD_R;
- if (hfb_enable)
+ if (hfb_ctrl_reg & RBUF_HFB_EN)
reg |= UMAC_IRQ_HFB_SM | UMAC_IRQ_HFB_MM;
bcmgenet_intrl2_0_writel(priv, reg, INTRL2_CPU_MASK_CLEAR);
@@ -220,40 +211,42 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
return 0;
}
-void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
- enum bcmgenet_power_mode mode)
+int bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
+ enum bcmgenet_power_mode mode)
{
+ struct net_device *dev = priv->dev;
u32 reg;
if (mode != GENET_POWER_WOL_MAGIC) {
netif_err(priv, wol, priv->dev, "invalid mode: %d\n", mode);
- return;
+ return -EINVAL;
}
- if (!priv->wol_active)
- return; /* failed to suspend so skip the rest */
-
- priv->wol_active = 0;
clk_disable_unprepare(priv->clk_wol);
priv->crc_fwd_en = 0;
+ bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_WAKE_EVENT,
+ INTRL2_CPU_MASK_SET);
+ if (bcmgenet_has_mdio_intr(priv))
+ bcmgenet_intrl2_0_writel(priv,
+ UMAC_IRQ_MDIO_EVENT,
+ INTRL2_CPU_MASK_CLEAR);
+
/* Disable Magic Packet Detection */
if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) {
reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
if (!(reg & MPD_EN))
- return; /* already reset so skip the rest */
+ return -EPERM; /* already reset so skip the rest */
reg &= ~(MPD_EN | MPD_PW_EN);
bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);
}
- /* Disable WAKE_FILTER Detection */
- if (priv->wolopts & WAKE_FILTER) {
- reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
- if (!(reg & RBUF_ACPI_EN))
- return; /* already reset so skip the rest */
- reg &= ~(RBUF_HFB_EN | RBUF_ACPI_EN);
- bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
- }
+ /* Disable ACPI mode */
+ reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
+ if (!(reg & RBUF_ACPI_EN))
+ return -EPERM; /* already reset so skip the rest */
+ reg &= ~RBUF_ACPI_EN;
+ bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
/* Disable CRC Forward */
spin_lock_bh(&priv->reg_lock);
@@ -261,4 +254,14 @@ void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
reg &= ~CMD_CRC_FWD;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
spin_unlock_bh(&priv->reg_lock);
+
+ /* Resume link status tracking */
+ mutex_lock(&dev->phydev->lock);
+ if (dev->phydev->link)
+ dev->phydev->state = PHY_RUNNING;
+ else
+ dev->phydev->state = PHY_NOLINK;
+ mutex_unlock(&dev->phydev->lock);
+
+ return 0;
}
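
bcmgenet_wol_power_up_cfg() now returns an int: 0 when the MAC left WoL mode with its state intact, and a negative errno when the block was already reset, in which case the noirq resume path shown earlier falls back to a full UniMAC reset. A small sketch of that caller contract, with placeholder function names:

/* Minimal sketch of the new return-value contract, assuming placeholder
 * names: wol_power_up() stands in for bcmgenet_wol_power_up_cfg() and
 * resume_noirq() for the caller shown in bcmgenet_resume_noirq().
 */
#include <errno.h>
#include <stdio.h>

static int wol_power_up(int already_reset)
{
	if (already_reset)
		return -EPERM;	/* MPD/ACPI already cleared: MAC was reset */
	return 0;		/* clean exit from WoL, keep MAC state */
}

static void resume_noirq(int already_reset)
{
	if (!wol_power_up(already_reset)) {
		printf("fast path: keep existing MAC state\n");
		return;
	}
	/* fall through: power up the PHY, then take the MAC out of reset */
	printf("slow path: full UniMAC reset\n");
}

int main(void)
{
	resume_noirq(0);
	resume_noirq(1);
	return 0;
}
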
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index c4a3698cef66..b6437ba7a2eb 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -2,7 +2,7 @@
/*
* Broadcom GENET MDIO routines
*
- * Copyright (c) 2014-2024 Broadcom
+ * Copyright (c) 2014-2025 Broadcom
*/
#include <linux/acpi.h>
@@ -154,7 +154,7 @@ void bcmgenet_phy_power_set(struct net_device *dev, bool enable)
u32 reg = 0;
/* EXT_GPHY_CTRL is only valid for GENETv4 and onward */
- if (GENET_IS_V4(priv) || priv->ephy_16nm) {
+ if (GENET_IS_V4(priv) || bcmgenet_has_ephy_16nm(priv)) {
reg = bcmgenet_ext_readl(priv, EXT_GPHY_CTRL);
if (enable) {
reg &= ~EXT_CK25_DIS;
@@ -184,7 +184,7 @@ void bcmgenet_phy_power_set(struct net_device *dev, bool enable)
static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
{
- if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
+ if (bcmgenet_has_moca_link_det(priv))
fixed_phy_set_link_update(priv->dev->phydev,
bcmgenet_fixed_phy_link_update);
}
@@ -625,7 +625,7 @@ static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv)
.asym_pause = 0,
};
- phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
+ phydev = fixed_phy_register(&fphy_status, NULL);
if (IS_ERR(phydev)) {
dev_err(kdev, "failed to register fixed PHY device\n");
return PTR_ERR(phydev);
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 9cc8db10a8d6..91104cc2c238 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -54,7 +54,8 @@
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
-#include <linux/crc32poly.h>
+#include <linux/crc32.h>
+#include <linux/dmi.h>
#include <net/checksum.h>
#include <net/gso.h>
@@ -7424,7 +7425,7 @@ static void tg3_napi_enable(struct tg3 *tp)
for (i = 0; i < tp->irq_cnt; i++) {
tnapi = &tp->napi[i];
- napi_enable(&tnapi->napi);
+ napi_enable_locked(&tnapi->napi);
if (tnapi->tx_buffers) {
netif_queue_set_napi(tp->dev, txq_idx,
NETDEV_QUEUE_TYPE_TX,
@@ -7445,9 +7446,10 @@ static void tg3_napi_init(struct tg3 *tp)
int i;
for (i = 0; i < tp->irq_cnt; i++) {
- netif_napi_add(tp->dev, &tp->napi[i].napi,
- i ? tg3_poll_msix : tg3_poll);
- netif_napi_set_irq(&tp->napi[i].napi, tp->napi[i].irq_vec);
+ netif_napi_add_locked(tp->dev, &tp->napi[i].napi,
+ i ? tg3_poll_msix : tg3_poll);
+ netif_napi_set_irq_locked(&tp->napi[i].napi,
+ tp->napi[i].irq_vec);
}
}
@@ -9807,26 +9809,7 @@ static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
static inline u32 calc_crc(unsigned char *buf, int len)
{
- u32 reg;
- u32 tmp;
- int j, k;
-
- reg = 0xffffffff;
-
- for (j = 0; j < len; j++) {
- reg ^= buf[j];
-
- for (k = 0; k < 8; k++) {
- tmp = reg & 0x01;
-
- reg >>= 1;
-
- if (tmp)
- reg ^= CRC32_POLY_LE;
- }
- }
-
- return ~reg;
+ return ~crc32(~0, buf, len);
}
static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
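
The loop removed from calc_crc() was a bit-at-a-time reflected CRC-32 (polynomial 0xEDB88320, all-ones seed, final inversion), which is what the replacement ~crc32(~0, buf, len) computes. A self-contained sketch that checks the removed algorithm against the standard CRC-32 check value:

/* Userspace re-statement of the deleted loop; 0xcbf43926 is the
 * well-known CRC-32 check value for the ASCII string "123456789".
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t crc32_bitwise(const unsigned char *buf, size_t len)
{
	uint32_t reg = 0xffffffff;

	for (size_t j = 0; j < len; j++) {
		reg ^= buf[j];
		for (int k = 0; k < 8; k++) {
			uint32_t lsb = reg & 1;

			reg >>= 1;
			if (lsb)
				reg ^= 0xedb88320;	/* reflected CRC-32 poly */
		}
	}
	return ~reg;
}

int main(void)
{
	const unsigned char msg[] = "123456789";

	assert(crc32_bitwise(msg, 9) == 0xcbf43926);
	printf("bit-at-a-time CRC-32 matches the standard check value\n");
	return 0;
}
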
@@ -11079,7 +11062,7 @@ static void tg3_chk_missed_msi(struct tg3 *tp)
static void tg3_timer(struct timer_list *t)
{
- struct tg3 *tp = from_timer(tp, t, timer);
+ struct tg3 *tp = timer_container_of(tp, t, timer);
spin_lock(&tp->lock);
@@ -11250,7 +11233,7 @@ static void tg3_timer_start(struct tg3 *tp)
static void tg3_timer_stop(struct tg3 *tp)
{
- del_timer_sync(&tp->timer);
+ timer_delete_sync(&tp->timer);
}
/* Restart hardware after configuration changes, self-test, etc.
@@ -11259,6 +11242,8 @@ static void tg3_timer_stop(struct tg3 *tp)
static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
__releases(tp->lock)
__acquires(tp->lock)
+ __releases(tp->dev->lock)
+ __acquires(tp->dev->lock)
{
int err;
@@ -11271,7 +11256,9 @@ static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
tg3_timer_stop(tp);
tp->irq_sync = 0;
tg3_napi_enable(tp);
+ netdev_unlock(tp->dev);
dev_close(tp->dev);
+ netdev_lock(tp->dev);
tg3_full_lock(tp, 0);
}
return err;
@@ -11299,6 +11286,7 @@ static void tg3_reset_task(struct work_struct *work)
tg3_netif_stop(tp);
+ netdev_lock(tp->dev);
tg3_full_lock(tp, 1);
if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
@@ -11318,12 +11306,14 @@ static void tg3_reset_task(struct work_struct *work)
* call cancel_work_sync() and wait forever.
*/
tg3_flag_clear(tp, RESET_TASK_PENDING);
+ netdev_unlock(tp->dev);
dev_close(tp->dev);
goto out;
}
tg3_netif_start(tp);
tg3_full_unlock(tp);
+ netdev_unlock(tp->dev);
tg3_phy_start(tp);
tg3_flag_clear(tp, RESET_TASK_PENDING);
out:
@@ -11683,9 +11673,11 @@ static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
if (err)
goto out_ints_fini;
+ netdev_lock(dev);
tg3_napi_init(tp);
tg3_napi_enable(tp);
+ netdev_unlock(dev);
for (i = 0; i < tp->irq_cnt; i++) {
err = tg3_request_irq(tp, i);
@@ -12569,6 +12561,7 @@ static int tg3_set_ringparam(struct net_device *dev,
irq_sync = 1;
}
+ netdev_lock(dev);
tg3_full_lock(tp, irq_sync);
tp->rx_pending = ering->rx_pending;
@@ -12597,6 +12590,7 @@ static int tg3_set_ringparam(struct net_device *dev,
}
tg3_full_unlock(tp);
+ netdev_unlock(dev);
if (irq_sync && !err)
tg3_phy_start(tp);
@@ -12678,6 +12672,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
irq_sync = 1;
}
+ netdev_lock(dev);
tg3_full_lock(tp, irq_sync);
if (epause->autoneg)
@@ -12707,6 +12702,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
}
tg3_full_unlock(tp);
+ netdev_unlock(dev);
}
tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
@@ -13911,6 +13907,7 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
data[TG3_INTERRUPT_TEST] = 1;
}
+ netdev_lock(dev);
tg3_full_lock(tp, 0);
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
@@ -13922,6 +13919,7 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
}
tg3_full_unlock(tp);
+ netdev_unlock(dev);
if (irq_sync && !err2)
tg3_phy_start(tp);
@@ -14365,6 +14363,7 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
tg3_set_mtu(dev, tp, new_mtu);
+ netdev_lock(dev);
tg3_full_lock(tp, 1);
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
@@ -14384,6 +14383,7 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
tg3_netif_start(tp);
tg3_full_unlock(tp);
+ netdev_unlock(dev);
if (!err)
tg3_phy_start(tp);
@@ -18164,6 +18164,7 @@ static int tg3_resume(struct device *device)
netif_device_attach(dev);
+ netdev_lock(dev);
tg3_full_lock(tp, 0);
tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
@@ -18180,6 +18181,7 @@ static int tg3_resume(struct device *device)
out:
tg3_full_unlock(tp);
+ netdev_unlock(dev);
if (!err)
tg3_phy_start(tp);
@@ -18192,6 +18194,50 @@ unlock:
static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
+/* Systems where ACPI _PTS (Prepare To Sleep) S5 will result in a fatal
+ * PCIe AER event on the tg3 device if the tg3 device is not, or cannot
+ * be, powered down.
+ */
+static const struct dmi_system_id tg3_restart_aer_quirk_table[] = {
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R440"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R540"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R640"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R650"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R740"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R750"),
+ },
+ },
+ {}
+};
+
static void tg3_shutdown(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
@@ -18208,6 +18254,19 @@ static void tg3_shutdown(struct pci_dev *pdev)
if (system_state == SYSTEM_POWER_OFF)
tg3_power_down(tp);
+ else if (system_state == SYSTEM_RESTART &&
+ dmi_first_match(tg3_restart_aer_quirk_table) &&
+ pdev->current_state != PCI_D3cold &&
+ pdev->current_state != PCI_UNKNOWN) {
+ /* Disable PCIe AER on the tg3 to avoid a fatal
+ * error during this system restart.
+ */
+ pcie_capability_clear_word(pdev, PCI_EXP_DEVCTL,
+ PCI_EXP_DEVCTL_CERE |
+ PCI_EXP_DEVCTL_NFERE |
+ PCI_EXP_DEVCTL_FERE |
+ PCI_EXP_DEVCTL_URRE);
+ }
rtnl_unlock();
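
The quirk above fires only when the running platform matches one of the DMI entries. A userspace analogue of that table walk, using a subset of the vendor/product strings from the patch plus a deliberately non-matching query:

/* quirks[] holds a subset of the table above; "OptiPlex 7090" is only a
 * non-matching demo query, not a statement about real hardware.
 */
#include <stdio.h>
#include <string.h>

struct dmi_quirk { const char *vendor; const char *product; };

static const struct dmi_quirk quirks[] = {
	{ "Dell Inc.", "PowerEdge R440" },
	{ "Dell Inc.", "PowerEdge R740" },
	{ NULL, NULL },
};

static int dmi_matches(const char *vendor, const char *product)
{
	for (int i = 0; quirks[i].vendor; i++)
		if (!strcmp(vendor, quirks[i].vendor) &&
		    !strcmp(product, quirks[i].product))
			return 1;
	return 0;
}

int main(void)
{
	printf("R740: %d, OptiPlex 7090: %d\n",
	       dmi_matches("Dell Inc.", "PowerEdge R740"),
	       dmi_matches("Dell Inc.", "OptiPlex 7090"));
	return 0;
}
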
@@ -18260,7 +18319,9 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
done:
if (state == pci_channel_io_perm_failure) {
if (netdev) {
+ netdev_lock(netdev);
tg3_napi_enable(tp);
+ netdev_unlock(netdev);
dev_close(netdev);
}
err = PCI_ERS_RESULT_DISCONNECT;
@@ -18314,7 +18375,9 @@ static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
done:
if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
+ netdev_lock(netdev);
tg3_napi_enable(tp);
+ netdev_unlock(netdev);
dev_close(netdev);
}
rtnl_unlock();
@@ -18340,12 +18403,14 @@ static void tg3_io_resume(struct pci_dev *pdev)
if (!netdev || !netif_running(netdev))
goto done;
+ netdev_lock(netdev);
tg3_full_lock(tp, 0);
tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
tg3_flag_set(tp, INIT_COMPLETE);
err = tg3_restart_hw(tp, true);
if (err) {
tg3_full_unlock(tp);
+ netdev_unlock(netdev);
netdev_err(netdev, "Cannot restart hardware after reset.\n");
goto done;
}
@@ -18357,6 +18422,7 @@ static void tg3_io_resume(struct pci_dev *pdev)
tg3_netif_start(tp);
tg3_full_unlock(tp);
+ netdev_unlock(netdev);
tg3_phy_start(tp);
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index 9c80ab07a735..92c7639d1fc7 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -314,13 +314,13 @@ bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
{
switch (event) {
case IOC_E_FWRSP_GETATTR:
- del_timer(&ioc->ioc_timer);
+ timer_delete(&ioc->ioc_timer);
bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
break;
case IOC_E_PFFAILED:
case IOC_E_HWERROR:
- del_timer(&ioc->ioc_timer);
+ timer_delete(&ioc->ioc_timer);
fallthrough;
case IOC_E_TIMEOUT:
ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
@@ -330,7 +330,7 @@ bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
break;
case IOC_E_DISABLE:
- del_timer(&ioc->ioc_timer);
+ timer_delete(&ioc->ioc_timer);
bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
break;
@@ -659,13 +659,13 @@ bfa_iocpf_sm_mismatch(struct bfa_iocpf *iocpf, enum iocpf_event event)
break;
case IOCPF_E_DISABLE:
- del_timer(&ioc->iocpf_timer);
+ timer_delete(&ioc->iocpf_timer);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
bfa_ioc_pf_disabled(ioc);
break;
case IOCPF_E_STOP:
- del_timer(&ioc->iocpf_timer);
+ timer_delete(&ioc->iocpf_timer);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
break;
@@ -741,7 +741,7 @@ bfa_iocpf_sm_hwinit(struct bfa_iocpf *iocpf, enum iocpf_event event)
break;
case IOCPF_E_DISABLE:
- del_timer(&ioc->iocpf_timer);
+ timer_delete(&ioc->iocpf_timer);
bfa_ioc_sync_leave(ioc);
bfa_nw_ioc_hw_sem_release(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
@@ -774,13 +774,13 @@ bfa_iocpf_sm_enabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
switch (event) {
case IOCPF_E_FWRSP_ENABLE:
- del_timer(&ioc->iocpf_timer);
+ timer_delete(&ioc->iocpf_timer);
bfa_nw_ioc_hw_sem_release(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
break;
case IOCPF_E_INITFAIL:
- del_timer(&ioc->iocpf_timer);
+ timer_delete(&ioc->iocpf_timer);
fallthrough;
case IOCPF_E_TIMEOUT:
@@ -791,7 +791,7 @@ bfa_iocpf_sm_enabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
break;
case IOCPF_E_DISABLE:
- del_timer(&ioc->iocpf_timer);
+ timer_delete(&ioc->iocpf_timer);
bfa_nw_ioc_hw_sem_release(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
break;
@@ -844,12 +844,12 @@ bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
switch (event) {
case IOCPF_E_FWRSP_DISABLE:
- del_timer(&ioc->iocpf_timer);
+ timer_delete(&ioc->iocpf_timer);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
break;
case IOCPF_E_FAIL:
- del_timer(&ioc->iocpf_timer);
+ timer_delete(&ioc->iocpf_timer);
fallthrough;
case IOCPF_E_TIMEOUT:
@@ -1210,7 +1210,7 @@ bfa_nw_ioc_hw_sem_release(struct bfa_ioc *ioc)
static void
bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc)
{
- del_timer(&ioc->sem_timer);
+ timer_delete(&ioc->sem_timer);
}
/* Initialize LPU local memory (aka secondary memory / SRAM) */
@@ -1982,7 +1982,7 @@ bfa_ioc_hb_monitor(struct bfa_ioc *ioc)
static void
bfa_ioc_hb_stop(struct bfa_ioc *ioc)
{
- del_timer(&ioc->hb_timer);
+ timer_delete(&ioc->hb_timer);
}
/* Initiate a full firmware download. */
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index ece6f3b48327..9bed33295839 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -19,6 +19,7 @@
#include <linux/ip.h>
#include <linux/prefetch.h>
#include <linux/module.h>
+#include <net/gro.h>
#include "bnad.h"
#include "bna.h"
@@ -1687,7 +1688,8 @@ err_return:
static void
bnad_ioc_timeout(struct timer_list *t)
{
- struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.ioc_timer);
+ struct bnad *bnad = timer_container_of(bnad, t,
+ bna.ioceth.ioc.ioc_timer);
unsigned long flags;
spin_lock_irqsave(&bnad->bna_lock, flags);
@@ -1698,7 +1700,8 @@ bnad_ioc_timeout(struct timer_list *t)
static void
bnad_ioc_hb_check(struct timer_list *t)
{
- struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.hb_timer);
+ struct bnad *bnad = timer_container_of(bnad, t,
+ bna.ioceth.ioc.hb_timer);
unsigned long flags;
spin_lock_irqsave(&bnad->bna_lock, flags);
@@ -1709,7 +1712,8 @@ bnad_ioc_hb_check(struct timer_list *t)
static void
bnad_iocpf_timeout(struct timer_list *t)
{
- struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.iocpf_timer);
+ struct bnad *bnad = timer_container_of(bnad, t,
+ bna.ioceth.ioc.iocpf_timer);
unsigned long flags;
spin_lock_irqsave(&bnad->bna_lock, flags);
@@ -1720,7 +1724,8 @@ bnad_iocpf_timeout(struct timer_list *t)
static void
bnad_iocpf_sem_timeout(struct timer_list *t)
{
- struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.sem_timer);
+ struct bnad *bnad = timer_container_of(bnad, t,
+ bna.ioceth.ioc.sem_timer);
unsigned long flags;
spin_lock_irqsave(&bnad->bna_lock, flags);
@@ -1734,7 +1739,7 @@ bnad_iocpf_sem_timeout(struct timer_list *t)
* Time   CPU m           CPU n
* 0      1 = test_bit
* 1                      clear_bit
- * 2                    del_timer_sync
+ * 2                    timer_delete_sync
* 3      mod_timer
*/
@@ -1742,7 +1747,7 @@ bnad_iocpf_sem_timeout(struct timer_list *t)
static void
bnad_dim_timeout(struct timer_list *t)
{
- struct bnad *bnad = from_timer(bnad, t, dim_timer);
+ struct bnad *bnad = timer_container_of(bnad, t, dim_timer);
struct bnad_rx_info *rx_info;
struct bnad_rx_ctrl *rx_ctrl;
int i, j;
@@ -1775,7 +1780,7 @@ bnad_dim_timeout(struct timer_list *t)
static void
bnad_stats_timeout(struct timer_list *t)
{
- struct bnad *bnad = from_timer(bnad, t, stats_timer);
+ struct bnad *bnad = timer_container_of(bnad, t, stats_timer);
unsigned long flags;
if (!netif_running(bnad->netdev) ||
@@ -1836,7 +1841,7 @@ bnad_stats_timer_stop(struct bnad *bnad)
to_del = 1;
spin_unlock_irqrestore(&bnad->bna_lock, flags);
if (to_del)
- del_timer_sync(&bnad->stats_timer);
+ timer_delete_sync(&bnad->stats_timer);
}
/* Utilities */
@@ -2159,7 +2164,7 @@ bnad_destroy_rx(struct bnad *bnad, u32 rx_id)
}
spin_unlock_irqrestore(&bnad->bna_lock, flags);
if (to_del)
- del_timer_sync(&bnad->dim_timer);
+ timer_delete_sync(&bnad->dim_timer);
}
init_completion(&bnad->bnad_completions.rx_comp);
@@ -3725,9 +3730,9 @@ probe_uninit:
bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
disable_ioceth:
bnad_ioceth_disable(bnad);
- del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
- del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
- del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
+ timer_delete_sync(&bnad->bna.ioceth.ioc.ioc_timer);
+ timer_delete_sync(&bnad->bna.ioceth.ioc.sem_timer);
+ timer_delete_sync(&bnad->bna.ioceth.ioc.hb_timer);
spin_lock_irqsave(&bnad->bna_lock, flags);
bna_uninit(bna);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
@@ -3768,9 +3773,9 @@ bnad_pci_remove(struct pci_dev *pdev)
mutex_lock(&bnad->conf_mutex);
bnad_ioceth_disable(bnad);
- del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
- del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
- del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
+ timer_delete_sync(&bnad->bna.ioceth.ioc.ioc_timer);
+ timer_delete_sync(&bnad->bna.ioceth.ioc.sem_timer);
+ timer_delete_sync(&bnad->bna.ioceth.ioc.hb_timer);
spin_lock_irqsave(&bnad->bna_lock, flags);
bna_uninit(bna);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
index d1ad6c9f8140..216e25f26dbb 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -373,7 +373,7 @@ static int bnad_set_coalesce(struct net_device *netdev,
}
spin_unlock_irqrestore(&bnad->bna_lock, flags);
if (to_del)
- del_timer_sync(&bnad->dim_timer);
+ timer_delete_sync(&bnad->dim_timer);
spin_lock_irqsave(&bnad->bna_lock, flags);
bnad_rx_coalescing_timeo_set(bnad);
}
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 5740c98d8c9f..c9a5c8beb2fa 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -951,75 +951,73 @@ struct macb_tx_skb {
* device stats by a periodic timer.
*/
struct macb_stats {
- u32 rx_pause_frames;
- u32 tx_ok;
- u32 tx_single_cols;
- u32 tx_multiple_cols;
- u32 rx_ok;
- u32 rx_fcs_errors;
- u32 rx_align_errors;
- u32 tx_deferred;
- u32 tx_late_cols;
- u32 tx_excessive_cols;
- u32 tx_underruns;
- u32 tx_carrier_errors;
- u32 rx_resource_errors;
- u32 rx_overruns;
- u32 rx_symbol_errors;
- u32 rx_oversize_pkts;
- u32 rx_jabbers;
- u32 rx_undersize_pkts;
- u32 sqe_test_errors;
- u32 rx_length_mismatch;
- u32 tx_pause_frames;
+ u64 rx_pause_frames;
+ u64 tx_ok;
+ u64 tx_single_cols;
+ u64 tx_multiple_cols;
+ u64 rx_ok;
+ u64 rx_fcs_errors;
+ u64 rx_align_errors;
+ u64 tx_deferred;
+ u64 tx_late_cols;
+ u64 tx_excessive_cols;
+ u64 tx_underruns;
+ u64 tx_carrier_errors;
+ u64 rx_resource_errors;
+ u64 rx_overruns;
+ u64 rx_symbol_errors;
+ u64 rx_oversize_pkts;
+ u64 rx_jabbers;
+ u64 rx_undersize_pkts;
+ u64 sqe_test_errors;
+ u64 rx_length_mismatch;
+ u64 tx_pause_frames;
};
struct gem_stats {
- u32 tx_octets_31_0;
- u32 tx_octets_47_32;
- u32 tx_frames;
- u32 tx_broadcast_frames;
- u32 tx_multicast_frames;
- u32 tx_pause_frames;
- u32 tx_64_byte_frames;
- u32 tx_65_127_byte_frames;
- u32 tx_128_255_byte_frames;
- u32 tx_256_511_byte_frames;
- u32 tx_512_1023_byte_frames;
- u32 tx_1024_1518_byte_frames;
- u32 tx_greater_than_1518_byte_frames;
- u32 tx_underrun;
- u32 tx_single_collision_frames;
- u32 tx_multiple_collision_frames;
- u32 tx_excessive_collisions;
- u32 tx_late_collisions;
- u32 tx_deferred_frames;
- u32 tx_carrier_sense_errors;
- u32 rx_octets_31_0;
- u32 rx_octets_47_32;
- u32 rx_frames;
- u32 rx_broadcast_frames;
- u32 rx_multicast_frames;
- u32 rx_pause_frames;
- u32 rx_64_byte_frames;
- u32 rx_65_127_byte_frames;
- u32 rx_128_255_byte_frames;
- u32 rx_256_511_byte_frames;
- u32 rx_512_1023_byte_frames;
- u32 rx_1024_1518_byte_frames;
- u32 rx_greater_than_1518_byte_frames;
- u32 rx_undersized_frames;
- u32 rx_oversize_frames;
- u32 rx_jabbers;
- u32 rx_frame_check_sequence_errors;
- u32 rx_length_field_frame_errors;
- u32 rx_symbol_errors;
- u32 rx_alignment_errors;
- u32 rx_resource_errors;
- u32 rx_overruns;
- u32 rx_ip_header_checksum_errors;
- u32 rx_tcp_checksum_errors;
- u32 rx_udp_checksum_errors;
+ u64 tx_octets;
+ u64 tx_frames;
+ u64 tx_broadcast_frames;
+ u64 tx_multicast_frames;
+ u64 tx_pause_frames;
+ u64 tx_64_byte_frames;
+ u64 tx_65_127_byte_frames;
+ u64 tx_128_255_byte_frames;
+ u64 tx_256_511_byte_frames;
+ u64 tx_512_1023_byte_frames;
+ u64 tx_1024_1518_byte_frames;
+ u64 tx_greater_than_1518_byte_frames;
+ u64 tx_underrun;
+ u64 tx_single_collision_frames;
+ u64 tx_multiple_collision_frames;
+ u64 tx_excessive_collisions;
+ u64 tx_late_collisions;
+ u64 tx_deferred_frames;
+ u64 tx_carrier_sense_errors;
+ u64 rx_octets;
+ u64 rx_frames;
+ u64 rx_broadcast_frames;
+ u64 rx_multicast_frames;
+ u64 rx_pause_frames;
+ u64 rx_64_byte_frames;
+ u64 rx_65_127_byte_frames;
+ u64 rx_128_255_byte_frames;
+ u64 rx_256_511_byte_frames;
+ u64 rx_512_1023_byte_frames;
+ u64 rx_1024_1518_byte_frames;
+ u64 rx_greater_than_1518_byte_frames;
+ u64 rx_undersized_frames;
+ u64 rx_oversize_frames;
+ u64 rx_jabbers;
+ u64 rx_frame_check_sequence_errors;
+ u64 rx_length_field_frame_errors;
+ u64 rx_symbol_errors;
+ u64 rx_alignment_errors;
+ u64 rx_resource_errors;
+ u64 rx_overruns;
+ u64 rx_ip_header_checksum_errors;
+ u64 rx_tcp_checksum_errors;
+ u64 rx_udp_checksum_errors;
};
/* Describes the name and offset of an individual statistic register, as
@@ -1027,7 +1025,7 @@ struct gem_stats {
* this register should contribute to.
*/
struct gem_statistic {
- char stat_string[ETH_GSTRING_LEN];
+ char stat_string[ETH_GSTRING_LEN] __nonstring;
int offset;
u32 stat_bits;
};
@@ -1279,6 +1277,8 @@ struct macb {
struct clk *rx_clk;
struct clk *tsu_clk;
struct net_device *dev;
+ /* Protects hw_stats and ethtool_stats */
+ spinlock_t stats_lock;
union {
struct macb_stats macb;
struct gem_stats gem;
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index daa416fb1724..d1f1ae5ea161 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -17,8 +17,6 @@
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/io.h>
-#include <linux/gpio.h>
-#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -26,7 +24,6 @@
#include <linux/platform_device.h>
#include <linux/phylink.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/ip.h>
@@ -530,19 +527,9 @@ static void macb_set_tx_clk(struct macb *bp, int speed)
if (bp->phy_interface == PHY_INTERFACE_MODE_MII)
return;
- switch (speed) {
- case SPEED_10:
- rate = 2500000;
- break;
- case SPEED_100:
- rate = 25000000;
- break;
- case SPEED_1000:
- rate = 125000000;
- break;
- default:
+ rate = rgmii_clock(speed);
+ if (rate < 0)
return;
- }
rate_rounded = clk_round_rate(bp->tx_clk, rate);
if (rate_rounded < 0)
@@ -578,6 +565,7 @@ static void macb_usx_pcs_link_up(struct phylink_pcs *pcs, unsigned int neg_mode,
}
static void macb_usx_pcs_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
struct macb *bp = container_of(pcs, struct macb, phylink_usx_pcs);
@@ -608,7 +596,7 @@ static int macb_usx_pcs_config(struct phylink_pcs *pcs,
return 0;
}
-static void macb_pcs_get_state(struct phylink_pcs *pcs,
+static void macb_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
struct phylink_link_state *state)
{
state->link = 0;
@@ -862,9 +850,7 @@ static int macb_mii_probe(struct net_device *dev)
struct macb *bp = netdev_priv(dev);
bp->phylink_sgmii_pcs.ops = &macb_phylink_pcs_ops;
- bp->phylink_sgmii_pcs.neg_mode = true;
bp->phylink_usx_pcs.ops = &macb_phylink_usx_pcs_ops;
- bp->phylink_usx_pcs.neg_mode = true;
bp->phylink_config.dev = &dev->dev;
bp->phylink_config.type = PHYLINK_NETDEV;
@@ -999,8 +985,8 @@ err_out:
static void macb_update_stats(struct macb *bp)
{
- u32 *p = &bp->hw_stats.macb.rx_pause_frames;
- u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
+ u64 *p = &bp->hw_stats.macb.rx_pause_frames;
+ u64 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
int offset = MACB_PFR;
WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);
@@ -1011,22 +997,15 @@ static void macb_update_stats(struct macb *bp)
static int macb_halt_tx(struct macb *bp)
{
- unsigned long halt_time, timeout;
- u32 status;
+ u32 status;
macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT));
- timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT);
- do {
- halt_time = jiffies;
- status = macb_readl(bp, TSR);
- if (!(status & MACB_BIT(TGO)))
- return 0;
-
- udelay(250);
- } while (time_before(halt_time, timeout));
-
- return -ETIMEDOUT;
+ /* Poll TSR until TGO clears or the timeout expires. */
+ return read_poll_timeout_atomic(macb_readl, status,
+ !(status & MACB_BIT(TGO)),
+ 250, MACB_HALT_TIMEOUT, false,
+ bp, TSR);
}
static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb, int budget)
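
macb_halt_tx() now relies on read_poll_timeout_atomic() to reread TSR every 250 us until TGO clears or the timeout elapses. A userspace analogue of that poll-until-condition-or-timeout shape; the register stub, delay, and timeout values below are illustrative:

/* reg_read() stands in for macb_readl(bp, TSR) and "bit 0" for
 * MACB_BIT(TGO); 10000 us is an arbitrary demo timeout.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static int reads;

static uint32_t reg_read(void)
{
	/* pretend the TGO bit clears on the third read */
	return ++reads >= 3 ? 0 : 1;
}

static int poll_until_clear(long delay_us, long timeout_us)
{
	struct timespec start, now, d = { 0, delay_us * 1000L };

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		if (!(reg_read() & 1))	/* condition: TGO cleared */
			return 0;
		clock_gettime(CLOCK_MONOTONIC, &now);
		if ((now.tv_sec - start.tv_sec) * 1000000L +
		    (now.tv_nsec - start.tv_nsec) / 1000L >= timeout_us)
			return -ETIMEDOUT;
		nanosleep(&d, NULL);
	}
}

int main(void)
{
	printf("poll result: %d\n", poll_until_clear(250, 10000));
	return 0;
}
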
@@ -1090,15 +1069,18 @@ static void macb_tx_error_task(struct work_struct *work)
tx_error_task);
bool halt_timeout = false;
struct macb *bp = queue->bp;
+ u32 queue_index;
+ u32 packets = 0;
+ u32 bytes = 0;
struct macb_tx_skb *tx_skb;
struct macb_dma_desc *desc;
struct sk_buff *skb;
unsigned int tail;
unsigned long flags;
+ queue_index = queue - bp->queues;
netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n",
- (unsigned int)(queue - bp->queues),
- queue->tx_tail, queue->tx_head);
+ queue_index, queue->tx_tail, queue->tx_head);
/* Prevent the queue NAPI TX poll from running, as it calls
* macb_tx_complete(), which in turn may call netif_wake_subqueue().
@@ -1151,8 +1133,10 @@ static void macb_tx_error_task(struct work_struct *work)
skb->data);
bp->dev->stats.tx_packets++;
queue->stats.tx_packets++;
+ packets++;
bp->dev->stats.tx_bytes += skb->len;
queue->stats.tx_bytes += skb->len;
+ bytes += skb->len;
}
} else {
/* "Buffers exhausted mid-frame" errors may only happen
@@ -1169,6 +1153,9 @@ static void macb_tx_error_task(struct work_struct *work)
macb_tx_unmap(bp, tx_skb, 0);
}
+ netdev_tx_completed_queue(netdev_get_tx_queue(bp->dev, queue_index),
+ packets, bytes);
+
/* Set end of TX queue */
desc = macb_tx_desc(queue, 0);
macb_set_addr(bp, desc, 0);
@@ -1239,6 +1226,7 @@ static int macb_tx_complete(struct macb_queue *queue, int budget)
unsigned int tail;
unsigned int head;
int packets = 0;
+ u32 bytes = 0;
spin_lock(&queue->tx_ptr_lock);
head = queue->tx_head;
@@ -1280,6 +1268,7 @@ static int macb_tx_complete(struct macb_queue *queue, int budget)
bp->dev->stats.tx_bytes += skb->len;
queue->stats.tx_bytes += skb->len;
packets++;
+ bytes += skb->len;
}
/* Now we can safely release resources */
@@ -1294,6 +1283,9 @@ static int macb_tx_complete(struct macb_queue *queue, int budget)
}
}
+ netdev_tx_completed_queue(netdev_get_tx_queue(bp->dev, queue_index),
+ packets, bytes);
+
queue->tx_tail = tail;
if (__netif_subqueue_stopped(bp->dev, queue_index) &&
CIRC_CNT(queue->tx_head, queue->tx_tail,
@@ -1987,10 +1979,12 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
if (status & MACB_BIT(ISR_ROVR)) {
/* We missed at least one packet */
+ spin_lock(&bp->stats_lock);
if (macb_is_gem(bp))
bp->hw_stats.gem.rx_overruns++;
else
bp->hw_stats.macb.rx_overruns++;
+ spin_unlock(&bp->stats_lock);
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
queue_writel(queue, ISR, MACB_BIT(ISR_ROVR));
@@ -2395,6 +2389,8 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Make newly initialized descriptor visible to hardware */
wmb();
skb_tx_timestamp(skb);
+ netdev_tx_sent_queue(netdev_get_tx_queue(bp->dev, queue_index),
+ skb->len);
spin_lock_irq(&bp->lock);
macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
@@ -3030,6 +3026,7 @@ static int macb_close(struct net_device *dev)
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
napi_disable(&queue->napi_rx);
napi_disable(&queue->napi_tx);
+ netdev_tx_reset_queue(netdev_get_tx_queue(dev, q));
}
phylink_stop(bp->phylink);
@@ -3080,7 +3077,7 @@ static void gem_update_stats(struct macb *bp)
unsigned int i, q, idx;
unsigned long *stat;
- u32 *p = &bp->hw_stats.gem.tx_octets_31_0;
+ u64 *p = &bp->hw_stats.gem.tx_octets;
for (i = 0; i < GEM_STATS_LEN; ++i, ++p) {
u32 offset = gem_statistics[i].offset;
@@ -3093,7 +3090,7 @@ static void gem_update_stats(struct macb *bp)
/* Add GEM_OCTTXH, GEM_OCTRXH */
val = bp->macb_reg_readl(bp, offset + 4);
bp->ethtool_stats[i] += ((u64)val) << 32;
- *(++p) += val;
+ *(p++) += ((u64)val) << 32;
}
}
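
With struct gem_stats switched to single u64 octet counters, gem_update_stats() now folds the high 32-bit register half into the same field (shifted up by 32) instead of a separate *_47_32 member. A trivial sketch of that accumulation with stand-in register values:

/* reg_lo mimics GEM_OCTTXL (bits 31:0) and reg_hi mimics GEM_OCTTXH
 * (bits 47:32); both halves land in one u64 counter.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t tx_octets = 0;
	uint32_t reg_lo = 0x89abcdef;
	uint32_t reg_hi = 0x00001234;

	tx_octets += reg_lo;
	tx_octets += (uint64_t)reg_hi << 32;

	printf("tx_octets = 0x%012" PRIx64 "\n", tx_octets);
	return 0;
}
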
@@ -3103,15 +3100,13 @@ static void gem_update_stats(struct macb *bp)
bp->ethtool_stats[idx++] = *stat;
}
-static struct net_device_stats *gem_get_stats(struct macb *bp)
+static void gem_get_stats(struct macb *bp, struct rtnl_link_stats64 *nstat)
{
struct gem_stats *hwstat = &bp->hw_stats.gem;
- struct net_device_stats *nstat = &bp->dev->stats;
- if (!netif_running(bp->dev))
- return nstat;
-
- gem_update_stats(bp);
+ spin_lock_irq(&bp->stats_lock);
+ if (netif_running(bp->dev))
+ gem_update_stats(bp);
nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
hwstat->rx_alignment_errors +
@@ -3140,19 +3135,19 @@ static struct net_device_stats *gem_get_stats(struct macb *bp)
nstat->tx_aborted_errors = hwstat->tx_excessive_collisions;
nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors;
nstat->tx_fifo_errors = hwstat->tx_underrun;
-
- return nstat;
+ spin_unlock_irq(&bp->stats_lock);
}
static void gem_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
- struct macb *bp;
+ struct macb *bp = netdev_priv(dev);
- bp = netdev_priv(dev);
+ spin_lock_irq(&bp->stats_lock);
gem_update_stats(bp);
memcpy(data, &bp->ethtool_stats, sizeof(u64)
* (GEM_STATS_LEN + QUEUE_STATS_LEN * MACB_MAX_QUEUES));
+ spin_unlock_irq(&bp->stats_lock);
}
static int gem_get_sset_count(struct net_device *dev, int sset)
@@ -3192,16 +3187,20 @@ static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
}
}
-static struct net_device_stats *macb_get_stats(struct net_device *dev)
+static void macb_get_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *nstat)
{
struct macb *bp = netdev_priv(dev);
- struct net_device_stats *nstat = &bp->dev->stats;
struct macb_stats *hwstat = &bp->hw_stats.macb;
- if (macb_is_gem(bp))
- return gem_get_stats(bp);
+ netdev_stats_to_stats64(nstat, &bp->dev->stats);
+ if (macb_is_gem(bp)) {
+ gem_get_stats(bp, nstat);
+ return;
+ }
/* read stats from hardware */
+ spin_lock_irq(&bp->stats_lock);
macb_update_stats(bp);
/* Convert HW stats into netdevice stats */
@@ -3235,8 +3234,171 @@ static struct net_device_stats *macb_get_stats(struct net_device *dev)
nstat->tx_carrier_errors = hwstat->tx_carrier_errors;
nstat->tx_fifo_errors = hwstat->tx_underruns;
/* Don't know about heartbeat or window errors... */
+ spin_unlock_irq(&bp->stats_lock);
+}
+
+static void macb_get_pause_stats(struct net_device *dev,
+ struct ethtool_pause_stats *pause_stats)
+{
+ struct macb *bp = netdev_priv(dev);
+ struct macb_stats *hwstat = &bp->hw_stats.macb;
+
+ spin_lock_irq(&bp->stats_lock);
+ macb_update_stats(bp);
+ pause_stats->tx_pause_frames = hwstat->tx_pause_frames;
+ pause_stats->rx_pause_frames = hwstat->rx_pause_frames;
+ spin_unlock_irq(&bp->stats_lock);
+}
+
+static void gem_get_pause_stats(struct net_device *dev,
+ struct ethtool_pause_stats *pause_stats)
+{
+ struct macb *bp = netdev_priv(dev);
+ struct gem_stats *hwstat = &bp->hw_stats.gem;
+
+ spin_lock_irq(&bp->stats_lock);
+ gem_update_stats(bp);
+ pause_stats->tx_pause_frames = hwstat->tx_pause_frames;
+ pause_stats->rx_pause_frames = hwstat->rx_pause_frames;
+ spin_unlock_irq(&bp->stats_lock);
+}
- return nstat;
+static void macb_get_eth_mac_stats(struct net_device *dev,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct macb *bp = netdev_priv(dev);
+ struct macb_stats *hwstat = &bp->hw_stats.macb;
+
+ spin_lock_irq(&bp->stats_lock);
+ macb_update_stats(bp);
+ mac_stats->FramesTransmittedOK = hwstat->tx_ok;
+ mac_stats->SingleCollisionFrames = hwstat->tx_single_cols;
+ mac_stats->MultipleCollisionFrames = hwstat->tx_multiple_cols;
+ mac_stats->FramesReceivedOK = hwstat->rx_ok;
+ mac_stats->FrameCheckSequenceErrors = hwstat->rx_fcs_errors;
+ mac_stats->AlignmentErrors = hwstat->rx_align_errors;
+ mac_stats->FramesWithDeferredXmissions = hwstat->tx_deferred;
+ mac_stats->LateCollisions = hwstat->tx_late_cols;
+ mac_stats->FramesAbortedDueToXSColls = hwstat->tx_excessive_cols;
+ mac_stats->FramesLostDueToIntMACXmitError = hwstat->tx_underruns;
+ mac_stats->CarrierSenseErrors = hwstat->tx_carrier_errors;
+ mac_stats->FramesLostDueToIntMACRcvError = hwstat->rx_overruns;
+ mac_stats->InRangeLengthErrors = hwstat->rx_length_mismatch;
+ mac_stats->FrameTooLongErrors = hwstat->rx_oversize_pkts;
+ spin_unlock_irq(&bp->stats_lock);
+}
+
+static void gem_get_eth_mac_stats(struct net_device *dev,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct macb *bp = netdev_priv(dev);
+ struct gem_stats *hwstat = &bp->hw_stats.gem;
+
+ spin_lock_irq(&bp->stats_lock);
+ gem_update_stats(bp);
+ mac_stats->FramesTransmittedOK = hwstat->tx_frames;
+ mac_stats->SingleCollisionFrames = hwstat->tx_single_collision_frames;
+ mac_stats->MultipleCollisionFrames =
+ hwstat->tx_multiple_collision_frames;
+ mac_stats->FramesReceivedOK = hwstat->rx_frames;
+ mac_stats->FrameCheckSequenceErrors =
+ hwstat->rx_frame_check_sequence_errors;
+ mac_stats->AlignmentErrors = hwstat->rx_alignment_errors;
+ mac_stats->OctetsTransmittedOK = hwstat->tx_octets;
+ mac_stats->FramesWithDeferredXmissions = hwstat->tx_deferred_frames;
+ mac_stats->LateCollisions = hwstat->tx_late_collisions;
+ mac_stats->FramesAbortedDueToXSColls = hwstat->tx_excessive_collisions;
+ mac_stats->FramesLostDueToIntMACXmitError = hwstat->tx_underrun;
+ mac_stats->CarrierSenseErrors = hwstat->tx_carrier_sense_errors;
+ mac_stats->OctetsReceivedOK = hwstat->rx_octets;
+ mac_stats->MulticastFramesXmittedOK = hwstat->tx_multicast_frames;
+ mac_stats->BroadcastFramesXmittedOK = hwstat->tx_broadcast_frames;
+ mac_stats->MulticastFramesReceivedOK = hwstat->rx_multicast_frames;
+ mac_stats->BroadcastFramesReceivedOK = hwstat->rx_broadcast_frames;
+ mac_stats->InRangeLengthErrors = hwstat->rx_length_field_frame_errors;
+ mac_stats->FrameTooLongErrors = hwstat->rx_oversize_frames;
+ spin_unlock_irq(&bp->stats_lock);
+}
+
+/* TODO: Report SQE test errors when added to phy_stats */
+static void macb_get_eth_phy_stats(struct net_device *dev,
+ struct ethtool_eth_phy_stats *phy_stats)
+{
+ struct macb *bp = netdev_priv(dev);
+ struct macb_stats *hwstat = &bp->hw_stats.macb;
+
+ spin_lock_irq(&bp->stats_lock);
+ macb_update_stats(bp);
+ phy_stats->SymbolErrorDuringCarrier = hwstat->rx_symbol_errors;
+ spin_unlock_irq(&bp->stats_lock);
+}
+
+static void gem_get_eth_phy_stats(struct net_device *dev,
+ struct ethtool_eth_phy_stats *phy_stats)
+{
+ struct macb *bp = netdev_priv(dev);
+ struct gem_stats *hwstat = &bp->hw_stats.gem;
+
+ spin_lock_irq(&bp->stats_lock);
+ gem_update_stats(bp);
+ phy_stats->SymbolErrorDuringCarrier = hwstat->rx_symbol_errors;
+ spin_unlock_irq(&bp->stats_lock);
+}
+
+static void macb_get_rmon_stats(struct net_device *dev,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct macb *bp = netdev_priv(dev);
+ struct macb_stats *hwstat = &bp->hw_stats.macb;
+
+ spin_lock_irq(&bp->stats_lock);
+ macb_update_stats(bp);
+ rmon_stats->undersize_pkts = hwstat->rx_undersize_pkts;
+ rmon_stats->oversize_pkts = hwstat->rx_oversize_pkts;
+ rmon_stats->jabbers = hwstat->rx_jabbers;
+ spin_unlock_irq(&bp->stats_lock);
+}
+
+static const struct ethtool_rmon_hist_range gem_rmon_ranges[] = {
+ { 64, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1518 },
+ { 1519, 16384 },
+ { },
+};
+
+static void gem_get_rmon_stats(struct net_device *dev,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct macb *bp = netdev_priv(dev);
+ struct gem_stats *hwstat = &bp->hw_stats.gem;
+
+ spin_lock_irq(&bp->stats_lock);
+ gem_update_stats(bp);
+ rmon_stats->undersize_pkts = hwstat->rx_undersized_frames;
+ rmon_stats->oversize_pkts = hwstat->rx_oversize_frames;
+ rmon_stats->jabbers = hwstat->rx_jabbers;
+ rmon_stats->hist[0] = hwstat->rx_64_byte_frames;
+ rmon_stats->hist[1] = hwstat->rx_65_127_byte_frames;
+ rmon_stats->hist[2] = hwstat->rx_128_255_byte_frames;
+ rmon_stats->hist[3] = hwstat->rx_256_511_byte_frames;
+ rmon_stats->hist[4] = hwstat->rx_512_1023_byte_frames;
+ rmon_stats->hist[5] = hwstat->rx_1024_1518_byte_frames;
+ rmon_stats->hist[6] = hwstat->rx_greater_than_1518_byte_frames;
+ rmon_stats->hist_tx[0] = hwstat->tx_64_byte_frames;
+ rmon_stats->hist_tx[1] = hwstat->tx_65_127_byte_frames;
+ rmon_stats->hist_tx[2] = hwstat->tx_128_255_byte_frames;
+ rmon_stats->hist_tx[3] = hwstat->tx_256_511_byte_frames;
+ rmon_stats->hist_tx[4] = hwstat->tx_512_1023_byte_frames;
+ rmon_stats->hist_tx[5] = hwstat->tx_1024_1518_byte_frames;
+ rmon_stats->hist_tx[6] = hwstat->tx_greater_than_1518_byte_frames;
+ spin_unlock_irq(&bp->stats_lock);
+ *ranges = gem_rmon_ranges;
}
static int macb_get_regs_len(struct net_device *netdev)
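
gem_get_rmon_stats() fills hist[]/hist_tx[] from the per-size GEM counters, and gem_rmon_ranges[] tells ethtool which frame lengths each bucket covers. A small sketch that maps a frame length to its bucket index using the same ranges:

/* The ranges mirror gem_rmon_ranges[] from the patch; bucket_for() is an
 * illustrative helper, not a driver function.
 */
#include <stdio.h>

struct rmon_hist_range { unsigned int low, high; };

static const struct rmon_hist_range gem_rmon_ranges[] = {
	{ 64, 64 }, { 65, 127 }, { 128, 255 }, { 256, 511 },
	{ 512, 1023 }, { 1024, 1518 }, { 1519, 16384 }, { 0, 0 },
};

static int bucket_for(unsigned int len)
{
	for (int i = 0; gem_rmon_ranges[i].high; i++)
		if (len >= gem_rmon_ranges[i].low &&
		    len <= gem_rmon_ranges[i].high)
			return i;
	return -1;	/* runts below 64 bytes are not binned */
}

int main(void)
{
	printf("64 -> hist[%d], 1500 -> hist[%d], 9000 -> hist[%d]\n",
	       bucket_for(64), bucket_for(1500), bucket_for(9000));
	return 0;
}
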
@@ -3765,6 +3927,10 @@ static const struct ethtool_ops macb_ethtool_ops = {
.get_regs = macb_get_regs,
.get_link = ethtool_op_get_link,
.get_ts_info = ethtool_op_get_ts_info,
+ .get_pause_stats = macb_get_pause_stats,
+ .get_eth_mac_stats = macb_get_eth_mac_stats,
+ .get_eth_phy_stats = macb_get_eth_phy_stats,
+ .get_rmon_stats = macb_get_rmon_stats,
.get_wol = macb_get_wol,
.set_wol = macb_set_wol,
.get_link_ksettings = macb_get_link_ksettings,
@@ -3783,6 +3949,10 @@ static const struct ethtool_ops gem_ethtool_ops = {
.get_ethtool_stats = gem_get_ethtool_stats,
.get_strings = gem_get_ethtool_strings,
.get_sset_count = gem_get_sset_count,
+ .get_pause_stats = gem_get_pause_stats,
+ .get_eth_mac_stats = gem_get_eth_mac_stats,
+ .get_eth_phy_stats = gem_get_eth_phy_stats,
+ .get_rmon_stats = gem_get_rmon_stats,
.get_link_ksettings = macb_get_link_ksettings,
.set_link_ksettings = macb_set_link_ksettings,
.get_ringparam = macb_get_ringparam,
@@ -3919,7 +4089,7 @@ static const struct net_device_ops macb_netdev_ops = {
.ndo_stop = macb_close,
.ndo_start_xmit = macb_start_xmit,
.ndo_set_rx_mode = macb_set_rx_mode,
- .ndo_get_stats = macb_get_stats,
+ .ndo_get_stats64 = macb_get_stats,
.ndo_eth_ioctl = macb_ioctl,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = macb_change_mtu,
@@ -4580,7 +4750,7 @@ static const struct net_device_ops at91ether_netdev_ops = {
.ndo_open = at91ether_open,
.ndo_stop = at91ether_close,
.ndo_start_xmit = at91ether_start_xmit,
- .ndo_get_stats = macb_get_stats,
+ .ndo_get_stats64 = macb_get_stats,
.ndo_set_rx_mode = macb_set_rx_mode,
.ndo_set_mac_address = eth_mac_addr,
.ndo_eth_ioctl = macb_ioctl,
@@ -5106,13 +5276,18 @@ static int macb_probe(struct platform_device *pdev)
}
}
spin_lock_init(&bp->lock);
+ spin_lock_init(&bp->stats_lock);
/* setup capabilities */
macb_configure_caps(bp, macb_config);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) {
- dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
+ if (err) {
+ dev_err(&pdev->dev, "failed to set DMA mask\n");
+ goto err_out_free_netdev;
+ }
bp->hw_dma_cap |= HW_DMA_CAP_64B;
}
#endif
diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig
index ca742cc146d7..7dae5aad3689 100644
--- a/drivers/net/ethernet/cavium/Kconfig
+++ b/drivers/net/ethernet/cavium/Kconfig
@@ -70,8 +70,8 @@ config LIQUIDIO
depends on 64BIT && PCI
depends on PCI
depends on PTP_1588_CLOCK_OPTIONAL
+ select CRC32
select FW_LOADER
- select LIBCRC32C
select LIQUIDIO_CORE
select NET_DEVLINK
help
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
index 9ad49aea2673..ff8f2f9f9cae 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
@@ -49,7 +49,7 @@ static int cn23xx_pf_soft_reset(struct octeon_device *oct)
lio_pci_readq(oct, CN23XX_RST_SOFT_RST);
lio_pci_writeq(oct, 1, CN23XX_RST_SOFT_RST);
- /* Wait for 100ms as Octeon resets. */
+ /* Wait for 100ms as Octeon resets */
mdelay(100);
if (octeon_read_csr64(oct, CN23XX_SLI_SCRATCH1)) {
@@ -61,7 +61,7 @@ static int cn23xx_pf_soft_reset(struct octeon_device *oct)
dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: Reset completed\n",
oct->octeon_id);
- /* restore the reset value*/
+ /* Restore the reset value */
octeon_write_csr64(oct, CN23XX_WIN_WR_MASK_REG, 0xFF);
return 0;
@@ -121,7 +121,7 @@ u32 cn23xx_pf_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us)
oqticks_per_us /= 1024;
/* time_intr is in microseconds. The next 2 steps gives the oq ticks
- * corressponding to time_intr.
+ * corresponding to time_intr.
*/
oqticks_per_us *= time_intr_in_us;
oqticks_per_us /= 1000;
@@ -136,11 +136,11 @@ static void cn23xx_setup_global_mac_regs(struct octeon_device *oct)
u64 reg_val;
u64 temp;
- /* programming SRN and TRS for each MAC(0..3) */
+ /* Programming SRN and TRS for each MAC(0..3) */
dev_dbg(&oct->pci_dev->dev, "%s:Using pcie port %d\n",
__func__, mac_no);
- /* By default, mapping all 64 IOQs to a single MACs */
+ /* By default, map all 64 IOQs to a single MAC */
reg_val =
octeon_read_csr64(oct, CN23XX_SLI_PKT_MAC_RINFO64(mac_no, pf_num));
@@ -164,7 +164,7 @@ static void cn23xx_setup_global_mac_regs(struct octeon_device *oct)
temp = oct->sriov_info.max_vfs & 0xff;
reg_val |= (temp << CN23XX_PKT_MAC_CTL_RINFO_NVFS_BIT_POS);
- /* write these settings to MAC register */
+ /* Write these settings to MAC register */
octeon_write_csr64(oct, CN23XX_SLI_PKT_MAC_RINFO64(mac_no, pf_num),
reg_val);
@@ -183,10 +183,10 @@ static int cn23xx_reset_io_queues(struct octeon_device *oct)
srn = oct->sriov_info.pf_srn;
ern = srn + oct->sriov_info.num_pf_rings;
- /*As per HRM reg description, s/w cant write 0 to ENB. */
- /*to make the queue off, need to set the RST bit. */
+ /* As per HRM reg description, s/w can't write 0 to ENB. */
+ /* We need to set the RST bit, to turn the queue off. */
- /* Reset the Enable bit for all the 64 IQs. */
+ /* Reset the enable bit for all the 64 IQs. */
for (q_no = srn; q_no < ern; q_no++) {
/* set RST bit to 1. This bit applies to both IQ and OQ */
d64 = octeon_read_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
@@ -194,7 +194,7 @@ static int cn23xx_reset_io_queues(struct octeon_device *oct)
octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no), d64);
}
- /*wait until the RST bit is clear or the RST and quite bits are set*/
+ /* Wait until the RST bit is clear or the RST and quiet bits are set */
for (q_no = srn; q_no < ern; q_no++) {
u64 reg_val = octeon_read_csr64(oct,
CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
@@ -245,15 +245,15 @@ static int cn23xx_pf_setup_global_input_regs(struct octeon_device *oct)
if (cn23xx_reset_io_queues(oct))
return -1;
- /** Set the MAC_NUM and PVF_NUM in IQ_PKT_CONTROL reg
- * for all queues.Only PF can set these bits.
+ /* Set the MAC_NUM and PVF_NUM in IQ_PKT_CONTROL reg
+ * for all queues. Only PF can set these bits.
* bits 29:30 indicate the MAC num.
* bits 32:47 indicate the PVF num.
*/
for (q_no = 0; q_no < ern; q_no++) {
reg_val = (u64)oct->pcie_port << CN23XX_PKT_INPUT_CTL_MAC_NUM_POS;
- /* for VF assigned queues. */
+ /* For VF assigned queues. */
if (q_no < oct->sriov_info.pf_srn) {
vf_num = q_no / oct->sriov_info.rings_per_vf;
vf_num += 1; /* VF1, VF2,........ */
@@ -268,7 +268,7 @@ static int cn23xx_pf_setup_global_input_regs(struct octeon_device *oct)
reg_val);
}
- /* Select ES, RO, NS, RDSIZE,DPTR Fomat#0 for
+ /* Select ES, RO, NS, RDSIZE,DPTR Format#0 for
* pf queues
*/
for (q_no = srn; q_no < ern; q_no++) {
@@ -289,7 +289,7 @@ static int cn23xx_pf_setup_global_input_regs(struct octeon_device *oct)
octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
reg_val);
- /* Set WMARK level for triggering PI_INT */
+ /* Set WMARK level to trigger PI_INT */
/* intr_threshold = CN23XX_DEF_IQ_INTR_THRESHOLD & */
intr_threshold = CFG_GET_IQ_INTR_PKT(cn23xx->conf) &
CN23XX_PKT_IN_DONE_WMARK_MASK;
@@ -354,7 +354,7 @@ static void cn23xx_pf_setup_global_output_regs(struct octeon_device *oct)
/* set the ES bit */
reg_val |= (CN23XX_PKT_OUTPUT_CTL_ES);
- /* write all the selected settings */
+ /* Write all the selected settings */
octeon_write_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no), reg_val);
/* Enabling these interrupt in oct->fn_list.enable_interrupt()
@@ -373,7 +373,7 @@ static void cn23xx_pf_setup_global_output_regs(struct octeon_device *oct)
/** Setting the water mark level for pko back pressure **/
writeq(0x40, (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OQ_WMARK);
- /** Disabling setting OQs in reset when ring has no dorebells
+ /* Disabling setting OQs in reset when ring has no doorbells
* enabling this will cause of head of line blocking
*/
/* Do it only for pass1.1. and pass1.2 */
@@ -383,7 +383,7 @@ static void cn23xx_pf_setup_global_output_regs(struct octeon_device *oct)
CN23XX_SLI_GBL_CONTROL) | 0x2,
(u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_GBL_CONTROL);
- /** Enable channel-level backpressure */
+ /* Enable channel-level backpressure */
if (oct->pf_num)
writeq(0xffffffffffffffffULL,
(u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OUT_BP_EN2_W1S);
@@ -396,7 +396,7 @@ static int cn23xx_setup_pf_device_regs(struct octeon_device *oct)
{
cn23xx_enable_error_reporting(oct);
- /* program the MAC(0..3)_RINFO before setting up input/output regs */
+ /* Program the MAC(0..3)_RINFO before setting up input/output regs */
cn23xx_setup_global_mac_regs(oct);
if (cn23xx_pf_setup_global_input_regs(oct))
@@ -410,7 +410,7 @@ static int cn23xx_setup_pf_device_regs(struct octeon_device *oct)
octeon_write_csr64(oct, CN23XX_SLI_WINDOW_CTL,
CN23XX_SLI_WINDOW_CTL_DEFAULT);
- /* set SLI_PKT_IN_JABBER to handle large VXLAN packets */
+ /* Set SLI_PKT_IN_JABBER to handle large VXLAN packets */
octeon_write_csr64(oct, CN23XX_SLI_PKT_IN_JABBER, CN23XX_INPUT_JABBER);
return 0;
}
@@ -574,7 +574,7 @@ static int cn23xx_setup_pf_mbox(struct octeon_device *oct)
mbox->mbox_read_reg = (u8 *)oct->mmio[0].hw_addr +
CN23XX_SLI_PKT_PF_VF_MBOX_SIG(q_no, 1);
- /*Mail Box Thread creation*/
+ /* Mail Box Thread creation */
INIT_DELAYED_WORK(&mbox->mbox_poll_wk.work,
cn23xx_pf_mbox_thread);
mbox->mbox_poll_wk.ctxptr = (void *)mbox;
@@ -626,7 +626,7 @@ static int cn23xx_enable_io_queues(struct octeon_device *oct)
ern = srn + oct->num_iqs;
for (q_no = srn; q_no < ern; q_no++) {
- /* set the corresponding IQ IS_64B bit */
+ /* Set the corresponding IQ IS_64B bit */
if (oct->io_qmask.iq64B & BIT_ULL(q_no - srn)) {
reg_val = octeon_read_csr64(
oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
@@ -635,7 +635,7 @@ static int cn23xx_enable_io_queues(struct octeon_device *oct)
oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no), reg_val);
}
- /* set the corresponding IQ ENB bit */
+ /* Set the corresponding IQ ENB bit */
if (oct->io_qmask.iq & BIT_ULL(q_no - srn)) {
/* IOQs are in reset by default in PEM2 mode,
* clearing reset bit
@@ -681,7 +681,7 @@ static int cn23xx_enable_io_queues(struct octeon_device *oct)
}
for (q_no = srn; q_no < ern; q_no++) {
u32 reg_val;
- /* set the corresponding OQ ENB bit */
+ /* Set the corresponding OQ ENB bit */
if (oct->io_qmask.oq & BIT_ULL(q_no - srn)) {
reg_val = octeon_read_csr(
oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no));
@@ -707,7 +707,7 @@ static void cn23xx_disable_io_queues(struct octeon_device *oct)
for (q_no = srn; q_no < ern; q_no++) {
loop = HZ;
- /* start the Reset for a particular ring */
+ /* Start the Reset for a particular ring */
WRITE_ONCE(d64, octeon_read_csr64(
oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no)));
WRITE_ONCE(d64, READ_ONCE(d64) &
@@ -740,7 +740,7 @@ static void cn23xx_disable_io_queues(struct octeon_device *oct)
loop = HZ;
/* Wait until hardware indicates that the particular IQ
- * is out of reset.It given that SLI_PKT_RING_RST is
+ * is out of reset. Given that SLI_PKT_RING_RST is
* common for both IQs and OQs
*/
WRITE_ONCE(d64, octeon_read_csr64(
@@ -760,7 +760,7 @@ static void cn23xx_disable_io_queues(struct octeon_device *oct)
schedule_timeout_uninterruptible(1);
}
- /* clear the SLI_PKT(0..63)_CNTS[CNT] reg value */
+ /* Clear the SLI_PKT(0..63)_CNTS[CNT] reg value */
WRITE_ONCE(d32, octeon_read_csr(
oct, CN23XX_SLI_OQ_PKTS_SENT(q_no)));
octeon_write_csr(oct, CN23XX_SLI_OQ_PKTS_SENT(q_no),
@@ -793,7 +793,7 @@ static u64 cn23xx_pf_msix_interrupt_handler(void *dev)
if (!pkts_sent || (pkts_sent == 0xFFFFFFFFFFFFFFFFULL))
return ret;
- /* Write count reg in sli_pkt_cnts to clear these int.*/
+ /* Write count reg in sli_pkt_cnts to clear these int. */
if ((pkts_sent & CN23XX_INTR_PO_INT) ||
(pkts_sent & CN23XX_INTR_PI_INT)) {
if (pkts_sent & CN23XX_INTR_PO_INT)
@@ -908,7 +908,7 @@ static u32 cn23xx_bar1_idx_read(struct octeon_device *oct, u32 idx)
oct, CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx));
}
-/* always call with lock held */
+/* Always call with lock held */
static u32 cn23xx_update_read_index(struct octeon_instr_queue *iq)
{
u32 new_idx;
@@ -919,7 +919,7 @@ static u32 cn23xx_update_read_index(struct octeon_instr_queue *iq)
iq->pkt_in_done = pkt_in_done;
/* Modulo of the new index with the IQ size will give us
- * the new index. The iq->reset_instr_cnt is always zero for
+ * the new index. The iq->reset_instr_cnt is always zero for
* cn23xx, so no extra adjustments are needed.
*/
new_idx = (iq->octeon_read_index +
@@ -934,8 +934,8 @@ static void cn23xx_enable_pf_interrupt(struct octeon_device *oct, u8 intr_flag)
struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
u64 intr_val = 0;
- /* Divide the single write to multiple writes based on the flag. */
- /* Enable Interrupt */
+ /* Divide the single write to multiple writes based on the flag. */
+ /* Enable Interrupts */
if (intr_flag == OCTEON_ALL_INTR) {
writeq(cn23xx->intr_mask64, cn23xx->intr_enb_reg64);
} else if (intr_flag & OCTEON_OUTPUT_INTR) {
@@ -990,7 +990,7 @@ static int cn23xx_get_pf_num(struct octeon_device *oct)
ret = 0;
- /** Read Function Dependency Link reg to get the function number */
+ /* Read Function Dependency Link reg to get the function number */
if (pci_read_config_dword(oct->pci_dev, CN23XX_PCIE_SRIOV_FDL,
&fdl_bit) == 0) {
oct->pf_num = ((fdl_bit >> CN23XX_PCIE_SRIOV_FDL_BIT_POS) &
@@ -1003,13 +1003,13 @@ static int cn23xx_get_pf_num(struct octeon_device *oct)
* In this case, read the PF number from the
* SLI_PKT0_INPUT_CONTROL reg (written by f/w)
*/
- pkt0_in_ctl = octeon_read_csr64(oct,
- CN23XX_SLI_IQ_PKT_CONTROL64(0));
+ pkt0_in_ctl =
+ octeon_read_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(0));
pfnum = (pkt0_in_ctl >> CN23XX_PKT_INPUT_CTL_PF_NUM_POS) &
CN23XX_PKT_INPUT_CTL_PF_NUM_MASK;
mac = (octeon_read_csr(oct, CN23XX_SLI_MAC_NUMBER)) & 0xff;
- /* validate PF num by reading RINFO; f/w writes RINFO.trs == 1*/
+ /* Validate PF num by reading RINFO; f/w writes RINFO.trs == 1 */
d64 = octeon_read_csr64(oct,
CN23XX_SLI_PKT_MAC_RINFO64(mac, pfnum));
trs = (int)(d64 >> CN23XX_PKT_MAC_CTL_RINFO_TRS_BIT_POS) & 0xff;
@@ -1252,9 +1252,9 @@ int cn23xx_fw_loaded(struct octeon_device *oct)
u64 val;
/* If there's more than one active PF on this NIC, then that
- * implies that the NIC firmware is loaded and running. This check
+ * implies that the NIC firmware is loaded and running. This check
* prevents a rare false negative that might occur if we only relied
- * on checking the SCR2_BIT_FW_LOADED flag. The false negative would
+ * on checking the SCR2_BIT_FW_LOADED flag. The false negative would
* happen if the PF driver sees SCR2_BIT_FW_LOADED as cleared even
* though the firmware was already loaded but still booting and has yet
* to set SCR2_BIT_FW_LOADED.
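[Editorial note, not part of the patch: one comment touched above, in cn23xx_update_read_index(), describes taking the advance since the last check modulo the instruction-queue size to get the new read index. As a rough illustration of that arithmetic only (not the driver's actual code):

/* Hedged sketch of wrap-around read-index arithmetic, assuming a ring of
 * max_count entries and a free-running hardware completion counter. */
static u32 advance_read_index(u32 old_idx, u32 old_cnt, u32 new_cnt,
			      u32 max_count)
{
	u32 delta = new_cnt - old_cnt;	/* entries completed since last check */

	return (old_idx + delta) % max_count;
}
]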
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
index 6b6cb73482d7..1753bb87dfbd 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
@@ -1433,22 +1433,6 @@ int octeon_wait_for_ddr_init(struct octeon_device *oct, u32 *timeout)
}
EXPORT_SYMBOL_GPL(octeon_wait_for_ddr_init);
-/* Get the octeon id assigned to the octeon device passed as argument.
- * This function is exported to other modules.
- * @param dev - octeon device pointer passed as a void *.
- * @return octeon device id
- */
-int lio_get_device_id(void *dev)
-{
- struct octeon_device *octeon_dev = (struct octeon_device *)dev;
- u32 i;
-
- for (i = 0; i < MAX_OCTEON_DEVICES; i++)
- if (octeon_device[i] == octeon_dev)
- return octeon_dev->octeon_id;
- return -1;
-}
-
void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq)
{
u64 instr_cnt;
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
index d26364c2ac81..19344b21f8fb 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
@@ -705,13 +705,6 @@ octeon_get_dispatch(struct octeon_device *octeon_dev, u16 opcode,
*/
struct octeon_device *lio_get_device(u32 octeon_id);
-/** Get the octeon id assigned to the octeon device passed as argument.
- * This function is exported to other modules.
- * @param dev - octeon device pointer passed as a void *.
- * @return octeon device id
- */
-int lio_get_device_id(void *dev);
-
/** Read windowed register.
* @param oct - pointer to the Octeon device.
* @param addr - Address of the register to read.
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index 06397cc8bb36..5211759bfe47 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -1389,11 +1389,9 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
this_cpu_inc(nic->pnicvf->drv_stats->tx_tso);
}
- /* Check if timestamp is requested */
- if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
- skb_tx_timestamp(skb);
+ /* Check if hw timestamp is requested */
+ if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
return;
- }
/* Tx timestamping not supported along with TSO, so ignore request */
if (skb_shinfo(skb)->gso_size)
@@ -1472,6 +1470,8 @@ static inline void nicvf_sq_doorbell(struct nicvf *nic, struct sk_buff *skb,
netdev_tx_sent_queue(txq, skb->len);
+ skb_tx_timestamp(skb);
+
/* make sure all memory stores are done before ringing doorbell */
smp_wmb();
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 608cc6af5af1..3b7ad744b2dd 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -1605,10 +1605,10 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return dev_err_probe(dev, err, "Failed to enable PCI device\n");
}
- err = pci_request_regions(pdev, DRV_NAME);
+ err = pcim_request_all_regions(pdev, DRV_NAME);
if (err) {
dev_err(dev, "PCI request regions failed 0x%x\n", err);
- goto err_disable_device;
+ goto err_zero_drv_data;
}
/* MAP configuration registers */
@@ -1616,7 +1616,7 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!bgx->reg_base) {
dev_err(dev, "BGX: Cannot map CSR memory space, aborting\n");
err = -ENOMEM;
- goto err_release_regions;
+ goto err_zero_drv_data;
}
set_max_bgx_per_node(pdev);
@@ -1688,10 +1688,7 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err_enable:
bgx_vnic[bgx->bgx_id] = NULL;
pci_free_irq(pdev, GMPX_GMI_TX_INT, bgx);
-err_release_regions:
- pci_release_regions(pdev);
-err_disable_device:
- pci_disable_device(pdev);
+err_zero_drv_data:
pci_set_drvdata(pdev, NULL);
return err;
}
@@ -1710,8 +1707,6 @@ static void bgx_remove(struct pci_dev *pdev)
pci_free_irq(pdev, GMPX_GMI_TX_INT, bgx);
bgx_vnic[bgx->bgx_id] = NULL;
- pci_release_regions(pdev);
- pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
}
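[Editorial note, not part of the patch: the thunder_bgx hunks replace pci_request_regions() with the device-managed pcim_request_all_regions() and drop the manual pci_release_regions()/pci_disable_device() calls from the error and remove paths. With the managed ("pcim_") helpers, the PCI/devres core releases those resources automatically when probe fails or the device is unbound. A minimal sketch of the pattern, with a hypothetical driver rather than the bgx code:

/* Hedged sketch: devres-managed PCI setup; no explicit release/disable
 * needed in the error path or in remove(). */
static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	void __iomem *regs;
	int err;

	err = pcim_enable_device(pdev);			/* managed enable */
	if (err)
		return err;

	err = pcim_request_all_regions(pdev, "foo");	/* managed regions */
	if (err)
		return err;

	regs = pcim_iomap(pdev, 0, 0);			/* managed BAR mapping */
	if (!regs)
		return -ENOMEM;

	/* ... driver setup; on any error just return, devres cleans up ... */
	return 0;
}
]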
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index 3b7068832f95..4a0e2d2eb60a 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -351,7 +351,7 @@ static void set_msglevel(struct net_device *dev, u32 val)
adapter->msg_enable = val;
}
-static const char stats_strings[][ETH_GSTRING_LEN] = {
+static const char stats_strings[][ETH_GSTRING_LEN] __nonstring_array = {
"TxOctetsOK",
"TxOctetsBad",
"TxUnicastFramesOK",
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c
index 861edff5ed89..5f354cf62cdd 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
@@ -1922,7 +1922,7 @@ send:
static void sge_tx_reclaim_cb(struct timer_list *t)
{
int i;
- struct sge *sge = from_timer(sge, t, tx_reclaim_timer);
+ struct sge *sge = timer_container_of(sge, t, tx_reclaim_timer);
for (i = 0; i < SGE_CMDQ_N; ++i) {
struct cmdQ *q = &sge->cmdQ[i];
@@ -1984,9 +1984,9 @@ void t1_sge_stop(struct sge *sge)
readl(sge->adapter->regs + A_SG_CONTROL); /* flush */
if (is_T2(sge->adapter))
- del_timer_sync(&sge->espibug_timer);
+ timer_delete_sync(&sge->espibug_timer);
- del_timer_sync(&sge->tx_reclaim_timer);
+ timer_delete_sync(&sge->tx_reclaim_timer);
if (sge->tx_sched)
tx_sched_stop(sge);
@@ -2017,7 +2017,7 @@ void t1_sge_start(struct sge *sge)
*/
static void espibug_workaround_t204(struct timer_list *t)
{
- struct sge *sge = from_timer(sge, t, espibug_timer);
+ struct sge *sge = timer_container_of(sge, t, espibug_timer);
struct adapter *adapter = sge->adapter;
unsigned int nports = adapter->params.nports;
u32 seop[MAX_NPORTS];
@@ -2060,7 +2060,7 @@ static void espibug_workaround_t204(struct timer_list *t)
static void espibug_workaround(struct timer_list *t)
{
- struct sge *sge = from_timer(sge, t, espibug_timer);
+ struct sge *sge = timer_container_of(sge, t, espibug_timer);
struct adapter *adapter = sge->adapter;
if (netif_running(adapter->port[0].dev)) {
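[Editorial note, not part of the patch: several Chelsio files here (and more below) are converted from from_timer()/del_timer_sync() to the renamed timer_container_of()/timer_delete_sync() helpers; the semantics are unchanged. A minimal sketch of the current API, assuming a hypothetical structure with an embedded timer:

/* Hedged sketch: embedded timer_list using the renamed helpers. */
struct foo {
	struct timer_list reclaim_timer;
};

static void foo_reclaim_cb(struct timer_list *t)
{
	/* recover the containing structure from the timer pointer */
	struct foo *foo = timer_container_of(foo, t, reclaim_timer);

	/* ... periodic work, possibly mod_timer(&foo->reclaim_timer, ...) ... */
}

static void foo_init(struct foo *foo)
{
	timer_setup(&foo->reclaim_timer, foo_reclaim_cb, 0);
}

static void foo_stop(struct foo *foo)
{
	timer_delete_sync(&foo->reclaim_timer);	/* waits for a running callback */
}
]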
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index 6268f96cb4aa..b59735d0e065 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -2906,7 +2906,7 @@ void t3_sge_err_intr_handler(struct adapter *adapter)
*/
static void sge_timer_tx(struct timer_list *t)
{
- struct sge_qset *qs = from_timer(qs, t, tx_reclaim_timer);
+ struct sge_qset *qs = timer_container_of(qs, t, tx_reclaim_timer);
struct port_info *pi = netdev_priv(qs->netdev);
struct adapter *adap = pi->adapter;
unsigned int tbd[SGE_TXQ_PER_SET] = {0, 0};
@@ -2947,7 +2947,7 @@ static void sge_timer_tx(struct timer_list *t)
static void sge_timer_rx(struct timer_list *t)
{
spinlock_t *lock;
- struct sge_qset *qs = from_timer(qs, t, rx_reclaim_timer);
+ struct sge_qset *qs = timer_container_of(qs, t, rx_reclaim_timer);
struct port_info *pi = netdev_priv(qs->netdev);
struct adapter *adap = pi->adapter;
u32 status;
@@ -3223,9 +3223,9 @@ void t3_stop_sge_timers(struct adapter *adap)
struct sge_qset *q = &adap->sge.qs[i];
if (q->tx_reclaim_timer.function)
- del_timer_sync(&q->tx_reclaim_timer);
+ timer_delete_sync(&q->tx_reclaim_timer);
if (q->rx_reclaim_timer.function)
- del_timer_sync(&q->rx_reclaim_timer);
+ timer_delete_sync(&q->rx_reclaim_timer);
}
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 75bd69ff61a8..95e6f015a6af 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -1211,9 +1211,6 @@ struct adapter {
struct timer_list flower_stats_timer;
struct work_struct flower_stats_work;
- /* Ethtool Dump */
- struct ethtool_dump eth_dump;
-
/* HMA */
struct hma_data hma;
@@ -1233,6 +1230,10 @@ struct adapter {
/* Ethtool n-tuple */
struct cxgb4_ethtool_filter *ethtool_filters;
+
+ /* Ethtool Dump */
+ /* Must be last - ends in a flex-array member. */
+ struct ethtool_dump eth_dump;
};
/* Support for "sched-class" command to allow a TX Scheduling Class to be
@@ -2076,7 +2077,7 @@ void t4_idma_monitor(struct adapter *adapter,
struct sge_idma_monitor_state *idma,
int hz, int ticks);
int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf,
- unsigned int naddr, u8 *addr);
+ u8 start, unsigned int naddr, u8 *addr);
void t4_tp_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
u32 start_index, bool sleep_ok);
void t4_tp_tm_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index 7f3f5afa864f..1546c3db08f0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -2270,6 +2270,7 @@ int cxgb4_init_ethtool_filters(struct adapter *adap)
eth_filter->port[i].bmap = bitmap_zalloc(nentries, GFP_KERNEL);
if (!eth_filter->port[i].bmap) {
ret = -ENOMEM;
+ kvfree(eth_filter->port[i].loc_array);
goto free_eth_finfo;
}
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 97a261d5357e..51395c96b2e9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -1799,7 +1799,10 @@ void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid,
struct adapter *adap = container_of(t, struct adapter, tids);
struct sk_buff *skb;
- WARN_ON(tid_out_of_range(&adap->tids, tid));
+ if (tid_out_of_range(&adap->tids, tid)) {
+ dev_err(adap->pdev_dev, "tid %d out of range\n", tid);
+ return;
+ }
if (t->tid_tab[tid - adap->tids.tid_base]) {
t->tid_tab[tid - adap->tids.tid_base] = NULL;
@@ -3234,7 +3237,7 @@ static int cxgb4_mgmt_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
dev_info(pi->adapter->pdev_dev,
"Setting MAC %pM on VF %d\n", mac, vf);
- ret = t4_set_vf_mac_acl(adap, vf + 1, 1, mac);
+ ret = t4_set_vf_mac_acl(adap, vf + 1, pi->lport, 1, mac);
if (!ret)
ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, mac);
return ret;
@@ -6477,10 +6480,11 @@ static const struct tlsdev_ops cxgb4_ktls_ops = {
#if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
-static int cxgb4_xfrm_add_state(struct xfrm_state *x,
+static int cxgb4_xfrm_add_state(struct net_device *dev,
+ struct xfrm_state *x,
struct netlink_ext_ack *extack)
{
- struct adapter *adap = netdev2adap(x->xso.dev);
+ struct adapter *adap = netdev2adap(dev);
int ret;
if (!mutex_trylock(&uld_mutex)) {
@@ -6491,7 +6495,8 @@ static int cxgb4_xfrm_add_state(struct xfrm_state *x,
if (ret)
goto out_unlock;
- ret = adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_add(x, extack);
+ ret = adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_add(dev, x,
+ extack);
out_unlock:
mutex_unlock(&uld_mutex);
@@ -6499,9 +6504,9 @@ out_unlock:
return ret;
}
-static void cxgb4_xfrm_del_state(struct xfrm_state *x)
+static void cxgb4_xfrm_del_state(struct net_device *dev, struct xfrm_state *x)
{
- struct adapter *adap = netdev2adap(x->xso.dev);
+ struct adapter *adap = netdev2adap(dev);
if (!mutex_trylock(&uld_mutex)) {
dev_dbg(adap->pdev_dev,
@@ -6511,15 +6516,15 @@ static void cxgb4_xfrm_del_state(struct xfrm_state *x)
if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS))
goto out_unlock;
- adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_delete(x);
+ adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_delete(dev, x);
out_unlock:
mutex_unlock(&uld_mutex);
}
-static void cxgb4_xfrm_free_state(struct xfrm_state *x)
+static void cxgb4_xfrm_free_state(struct net_device *dev, struct xfrm_state *x)
{
- struct adapter *adap = netdev2adap(x->xso.dev);
+ struct adapter *adap = netdev2adap(dev);
if (!mutex_trylock(&uld_mutex)) {
dev_dbg(adap->pdev_dev,
@@ -6529,36 +6534,19 @@ static void cxgb4_xfrm_free_state(struct xfrm_state *x)
if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS))
goto out_unlock;
- adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_free(x);
+ adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_free(dev, x);
out_unlock:
mutex_unlock(&uld_mutex);
}
-static bool cxgb4_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
-{
- struct adapter *adap = netdev2adap(x->xso.dev);
- bool ret = false;
-
- if (!mutex_trylock(&uld_mutex)) {
- dev_dbg(adap->pdev_dev,
- "crypto uld critical resource is under use\n");
- return ret;
- }
- if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS))
- goto out_unlock;
-
- ret = adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_offload_ok(skb, x);
-
-out_unlock:
- mutex_unlock(&uld_mutex);
- return ret;
-}
-
static void cxgb4_advance_esn_state(struct xfrm_state *x)
{
struct adapter *adap = netdev2adap(x->xso.dev);
+ if (x->xso.dir != XFRM_DEV_OFFLOAD_IN)
+ return;
+
if (!mutex_trylock(&uld_mutex)) {
dev_dbg(adap->pdev_dev,
"crypto uld critical resource is under use\n");
@@ -6577,7 +6565,6 @@ static const struct xfrmdev_ops cxgb4_xfrmdev_ops = {
.xdo_dev_state_add = cxgb4_xfrm_add_state,
.xdo_dev_state_delete = cxgb4_xfrm_del_state,
.xdo_dev_state_free = cxgb4_xfrm_free_state,
- .xdo_dev_offload_ok = cxgb4_ipsec_offload_ok,
.xdo_dev_state_advance_esn = cxgb4_advance_esn_state,
};
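[Editorial note, not part of the patch: the cxgb4 IPsec-offload hunks follow a core xfrm API change in which xdo_dev_state_add/delete/free receive the offloading net_device explicitly instead of reaching through x->xso.dev; the driver's xdo_dev_offload_ok hook is dropped, presumably because its generic checks are performed by core code now. The resulting callback shapes, sketched with hypothetical names:

/* Hedged sketch of the updated xfrmdev_ops callback signatures. */
static int foo_xfrm_add_state(struct net_device *dev, struct xfrm_state *x,
			      struct netlink_ext_ack *extack)
{
	/* the offloading netdev is passed in directly; no x->xso.dev lookup */
	/* ... program the SA into hardware behind dev ... */
	return 0;
}

static void foo_xfrm_del_state(struct net_device *dev, struct xfrm_state *x)
{
	/* ... remove the SA from hardware ... */
}

static void foo_xfrm_free_state(struct net_device *dev, struct xfrm_state *x)
{
	/* ... release driver state tied to x ... */
}

static const struct xfrmdev_ops foo_xfrmdev_ops = {
	.xdo_dev_state_add    = foo_xfrm_add_state,
	.xdo_dev_state_delete = foo_xfrm_del_state,
	.xdo_dev_state_free   = foo_xfrm_free_state,
};
]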
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
index 69d045d769c4..0765d000eaef 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
@@ -1051,7 +1051,7 @@ static void ch_flower_stats_handler(struct work_struct *work)
static void ch_flower_stats_cb(struct timer_list *t)
{
- struct adapter *adap = from_timer(adap, t, flower_stats_timer);
+ struct adapter *adap = timer_container_of(adap, t, flower_stats_timer);
schedule_work(&adap->flower_stats_work);
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c
index b08356060fb4..7bab8da8f6e6 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c
@@ -29,7 +29,7 @@ static int cxgb4_thermal_get_temp(struct thermal_zone_device *tzdev,
return 0;
}
-static struct thermal_zone_device_ops cxgb4_thermal_ops = {
+static const struct thermal_zone_device_ops cxgb4_thermal_ops = {
.get_temp = cxgb4_thermal_get_temp,
};
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index a7d76a8ed050..64402e3646b3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -1533,7 +1533,6 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
} else {
q = &adap->sge.ethtxq[qidx + pi->first_qset];
}
- skb_tx_timestamp(skb);
reclaim_completed_tx(adap, &q->q, -1, true);
cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
@@ -1706,6 +1705,8 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
cpl->len = htons(skb->len);
cpl->ctrl1 = cpu_to_be64(cntrl);
+ skb_tx_timestamp(skb);
+
if (immediate) {
cxgb4_inline_tx_skb(skb, &q->q, sgl);
dev_consume_skb_any(skb);
@@ -2268,7 +2269,6 @@ static int ethofld_hard_xmit(struct net_device *dev,
d = &eosw_txq->desc[eosw_txq->last_pidx];
skb = d->skb;
- skb_tx_timestamp(skb);
wr = (struct fw_eth_tx_eo_wr *)&eohw_txq->q.desc[eohw_txq->q.pidx];
if (unlikely(eosw_txq->state != CXGB4_EO_STATE_ACTIVE &&
@@ -2373,6 +2373,7 @@ write_wr_headers:
eohw_txq->vlan_ins++;
txq_advance(&eohw_txq->q, ndesc);
+ skb_tx_timestamp(skb);
cxgb4_ring_tx_db(adap, &eohw_txq->q, ndesc);
eosw_txq_advance_index(&eosw_txq->last_pidx, 1, eosw_txq->ndesc);
@@ -4233,7 +4234,7 @@ static void sge_rx_timer_cb(struct timer_list *t)
{
unsigned long m;
unsigned int i;
- struct adapter *adap = from_timer(adap, t, sge.rx_timer);
+ struct adapter *adap = timer_container_of(adap, t, sge.rx_timer);
struct sge *s = &adap->sge;
for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++)
@@ -4268,7 +4269,7 @@ done:
static void sge_tx_timer_cb(struct timer_list *t)
{
- struct adapter *adap = from_timer(adap, t, sge.tx_timer);
+ struct adapter *adap = timer_container_of(adap, t, sge.tx_timer);
struct sge *s = &adap->sge;
unsigned long m, period;
unsigned int i, budget;
@@ -4996,9 +4997,9 @@ void t4_sge_stop(struct adapter *adap)
struct sge *s = &adap->sge;
if (s->rx_timer.function)
- del_timer_sync(&s->rx_timer);
+ timer_delete_sync(&s->rx_timer);
if (s->tx_timer.function)
- del_timer_sync(&s->tx_timer);
+ timer_delete_sync(&s->tx_timer);
if (is_offload(adap)) {
struct sge_uld_txq_info *txq_info;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 76de55306c4d..175bf9b13058 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -10215,11 +10215,12 @@ out:
* t4_set_vf_mac_acl - Set MAC address for the specified VF
* @adapter: The adapter
* @vf: one of the VFs instantiated by the specified PF
+ * @start: The start port id associated with specified VF
* @naddr: the number of MAC addresses
* @addr: the MAC address(es) to be set to the specified VF
*/
int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf,
- unsigned int naddr, u8 *addr)
+ u8 start, unsigned int naddr, u8 *addr)
{
struct fw_acl_mac_cmd cmd;
@@ -10234,7 +10235,7 @@ int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf,
cmd.en_to_len16 = cpu_to_be32((unsigned int)FW_LEN16(cmd));
cmd.nmac = naddr;
- switch (adapter->pf) {
+ switch (start) {
case 3:
memcpy(cmd.macaddr3, addr, sizeof(cmd.macaddr3));
break;
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 5b1d746e6563..4e6ecb9c8dcc 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -2062,7 +2062,7 @@ irq_handler_t t4vf_intr_handler(struct adapter *adapter)
*/
static void sge_rx_timer_cb(struct timer_list *t)
{
- struct adapter *adapter = from_timer(adapter, t, sge.rx_timer);
+ struct adapter *adapter = timer_container_of(adapter, t, sge.rx_timer);
struct sge *s = &adapter->sge;
unsigned int i;
@@ -2121,7 +2121,7 @@ static void sge_rx_timer_cb(struct timer_list *t)
*/
static void sge_tx_timer_cb(struct timer_list *t)
{
- struct adapter *adapter = from_timer(adapter, t, sge.tx_timer);
+ struct adapter *adapter = timer_container_of(adapter, t, sge.tx_timer);
struct sge *s = &adapter->sge;
unsigned int i, budget;
@@ -2609,9 +2609,9 @@ void t4vf_sge_stop(struct adapter *adapter)
struct sge *s = &adapter->sge;
if (s->rx_timer.function)
- del_timer_sync(&s->rx_timer);
+ timer_delete_sync(&s->rx_timer);
if (s->tx_timer.function)
- del_timer_sync(&s->tx_timer);
+ timer_delete_sync(&s->tx_timer);
}
/**
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
index c7338ac6a5bb..ecd9a0bd5e18 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
@@ -71,21 +71,22 @@
static LIST_HEAD(uld_ctx_list);
static DEFINE_MUTEX(dev_mutex);
-static bool ch_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
static int ch_ipsec_uld_state_change(void *handle, enum cxgb4_state new_state);
static int ch_ipsec_xmit(struct sk_buff *skb, struct net_device *dev);
static void *ch_ipsec_uld_add(const struct cxgb4_lld_info *infop);
static void ch_ipsec_advance_esn_state(struct xfrm_state *x);
-static void ch_ipsec_xfrm_free_state(struct xfrm_state *x);
-static void ch_ipsec_xfrm_del_state(struct xfrm_state *x);
-static int ch_ipsec_xfrm_add_state(struct xfrm_state *x,
+static void ch_ipsec_xfrm_free_state(struct net_device *dev,
+ struct xfrm_state *x);
+static void ch_ipsec_xfrm_del_state(struct net_device *dev,
+ struct xfrm_state *x);
+static int ch_ipsec_xfrm_add_state(struct net_device *dev,
+ struct xfrm_state *x,
struct netlink_ext_ack *extack);
static const struct xfrmdev_ops ch_ipsec_xfrmdev_ops = {
.xdo_dev_state_add = ch_ipsec_xfrm_add_state,
.xdo_dev_state_delete = ch_ipsec_xfrm_del_state,
.xdo_dev_state_free = ch_ipsec_xfrm_free_state,
- .xdo_dev_offload_ok = ch_ipsec_offload_ok,
.xdo_dev_state_advance_esn = ch_ipsec_advance_esn_state,
};
@@ -225,7 +226,8 @@ out:
* returns 0 on success, negative error if failed to send message to FPGA
* positive error if FPGA returned a bad response
*/
-static int ch_ipsec_xfrm_add_state(struct xfrm_state *x,
+static int ch_ipsec_xfrm_add_state(struct net_device *dev,
+ struct xfrm_state *x,
struct netlink_ext_ack *extack)
{
struct ipsec_sa_entry *sa_entry;
@@ -304,14 +306,16 @@ out:
return res;
}
-static void ch_ipsec_xfrm_del_state(struct xfrm_state *x)
+static void ch_ipsec_xfrm_del_state(struct net_device *dev,
+ struct xfrm_state *x)
{
/* do nothing */
if (!x->xso.offload_handle)
return;
}
-static void ch_ipsec_xfrm_free_state(struct xfrm_state *x)
+static void ch_ipsec_xfrm_free_state(struct net_device *dev,
+ struct xfrm_state *x)
{
struct ipsec_sa_entry *sa_entry;
@@ -323,20 +327,6 @@ static void ch_ipsec_xfrm_free_state(struct xfrm_state *x)
module_put(THIS_MODULE);
}
-static bool ch_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
-{
- if (x->props.family == AF_INET) {
- /* Offload with IP options is not supported yet */
- if (ip_hdr(skb)->ihl > 5)
- return false;
- } else {
- /* Offload with IPv6 extension headers is not support yet */
- if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
- return false;
- }
- return true;
-}
-
static void ch_ipsec_advance_esn_state(struct xfrm_state *x)
{
/* do nothing */
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
index e8e460a92e0e..4e2096e49684 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
@@ -1640,6 +1640,7 @@ static int chcr_ktls_tunnel_pkt(struct chcr_ktls_info *tx_info,
cxgb4_write_sgl(skb, &q->q, pos, end, 0, sgl_sdesc->addr);
sgl_sdesc->skb = skb;
chcr_txq_advance(&q->q, ndesc);
+ skb_tx_timestamp(skb);
cxgb4_ring_tx_db(tx_info->adap, &q->q, ndesc);
return 0;
}
@@ -1903,7 +1904,6 @@ static int chcr_ktls_sw_fallback(struct sk_buff *skb,
th = tcp_hdr(nskb);
skb_offset = skb_tcp_all_headers(nskb);
data_len = nskb->len - skb_offset;
- skb_tx_timestamp(nskb);
if (chcr_ktls_tunnel_pkt(tx_info, nskb, q))
goto out;
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c
index 96fd31d75dfd..daa1ebaef511 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c
@@ -346,8 +346,9 @@ static struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl,
* driver. Once driver synthesizes cpl_pass_accept_req the skb will go
* through the regular cpl_pass_accept_req processing in TOM.
*/
- skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req)
- - pktshift, GFP_ATOMIC);
+ skb = alloc_skb(size_add(gl->tot_len,
+ sizeof(struct cpl_pass_accept_req)) -
+ pktshift, GFP_ATOMIC);
if (unlikely(!skb))
return NULL;
__skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req)
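[Editorial note, not part of the patch: the chtls change wraps the skb length computation in size_add() so an oversized gl->tot_len cannot wrap the addition; size_add() (from <linux/overflow.h>) saturates at SIZE_MAX, which makes the subsequent alloc_skb() fail instead of returning an undersized buffer. A rough illustration under that assumption:

#include <linux/overflow.h>
#include <linux/skbuff.h>

/* Hedged sketch: saturating size arithmetic before an allocation. */
static struct sk_buff *alloc_copy_skb(size_t payload_len, size_t hdr_len,
				      size_t pktshift)
{
	/* size_add() returns SIZE_MAX on overflow, so alloc_skb() fails
	 * cleanly rather than succeeding with a too-small buffer. */
	return alloc_skb(size_add(payload_len, hdr_len) - pktshift,
			 GFP_ATOMIC);
}
]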
diff --git a/drivers/net/ethernet/cisco/enic/Kconfig b/drivers/net/ethernet/cisco/enic/Kconfig
index ad80c0fa96a6..96709875fe4f 100644
--- a/drivers/net/ethernet/cisco/enic/Kconfig
+++ b/drivers/net/ethernet/cisco/enic/Kconfig
@@ -6,5 +6,6 @@
config ENIC
tristate "Cisco VIC Ethernet NIC Support"
depends on PCI
+ select PAGE_POOL
help
This enables the support for the Cisco VIC Ethernet card.
diff --git a/drivers/net/ethernet/cisco/enic/Makefile b/drivers/net/ethernet/cisco/enic/Makefile
index c3b6febfdbe4..a96b8332e6e2 100644
--- a/drivers/net/ethernet/cisco/enic/Makefile
+++ b/drivers/net/ethernet/cisco/enic/Makefile
@@ -3,5 +3,5 @@ obj-$(CONFIG_ENIC) := enic.o
enic-y := enic_main.o vnic_cq.o vnic_intr.o vnic_wq.o \
enic_res.o enic_dev.o enic_pp.o vnic_dev.o vnic_rq.o vnic_vic.o \
- enic_ethtool.o enic_api.o enic_clsf.o
+ enic_ethtool.o enic_api.o enic_clsf.o enic_rq.o enic_wq.o
diff --git a/drivers/net/ethernet/cisco/enic/cq_desc.h b/drivers/net/ethernet/cisco/enic/cq_desc.h
index 462c5435a206..bfb3f14e89f5 100644
--- a/drivers/net/ethernet/cisco/enic/cq_desc.h
+++ b/drivers/net/ethernet/cisco/enic/cq_desc.h
@@ -40,28 +40,7 @@ struct cq_desc {
#define CQ_DESC_COMP_NDX_BITS 12
#define CQ_DESC_COMP_NDX_MASK ((1 << CQ_DESC_COMP_NDX_BITS) - 1)
-static inline void cq_desc_dec(const struct cq_desc *desc_arg,
- u8 *type, u8 *color, u16 *q_number, u16 *completed_index)
-{
- const struct cq_desc *desc = desc_arg;
- const u8 type_color = desc->type_color;
-
- *color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK;
-
- /*
- * Make sure color bit is read from desc *before* other fields
- * are read from desc. Hardware guarantees color bit is last
- * bit (byte) written. Adding the rmb() prevents the compiler
- * and/or CPU from reordering the reads which would potentially
- * result in reading stale values.
- */
-
- rmb();
-
- *type = type_color & CQ_DESC_TYPE_MASK;
- *q_number = le16_to_cpu(desc->q_number) & CQ_DESC_Q_NUM_MASK;
- *completed_index = le16_to_cpu(desc->completed_index) &
- CQ_DESC_COMP_NDX_MASK;
-}
+#define CQ_DESC_32_FI_MASK (BIT(0) | BIT(1))
+#define CQ_DESC_64_FI_MASK (BIT(0) | BIT(1))
#endif /* _CQ_DESC_H_ */
diff --git a/drivers/net/ethernet/cisco/enic/cq_enet_desc.h b/drivers/net/ethernet/cisco/enic/cq_enet_desc.h
index d25426470a29..50787cff29db 100644
--- a/drivers/net/ethernet/cisco/enic/cq_enet_desc.h
+++ b/drivers/net/ethernet/cisco/enic/cq_enet_desc.h
@@ -17,12 +17,22 @@ struct cq_enet_wq_desc {
u8 type_color;
};
-static inline void cq_enet_wq_desc_dec(struct cq_enet_wq_desc *desc,
- u8 *type, u8 *color, u16 *q_number, u16 *completed_index)
-{
- cq_desc_dec((struct cq_desc *)desc, type,
- color, q_number, completed_index);
-}
+/*
+ * Defines and Capabilities for CMD_CQ_ENTRY_SIZE_SET
+ */
+#define VNIC_RQ_ALL (~0ULL)
+
+#define VNIC_RQ_CQ_ENTRY_SIZE_16 0
+#define VNIC_RQ_CQ_ENTRY_SIZE_32 1
+#define VNIC_RQ_CQ_ENTRY_SIZE_64 2
+
+#define VNIC_RQ_CQ_ENTRY_SIZE_16_CAPABLE BIT(VNIC_RQ_CQ_ENTRY_SIZE_16)
+#define VNIC_RQ_CQ_ENTRY_SIZE_32_CAPABLE BIT(VNIC_RQ_CQ_ENTRY_SIZE_32)
+#define VNIC_RQ_CQ_ENTRY_SIZE_64_CAPABLE BIT(VNIC_RQ_CQ_ENTRY_SIZE_64)
+
+#define VNIC_RQ_CQ_ENTRY_SIZE_ALL_BIT (VNIC_RQ_CQ_ENTRY_SIZE_16_CAPABLE | \
+ VNIC_RQ_CQ_ENTRY_SIZE_32_CAPABLE | \
+ VNIC_RQ_CQ_ENTRY_SIZE_64_CAPABLE)
/* Completion queue descriptor: Ethernet receive queue, 16B */
struct cq_enet_rq_desc {
@@ -36,6 +46,45 @@ struct cq_enet_rq_desc {
u8 type_color;
};
+/* Completion queue descriptor: Ethernet receive queue, 32B */
+struct cq_enet_rq_desc_32 {
+ __le16 completed_index_flags;
+ __le16 q_number_rss_type_flags;
+ __le32 rss_hash;
+ __le16 bytes_written_flags;
+ __le16 vlan;
+ __le16 checksum_fcoe;
+ u8 flags;
+ u8 fetch_index_flags;
+ __le32 time_stamp;
+ __le16 time_stamp2;
+ __le16 pie_info;
+ __le32 pie_info2;
+ __le16 pie_info3;
+ u8 pie_info4;
+ u8 type_color;
+};
+
+/* Completion queue descriptor: Ethernet receive queue, 64B */
+struct cq_enet_rq_desc_64 {
+ __le16 completed_index_flags;
+ __le16 q_number_rss_type_flags;
+ __le32 rss_hash;
+ __le16 bytes_written_flags;
+ __le16 vlan;
+ __le16 checksum_fcoe;
+ u8 flags;
+ u8 fetch_index_flags;
+ __le32 time_stamp;
+ __le16 time_stamp2;
+ __le16 pie_info;
+ __le32 pie_info2;
+ __le16 pie_info3;
+ u8 pie_info4;
+ u8 reserved[32];
+ u8 type_color;
+};
+
#define CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT (0x1 << 12)
#define CQ_ENET_RQ_DESC_FLAGS_FCOE (0x1 << 13)
#define CQ_ENET_RQ_DESC_FLAGS_EOP (0x1 << 14)
@@ -88,85 +137,4 @@ struct cq_enet_rq_desc {
#define CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT (0x1 << 6)
#define CQ_ENET_RQ_DESC_FLAGS_FCS_OK (0x1 << 7)
-static inline void cq_enet_rq_desc_dec(struct cq_enet_rq_desc *desc,
- u8 *type, u8 *color, u16 *q_number, u16 *completed_index,
- u8 *ingress_port, u8 *fcoe, u8 *eop, u8 *sop, u8 *rss_type,
- u8 *csum_not_calc, u32 *rss_hash, u16 *bytes_written, u8 *packet_error,
- u8 *vlan_stripped, u16 *vlan_tci, u16 *checksum, u8 *fcoe_sof,
- u8 *fcoe_fc_crc_ok, u8 *fcoe_enc_error, u8 *fcoe_eof,
- u8 *tcp_udp_csum_ok, u8 *udp, u8 *tcp, u8 *ipv4_csum_ok,
- u8 *ipv6, u8 *ipv4, u8 *ipv4_fragment, u8 *fcs_ok)
-{
- u16 completed_index_flags;
- u16 q_number_rss_type_flags;
- u16 bytes_written_flags;
-
- cq_desc_dec((struct cq_desc *)desc, type,
- color, q_number, completed_index);
-
- completed_index_flags = le16_to_cpu(desc->completed_index_flags);
- q_number_rss_type_flags =
- le16_to_cpu(desc->q_number_rss_type_flags);
- bytes_written_flags = le16_to_cpu(desc->bytes_written_flags);
-
- *ingress_port = (completed_index_flags &
- CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT) ? 1 : 0;
- *fcoe = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_FCOE) ?
- 1 : 0;
- *eop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_EOP) ?
- 1 : 0;
- *sop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_SOP) ?
- 1 : 0;
-
- *rss_type = (u8)((q_number_rss_type_flags >> CQ_DESC_Q_NUM_BITS) &
- CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
- *csum_not_calc = (q_number_rss_type_flags &
- CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ? 1 : 0;
-
- *rss_hash = le32_to_cpu(desc->rss_hash);
-
- *bytes_written = bytes_written_flags &
- CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
- *packet_error = (bytes_written_flags &
- CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ? 1 : 0;
- *vlan_stripped = (bytes_written_flags &
- CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) ? 1 : 0;
-
- /*
- * Tag Control Information(16) = user_priority(3) + cfi(1) + vlan(12)
- */
- *vlan_tci = le16_to_cpu(desc->vlan);
-
- if (*fcoe) {
- *fcoe_sof = (u8)(le16_to_cpu(desc->checksum_fcoe) &
- CQ_ENET_RQ_DESC_FCOE_SOF_MASK);
- *fcoe_fc_crc_ok = (desc->flags &
- CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK) ? 1 : 0;
- *fcoe_enc_error = (desc->flags &
- CQ_ENET_RQ_DESC_FCOE_ENC_ERROR) ? 1 : 0;
- *fcoe_eof = (u8)((le16_to_cpu(desc->checksum_fcoe) >>
- CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT) &
- CQ_ENET_RQ_DESC_FCOE_EOF_MASK);
- *checksum = 0;
- } else {
- *fcoe_sof = 0;
- *fcoe_fc_crc_ok = 0;
- *fcoe_enc_error = 0;
- *fcoe_eof = 0;
- *checksum = le16_to_cpu(desc->checksum_fcoe);
- }
-
- *tcp_udp_csum_ok =
- (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ? 1 : 0;
- *udp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_UDP) ? 1 : 0;
- *tcp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP) ? 1 : 0;
- *ipv4_csum_ok =
- (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ? 1 : 0;
- *ipv6 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV6) ? 1 : 0;
- *ipv4 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4) ? 1 : 0;
- *ipv4_fragment =
- (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT) ? 1 : 0;
- *fcs_ok = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_FCS_OK) ? 1 : 0;
-}
-
#endif /* _CQ_ENET_DESC_H_ */
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index 10b7e02ba4d0..301b3f3114af 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -17,6 +17,7 @@
#include "vnic_nic.h"
#include "vnic_rss.h"
#include <linux/irq.h>
+#include <net/page_pool/helpers.h>
#define DRV_NAME "enic"
#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
@@ -25,11 +26,19 @@
#define ENIC_WQ_MAX 256
#define ENIC_RQ_MAX 256
+#define ENIC_RQ_MIN_DEFAULT 8
#define ENIC_WQ_NAPI_BUDGET 256
#define ENIC_AIC_LARGE_PKT_DIFF 3
+enum ext_cq {
+ ENIC_RQ_CQ_ENTRY_SIZE_16,
+ ENIC_RQ_CQ_ENTRY_SIZE_32,
+ ENIC_RQ_CQ_ENTRY_SIZE_64,
+ ENIC_RQ_CQ_ENTRY_SIZE_MAX,
+};
+
struct enic_msix_entry {
int requested;
char devname[IFNAMSIZ + 8];
@@ -75,6 +84,10 @@ struct enic_rx_coal {
#define ENIC_SET_INSTANCE (1 << 3)
#define ENIC_SET_HOST (1 << 4)
+#define MAX_TSO BIT(16)
+#define WQ_ENET_MAX_DESC_LEN BIT(WQ_ENET_LEN_BITS)
+#define ENIC_DESC_MAX_SPLITS (MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1)
+
struct enic_port_profile {
u32 set;
u8 request;
@@ -158,6 +171,7 @@ struct enic_rq_stats {
u64 pkt_truncated; /* truncated pkts */
u64 no_skb; /* out of skbs */
u64 desc_skip; /* Rx pkt went into later buffer */
+ u64 pp_alloc_fail; /* page pool alloc failure */
};
struct enic_wq {
@@ -169,6 +183,7 @@ struct enic_wq {
struct enic_rq {
struct vnic_rq vrq;
struct enic_rq_stats stats;
+ struct page_pool *pool;
} ____cacheline_aligned;
/* Per-instance private data structure */
@@ -223,9 +238,9 @@ struct enic {
unsigned int cq_avail;
unsigned int cq_count;
struct enic_rfs_flw_tbl rfs_h;
- u32 rx_copybreak;
u8 rss_key[ENIC_RSS_LEN];
struct vnic_gen_stats gen_stats;
+ enum ext_cq ext_cq;
};
static inline struct net_device *vnic_get_netdev(struct vnic_dev *vdev)
@@ -347,5 +362,6 @@ int enic_is_valid_vf(struct enic *enic, int vf);
int enic_is_dynamic(struct enic *enic);
void enic_set_ethtool_ops(struct net_device *netdev);
int __enic_set_rsskey(struct enic *enic);
+void enic_ext_cq(struct enic *enic);
#endif /* _ENIC_H_ */
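[Editorial note, not part of the patch: the enic changes above start backing each receive queue with a page_pool (note the new struct page_pool *pool member and the pp_alloc_fail counter) and drop the old rx_copybreak tunable. For orientation, a minimal page_pool usage sketch with hypothetical names and parameters, not the enic implementation:

#include <net/page_pool/helpers.h>

/* Hedged sketch: create a per-RQ page pool, allocate RX pages from it,
 * and recycle them on completion. Parameters are illustrative. */
static struct page_pool *foo_create_pool(struct device *dma_dev,
					 unsigned int ring_size)
{
	struct page_pool_params pp = {
		.flags		= PP_FLAG_DMA_MAP,	/* pool handles DMA mapping */
		.order		= 0,
		.pool_size	= ring_size,
		.nid		= NUMA_NO_NODE,
		.dev		= dma_dev,
		.dma_dir	= DMA_FROM_DEVICE,
	};

	return page_pool_create(&pp);	/* ERR_PTR() on failure */
}

static int foo_refill_one(struct page_pool *pool, dma_addr_t *dma)
{
	struct page *page = page_pool_dev_alloc_pages(pool);

	if (!page)
		return -ENOMEM;		/* would bump a pp_alloc_fail-style counter */
	*dma = page_pool_get_dma_addr(page);
	return 0;
}

static void foo_recycle(struct page_pool *pool, struct page *page)
{
	page_pool_put_full_page(pool, page, true);	/* allow direct recycling */
}
]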
diff --git a/drivers/net/ethernet/cisco/enic/enic_clsf.c b/drivers/net/ethernet/cisco/enic/enic_clsf.c
index 9900993b6aea..837f954873ee 100644
--- a/drivers/net/ethernet/cisco/enic/enic_clsf.c
+++ b/drivers/net/ethernet/cisco/enic/enic_clsf.c
@@ -125,7 +125,7 @@ struct enic_rfs_fltr_node *htbl_fltr_search(struct enic *enic, u16 fltr_id)
#ifdef CONFIG_RFS_ACCEL
void enic_flow_may_expire(struct timer_list *t)
{
- struct enic *enic = from_timer(enic, t, rfs_h.rfs_may_expire);
+ struct enic *enic = timer_container_of(enic, t, rfs_h.rfs_may_expire);
bool res;
int j;
diff --git a/drivers/net/ethernet/cisco/enic/enic_clsf.h b/drivers/net/ethernet/cisco/enic/enic_clsf.h
index 8c4ce50da6e1..5f5284102fb0 100644
--- a/drivers/net/ethernet/cisco/enic/enic_clsf.h
+++ b/drivers/net/ethernet/cisco/enic/enic_clsf.h
@@ -26,7 +26,7 @@ static inline void enic_rfs_timer_start(struct enic *enic)
static inline void enic_rfs_timer_stop(struct enic *enic)
{
- del_timer_sync(&enic->rfs_h.rfs_may_expire);
+ timer_delete_sync(&enic->rfs_h.rfs_may_expire);
}
#else
static inline void enic_rfs_timer_start(struct enic *enic) {}
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index d607b4f0542c..529160926a96 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -222,9 +222,9 @@ static void enic_get_ringparam(struct net_device *netdev,
struct enic *enic = netdev_priv(netdev);
struct vnic_enet_config *c = &enic->config;
- ring->rx_max_pending = ENIC_MAX_RQ_DESCS;
+ ring->rx_max_pending = c->max_rq_ring;
ring->rx_pending = c->rq_desc_count;
- ring->tx_max_pending = ENIC_MAX_WQ_DESCS;
+ ring->tx_max_pending = c->max_wq_ring;
ring->tx_pending = c->wq_desc_count;
}
@@ -252,18 +252,18 @@ static int enic_set_ringparam(struct net_device *netdev,
}
rx_pending = c->rq_desc_count;
tx_pending = c->wq_desc_count;
- if (ring->rx_pending > ENIC_MAX_RQ_DESCS ||
+ if (ring->rx_pending > c->max_rq_ring ||
ring->rx_pending < ENIC_MIN_RQ_DESCS) {
netdev_info(netdev, "rx pending (%u) not in range [%u,%u]",
ring->rx_pending, ENIC_MIN_RQ_DESCS,
- ENIC_MAX_RQ_DESCS);
+ c->max_rq_ring);
return -EINVAL;
}
- if (ring->tx_pending > ENIC_MAX_WQ_DESCS ||
+ if (ring->tx_pending > c->max_wq_ring ||
ring->tx_pending < ENIC_MIN_WQ_DESCS) {
netdev_info(netdev, "tx pending (%u) not in range [%u,%u]",
ring->tx_pending, ENIC_MIN_WQ_DESCS,
- ENIC_MAX_WQ_DESCS);
+ c->max_wq_ring);
return -EINVAL;
}
if (running)
@@ -608,43 +608,6 @@ static int enic_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
return ret;
}
-static int enic_get_tunable(struct net_device *dev,
- const struct ethtool_tunable *tuna, void *data)
-{
- struct enic *enic = netdev_priv(dev);
- int ret = 0;
-
- switch (tuna->id) {
- case ETHTOOL_RX_COPYBREAK:
- *(u32 *)data = enic->rx_copybreak;
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
-static int enic_set_tunable(struct net_device *dev,
- const struct ethtool_tunable *tuna,
- const void *data)
-{
- struct enic *enic = netdev_priv(dev);
- int ret = 0;
-
- switch (tuna->id) {
- case ETHTOOL_RX_COPYBREAK:
- enic->rx_copybreak = *(u32 *)data;
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
static u32 enic_get_rxfh_key_size(struct net_device *netdev)
{
return ENIC_RSS_LEN;
@@ -727,8 +690,6 @@ static const struct ethtool_ops enic_ethtool_ops = {
.get_coalesce = enic_get_coalesce,
.set_coalesce = enic_set_coalesce,
.get_rxnfc = enic_get_rxnfc,
- .get_tunable = enic_get_tunable,
- .set_tunable = enic_set_tunable,
.get_rxfh_key_size = enic_get_rxfh_key_size,
.get_rxfh = enic_get_rxfh,
.set_rxfh = enic_set_rxfh,
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 9913952ccb42..6bc8dfdb3d4b 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -58,18 +58,15 @@
#include "enic_dev.h"
#include "enic_pp.h"
#include "enic_clsf.h"
+#include "enic_rq.h"
+#include "enic_wq.h"
#define ENIC_NOTIFY_TIMER_PERIOD (2 * HZ)
-#define WQ_ENET_MAX_DESC_LEN (1 << WQ_ENET_LEN_BITS)
-#define MAX_TSO (1 << 16)
-#define ENIC_DESC_MAX_SPLITS (MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1)
#define PCI_DEVICE_ID_CISCO_VIC_ENET 0x0043 /* ethernet vnic */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_DYN 0x0044 /* enet dynamic vnic */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_VF 0x0071 /* enet SRIOV VF */
-#define RX_COPYBREAK_DEFAULT 256
-
/* Supported devices */
static const struct pci_device_id enic_id_table[] = {
{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) },
@@ -109,7 +106,7 @@ static struct enic_intr_mod_table mod_table[ENIC_MAX_COALESCE_TIMERS + 1] = {
static struct enic_intr_mod_range mod_range[ENIC_MAX_LINK_SPEEDS] = {
{0, 0}, /* 0 - 4 Gbps */
{0, 3}, /* 4 - 10 Gbps */
- {3, 6}, /* 10 - 40 Gbps */
+ {3, 6}, /* 10+ Gbps */
};
static void enic_init_affinity_hint(struct enic *enic)
@@ -322,54 +319,6 @@ int enic_is_valid_vf(struct enic *enic, int vf)
#endif
}
-static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
-{
- struct enic *enic = vnic_dev_priv(wq->vdev);
-
- if (buf->sop)
- dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
- DMA_TO_DEVICE);
- else
- dma_unmap_page(&enic->pdev->dev, buf->dma_addr, buf->len,
- DMA_TO_DEVICE);
-
- if (buf->os_buf)
- dev_kfree_skb_any(buf->os_buf);
-}
-
-static void enic_wq_free_buf(struct vnic_wq *wq,
- struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque)
-{
- struct enic *enic = vnic_dev_priv(wq->vdev);
-
- enic->wq[wq->index].stats.cq_work++;
- enic->wq[wq->index].stats.cq_bytes += buf->len;
- enic_free_wq_buf(wq, buf);
-}
-
-static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
- u8 type, u16 q_number, u16 completed_index, void *opaque)
-{
- struct enic *enic = vnic_dev_priv(vdev);
-
- spin_lock(&enic->wq[q_number].lock);
-
- vnic_wq_service(&enic->wq[q_number].vwq, cq_desc,
- completed_index, enic_wq_free_buf,
- opaque);
-
- if (netif_tx_queue_stopped(netdev_get_tx_queue(enic->netdev, q_number)) &&
- vnic_wq_desc_avail(&enic->wq[q_number].vwq) >=
- (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)) {
- netif_wake_subqueue(enic->netdev, q_number);
- enic->wq[q_number].stats.wake++;
- }
-
- spin_unlock(&enic->wq[q_number].lock);
-
- return 0;
-}
-
static bool enic_log_q_error(struct enic *enic)
{
unsigned int i;
@@ -428,6 +377,36 @@ static void enic_mtu_check(struct enic *enic)
}
}
+static void enic_set_rx_coal_setting(struct enic *enic)
+{
+ unsigned int speed;
+ int index = -1;
+ struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
+
+ /* 1. Read the link speed from fw
+ * 2. Pick the default range for the speed
+ * 3. Update it in enic->rx_coalesce_setting
+ */
+ speed = vnic_dev_port_speed(enic->vdev);
+ if (speed > ENIC_LINK_SPEED_10G)
+ index = ENIC_LINK_40G_INDEX;
+ else if (speed > ENIC_LINK_SPEED_4G)
+ index = ENIC_LINK_10G_INDEX;
+ else
+ index = ENIC_LINK_4G_INDEX;
+
+ rx_coal->small_pkt_range_start = mod_range[index].small_pkt_range_start;
+ rx_coal->large_pkt_range_start = mod_range[index].large_pkt_range_start;
+ rx_coal->range_end = ENIC_RX_COALESCE_RANGE_END;
+
+ /* Start with the value provided by UCSM */
+ for (index = 0; index < enic->rq_count; index++)
+ enic->cq[index].cur_rx_coal_timeval =
+ enic->config.intr_timer_usec;
+
+ rx_coal->use_adaptive_rx_coalesce = 1;
+}
+
static void enic_link_check(struct enic *enic)
{
int link_status = vnic_dev_link_status(enic->vdev);
@@ -436,6 +415,7 @@ static void enic_link_check(struct enic *enic)
if (link_status && !carrier_ok) {
netdev_info(enic->netdev, "Link UP\n");
netif_carrier_on(enic->netdev);
+ enic_set_rx_coal_setting(enic);
} else if (!link_status && carrier_ok) {
netdev_info(enic->netdev, "Link DOWN\n");
netif_carrier_off(enic->netdev);
@@ -1282,243 +1262,6 @@ nla_put_failure:
return -EMSGSIZE;
}
-static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
-{
- struct enic *enic = vnic_dev_priv(rq->vdev);
-
- if (!buf->os_buf)
- return;
-
- dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
- DMA_FROM_DEVICE);
- dev_kfree_skb_any(buf->os_buf);
- buf->os_buf = NULL;
-}
-
-static int enic_rq_alloc_buf(struct vnic_rq *rq)
-{
- struct enic *enic = vnic_dev_priv(rq->vdev);
- struct net_device *netdev = enic->netdev;
- struct sk_buff *skb;
- unsigned int len = netdev->mtu + VLAN_ETH_HLEN;
- unsigned int os_buf_index = 0;
- dma_addr_t dma_addr;
- struct vnic_rq_buf *buf = rq->to_use;
-
- if (buf->os_buf) {
- enic_queue_rq_desc(rq, buf->os_buf, os_buf_index, buf->dma_addr,
- buf->len);
-
- return 0;
- }
- skb = netdev_alloc_skb_ip_align(netdev, len);
- if (!skb) {
- enic->rq[rq->index].stats.no_skb++;
- return -ENOMEM;
- }
-
- dma_addr = dma_map_single(&enic->pdev->dev, skb->data, len,
- DMA_FROM_DEVICE);
- if (unlikely(enic_dma_map_check(enic, dma_addr))) {
- dev_kfree_skb(skb);
- return -ENOMEM;
- }
-
- enic_queue_rq_desc(rq, skb, os_buf_index,
- dma_addr, len);
-
- return 0;
-}
-
-static void enic_intr_update_pkt_size(struct vnic_rx_bytes_counter *pkt_size,
- u32 pkt_len)
-{
- if (ENIC_LARGE_PKT_THRESHOLD <= pkt_len)
- pkt_size->large_pkt_bytes_cnt += pkt_len;
- else
- pkt_size->small_pkt_bytes_cnt += pkt_len;
-}
-
-static bool enic_rxcopybreak(struct net_device *netdev, struct sk_buff **skb,
- struct vnic_rq_buf *buf, u16 len)
-{
- struct enic *enic = netdev_priv(netdev);
- struct sk_buff *new_skb;
-
- if (len > enic->rx_copybreak)
- return false;
- new_skb = netdev_alloc_skb_ip_align(netdev, len);
- if (!new_skb)
- return false;
- dma_sync_single_for_cpu(&enic->pdev->dev, buf->dma_addr, len,
- DMA_FROM_DEVICE);
- memcpy(new_skb->data, (*skb)->data, len);
- *skb = new_skb;
-
- return true;
-}
-
-static void enic_rq_indicate_buf(struct vnic_rq *rq,
- struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
- int skipped, void *opaque)
-{
- struct enic *enic = vnic_dev_priv(rq->vdev);
- struct net_device *netdev = enic->netdev;
- struct sk_buff *skb;
- struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
- struct enic_rq_stats *rqstats = &enic->rq[rq->index].stats;
-
- u8 type, color, eop, sop, ingress_port, vlan_stripped;
- u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
- u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
- u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
- u8 packet_error;
- u16 q_number, completed_index, bytes_written, vlan_tci, checksum;
- u32 rss_hash;
- bool outer_csum_ok = true, encap = false;
-
- rqstats->packets++;
- if (skipped) {
- rqstats->desc_skip++;
- return;
- }
-
- skb = buf->os_buf;
-
- cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
- &type, &color, &q_number, &completed_index,
- &ingress_port, &fcoe, &eop, &sop, &rss_type,
- &csum_not_calc, &rss_hash, &bytes_written,
- &packet_error, &vlan_stripped, &vlan_tci, &checksum,
- &fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
- &fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
- &ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
- &fcs_ok);
-
- if (packet_error) {
-
- if (!fcs_ok) {
- if (bytes_written > 0)
- rqstats->bad_fcs++;
- else if (bytes_written == 0)
- rqstats->pkt_truncated++;
- }
-
- dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
- DMA_FROM_DEVICE);
- dev_kfree_skb_any(skb);
- buf->os_buf = NULL;
-
- return;
- }
-
- if (eop && bytes_written > 0) {
-
- /* Good receive
- */
- rqstats->bytes += bytes_written;
- if (!enic_rxcopybreak(netdev, &skb, buf, bytes_written)) {
- buf->os_buf = NULL;
- dma_unmap_single(&enic->pdev->dev, buf->dma_addr,
- buf->len, DMA_FROM_DEVICE);
- }
- prefetch(skb->data - NET_IP_ALIGN);
-
- skb_put(skb, bytes_written);
- skb->protocol = eth_type_trans(skb, netdev);
- skb_record_rx_queue(skb, q_number);
- if ((netdev->features & NETIF_F_RXHASH) && rss_hash &&
- (type == 3)) {
- switch (rss_type) {
- case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv4:
- case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6:
- case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6_EX:
- skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L4);
- rqstats->l4_rss_hash++;
- break;
- case CQ_ENET_RQ_DESC_RSS_TYPE_IPv4:
- case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6:
- case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6_EX:
- skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L3);
- rqstats->l3_rss_hash++;
- break;
- }
- }
- if (enic->vxlan.vxlan_udp_port_number) {
- switch (enic->vxlan.patch_level) {
- case 0:
- if (fcoe) {
- encap = true;
- outer_csum_ok = fcoe_fc_crc_ok;
- }
- break;
- case 2:
- if ((type == 7) &&
- (rss_hash & BIT(0))) {
- encap = true;
- outer_csum_ok = (rss_hash & BIT(1)) &&
- (rss_hash & BIT(2));
- }
- break;
- }
- }
-
- /* Hardware does not provide whole packet checksum. It only
- * provides pseudo checksum. Since hw validates the packet
- * checksum but not provide us the checksum value. use
- * CHECSUM_UNNECESSARY.
- *
- * In case of encap pkt tcp_udp_csum_ok/tcp_udp_csum_ok is
- * inner csum_ok. outer_csum_ok is set by hw when outer udp
- * csum is correct or is zero.
- */
- if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc &&
- tcp_udp_csum_ok && outer_csum_ok &&
- (ipv4_csum_ok || ipv6)) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- skb->csum_level = encap;
- if (encap)
- rqstats->csum_unnecessary_encap++;
- else
- rqstats->csum_unnecessary++;
- }
-
- if (vlan_stripped) {
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
- rqstats->vlan_stripped++;
- }
- skb_mark_napi_id(skb, &enic->napi[rq->index]);
- if (!(netdev->features & NETIF_F_GRO))
- netif_receive_skb(skb);
- else
- napi_gro_receive(&enic->napi[q_number], skb);
- if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
- enic_intr_update_pkt_size(&cq->pkt_size_counter,
- bytes_written);
- } else {
-
- /* Buffer overflow
- */
- rqstats->pkt_truncated++;
- dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
- DMA_FROM_DEVICE);
- dev_kfree_skb_any(skb);
- buf->os_buf = NULL;
- }
-}
-
-static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
- u8 type, u16 q_number, u16 completed_index, void *opaque)
-{
- struct enic *enic = vnic_dev_priv(vdev);
-
- vnic_rq_service(&enic->rq[q_number].vrq, cq_desc,
- completed_index, VNIC_RQ_RETURN_DESC,
- enic_rq_indicate_buf, opaque);
-
- return 0;
-}
-
static void enic_set_int_moderation(struct enic *enic, struct vnic_rq *rq)
{
unsigned int intr = enic_msix_rq_intr(enic, rq->index);
@@ -1589,12 +1332,10 @@ static int enic_poll(struct napi_struct *napi, int budget)
unsigned int work_done, rq_work_done = 0, wq_work_done;
int err;
- wq_work_done = vnic_cq_service(&enic->cq[cq_wq], wq_work_to_do,
- enic_wq_service, NULL);
+ wq_work_done = enic_wq_cq_service(enic, cq_wq, wq_work_to_do);
if (budget > 0)
- rq_work_done = vnic_cq_service(&enic->cq[cq_rq],
- rq_work_to_do, enic_rq_service, NULL);
+ rq_work_done = enic_rq_cq_service(enic, cq_rq, rq_work_to_do);
/* Accumulate intr event credits for this polling
* cycle. An intr event is the completion of a
@@ -1693,8 +1434,8 @@ static int enic_poll_msix_wq(struct napi_struct *napi, int budget)
wq_irq = wq->index;
cq = enic_cq_wq(enic, wq_irq);
intr = enic_msix_wq_intr(enic, wq_irq);
- wq_work_done = vnic_cq_service(&enic->cq[cq], wq_work_to_do,
- enic_wq_service, NULL);
+
+ wq_work_done = enic_wq_cq_service(enic, cq, wq_work_to_do);
vnic_intr_return_credits(&enic->intr[intr], wq_work_done,
0 /* don't unmask intr */,
@@ -1723,8 +1464,7 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
*/
if (budget > 0)
- work_done = vnic_cq_service(&enic->cq[cq],
- work_to_do, enic_rq_service, NULL);
+ work_done = enic_rq_cq_service(enic, cq, work_to_do);
/* Return intr event credits for this polling
* cycle. An intr event is the completion of a
@@ -1770,7 +1510,7 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
static void enic_notify_timer(struct timer_list *t)
{
- struct enic *enic = from_timer(enic, t, notify_timer);
+ struct enic *enic = timer_container_of(enic, t, notify_timer);
enic_notify_check(enic);
@@ -1901,36 +1641,6 @@ static void enic_synchronize_irqs(struct enic *enic)
}
}
-static void enic_set_rx_coal_setting(struct enic *enic)
-{
- unsigned int speed;
- int index = -1;
- struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
-
- /* 1. Read the link speed from fw
- * 2. Pick the default range for the speed
- * 3. Update it in enic->rx_coalesce_setting
- */
- speed = vnic_dev_port_speed(enic->vdev);
- if (ENIC_LINK_SPEED_10G < speed)
- index = ENIC_LINK_40G_INDEX;
- else if (ENIC_LINK_SPEED_4G < speed)
- index = ENIC_LINK_10G_INDEX;
- else
- index = ENIC_LINK_4G_INDEX;
-
- rx_coal->small_pkt_range_start = mod_range[index].small_pkt_range_start;
- rx_coal->large_pkt_range_start = mod_range[index].large_pkt_range_start;
- rx_coal->range_end = ENIC_RX_COALESCE_RANGE_END;
-
- /* Start with the value provided by UCSM */
- for (index = 0; index < enic->rq_count; index++)
- enic->cq[index].cur_rx_coal_timeval =
- enic->config.intr_timer_usec;
-
- rx_coal->use_adaptive_rx_coalesce = 1;
-}
-
static int enic_dev_notify_set(struct enic *enic)
{
int err;
@@ -1971,6 +1681,17 @@ static int enic_open(struct net_device *netdev)
struct enic *enic = netdev_priv(netdev);
unsigned int i;
int err, ret;
+ unsigned int max_pkt_len = netdev->mtu + VLAN_ETH_HLEN;
+ struct page_pool_params pp_params = {
+ .order = get_order(max_pkt_len),
+ .pool_size = enic->config.rq_desc_count,
+ .nid = dev_to_node(&enic->pdev->dev),
+ .dev = &enic->pdev->dev,
+ .dma_dir = DMA_FROM_DEVICE,
+ .max_len = (max_pkt_len > PAGE_SIZE) ? max_pkt_len : PAGE_SIZE,
+ .netdev = netdev,
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+ };
err = enic_request_intr(enic);
if (err) {
@@ -1988,6 +1709,16 @@ static int enic_open(struct net_device *netdev)
}
for (i = 0; i < enic->rq_count; i++) {
+ /* create a page pool for each RQ */
+ pp_params.napi = &enic->napi[i];
+ pp_params.queue_idx = i;
+ enic->rq[i].pool = page_pool_create(&pp_params);
+ if (IS_ERR(enic->rq[i].pool)) {
+ err = PTR_ERR(enic->rq[i].pool);
+ enic->rq[i].pool = NULL;
+ goto err_out_free_rq;
+ }
+
/* enable rq before updating rq desc */
vnic_rq_enable(&enic->rq[i].vrq);
vnic_rq_fill(&enic->rq[i].vrq, enic_rq_alloc_buf);
@@ -2028,8 +1759,11 @@ static int enic_open(struct net_device *netdev)
err_out_free_rq:
for (i = 0; i < enic->rq_count; i++) {
ret = vnic_rq_disable(&enic->rq[i].vrq);
- if (!ret)
+ if (!ret) {
vnic_rq_clean(&enic->rq[i].vrq, enic_free_rq_buf);
+ page_pool_destroy(enic->rq[i].pool);
+ enic->rq[i].pool = NULL;
+ }
}
enic_dev_notify_unset(enic);
err_out_free_intr:
@@ -2053,7 +1787,7 @@ static int enic_stop(struct net_device *netdev)
enic_synchronize_irqs(enic);
- del_timer_sync(&enic->notify_timer);
+ timer_delete_sync(&enic->notify_timer);
enic_rfs_flw_tbl_free(enic);
enic_dev_disable(enic);
@@ -2087,8 +1821,11 @@ static int enic_stop(struct net_device *netdev)
for (i = 0; i < enic->wq_count; i++)
vnic_wq_clean(&enic->wq[i].vwq, enic_free_wq_buf);
- for (i = 0; i < enic->rq_count; i++)
+ for (i = 0; i < enic->rq_count; i++) {
vnic_rq_clean(&enic->rq[i].vrq, enic_free_rq_buf);
+ page_pool_destroy(enic->rq[i].pool);
+ enic->rq[i].pool = NULL;
+ }
for (i = 0; i < enic->cq_count; i++)
vnic_cq_clean(&enic->cq[i]);
for (i = 0; i < enic->intr_count; i++)
@@ -2127,10 +1864,10 @@ static int enic_change_mtu(struct net_device *netdev, int new_mtu)
if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
return -EOPNOTSUPP;
- if (netdev->mtu > enic->port_mtu)
+ if (new_mtu > enic->port_mtu)
netdev_warn(netdev,
"interface MTU (%d) set higher than port MTU (%d)\n",
- netdev->mtu, enic->port_mtu);
+ new_mtu, enic->port_mtu);
return _enic_change_mtu(netdev, new_mtu);
}
@@ -2404,6 +2141,7 @@ static void enic_reset(struct work_struct *work)
enic_init_vnic_resources(enic);
enic_set_rss_nic_cfg(enic);
enic_dev_set_ig_vlan_rewrite_mode(enic);
+ enic_ext_cq(enic);
enic_open(enic->netdev);
/* Allow infiniband to fiddle with the device again */
@@ -2430,6 +2168,7 @@ static void enic_tx_hang_reset(struct work_struct *work)
enic_init_vnic_resources(enic);
enic_set_rss_nic_cfg(enic);
enic_dev_set_ig_vlan_rewrite_mode(enic);
+ enic_ext_cq(enic);
enic_open(enic->netdev);
/* Allow infiniband to fiddle with the device again */
@@ -2557,7 +2296,8 @@ static int enic_adjust_resources(struct enic *enic)
* used based on which resource is the most constrained
*/
wq_avail = min(enic->wq_avail, ENIC_WQ_MAX);
- rq_default = netif_get_num_default_rss_queues();
+ rq_default = max(netif_get_num_default_rss_queues(),
+ ENIC_RQ_MIN_DEFAULT);
rq_avail = min3(enic->rq_avail, ENIC_RQ_MAX, rq_default);
max_queues = min(enic->cq_avail,
enic->intr_avail - ENIC_MSIX_RESERVED_INTR);
@@ -2598,6 +2338,7 @@ static void enic_get_queue_stats_rx(struct net_device *dev, int idx,
rxs->hw_drop_overruns = rqstats->pkt_truncated;
rxs->csum_unnecessary = rqstats->csum_unnecessary +
rqstats->csum_unnecessary_encap;
+ rxs->alloc_fail = rqstats->pp_alloc_fail;
}
static void enic_get_queue_stats_tx(struct net_device *dev, int idx,
@@ -2625,6 +2366,7 @@ static void enic_get_base_stats(struct net_device *dev,
rxs->hw_drops = 0;
rxs->hw_drop_overruns = 0;
rxs->csum_unnecessary = 0;
+ rxs->alloc_fail = 0;
txs->bytes = 0;
txs->packets = 0;
txs->csum_none = 0;
@@ -2802,6 +2544,8 @@ static int enic_dev_init(struct enic *enic)
enic_get_res_counts(enic);
+ enic_ext_cq(enic);
+
err = enic_alloc_enic_resources(enic);
if (err) {
dev_err(dev, "Failed to allocate enic resources\n");
@@ -3063,7 +2807,6 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
timer_setup(&enic->notify_timer, enic_notify_timer, 0);
enic_rfs_flw_tbl_init(enic);
- enic_set_rx_coal_setting(enic);
INIT_WORK(&enic->reset, enic_reset);
INIT_WORK(&enic->tx_hang_reset, enic_tx_hang_reset);
INIT_WORK(&enic->change_mtu_work, enic_change_mtu_work);
@@ -3179,7 +2922,6 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_err(dev, "Cannot register net device, aborting\n");
goto err_out_dev_deinit;
}
- enic->rx_copybreak = RX_COPYBREAK_DEFAULT;
return 0;
diff --git a/drivers/net/ethernet/cisco/enic/enic_res.c b/drivers/net/ethernet/cisco/enic/enic_res.c
index 126125199833..bbd3143ed73e 100644
--- a/drivers/net/ethernet/cisco/enic/enic_res.c
+++ b/drivers/net/ethernet/cisco/enic/enic_res.c
@@ -59,31 +59,38 @@ int enic_get_vnic_config(struct enic *enic)
GET_CONFIG(intr_timer_usec);
GET_CONFIG(loop_tag);
GET_CONFIG(num_arfs);
+ GET_CONFIG(max_rq_ring);
+ GET_CONFIG(max_wq_ring);
+ GET_CONFIG(max_cq_ring);
+
+ if (!c->max_wq_ring)
+ c->max_wq_ring = ENIC_MAX_WQ_DESCS_DEFAULT;
+ if (!c->max_rq_ring)
+ c->max_rq_ring = ENIC_MAX_RQ_DESCS_DEFAULT;
+ if (!c->max_cq_ring)
+ c->max_cq_ring = ENIC_MAX_CQ_DESCS_DEFAULT;
c->wq_desc_count =
- min_t(u32, ENIC_MAX_WQ_DESCS,
- max_t(u32, ENIC_MIN_WQ_DESCS,
- c->wq_desc_count));
+ min_t(u32, c->max_wq_ring,
+ max_t(u32, ENIC_MIN_WQ_DESCS, c->wq_desc_count));
c->wq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */
c->rq_desc_count =
- min_t(u32, ENIC_MAX_RQ_DESCS,
- max_t(u32, ENIC_MIN_RQ_DESCS,
- c->rq_desc_count));
+ min_t(u32, c->max_rq_ring,
+ max_t(u32, ENIC_MIN_RQ_DESCS, c->rq_desc_count));
c->rq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */
if (c->mtu == 0)
c->mtu = 1500;
- c->mtu = min_t(u16, ENIC_MAX_MTU,
- max_t(u16, ENIC_MIN_MTU,
- c->mtu));
+ c->mtu = min_t(u16, ENIC_MAX_MTU, max_t(u16, ENIC_MIN_MTU, c->mtu));
c->intr_timer_usec = min_t(u32, c->intr_timer_usec,
vnic_dev_get_intr_coal_timer_max(enic->vdev));
dev_info(enic_get_dev(enic),
- "vNIC MAC addr %pM wq/rq %d/%d mtu %d\n",
- enic->mac_addr, c->wq_desc_count, c->rq_desc_count, c->mtu);
+ "vNIC MAC addr %pM wq/rq %d/%d max wq/rq/cq %d/%d/%d mtu %d\n",
+ enic->mac_addr, c->wq_desc_count, c->rq_desc_count,
+ c->max_wq_ring, c->max_rq_ring, c->max_cq_ring, c->mtu);
dev_info(enic_get_dev(enic), "vNIC csum tx/rx %s/%s "
"tso/lro %s/%s rss %s intr mode %s type %s timer %d usec "
@@ -312,6 +319,7 @@ void enic_init_vnic_resources(struct enic *enic)
int enic_alloc_vnic_resources(struct enic *enic)
{
enum vnic_dev_intr_mode intr_mode;
+ int rq_cq_desc_size;
unsigned int i;
int err;
@@ -326,6 +334,24 @@ int enic_alloc_vnic_resources(struct enic *enic)
intr_mode == VNIC_DEV_INTR_MODE_MSIX ? "MSI-X" :
"unknown");
+ switch (enic->ext_cq) {
+ case ENIC_RQ_CQ_ENTRY_SIZE_16:
+ rq_cq_desc_size = 16;
+ break;
+ case ENIC_RQ_CQ_ENTRY_SIZE_32:
+ rq_cq_desc_size = 32;
+ break;
+ case ENIC_RQ_CQ_ENTRY_SIZE_64:
+ rq_cq_desc_size = 64;
+ break;
+ default:
+ dev_err(enic_get_dev(enic),
+ "Unable to determine rq cq desc size: %d",
+ enic->ext_cq);
+ err = -ENODEV;
+ goto err_out;
+ }
+
/* Allocate queue resources
*/
@@ -348,8 +374,8 @@ int enic_alloc_vnic_resources(struct enic *enic)
for (i = 0; i < enic->cq_count; i++) {
if (i < enic->rq_count)
err = vnic_cq_alloc(enic->vdev, &enic->cq[i], i,
- enic->config.rq_desc_count,
- sizeof(struct cq_enet_rq_desc));
+ enic->config.rq_desc_count,
+ rq_cq_desc_size);
else
err = vnic_cq_alloc(enic->vdev, &enic->cq[i], i,
enic->config.wq_desc_count,
@@ -380,6 +406,39 @@ int enic_alloc_vnic_resources(struct enic *enic)
err_out_cleanup:
enic_free_vnic_resources(enic);
-
+err_out:
return err;
}
+
+/*
+ * CMD_CQ_ENTRY_SIZE_SET can fail on older hw generations that don't support
+ * that command
+ */
+void enic_ext_cq(struct enic *enic)
+{
+ u64 a0 = CMD_CQ_ENTRY_SIZE_SET, a1 = 0;
+ int wait = 1000;
+ int ret;
+
+ spin_lock_bh(&enic->devcmd_lock);
+ ret = vnic_dev_cmd(enic->vdev, CMD_CAPABILITY, &a0, &a1, wait);
+ if (ret || a0) {
+ dev_info(&enic->pdev->dev,
+ "CMD_CQ_ENTRY_SIZE_SET not supported.");
+ enic->ext_cq = ENIC_RQ_CQ_ENTRY_SIZE_16;
+ goto out;
+ }
+ a1 &= VNIC_RQ_CQ_ENTRY_SIZE_ALL_BIT;
+ enic->ext_cq = fls(a1) - 1;
+ a0 = VNIC_RQ_ALL;
+ a1 = enic->ext_cq;
+ ret = vnic_dev_cmd(enic->vdev, CMD_CQ_ENTRY_SIZE_SET, &a0, &a1, wait);
+ if (ret) {
+ dev_info(&enic->pdev->dev, "CMD_CQ_ENTRY_SIZE_SET failed.");
+ enic->ext_cq = ENIC_RQ_CQ_ENTRY_SIZE_16;
+ }
+out:
+ spin_unlock_bh(&enic->devcmd_lock);
+ dev_info(&enic->pdev->dev, "CQ entry size set to %d bytes",
+ 16 << enic->ext_cq);
+}
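Illustrative only, not part of the patch: the capability reply and the programmed entry size are tied together by the fls() decode in enic_ext_cq() above, so the highest supported size wins. A minimal sketch of that relationship, assuming the bit layout documented for CMD_CQ_ENTRY_SIZE_SET (bit 0 = 16 B, bit 1 = 32 B, bit 2 = 64 B); the helper name is hypothetical:

/* fls() comes from <linux/bitops.h>; mirrors the decode in enic_ext_cq() */
static unsigned int enic_cq_entry_size_from_caps(u32 caps)
{
	/* highest set capability bit selects the entry size */
	unsigned int ext_cq = caps ? fls(caps) - 1 : 0;

	/* e.g. caps == 0x7 -> ext_cq == 2 -> 16 << 2 == 64-byte CQ entries */
	return 16 << ext_cq;
}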
diff --git a/drivers/net/ethernet/cisco/enic/enic_res.h b/drivers/net/ethernet/cisco/enic/enic_res.h
index b8ee42d297aa..02dca1ae4a22 100644
--- a/drivers/net/ethernet/cisco/enic/enic_res.h
+++ b/drivers/net/ethernet/cisco/enic/enic_res.h
@@ -12,10 +12,13 @@
#include "vnic_wq.h"
#include "vnic_rq.h"
-#define ENIC_MIN_WQ_DESCS 64
-#define ENIC_MAX_WQ_DESCS 4096
-#define ENIC_MIN_RQ_DESCS 64
-#define ENIC_MAX_RQ_DESCS 4096
+#define ENIC_MIN_WQ_DESCS 64
+#define ENIC_MAX_WQ_DESCS_DEFAULT 4096
+#define ENIC_MAX_WQ_DESCS 16384
+#define ENIC_MIN_RQ_DESCS 64
+#define ENIC_MAX_RQ_DESCS 16384
+#define ENIC_MAX_RQ_DESCS_DEFAULT 4096
+#define ENIC_MAX_CQ_DESCS_DEFAULT (64 * 1024)
#define ENIC_MIN_MTU ETH_MIN_MTU
#define ENIC_MAX_MTU 9000
diff --git a/drivers/net/ethernet/cisco/enic/enic_rq.c b/drivers/net/ethernet/cisco/enic/enic_rq.c
new file mode 100644
index 000000000000..ccbf5c9a21d0
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/enic_rq.c
@@ -0,0 +1,436 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2024 Cisco Systems, Inc. All rights reserved.
+
+#include <linux/skbuff.h>
+#include <linux/if_vlan.h>
+#include <net/busy_poll.h>
+#include "enic.h"
+#include "enic_res.h"
+#include "enic_rq.h"
+#include "vnic_rq.h"
+#include "cq_enet_desc.h"
+
+#define ENIC_LARGE_PKT_THRESHOLD 1000
+
+static void enic_intr_update_pkt_size(struct vnic_rx_bytes_counter *pkt_size,
+ u32 pkt_len)
+{
+ if (pkt_len > ENIC_LARGE_PKT_THRESHOLD)
+ pkt_size->large_pkt_bytes_cnt += pkt_len;
+ else
+ pkt_size->small_pkt_bytes_cnt += pkt_len;
+}
+
+static void enic_rq_cq_desc_dec(void *cq_desc, u8 cq_desc_size, u8 *type,
+ u8 *color, u16 *q_number, u16 *completed_index)
+{
+ /* type_color is the last field for all cq structs */
+ u8 type_color;
+
+ switch (cq_desc_size) {
+ case VNIC_RQ_CQ_ENTRY_SIZE_16: {
+ struct cq_enet_rq_desc *desc =
+ (struct cq_enet_rq_desc *)cq_desc;
+ type_color = desc->type_color;
+
+ /* Make sure color bit is read from desc *before* other fields
+ * are read from desc. Hardware guarantees color bit is last
+ * bit (byte) written. Adding the rmb() prevents the compiler
+ * and/or CPU from reordering the reads which would potentially
+ * result in reading stale values.
+ */
+ rmb();
+
+ *q_number = le16_to_cpu(desc->q_number_rss_type_flags) &
+ CQ_DESC_Q_NUM_MASK;
+ *completed_index = le16_to_cpu(desc->completed_index_flags) &
+ CQ_DESC_COMP_NDX_MASK;
+ break;
+ }
+ case VNIC_RQ_CQ_ENTRY_SIZE_32: {
+ struct cq_enet_rq_desc_32 *desc =
+ (struct cq_enet_rq_desc_32 *)cq_desc;
+ type_color = desc->type_color;
+
+ /* Make sure color bit is read from desc *before* other fields
+ * are read from desc. Hardware guarantees color bit is last
+ * bit (byte) written. Adding the rmb() prevents the compiler
+ * and/or CPU from reordering the reads which would potentially
+ * result in reading stale values.
+ */
+ rmb();
+
+ *q_number = le16_to_cpu(desc->q_number_rss_type_flags) &
+ CQ_DESC_Q_NUM_MASK;
+ *completed_index = le16_to_cpu(desc->completed_index_flags) &
+ CQ_DESC_COMP_NDX_MASK;
+ *completed_index |= (desc->fetch_index_flags & CQ_DESC_32_FI_MASK) <<
+ CQ_DESC_COMP_NDX_BITS;
+ break;
+ }
+ case VNIC_RQ_CQ_ENTRY_SIZE_64: {
+ struct cq_enet_rq_desc_64 *desc =
+ (struct cq_enet_rq_desc_64 *)cq_desc;
+ type_color = desc->type_color;
+
+ /* Make sure color bit is read from desc *before* other fields
+ * are read from desc. Hardware guarantees color bit is last
+ * bit (byte) written. Adding the rmb() prevents the compiler
+ * and/or CPU from reordering the reads which would potentially
+ * result in reading stale values.
+ */
+ rmb();
+
+ *q_number = le16_to_cpu(desc->q_number_rss_type_flags) &
+ CQ_DESC_Q_NUM_MASK;
+ *completed_index = le16_to_cpu(desc->completed_index_flags) &
+ CQ_DESC_COMP_NDX_MASK;
+ *completed_index |= (desc->fetch_index_flags & CQ_DESC_64_FI_MASK) <<
+ CQ_DESC_COMP_NDX_BITS;
+ break;
+ }
+ }
+
+ *color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK;
+ *type = type_color & CQ_DESC_TYPE_MASK;
+}
+
+static void enic_rq_set_skb_flags(struct vnic_rq *vrq, u8 type, u32 rss_hash,
+ u8 rss_type, u8 fcoe, u8 fcoe_fc_crc_ok,
+ u8 vlan_stripped, u8 csum_not_calc,
+ u8 tcp_udp_csum_ok, u8 ipv6, u8 ipv4_csum_ok,
+ u16 vlan_tci, struct sk_buff *skb)
+{
+ struct enic *enic = vnic_dev_priv(vrq->vdev);
+ struct net_device *netdev = enic->netdev;
+ struct enic_rq_stats *rqstats = &enic->rq[vrq->index].stats;
+ bool outer_csum_ok = true, encap = false;
+
+ if ((netdev->features & NETIF_F_RXHASH) && rss_hash && type == 3) {
+ switch (rss_type) {
+ case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv4:
+ case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6:
+ case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6_EX:
+ skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L4);
+ rqstats->l4_rss_hash++;
+ break;
+ case CQ_ENET_RQ_DESC_RSS_TYPE_IPv4:
+ case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6:
+ case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6_EX:
+ skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L3);
+ rqstats->l3_rss_hash++;
+ break;
+ }
+ }
+ if (enic->vxlan.vxlan_udp_port_number) {
+ switch (enic->vxlan.patch_level) {
+ case 0:
+ if (fcoe) {
+ encap = true;
+ outer_csum_ok = fcoe_fc_crc_ok;
+ }
+ break;
+ case 2:
+ if (type == 7 && (rss_hash & BIT(0))) {
+ encap = true;
+ outer_csum_ok = (rss_hash & BIT(1)) &&
+ (rss_hash & BIT(2));
+ }
+ break;
+ }
+ }
+
+ /* Hardware does not provide the whole packet checksum. It only
+ * provides a pseudo checksum. Since hw validates the packet
+ * checksum but does not provide the checksum value, use
+ * CHECKSUM_UNNECESSARY.
+ *
+ * In case of an encap pkt, tcp_udp_csum_ok is the inner csum_ok.
+ * outer_csum_ok is set by hw when the outer udp csum is correct
+ * or is zero.
+ */
+ if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc &&
+ tcp_udp_csum_ok && outer_csum_ok && (ipv4_csum_ok || ipv6)) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb->csum_level = encap;
+ if (encap)
+ rqstats->csum_unnecessary_encap++;
+ else
+ rqstats->csum_unnecessary++;
+ }
+
+ if (vlan_stripped) {
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
+ rqstats->vlan_stripped++;
+ }
+}
+
+/*
+ * The cq_enet_rq_desc accessors use only the first 15 bytes of the cq
+ * descriptor, which are identical for all cq entry sizes (16, 32 and 64
+ * bytes).
+ */
+static void cq_enet_rq_desc_dec(struct cq_enet_rq_desc *desc, u8 *ingress_port,
+ u8 *fcoe, u8 *eop, u8 *sop, u8 *rss_type,
+ u8 *csum_not_calc, u32 *rss_hash,
+ u16 *bytes_written, u8 *packet_error,
+ u8 *vlan_stripped, u16 *vlan_tci,
+ u16 *checksum, u8 *fcoe_sof,
+ u8 *fcoe_fc_crc_ok, u8 *fcoe_enc_error,
+ u8 *fcoe_eof, u8 *tcp_udp_csum_ok, u8 *udp,
+ u8 *tcp, u8 *ipv4_csum_ok, u8 *ipv6, u8 *ipv4,
+ u8 *ipv4_fragment, u8 *fcs_ok)
+{
+ u16 completed_index_flags;
+ u16 q_number_rss_type_flags;
+ u16 bytes_written_flags;
+
+ completed_index_flags = le16_to_cpu(desc->completed_index_flags);
+ q_number_rss_type_flags =
+ le16_to_cpu(desc->q_number_rss_type_flags);
+ bytes_written_flags = le16_to_cpu(desc->bytes_written_flags);
+
+ *ingress_port = (completed_index_flags &
+ CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT) ? 1 : 0;
+ *fcoe = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_FCOE) ?
+ 1 : 0;
+ *eop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_EOP) ?
+ 1 : 0;
+ *sop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_SOP) ?
+ 1 : 0;
+
+ *rss_type = (u8)((q_number_rss_type_flags >> CQ_DESC_Q_NUM_BITS) &
+ CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
+ *csum_not_calc = (q_number_rss_type_flags &
+ CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ? 1 : 0;
+
+ *rss_hash = le32_to_cpu(desc->rss_hash);
+
+ *bytes_written = bytes_written_flags &
+ CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
+ *packet_error = (bytes_written_flags &
+ CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ? 1 : 0;
+ *vlan_stripped = (bytes_written_flags &
+ CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) ? 1 : 0;
+
+ /*
+ * Tag Control Information(16) = user_priority(3) + cfi(1) + vlan(12)
+ */
+ *vlan_tci = le16_to_cpu(desc->vlan);
+
+ if (*fcoe) {
+ *fcoe_sof = (u8)(le16_to_cpu(desc->checksum_fcoe) &
+ CQ_ENET_RQ_DESC_FCOE_SOF_MASK);
+ *fcoe_fc_crc_ok = (desc->flags &
+ CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK) ? 1 : 0;
+ *fcoe_enc_error = (desc->flags &
+ CQ_ENET_RQ_DESC_FCOE_ENC_ERROR) ? 1 : 0;
+ *fcoe_eof = (u8)((le16_to_cpu(desc->checksum_fcoe) >>
+ CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT) &
+ CQ_ENET_RQ_DESC_FCOE_EOF_MASK);
+ *checksum = 0;
+ } else {
+ *fcoe_sof = 0;
+ *fcoe_fc_crc_ok = 0;
+ *fcoe_enc_error = 0;
+ *fcoe_eof = 0;
+ *checksum = le16_to_cpu(desc->checksum_fcoe);
+ }
+
+ *tcp_udp_csum_ok =
+ (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ? 1 : 0;
+ *udp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_UDP) ? 1 : 0;
+ *tcp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP) ? 1 : 0;
+ *ipv4_csum_ok =
+ (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ? 1 : 0;
+ *ipv6 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV6) ? 1 : 0;
+ *ipv4 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4) ? 1 : 0;
+ *ipv4_fragment =
+ (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT) ? 1 : 0;
+ *fcs_ok = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_FCS_OK) ? 1 : 0;
+}
+
+static bool enic_rq_pkt_error(struct vnic_rq *vrq, u8 packet_error, u8 fcs_ok,
+ u16 bytes_written)
+{
+ struct enic *enic = vnic_dev_priv(vrq->vdev);
+ struct enic_rq_stats *rqstats = &enic->rq[vrq->index].stats;
+
+ if (packet_error) {
+ if (!fcs_ok) {
+ if (bytes_written > 0)
+ rqstats->bad_fcs++;
+ else if (bytes_written == 0)
+ rqstats->pkt_truncated++;
+ }
+ return true;
+ }
+ return false;
+}
+
+int enic_rq_alloc_buf(struct vnic_rq *rq)
+{
+ struct enic *enic = vnic_dev_priv(rq->vdev);
+ struct net_device *netdev = enic->netdev;
+ struct enic_rq *erq = &enic->rq[rq->index];
+ struct enic_rq_stats *rqstats = &erq->stats;
+ unsigned int offset = 0;
+ unsigned int len = netdev->mtu + VLAN_ETH_HLEN;
+ unsigned int os_buf_index = 0;
+ dma_addr_t dma_addr;
+ struct vnic_rq_buf *buf = rq->to_use;
+ struct page *page;
+ unsigned int truesize = len;
+
+ if (buf->os_buf) {
+ enic_queue_rq_desc(rq, buf->os_buf, os_buf_index, buf->dma_addr,
+ buf->len);
+
+ return 0;
+ }
+
+ page = page_pool_dev_alloc(erq->pool, &offset, &truesize);
+ if (unlikely(!page)) {
+ rqstats->pp_alloc_fail++;
+ return -ENOMEM;
+ }
+ buf->offset = offset;
+ buf->truesize = truesize;
+ dma_addr = page_pool_get_dma_addr(page) + offset;
+ enic_queue_rq_desc(rq, (void *)page, os_buf_index, dma_addr, len);
+
+ return 0;
+}
+
+void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
+{
+ struct enic *enic = vnic_dev_priv(rq->vdev);
+ struct enic_rq *erq = &enic->rq[rq->index];
+
+ if (!buf->os_buf)
+ return;
+
+ page_pool_put_full_page(erq->pool, (struct page *)buf->os_buf, true);
+ buf->os_buf = NULL;
+}
+
+static void enic_rq_indicate_buf(struct enic *enic, struct vnic_rq *rq,
+ struct vnic_rq_buf *buf, void *cq_desc,
+ u8 type, u16 q_number, u16 completed_index)
+{
+ struct sk_buff *skb;
+ struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
+ struct enic_rq_stats *rqstats = &enic->rq[rq->index].stats;
+ struct napi_struct *napi;
+
+ u8 eop, sop, ingress_port, vlan_stripped;
+ u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
+ u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
+ u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
+ u8 packet_error;
+ u16 bytes_written, vlan_tci, checksum;
+ u32 rss_hash;
+
+ rqstats->packets++;
+
+ cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc, &ingress_port,
+ &fcoe, &eop, &sop, &rss_type, &csum_not_calc,
+ &rss_hash, &bytes_written, &packet_error,
+ &vlan_stripped, &vlan_tci, &checksum, &fcoe_sof,
+ &fcoe_fc_crc_ok, &fcoe_enc_error, &fcoe_eof,
+ &tcp_udp_csum_ok, &udp, &tcp, &ipv4_csum_ok, &ipv6,
+ &ipv4, &ipv4_fragment, &fcs_ok);
+
+ if (enic_rq_pkt_error(rq, packet_error, fcs_ok, bytes_written))
+ return;
+
+ if (eop && bytes_written > 0) {
+ /* Good receive
+ */
+ rqstats->bytes += bytes_written;
+ napi = &enic->napi[rq->index];
+ skb = napi_get_frags(napi);
+ if (unlikely(!skb)) {
+ net_warn_ratelimited("%s: skb alloc error rq[%d], desc[%d]\n",
+ enic->netdev->name, rq->index,
+ completed_index);
+ rqstats->no_skb++;
+ return;
+ }
+
+ prefetch(skb->data - NET_IP_ALIGN);
+
+ dma_sync_single_for_cpu(&enic->pdev->dev, buf->dma_addr,
+ bytes_written, DMA_FROM_DEVICE);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ (struct page *)buf->os_buf, buf->offset,
+ bytes_written, buf->truesize);
+ skb_record_rx_queue(skb, q_number);
+ enic_rq_set_skb_flags(rq, type, rss_hash, rss_type, fcoe,
+ fcoe_fc_crc_ok, vlan_stripped,
+ csum_not_calc, tcp_udp_csum_ok, ipv6,
+ ipv4_csum_ok, vlan_tci, skb);
+ skb_mark_for_recycle(skb);
+ napi_gro_frags(napi);
+ if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
+ enic_intr_update_pkt_size(&cq->pkt_size_counter,
+ bytes_written);
+ buf->os_buf = NULL;
+ buf->dma_addr = 0;
+ buf = buf->next;
+ } else {
+ /* Buffer overflow
+ */
+ rqstats->pkt_truncated++;
+ }
+}
+
+static void enic_rq_service(struct enic *enic, void *cq_desc, u8 type,
+ u16 q_number, u16 completed_index)
+{
+ struct enic_rq_stats *rqstats = &enic->rq[q_number].stats;
+ struct vnic_rq *vrq = &enic->rq[q_number].vrq;
+ struct vnic_rq_buf *vrq_buf = vrq->to_clean;
+ int skipped;
+
+ while (1) {
+ skipped = (vrq_buf->index != completed_index);
+ if (!skipped)
+ enic_rq_indicate_buf(enic, vrq, vrq_buf, cq_desc, type,
+ q_number, completed_index);
+ else
+ rqstats->desc_skip++;
+
+ vrq->ring.desc_avail++;
+ vrq->to_clean = vrq_buf->next;
+ vrq_buf = vrq_buf->next;
+ if (!skipped)
+ break;
+ }
+}
+
+unsigned int enic_rq_cq_service(struct enic *enic, unsigned int cq_index,
+ unsigned int work_to_do)
+{
+ struct vnic_cq *cq = &enic->cq[cq_index];
+ void *cq_desc = vnic_cq_to_clean(cq);
+ u16 q_number, completed_index;
+ unsigned int work_done = 0;
+ u8 type, color;
+
+ enic_rq_cq_desc_dec(cq_desc, enic->ext_cq, &type, &color, &q_number,
+ &completed_index);
+
+ while (color != cq->last_color) {
+ enic_rq_service(enic, cq_desc, type, q_number, completed_index);
+ vnic_cq_inc_to_clean(cq);
+
+ if (++work_done >= work_to_do)
+ break;
+
+ cq_desc = vnic_cq_to_clean(cq);
+ enic_rq_cq_desc_dec(cq_desc, enic->ext_cq, &type, &color,
+ &q_number, &completed_index);
+ }
+
+ return work_done;
+}
diff --git a/drivers/net/ethernet/cisco/enic/enic_rq.h b/drivers/net/ethernet/cisco/enic/enic_rq.h
new file mode 100644
index 000000000000..98476a7297af
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/enic_rq.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright 2024 Cisco Systems, Inc. All rights reserved.
+ */
+
+unsigned int enic_rq_cq_service(struct enic *enic, unsigned int cq_index,
+ unsigned int work_to_do);
+int enic_rq_alloc_buf(struct vnic_rq *rq);
+void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf);
diff --git a/drivers/net/ethernet/cisco/enic/enic_wq.c b/drivers/net/ethernet/cisco/enic/enic_wq.c
new file mode 100644
index 000000000000..07936f8b4231
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/enic_wq.c
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2025 Cisco Systems, Inc. All rights reserved.
+
+#include <net/netdev_queues.h>
+#include "enic_res.h"
+#include "enic.h"
+#include "enic_wq.h"
+
+#define ENET_CQ_DESC_COMP_NDX_BITS 14
+#define ENET_CQ_DESC_COMP_NDX_MASK GENMASK(ENET_CQ_DESC_COMP_NDX_BITS - 1, 0)
+
+static void enic_wq_cq_desc_dec(const struct cq_desc *desc_arg, bool ext_wq,
+ u8 *type, u8 *color, u16 *q_number,
+ u16 *completed_index)
+{
+ const struct cq_desc *desc = desc_arg;
+ const u8 type_color = desc->type_color;
+
+ *color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK;
+
+ /*
+ * Make sure color bit is read from desc *before* other fields
+ * are read from desc. Hardware guarantees color bit is last
+ * bit (byte) written. Adding the rmb() prevents the compiler
+ * and/or CPU from reordering the reads which would potentially
+ * result in reading stale values.
+ */
+ rmb();
+
+ *type = type_color & CQ_DESC_TYPE_MASK;
+ *q_number = le16_to_cpu(desc->q_number) & CQ_DESC_Q_NUM_MASK;
+
+ if (ext_wq)
+ *completed_index = le16_to_cpu(desc->completed_index) &
+ ENET_CQ_DESC_COMP_NDX_MASK;
+ else
+ *completed_index = le16_to_cpu(desc->completed_index) &
+ CQ_DESC_COMP_NDX_MASK;
+}
+
+void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
+{
+ struct enic *enic = vnic_dev_priv(wq->vdev);
+
+ if (buf->sop)
+ dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_page(&enic->pdev->dev, buf->dma_addr, buf->len,
+ DMA_TO_DEVICE);
+
+ if (buf->os_buf)
+ dev_kfree_skb_any(buf->os_buf);
+}
+
+static void enic_wq_free_buf(struct vnic_wq *wq, struct cq_desc *cq_desc,
+ struct vnic_wq_buf *buf, void *opaque)
+{
+ struct enic *enic = vnic_dev_priv(wq->vdev);
+
+ enic->wq[wq->index].stats.cq_work++;
+ enic->wq[wq->index].stats.cq_bytes += buf->len;
+ enic_free_wq_buf(wq, buf);
+}
+
+static void enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
+ u8 type, u16 q_number, u16 completed_index)
+{
+ struct enic *enic = vnic_dev_priv(vdev);
+
+ spin_lock(&enic->wq[q_number].lock);
+
+ vnic_wq_service(&enic->wq[q_number].vwq, cq_desc,
+ completed_index, enic_wq_free_buf, NULL);
+
+ if (netif_tx_queue_stopped(netdev_get_tx_queue(enic->netdev, q_number))
+ && vnic_wq_desc_avail(&enic->wq[q_number].vwq) >=
+ (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)) {
+ netif_wake_subqueue(enic->netdev, q_number);
+ enic->wq[q_number].stats.wake++;
+ }
+
+ spin_unlock(&enic->wq[q_number].lock);
+}
+
+unsigned int enic_wq_cq_service(struct enic *enic, unsigned int cq_index,
+ unsigned int work_to_do)
+{
+ struct vnic_cq *cq = &enic->cq[cq_index];
+ u16 q_number, completed_index;
+ unsigned int work_done = 0;
+ struct cq_desc *cq_desc;
+ u8 type, color;
+ bool ext_wq;
+
+ ext_wq = cq->ring.size > ENIC_MAX_WQ_DESCS_DEFAULT;
+
+ cq_desc = (struct cq_desc *)vnic_cq_to_clean(cq);
+ enic_wq_cq_desc_dec(cq_desc, ext_wq, &type, &color,
+ &q_number, &completed_index);
+
+ while (color != cq->last_color) {
+ enic_wq_service(cq->vdev, cq_desc, type, q_number,
+ completed_index);
+
+ vnic_cq_inc_to_clean(cq);
+
+ if (++work_done >= work_to_do)
+ break;
+
+ cq_desc = (struct cq_desc *)vnic_cq_to_clean(cq);
+ enic_wq_cq_desc_dec(cq_desc, ext_wq, &type, &color,
+ &q_number, &completed_index);
+ }
+
+ return work_done;
+}
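For context, an illustrative sketch (not part of the patch) of why the extended WQ completion index above is 14 bits wide: the legacy mask presumably covers 12 bits, which exactly fits the old ENIC_MAX_WQ_DESCS_DEFAULT ring of 4096 descriptors, while the new 16384-descriptor maximum needs two more bits.

/* Compile-time sanity check of the sizing, using constants from this series. */
static_assert(4096 == (1U << 12));	/* default ring: a 12-bit index suffices */
static_assert(16384 == (1U << 14));	/* extended ring: needs ENET_CQ_DESC_COMP_NDX_BITS == 14 */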
diff --git a/drivers/net/ethernet/cisco/enic/enic_wq.h b/drivers/net/ethernet/cisco/enic/enic_wq.h
new file mode 100644
index 000000000000..12acb3f2fbc9
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/enic_wq.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright 2025 Cisco Systems, Inc. All rights reserved.
+ */
+
+void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf);
+unsigned int enic_wq_cq_service(struct enic *enic, unsigned int cq_index,
+ unsigned int work_to_do);
diff --git a/drivers/net/ethernet/cisco/enic/vnic_cq.h b/drivers/net/ethernet/cisco/enic/vnic_cq.h
index eed5bf59e5d2..0e37f5d5e527 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_cq.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_cq.h
@@ -56,45 +56,18 @@ struct vnic_cq {
ktime_t prev_ts;
};
-static inline unsigned int vnic_cq_service(struct vnic_cq *cq,
- unsigned int work_to_do,
- int (*q_service)(struct vnic_dev *vdev, struct cq_desc *cq_desc,
- u8 type, u16 q_number, u16 completed_index, void *opaque),
- void *opaque)
+static inline void *vnic_cq_to_clean(struct vnic_cq *cq)
{
- struct cq_desc *cq_desc;
- unsigned int work_done = 0;
- u16 q_number, completed_index;
- u8 type, color;
-
- cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
- cq->ring.desc_size * cq->to_clean);
- cq_desc_dec(cq_desc, &type, &color,
- &q_number, &completed_index);
-
- while (color != cq->last_color) {
-
- if ((*q_service)(cq->vdev, cq_desc, type,
- q_number, completed_index, opaque))
- break;
-
- cq->to_clean++;
- if (cq->to_clean == cq->ring.desc_count) {
- cq->to_clean = 0;
- cq->last_color = cq->last_color ? 0 : 1;
- }
-
- cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
- cq->ring.desc_size * cq->to_clean);
- cq_desc_dec(cq_desc, &type, &color,
- &q_number, &completed_index);
+ return ((u8 *)cq->ring.descs + cq->ring.desc_size * cq->to_clean);
+}
- work_done++;
- if (work_done >= work_to_do)
- break;
+static inline void vnic_cq_inc_to_clean(struct vnic_cq *cq)
+{
+ cq->to_clean++;
+ if (cq->to_clean == cq->ring.desc_count) {
+ cq->to_clean = 0;
+ cq->last_color = cq->last_color ? 0 : 1;
}
-
- return work_done;
}
void vnic_cq_free(struct vnic_cq *cq);
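To make the refactor concrete, here is an illustrative consumer loop (not part of the patch; the function name is hypothetical) built on the two new helpers, mirroring how enic_rq_cq_service()/enic_wq_cq_service() now open-code what vnic_cq_service() used to do through a callback:

static unsigned int example_cq_service(struct vnic_cq *cq, unsigned int work_to_do)
{
	u16 q_number, completed_index;
	unsigned int work_done = 0;
	struct cq_desc *desc;
	u8 type, color;

	desc = (struct cq_desc *)vnic_cq_to_clean(cq);
	cq_desc_dec(desc, &type, &color, &q_number, &completed_index);

	while (color != cq->last_color) {
		/* handle the completion described by (type, q_number, completed_index) */

		vnic_cq_inc_to_clean(cq);	/* advance; flips last_color on wrap */

		if (++work_done >= work_to_do)
			break;

		desc = (struct cq_desc *)vnic_cq_to_clean(cq);
		cq_desc_dec(desc, &type, &color, &q_number, &completed_index);
	}

	return work_done;
}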
diff --git a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
index db56d778877a..605ef17f967e 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
@@ -436,6 +436,25 @@ enum vnic_devcmd_cmd {
* in: (u16) a2 = unsigned short int port information
*/
CMD_OVERLAY_OFFLOAD_CFG = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 73),
+
+ /*
+ * Set extended CQ field in MREGS of RQ (or all RQs)
+ * for given vNIC
+ * in: (u64) a0 = RQ selection (VNIC_RQ_ALL for all RQs)
+ * (u32) a1 = CQ entry size
+ * VNIC_RQ_CQ_ENTRY_SIZE_16 --> 16 bytes
+ * VNIC_RQ_CQ_ENTRY_SIZE_32 --> 32 bytes
+ * VNIC_RQ_CQ_ENTRY_SIZE_64 --> 64 bytes
+ *
+ * Capability query:
+ * out: (u32) a0 = errno, 0:valid cmd
+ * (u32) a1 = value consisting of supported entries
+ * bit 0: 16 bytes
+ * bit 1: 32 bytes
+ * bit 2: 64 bytes
+ */
+ CMD_CQ_ENTRY_SIZE_SET = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 90),
+
};
/* CMD_ENABLE2 flags */
diff --git a/drivers/net/ethernet/cisco/enic/vnic_enet.h b/drivers/net/ethernet/cisco/enic/vnic_enet.h
index 5acc236069de..9e8e86262a3f 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_enet.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_enet.h
@@ -21,6 +21,11 @@ struct vnic_enet_config {
u16 loop_tag;
u16 vf_rq_count;
u16 num_arfs;
+ u8 reserved[66];
+ u32 max_rq_ring; // MAX RQ ring size
+ u32 max_wq_ring; // MAX WQ ring size
+ u32 max_cq_ring; // MAX CQ ring size
+ u32 rdma_rsvd_lkey; // Reserved (privileged) LKey
};
#define VENETF_TSO 0x1 /* TSO enabled */
diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.h b/drivers/net/ethernet/cisco/enic/vnic_rq.h
index 0bc595abc03b..a1cdd729caec 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_rq.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_rq.h
@@ -50,7 +50,7 @@ struct vnic_rq_ctrl {
(VNIC_RQ_BUF_BLK_ENTRIES(entries) * sizeof(struct vnic_rq_buf))
#define VNIC_RQ_BUF_BLKS_NEEDED(entries) \
DIV_ROUND_UP(entries, VNIC_RQ_BUF_BLK_ENTRIES(entries))
-#define VNIC_RQ_BUF_BLKS_MAX VNIC_RQ_BUF_BLKS_NEEDED(4096)
+#define VNIC_RQ_BUF_BLKS_MAX VNIC_RQ_BUF_BLKS_NEEDED(16384)
struct vnic_rq_buf {
struct vnic_rq_buf *next;
@@ -61,6 +61,8 @@ struct vnic_rq_buf {
unsigned int index;
void *desc;
uint64_t wr_id;
+ unsigned int offset;
+ unsigned int truesize;
};
enum enic_poll_state {
diff --git a/drivers/net/ethernet/cisco/enic/vnic_wq.h b/drivers/net/ethernet/cisco/enic/vnic_wq.h
index 75c526911074..3bb4758100ba 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_wq.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_wq.h
@@ -62,7 +62,7 @@ struct vnic_wq_buf {
(VNIC_WQ_BUF_BLK_ENTRIES(entries) * sizeof(struct vnic_wq_buf))
#define VNIC_WQ_BUF_BLKS_NEEDED(entries) \
DIV_ROUND_UP(entries, VNIC_WQ_BUF_BLK_ENTRIES(entries))
-#define VNIC_WQ_BUF_BLKS_MAX VNIC_WQ_BUF_BLKS_NEEDED(4096)
+#define VNIC_WQ_BUF_BLKS_MAX VNIC_WQ_BUF_BLKS_NEEDED(16384)
struct vnic_wq {
unsigned int index;
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index 991e3839858b..6a2004bbe87f 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -40,6 +40,7 @@
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
+#include <net/gro.h>
#include "gemini.h"
@@ -1143,6 +1144,7 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb,
struct gmac_txdesc *txd;
skb_frag_t *skb_frag;
dma_addr_t mapping;
+ bool tcp = false;
void *buffer;
u16 mss;
int ret;
@@ -1150,6 +1152,13 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb,
word1 = skb->len;
word3 = SOF_BIT;
+ /* Determine if we are doing TCP */
+ if (skb->protocol == htons(ETH_P_IP))
+ tcp = (ip_hdr(skb)->protocol == IPPROTO_TCP);
+ else
+ /* IPv6 */
+ tcp = (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP);
+
mss = skb_shinfo(skb)->gso_size;
if (mss) {
/* This means we are dealing with TCP and skb->len is the
@@ -1162,8 +1171,26 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb,
mss, skb->len);
word1 |= TSS_MTU_ENABLE_BIT;
word3 |= mss;
+ } else if (tcp) {
+ /* Even if we are not using TSO, use the hardware offloader
+ * for transferring the TCP frame: this hardware has partial
+ * TCP awareness (called TOE - TCP Offload Engine) and will,
+ * according to the datasheet, put packets belonging to the
+ * same TCP connection in the same queue for the TOE/TSO
+ * engine to process. The engine will deal with chopping up
+ * frames that exceed ETH_DATA_LEN, which the checksumming
+ * engine cannot handle (see below), into manageable chunks.
+ * It flawlessly deals with quite big frames and frames
+ * containing custom DSA EtherTypes.
+ */
+ mss = netdev->mtu + skb_tcp_all_headers(skb);
+ mss = min(mss, skb->len);
+ netdev_dbg(netdev, "TOE/TSO len %04x mtu %04x mss %04x\n",
+ skb->len, netdev->mtu, mss);
+ word1 |= TSS_MTU_ENABLE_BIT;
+ word3 |= mss;
} else if (skb->len >= ETH_FRAME_LEN) {
- /* Hardware offloaded checksumming isn't working on frames
+ /* Hardware offloaded checksumming isn't working on non-TCP frames
* bigger than 1514 bytes. A hypothesis about this is that the
* checksum buffer is only 1518 bytes, so when the frames get
* bigger they get truncated, or the last few bytes get
@@ -1180,21 +1207,16 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb,
}
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- int tcp = 0;
-
/* We do not switch off the checksumming on non TCP/UDP
* frames: as is shown from tests, the checksumming engine
* is smart enough to see that a frame is not actually TCP
* or UDP and then just pass it through without any changes
* to the frame.
*/
- if (skb->protocol == htons(ETH_P_IP)) {
+ if (skb->protocol == htons(ETH_P_IP))
word1 |= TSS_IP_CHKSUM_BIT;
- tcp = ip_hdr(skb)->protocol == IPPROTO_TCP;
- } else { /* IPv6 */
+ else
word1 |= TSS_IPV6_ENABLE_BIT;
- tcp = ipv6_hdr(skb)->nexthdr == IPPROTO_TCP;
- }
word1 |= tcp ? TSS_TCP_CHKSUM_BIT : TSS_UDP_CHKSUM_BIT;
}
@@ -1833,9 +1855,8 @@ static int gmac_open(struct net_device *netdev)
gmac_enable_tx_rx(netdev);
netif_tx_start_all_queues(netdev);
- hrtimer_init(&port->rx_coalesce_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- port->rx_coalesce_timer.function = &gmac_coalesce_delay_expired;
+ hrtimer_setup(&port->rx_coalesce_timer, &gmac_coalesce_delay_expired, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
netdev_dbg(netdev, "opened\n");
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 8735e333034c..b87eaf0c250c 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -1777,10 +1777,11 @@ static void dm9000_drv_remove(struct platform_device *pdev)
unregister_netdev(ndev);
dm9000_release_board(pdev, dm);
- free_netdev(ndev); /* free device structure */
if (dm->power_supply)
regulator_disable(dm->power_supply);
+ free_netdev(ndev); /* free device structure */
+
dev_dbg(&pdev->dev, "released and freed device\n");
}
diff --git a/drivers/net/ethernet/dec/tulip/21142.c b/drivers/net/ethernet/dec/tulip/21142.c
index 369858272650..76767dec216d 100644
--- a/drivers/net/ethernet/dec/tulip/21142.c
+++ b/drivers/net/ethernet/dec/tulip/21142.c
@@ -216,7 +216,7 @@ void t21142_lnk_change(struct net_device *dev, int csr5)
(csr12 & 2) == 2) ||
(tp->nway && (csr5 & (TPLnkFail)))) {
/* Link blew? Maybe restart NWay. */
- del_timer_sync(&tp->timer);
+ timer_delete_sync(&tp->timer);
t21142_start_nway(dev);
tp->timer.expires = RUN_AT(3*HZ);
add_timer(&tp->timer);
@@ -226,7 +226,7 @@ void t21142_lnk_change(struct net_device *dev, int csr5)
medianame[dev->if_port],
(csr12 & 2) ? "failed" : "good");
if ((csr12 & 2) && ! tp->medialock) {
- del_timer_sync(&tp->timer);
+ timer_delete_sync(&tp->timer);
t21142_start_nway(dev);
tp->timer.expires = RUN_AT(3*HZ);
add_timer(&tp->timer);
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c
index 0a161a4db242..f9504f340c4a 100644
--- a/drivers/net/ethernet/dec/tulip/de2104x.c
+++ b/drivers/net/ethernet/dec/tulip/de2104x.c
@@ -963,7 +963,7 @@ static void de_next_media (struct de_private *de, const u32 *media,
static void de21040_media_timer (struct timer_list *t)
{
- struct de_private *de = from_timer(de, t, media_timer);
+ struct de_private *de = timer_container_of(de, t, media_timer);
struct net_device *dev = de->dev;
u32 status = dr32(SIAStatus);
unsigned int carrier;
@@ -1044,7 +1044,7 @@ static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media)
static void de21041_media_timer (struct timer_list *t)
{
- struct de_private *de = from_timer(de, t, media_timer);
+ struct de_private *de = timer_container_of(de, t, media_timer);
struct net_device *dev = de->dev;
u32 status = dr32(SIAStatus);
unsigned int carrier;
@@ -1428,7 +1428,7 @@ static int de_close (struct net_device *dev)
netif_dbg(de, ifdown, dev, "disabling interface\n");
- del_timer_sync(&de->media_timer);
+ timer_delete_sync(&de->media_timer);
spin_lock_irqsave(&de->lock, flags);
de_stop_hw(de);
@@ -1452,7 +1452,7 @@ static void de_tx_timeout (struct net_device *dev, unsigned int txqueue)
dr32(MacStatus), dr32(MacMode), dr32(SIAStatus),
de->rx_tail, de->tx_head, de->tx_tail);
- del_timer_sync(&de->media_timer);
+ timer_delete_sync(&de->media_timer);
disable_irq(irq);
spin_lock_irq(&de->lock);
@@ -2126,7 +2126,7 @@ static int __maybe_unused de_suspend(struct device *dev_d)
if (netif_running (dev)) {
const int irq = pdev->irq;
- del_timer_sync(&de->media_timer);
+ timer_delete_sync(&de->media_timer);
disable_irq(irq);
spin_lock_irq(&de->lock);
diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c
index 3188ba7b450f..2d3bd343b6e6 100644
--- a/drivers/net/ethernet/dec/tulip/dmfe.c
+++ b/drivers/net/ethernet/dec/tulip/dmfe.c
@@ -745,7 +745,7 @@ static int dmfe_stop(struct net_device *dev)
netif_stop_queue(dev);
/* deleted timer */
- del_timer_sync(&db->timer);
+ timer_delete_sync(&db->timer);
/* Reset & stop DM910X board */
dw32(DCR0, DM910X_RESET);
@@ -1115,7 +1115,7 @@ static const struct ethtool_ops netdev_ethtool_ops = {
static void dmfe_timer(struct timer_list *t)
{
- struct dmfe_board_info *db = from_timer(db, t, timer);
+ struct dmfe_board_info *db = timer_container_of(db, t, timer);
struct net_device *dev = pci_get_drvdata(db->pdev);
void __iomem *ioaddr = db->ioaddr;
u32 tmp_cr8;
diff --git a/drivers/net/ethernet/dec/tulip/interrupt.c b/drivers/net/ethernet/dec/tulip/interrupt.c
index 54560f9a1651..0a12cb9b3ba7 100644
--- a/drivers/net/ethernet/dec/tulip/interrupt.c
+++ b/drivers/net/ethernet/dec/tulip/interrupt.c
@@ -104,7 +104,7 @@ int tulip_refill_rx(struct net_device *dev)
void oom_timer(struct timer_list *t)
{
- struct tulip_private *tp = from_timer(tp, t, oom_timer);
+ struct tulip_private *tp = timer_container_of(tp, t, oom_timer);
napi_schedule(&tp->napi);
}
@@ -699,8 +699,8 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
tulip_start_rxtx(tp);
}
/*
- * NB: t21142_lnk_change() does a del_timer_sync(), so be careful if this
- * call is ever done under the spinlock
+ * NB: t21142_lnk_change() does a timer_delete_sync(), so be careful
+ * if this call is ever done under the spinlock
*/
if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) {
if (tp->link_change)
diff --git a/drivers/net/ethernet/dec/tulip/pnic.c b/drivers/net/ethernet/dec/tulip/pnic.c
index 653bde48ef44..1de5ed967070 100644
--- a/drivers/net/ethernet/dec/tulip/pnic.c
+++ b/drivers/net/ethernet/dec/tulip/pnic.c
@@ -86,7 +86,7 @@ void pnic_lnk_change(struct net_device *dev, int csr5)
void pnic_timer(struct timer_list *t)
{
- struct tulip_private *tp = from_timer(tp, t, timer);
+ struct tulip_private *tp = timer_container_of(tp, t, timer);
struct net_device *dev = tp->dev;
void __iomem *ioaddr = tp->base_addr;
int next_tick = 60*HZ;
diff --git a/drivers/net/ethernet/dec/tulip/pnic2.c b/drivers/net/ethernet/dec/tulip/pnic2.c
index 72a09156b48b..39c410bf224e 100644
--- a/drivers/net/ethernet/dec/tulip/pnic2.c
+++ b/drivers/net/ethernet/dec/tulip/pnic2.c
@@ -78,7 +78,7 @@
void pnic2_timer(struct timer_list *t)
{
- struct tulip_private *tp = from_timer(tp, t, timer);
+ struct tulip_private *tp = timer_container_of(tp, t, timer);
struct net_device *dev = tp->dev;
void __iomem *ioaddr = tp->base_addr;
int next_tick = 60*HZ;
@@ -323,7 +323,7 @@ void pnic2_lnk_change(struct net_device *dev, int csr5)
if (tulip_debug > 2)
netdev_dbg(dev, "Ugh! Link blew?\n");
- del_timer_sync(&tp->timer);
+ timer_delete_sync(&tp->timer);
pnic2_start_nway(dev);
tp->timer.expires = RUN_AT(3*HZ);
add_timer(&tp->timer);
@@ -348,7 +348,7 @@ void pnic2_lnk_change(struct net_device *dev, int csr5)
/* if failed then try doing an nway to get in sync */
if ((csr12 & 2) && ! tp->medialock) {
- del_timer_sync(&tp->timer);
+ timer_delete_sync(&tp->timer);
pnic2_start_nway(dev);
tp->timer.expires = RUN_AT(3*HZ);
add_timer(&tp->timer);
@@ -372,7 +372,7 @@ void pnic2_lnk_change(struct net_device *dev, int csr5)
/* if failed, try doing an nway to get in sync */
if ((csr12 & 4) && ! tp->medialock) {
- del_timer_sync(&tp->timer);
+ timer_delete_sync(&tp->timer);
pnic2_start_nway(dev);
tp->timer.expires = RUN_AT(3*HZ);
add_timer(&tp->timer);
diff --git a/drivers/net/ethernet/dec/tulip/timer.c b/drivers/net/ethernet/dec/tulip/timer.c
index 642e9dfc5451..ca0c509b601c 100644
--- a/drivers/net/ethernet/dec/tulip/timer.c
+++ b/drivers/net/ethernet/dec/tulip/timer.c
@@ -139,7 +139,7 @@ void tulip_media_task(struct work_struct *work)
void mxic_timer(struct timer_list *t)
{
- struct tulip_private *tp = from_timer(tp, t, timer);
+ struct tulip_private *tp = timer_container_of(tp, t, timer);
struct net_device *dev = tp->dev;
void __iomem *ioaddr = tp->base_addr;
int next_tick = 60*HZ;
@@ -156,7 +156,7 @@ void mxic_timer(struct timer_list *t)
void comet_timer(struct timer_list *t)
{
- struct tulip_private *tp = from_timer(tp, t, timer);
+ struct tulip_private *tp = timer_container_of(tp, t, timer);
struct net_device *dev = tp->dev;
int next_tick = 2*HZ;
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index 27e01d780cd0..5b7e6eb080f3 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -114,7 +114,7 @@ int tulip_debug = 1;
static void tulip_timer(struct timer_list *t)
{
- struct tulip_private *tp = from_timer(tp, t, timer);
+ struct tulip_private *tp = timer_container_of(tp, t, timer);
struct net_device *dev = tp->dev;
if (netif_running(dev))
@@ -747,9 +747,9 @@ static void tulip_down (struct net_device *dev)
napi_disable(&tp->napi);
#endif
- del_timer_sync (&tp->timer);
+ timer_delete_sync(&tp->timer);
#ifdef CONFIG_TULIP_NAPI
- del_timer_sync (&tp->oom_timer);
+ timer_delete_sync(&tp->oom_timer);
#endif
spin_lock_irqsave (&tp->lock, flags);
@@ -1177,7 +1177,6 @@ static void set_rx_mode(struct net_device *dev)
iowrite32(csr6, ioaddr + CSR6);
}
-#ifdef CONFIG_TULIP_MWI
static void tulip_mwi_config(struct pci_dev *pdev, struct net_device *dev)
{
struct tulip_private *tp = netdev_priv(dev);
@@ -1251,7 +1250,6 @@ out:
netdev_dbg(dev, "MWI config cacheline=%d, csr0=%08x\n",
cache, csr0);
}
-#endif
/*
* Chips that have the MRM/reserved bit quirk and the burst quirk. That
@@ -1413,7 +1411,7 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* grab all resources from both PIO and MMIO regions, as we
* don't want anyone else messing around with our hardware */
- if (pci_request_regions(pdev, DRV_NAME))
+ if (pcim_request_all_regions(pdev, DRV_NAME))
return -ENODEV;
ioaddr = pcim_iomap(pdev, TULIP_BAR, tulip_tbl[chip_idx].io_size);
@@ -1463,10 +1461,9 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task);
-#ifdef CONFIG_TULIP_MWI
- if (!force_csr0 && (tp->flags & HAS_PCI_MWI))
+ if (IS_ENABLED(CONFIG_TULIP_MWI) && !force_csr0 &&
+ (tp->flags & HAS_PCI_MWI))
tulip_mwi_config (pdev, dev);
-#endif
/* Stop the chip's Tx and Rx processes. */
tulip_stop_rxtx(tp);
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index ff080ab0f116..6e4d8d31aba9 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -656,7 +656,7 @@ static int uli526x_stop(struct net_device *dev)
netif_stop_queue(dev);
/* deleted timer */
- del_timer_sync(&db->timer);
+ timer_delete_sync(&db->timer);
/* Reset & stop ULI526X board */
uw32(DCR0, ULI526X_RESET);
@@ -1014,7 +1014,7 @@ static const struct ethtool_ops netdev_ethtool_ops = {
static void uli526x_timer(struct timer_list *t)
{
- struct uli526x_board_info *db = from_timer(db, t, timer);
+ struct uli526x_board_info *db = timer_container_of(db, t, timer);
struct net_device *dev = pci_get_drvdata(db->pdev);
struct uli_phy_ops *phy = &db->phy;
void __iomem *ioaddr = db->ioaddr;
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index 37fba39c0056..a24a25a5f73d 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -375,7 +375,7 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
return -ENOMEM;
SET_NETDEV_DEV(dev, &pdev->dev);
- if (pci_request_regions(pdev, DRV_NAME))
+ if (pcim_request_all_regions(pdev, DRV_NAME))
goto err_out_netdev;
ioaddr = pci_iomap(pdev, TULIP_BAR, netdev_res_size);
@@ -763,7 +763,7 @@ static inline void update_csr6(struct net_device *dev, int new)
static void netdev_timer(struct timer_list *t)
{
- struct netdev_private *np = from_timer(np, t, timer);
+ struct netdev_private *np = timer_container_of(np, t, timer);
struct net_device *dev = pci_get_drvdata(np->pci_dev);
void __iomem *ioaddr = np->base_addr;
@@ -1509,7 +1509,7 @@ static int netdev_close(struct net_device *dev)
}
#endif /* __i386__ debugging only */
- del_timer_sync(&np->timer);
+ timer_delete_sync(&np->timer);
free_rxtx_rings(np);
free_ringdesc(np);
@@ -1560,7 +1560,7 @@ static int __maybe_unused w840_suspend(struct device *dev_d)
rtnl_lock();
if (netif_running (dev)) {
- del_timer_sync(&np->timer);
+ timer_delete_sync(&np->timer);
spin_lock_irq(&np->lock);
netif_device_detach(dev);
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index d0ea92607870..da9b7715df05 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -146,6 +146,8 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
np->ioaddr = ioaddr;
np->chip_id = chip_idx;
np->pdev = pdev;
+
+ spin_lock_init(&np->stats_lock);
spin_lock_init (&np->tx_lock);
spin_lock_init (&np->rx_lock);
@@ -352,7 +354,7 @@ parse_eeprom (struct net_device *dev)
eth_hw_addr_set(dev, psrom->mac_addr);
if (np->chip_id == CHIP_IP1000A) {
- np->led_mode = psrom->led_mode;
+ np->led_mode = le16_to_cpu(psrom->led_mode);
return 0;
}
@@ -648,7 +650,7 @@ static int rio_open(struct net_device *dev)
static void
rio_timer (struct timer_list *t)
{
- struct netdev_private *np = from_timer(np, t, timer);
+ struct netdev_private *np = timer_container_of(np, t, timer);
struct net_device *dev = pci_get_drvdata(np->pdev);
unsigned int entry;
int next_tick = 1*HZ;
@@ -865,7 +867,6 @@ tx_error (struct net_device *dev, int tx_status)
frame_id = (tx_status & 0xffff0000);
printk (KERN_ERR "%s: Transmit error, TxStatus %4.4x, FrameId %d.\n",
dev->name, tx_status, frame_id);
- dev->stats.tx_errors++;
	/* Transmit Underrun */
if (tx_status & 0x10) {
dev->stats.tx_fifo_errors++;
@@ -902,9 +903,15 @@ tx_error (struct net_device *dev, int tx_status)
rio_set_led_mode(dev);
/* Let TxStartThresh stay default value */
}
+
+ spin_lock(&np->stats_lock);
/* Maximum Collisions */
if (tx_status & 0x08)
dev->stats.collisions++;
+
+ dev->stats.tx_errors++;
+ spin_unlock(&np->stats_lock);
+
/* Restart the Tx */
dw32(MACCtrl, dr16(MACCtrl) | TxEnable);
}
@@ -1073,7 +1080,9 @@ get_stats (struct net_device *dev)
int i;
#endif
unsigned int stat_reg;
+ unsigned long flags;
+ spin_lock_irqsave(&np->stats_lock, flags);
/* All statistics registers need to be acknowledged,
else statistic overflow could cause problems */
@@ -1123,6 +1132,9 @@ get_stats (struct net_device *dev)
dr16(TCPCheckSumErrors);
dr16(UDPCheckSumErrors);
dr16(IPCheckSumErrors);
+
+ spin_unlock_irqrestore(&np->stats_lock, flags);
+
return &dev->stats;
}
@@ -1778,7 +1790,7 @@ rio_close (struct net_device *dev)
rio_hw_stop(dev);
free_irq(pdev->irq, dev);
- del_timer_sync (&np->timer);
+ timer_delete_sync(&np->timer);
free_list(dev);
@@ -1818,7 +1830,7 @@ static int rio_suspend(struct device *device)
return 0;
netif_device_detach(dev);
- del_timer_sync(&np->timer);
+ timer_delete_sync(&np->timer);
rio_hw_stop(dev);
return 0;
diff --git a/drivers/net/ethernet/dlink/dl2k.h b/drivers/net/ethernet/dlink/dl2k.h
index 195dc6cfd895..ba679025e866 100644
--- a/drivers/net/ethernet/dlink/dl2k.h
+++ b/drivers/net/ethernet/dlink/dl2k.h
@@ -329,18 +329,18 @@ enum _pcs_anlpar {
};
typedef struct t_SROM {
- u16 config_param; /* 0x00 */
- u16 asic_ctrl; /* 0x02 */
- u16 sub_vendor_id; /* 0x04 */
- u16 sub_system_id; /* 0x06 */
- u16 pci_base_1; /* 0x08 (IP1000A only) */
- u16 pci_base_2; /* 0x0a (IP1000A only) */
- u16 led_mode; /* 0x0c (IP1000A only) */
- u16 reserved1[9]; /* 0x0e-0x1f */
+ __le16 config_param; /* 0x00 */
+ __le16 asic_ctrl; /* 0x02 */
+ __le16 sub_vendor_id; /* 0x04 */
+ __le16 sub_system_id; /* 0x06 */
+ __le16 pci_base_1; /* 0x08 (IP1000A only) */
+ __le16 pci_base_2; /* 0x0a (IP1000A only) */
+ __le16 led_mode; /* 0x0c (IP1000A only) */
+ __le16 reserved1[9]; /* 0x0e-0x1f */
u8 mac_addr[6]; /* 0x20-0x25 */
u8 reserved2[10]; /* 0x26-0x2f */
u8 sib[204]; /* 0x30-0xfb */
- u32 crc; /* 0xfc-0xff */
+ __le32 crc; /* 0xfc-0xff */
} SROM_t, *PSROM_t;
/* Ioctl custom data */
@@ -372,6 +372,8 @@ struct netdev_private {
struct pci_dev *pdev;
void __iomem *ioaddr;
void __iomem *eeprom_addr;
+ // To ensure synchronization when stats are updated.
+ spinlock_t stats_lock;
spinlock_t tx_lock;
spinlock_t rx_lock;
unsigned int rx_buf_sz; /* Based on MTU+slack. */
diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c
index 44af1d13d931..67275aa4f65b 100644
--- a/drivers/net/ethernet/ec_bhf.c
+++ b/drivers/net/ethernet/ec_bhf.c
@@ -416,8 +416,7 @@ static int ec_bhf_open(struct net_device *net_dev)
netif_start_queue(net_dev);
- hrtimer_init(&priv->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- priv->hrtimer.function = ec_bhf_timer_fun;
+ hrtimer_setup(&priv->hrtimer, ec_bhf_timer_fun, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
hrtimer_start(&priv->hrtimer, polling_frequency, HRTIMER_MODE_REL);
return 0;
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index e48b861e4ce1..270ff9aab335 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -562,7 +562,7 @@ struct be_adapter {
struct be_dma_mem mbox_mem_alloced;
struct be_mcc_obj mcc_obj;
- struct mutex mcc_lock; /* For serializing mcc cmds to BE card */
+ spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */
spinlock_t mcc_cq_lock;
u16 cfg_num_rx_irqs; /* configured via set-channels */
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 61adcebeef01..d730af4a50c7 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -575,7 +575,7 @@ int be_process_mcc(struct be_adapter *adapter)
/* Wait till no more pending mcc requests are present */
static int be_mcc_wait_compl(struct be_adapter *adapter)
{
-#define mcc_timeout 12000 /* 12s timeout */
+#define mcc_timeout 120000 /* 12s timeout */
int i, status = 0;
struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
@@ -589,7 +589,7 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
if (atomic_read(&mcc_obj->q.used) == 0)
break;
- usleep_range(500, 1000);
+ udelay(100);
}
if (i == mcc_timeout) {
dev_err(&adapter->pdev->dev, "FW not responding\n");
@@ -866,7 +866,7 @@ static bool use_mcc(struct be_adapter *adapter)
static int be_cmd_lock(struct be_adapter *adapter)
{
if (use_mcc(adapter)) {
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
return 0;
} else {
return mutex_lock_interruptible(&adapter->mbox_lock);
@@ -877,7 +877,7 @@ static int be_cmd_lock(struct be_adapter *adapter)
static void be_cmd_unlock(struct be_adapter *adapter)
{
if (use_mcc(adapter))
- return mutex_unlock(&adapter->mcc_lock);
+ return spin_unlock_bh(&adapter->mcc_lock);
else
return mutex_unlock(&adapter->mbox_lock);
}
@@ -1047,7 +1047,7 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
struct be_cmd_req_mac_query *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1076,7 +1076,7 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1088,7 +1088,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, const u8 *mac_addr,
struct be_cmd_req_pmac_add *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1113,7 +1113,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, const u8 *mac_addr,
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
if (base_status(status) == MCC_STATUS_UNAUTHORIZED_REQUEST)
status = -EPERM;
@@ -1131,7 +1131,7 @@ int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
if (pmac_id == -1)
return 0;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1151,7 +1151,7 @@ int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1414,7 +1414,7 @@ int be_cmd_rxq_create(struct be_adapter *adapter,
struct be_dma_mem *q_mem = &rxq->dma_mem;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1444,7 +1444,7 @@ int be_cmd_rxq_create(struct be_adapter *adapter,
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1508,7 +1508,7 @@ int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
struct be_cmd_req_q_destroy *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1525,7 +1525,7 @@ int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
q->created = false;
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1593,7 +1593,7 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
struct be_cmd_req_hdr *hdr;
int status = 0;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1609,7 +1609,7 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
/* only BE2 does not support version 1 of the cmd */
if (BE2_chip(adapter))
hdr->version = 0;
- if (BE3_chip(adapter) || lancer_chip(adapter))
+ else if (BE3_chip(adapter) || lancer_chip(adapter))
hdr->version = 1;
else
hdr->version = 2;
@@ -1621,7 +1621,7 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
adapter->stats_cmd_sent = true;
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1637,7 +1637,7 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
CMD_SUBSYSTEM_ETH))
return -EPERM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1660,7 +1660,7 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
adapter->stats_cmd_sent = true;
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1697,7 +1697,7 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
struct be_cmd_req_link_status *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
if (link_status)
*link_status = LINK_DOWN;
@@ -1736,7 +1736,7 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1747,7 +1747,7 @@ int be_cmd_get_die_temperature(struct be_adapter *adapter)
struct be_cmd_req_get_cntl_addnl_attribs *req;
int status = 0;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1762,7 +1762,7 @@ int be_cmd_get_die_temperature(struct be_adapter *adapter)
status = be_mcc_notify(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1811,7 +1811,7 @@ int be_cmd_get_fat_dump(struct be_adapter *adapter, u32 buf_len, void *buf)
if (!get_fat_cmd.va)
return -ENOMEM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
while (total_size) {
buf_size = min(total_size, (u32)60 * 1024);
@@ -1849,9 +1849,9 @@ int be_cmd_get_fat_dump(struct be_adapter *adapter, u32 buf_len, void *buf)
log_offset += buf_size;
}
err:
+ spin_unlock_bh(&adapter->mcc_lock);
dma_free_coherent(&adapter->pdev->dev, get_fat_cmd.size,
get_fat_cmd.va, get_fat_cmd.dma);
- mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -1862,7 +1862,7 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter)
struct be_cmd_req_get_fw_version *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1885,7 +1885,7 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter)
sizeof(adapter->fw_on_flash));
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1899,7 +1899,7 @@ static int __be_cmd_modify_eqd(struct be_adapter *adapter,
struct be_cmd_req_modify_eq_delay *req;
int status = 0, i;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1922,7 +1922,7 @@ static int __be_cmd_modify_eqd(struct be_adapter *adapter,
status = be_mcc_notify(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1949,7 +1949,7 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
struct be_cmd_req_vlan_config *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1971,7 +1971,7 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1982,7 +1982,7 @@ static int __be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
struct be_cmd_req_rx_filter *req = mem->va;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2015,7 +2015,7 @@ static int __be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2046,7 +2046,7 @@ int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
CMD_SUBSYSTEM_COMMON))
return -EPERM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2066,7 +2066,7 @@ int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
if (base_status(status) == MCC_STATUS_FEATURE_NOT_SUPPORTED)
return -EOPNOTSUPP;
@@ -2085,7 +2085,7 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
CMD_SUBSYSTEM_COMMON))
return -EPERM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2108,7 +2108,7 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2189,7 +2189,7 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
return 0;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2214,7 +2214,7 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2226,7 +2226,7 @@ int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
struct be_cmd_req_enable_disable_beacon *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2247,7 +2247,7 @@ int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2258,7 +2258,7 @@ int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
struct be_cmd_req_get_beacon_state *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2282,7 +2282,7 @@ int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2306,7 +2306,7 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
return -ENOMEM;
}
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2328,7 +2328,7 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
memcpy(data, resp->page_data + off, len);
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
return status;
}
@@ -2345,7 +2345,7 @@ static int lancer_cmd_write_object(struct be_adapter *adapter,
void *ctxt = NULL;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
adapter->flash_status = 0;
wrb = wrb_from_mccq(adapter);
@@ -2387,7 +2387,7 @@ static int lancer_cmd_write_object(struct be_adapter *adapter,
if (status)
goto err_unlock;
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
msecs_to_jiffies(60000)))
@@ -2406,7 +2406,7 @@ static int lancer_cmd_write_object(struct be_adapter *adapter,
return status;
err_unlock:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2460,7 +2460,7 @@ static int lancer_cmd_delete_object(struct be_adapter *adapter,
struct be_mcc_wrb *wrb;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2478,7 +2478,7 @@ static int lancer_cmd_delete_object(struct be_adapter *adapter,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2491,7 +2491,7 @@ int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
struct lancer_cmd_resp_read_object *resp;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2525,7 +2525,7 @@ int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
}
err_unlock:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2537,7 +2537,7 @@ static int be_cmd_write_flashrom(struct be_adapter *adapter,
struct be_cmd_write_flashrom *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
adapter->flash_status = 0;
wrb = wrb_from_mccq(adapter);
@@ -2562,7 +2562,7 @@ static int be_cmd_write_flashrom(struct be_adapter *adapter,
if (status)
goto err_unlock;
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
msecs_to_jiffies(40000)))
@@ -2573,7 +2573,7 @@ static int be_cmd_write_flashrom(struct be_adapter *adapter,
return status;
err_unlock:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2584,7 +2584,7 @@ static int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
struct be_mcc_wrb *wrb;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2611,11 +2611,15 @@ static int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
memcpy(flashed_crc, req->crc, 4);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
-static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
+/*
+ * Since the cookie is text, add a parsing-skipped space to keep it from
+ * ever being matched on storage holding this source file.
+ */
+static const char flash_cookie[32] __nonstring = "*** SE FLAS" "H DIRECTORY *** ";
static bool phy_flashing_required(struct be_adapter *adapter)
{
@@ -3217,7 +3221,7 @@ int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
struct be_cmd_req_acpi_wol_magic_config *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3234,7 +3238,7 @@ int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3249,7 +3253,7 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
CMD_SUBSYSTEM_LOWLEVEL))
return -EPERM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3272,7 +3276,7 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
if (status)
goto err_unlock;
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
msecs_to_jiffies(SET_LB_MODE_TIMEOUT)))
@@ -3281,7 +3285,7 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
return status;
err_unlock:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3298,7 +3302,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
CMD_SUBSYSTEM_LOWLEVEL))
return -EPERM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3324,7 +3328,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
if (status)
goto err;
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
wait_for_completion(&adapter->et_cmd_compl);
resp = embedded_payload(wrb);
@@ -3332,7 +3336,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
return status;
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3348,7 +3352,7 @@ int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
CMD_SUBSYSTEM_LOWLEVEL))
return -EPERM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3382,7 +3386,7 @@ int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3393,7 +3397,7 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter,
struct be_cmd_req_seeprom_read *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3409,7 +3413,7 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3424,7 +3428,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
CMD_SUBSYSTEM_COMMON))
return -EPERM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3469,7 +3473,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
}
dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3479,7 +3483,7 @@ static int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
struct be_cmd_req_set_qos *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3499,7 +3503,7 @@ static int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3611,7 +3615,7 @@ int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
struct be_cmd_req_get_fn_privileges *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3643,7 +3647,7 @@ int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3655,7 +3659,7 @@ int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges,
struct be_cmd_req_set_fn_privileges *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3675,7 +3679,7 @@ int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3707,7 +3711,7 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
return -ENOMEM;
}
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3771,7 +3775,7 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
}
out:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
dma_free_coherent(&adapter->pdev->dev, get_mac_list_cmd.size,
get_mac_list_cmd.va, get_mac_list_cmd.dma);
return status;
@@ -3831,7 +3835,7 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
if (!cmd.va)
return -ENOMEM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3853,7 +3857,7 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
err:
dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3889,7 +3893,7 @@ int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
CMD_SUBSYSTEM_COMMON))
return -EPERM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3930,7 +3934,7 @@ int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3944,7 +3948,7 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
int status;
u16 vid;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3991,7 +3995,7 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -4190,7 +4194,7 @@ int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
struct be_cmd_req_set_ext_fat_caps *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -4206,7 +4210,7 @@ int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -4684,7 +4688,7 @@ int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op)
if (iface == 0xFFFFFFFF)
return -1;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -4701,7 +4705,7 @@ int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op)
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -4735,7 +4739,7 @@ int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
struct be_cmd_resp_get_iface_list *resp;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -4756,7 +4760,7 @@ int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -4850,7 +4854,7 @@ int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
if (BEx_chip(adapter))
return 0;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -4868,7 +4872,7 @@ int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
req->enable = 1;
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -4941,7 +4945,7 @@ __be_cmd_set_logical_link_config(struct be_adapter *adapter,
u32 link_config = 0;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -4969,7 +4973,7 @@ __be_cmd_set_logical_link_config(struct be_adapter *adapter,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -5000,8 +5004,7 @@ int be_cmd_set_features(struct be_adapter *adapter)
struct be_mcc_wrb *wrb;
int status;
- if (mutex_lock_interruptible(&adapter->mcc_lock))
- return -1;
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -5039,7 +5042,7 @@ err:
dev_info(&adapter->pdev->dev,
"Adapter does not support HW error recovery\n");
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -5053,7 +5056,7 @@ int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
struct be_cmd_resp_hdr *resp;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -5076,7 +5079,7 @@ int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
memcpy(wrb_payload, resp, sizeof(*resp) + resp->response_length);
be_dws_le_to_cpu(wrb_payload, sizeof(*resp) + resp->response_length);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
EXPORT_SYMBOL(be_roce_mcc_cmd);
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index d70818f06be7..5e2d3ddb5d43 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -1415,7 +1415,7 @@ struct flash_section_entry {
} __packed;
struct flash_section_info {
- u8 cookie[32];
+ u8 cookie[32] __nonstring;
struct flash_section_hdr fsec_hdr;
struct flash_section_entry fsec_entry[32];
} __packed;
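The cookie[] member above relies on the __nonstring attribute; a short hedged example of what the annotation conveys (the magic[] array is illustrative, not from this driver):

#include <linux/compiler.h>

/* __nonstring marks a char array as a raw byte blob rather than a
 * NUL-terminated C string, so an initializer that exactly fills the
 * array (leaving no room for a trailing '\0') does not trigger
 * -Wunterminated-string-initialization on newer compilers.
 */
static const char magic[4] __nonstring = "ABCD";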
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 875fe379eea2..3d2e21592119 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -5667,8 +5667,8 @@ static int be_drv_init(struct be_adapter *adapter)
}
mutex_init(&adapter->mbox_lock);
- mutex_init(&adapter->mcc_lock);
mutex_init(&adapter->rx_filter_lock);
+ spin_lock_init(&adapter->mcc_lock);
spin_lock_init(&adapter->mcc_cq_lock);
init_completion(&adapter->et_cmd_compl);
diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
index 95a5295d0361..eba73246f986 100644
--- a/drivers/net/ethernet/engleder/tsnep_main.c
+++ b/drivers/net/ethernet/engleder/tsnep_main.c
@@ -67,6 +67,8 @@
#define TSNEP_TX_TYPE_XDP_NDO_MAP_PAGE (TSNEP_TX_TYPE_XDP_NDO | TSNEP_TX_TYPE_MAP_PAGE)
#define TSNEP_TX_TYPE_XDP (TSNEP_TX_TYPE_XDP_TX | TSNEP_TX_TYPE_XDP_NDO)
#define TSNEP_TX_TYPE_XSK BIT(12)
+#define TSNEP_TX_TYPE_TSTAMP BIT(13)
+#define TSNEP_TX_TYPE_SKB_TSTAMP (TSNEP_TX_TYPE_SKB | TSNEP_TX_TYPE_TSTAMP)
#define TSNEP_XDP_TX BIT(0)
#define TSNEP_XDP_REDIRECT BIT(1)
@@ -221,20 +223,19 @@ static void tsnep_phy_link_status_change(struct net_device *netdev)
static int tsnep_phy_loopback(struct tsnep_adapter *adapter, bool enable)
{
- int retval;
-
- retval = phy_loopback(adapter->phydev, enable);
+ int speed;
- /* PHY link state change is not signaled if loopback is enabled, it
- * would delay a working loopback anyway, let's ensure that loopback
- * is working immediately by setting link mode directly
- */
- if (!retval && enable) {
- netif_carrier_on(adapter->netdev);
- tsnep_set_link_mode(adapter);
+ if (enable) {
+ if (adapter->phydev->autoneg == AUTONEG_DISABLE &&
+ adapter->phydev->speed == SPEED_100)
+ speed = SPEED_100;
+ else
+ speed = SPEED_1000;
+ } else {
+ speed = 0;
}
- return retval;
+ return phy_loopback(adapter->phydev, enable, speed);
}
static int tsnep_phy_open(struct tsnep_adapter *adapter)
@@ -387,8 +388,7 @@ static void tsnep_tx_activate(struct tsnep_tx *tx, int index, int length,
if (entry->skb) {
entry->properties = length & TSNEP_DESC_LENGTH_MASK;
entry->properties |= TSNEP_DESC_INTERRUPT_FLAG;
- if ((entry->type & TSNEP_TX_TYPE_SKB) &&
- (skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS))
+ if ((entry->type & TSNEP_TX_TYPE_SKB_TSTAMP) == TSNEP_TX_TYPE_SKB_TSTAMP)
entry->properties |= TSNEP_DESC_EXTENDED_WRITEBACK_FLAG;
/* toggle user flag to prevent false acknowledge
@@ -480,7 +480,8 @@ static int tsnep_tx_map_frag(skb_frag_t *frag, struct tsnep_tx_entry *entry,
return mapped;
}
-static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count)
+static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count,
+ bool do_tstamp)
{
struct device *dmadev = tx->adapter->dmadev;
struct tsnep_tx_entry *entry;
@@ -506,6 +507,9 @@ static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count)
entry->type = TSNEP_TX_TYPE_SKB_INLINE;
mapped = 0;
}
+
+ if (do_tstamp)
+ entry->type |= TSNEP_TX_TYPE_TSTAMP;
} else {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
@@ -559,11 +563,12 @@ static int tsnep_tx_unmap(struct tsnep_tx *tx, int index, int count)
static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
struct tsnep_tx *tx)
{
- int count = 1;
struct tsnep_tx_entry *entry;
+ bool do_tstamp = false;
+ int count = 1;
int length;
- int i;
int retval;
+ int i;
if (skb_shinfo(skb)->nr_frags > 0)
count += skb_shinfo(skb)->nr_frags;
@@ -580,7 +585,13 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
entry = &tx->entry[tx->write];
entry->skb = skb;
- retval = tsnep_tx_map(skb, tx, count);
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ tx->adapter->hwtstamp_config.tx_type == HWTSTAMP_TX_ON) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ do_tstamp = true;
+ }
+
+ retval = tsnep_tx_map(skb, tx, count, do_tstamp);
if (retval < 0) {
tsnep_tx_unmap(tx, tx->write, count);
dev_kfree_skb_any(entry->skb);
@@ -592,9 +603,6 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
}
length = retval;
- if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
- skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
-
for (i = 0; i < count; i++)
tsnep_tx_activate(tx, (tx->write + i) & TSNEP_RING_MASK, length,
i == count - 1);
@@ -845,15 +853,14 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
length = tsnep_tx_unmap(tx, tx->read, count);
- if ((entry->type & TSNEP_TX_TYPE_SKB) &&
- (skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS) &&
+ if (((entry->type & TSNEP_TX_TYPE_SKB_TSTAMP) == TSNEP_TX_TYPE_SKB_TSTAMP) &&
(__le32_to_cpu(entry->desc_wb->properties) &
TSNEP_DESC_EXTENDED_WRITEBACK_FLAG)) {
struct skb_shared_hwtstamps hwtstamps;
u64 timestamp;
- if (skb_shinfo(entry->skb)->tx_flags &
- SKBTX_HW_TSTAMP_USE_CYCLES)
+ if (entry->skb->sk &&
+ READ_ONCE(entry->skb->sk->sk_tsflags) & SOF_TIMESTAMPING_BIND_PHC)
timestamp =
__le64_to_cpu(entry->desc_wb->counter);
else
@@ -1966,23 +1973,41 @@ failed:
static void tsnep_queue_enable(struct tsnep_queue *queue)
{
+ struct tsnep_adapter *adapter = queue->adapter;
+
+ netif_napi_set_irq(&queue->napi, queue->irq);
napi_enable(&queue->napi);
- tsnep_enable_irq(queue->adapter, queue->irq_mask);
+ tsnep_enable_irq(adapter, queue->irq_mask);
- if (queue->tx)
+ if (queue->tx) {
+ netif_queue_set_napi(adapter->netdev, queue->tx->queue_index,
+ NETDEV_QUEUE_TYPE_TX, &queue->napi);
tsnep_tx_enable(queue->tx);
+ }
- if (queue->rx)
+ if (queue->rx) {
+ netif_queue_set_napi(adapter->netdev, queue->rx->queue_index,
+ NETDEV_QUEUE_TYPE_RX, &queue->napi);
tsnep_rx_enable(queue->rx);
+ }
}
static void tsnep_queue_disable(struct tsnep_queue *queue)
{
- if (queue->tx)
+ struct tsnep_adapter *adapter = queue->adapter;
+
+ if (queue->rx)
+ netif_queue_set_napi(adapter->netdev, queue->rx->queue_index,
+ NETDEV_QUEUE_TYPE_RX, NULL);
+
+ if (queue->tx) {
tsnep_tx_disable(queue->tx, &queue->napi);
+ netif_queue_set_napi(adapter->netdev, queue->tx->queue_index,
+ NETDEV_QUEUE_TYPE_TX, NULL);
+ }
napi_disable(&queue->napi);
- tsnep_disable_irq(queue->adapter, queue->irq_mask);
+ tsnep_disable_irq(adapter, queue->irq_mask);
/* disable RX after NAPI polling has been disabled, because RX can be
* enabled during NAPI polling
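tsnep_queue_enable()/tsnep_queue_disable() now publish the queue-to-NAPI mapping and the NAPI IRQ to the core. A hedged sketch of the underlying API (queue indices, IRQ and napi pointer come from the driver; foo_* names are illustrative):

#include <linux/netdevice.h>

static void foo_attach_napi(struct net_device *ndev, unsigned int qidx,
			    struct napi_struct *napi, int irq)
{
	/* let the stack report IRQ and queue<->NAPI mapping via netlink */
	netif_napi_set_irq(napi, irq);
	netif_queue_set_napi(ndev, qidx, NETDEV_QUEUE_TYPE_RX, napi);
	netif_queue_set_napi(ndev, qidx, NETDEV_QUEUE_TYPE_TX, napi);
}

static void foo_detach_napi(struct net_device *ndev, unsigned int qidx)
{
	/* clear the mapping before the NAPI instance is disabled */
	netif_queue_set_napi(ndev, qidx, NETDEV_QUEUE_TYPE_TX, NULL);
	netif_queue_set_napi(ndev, qidx, NETDEV_QUEUE_TYPE_RX, NULL);
}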
diff --git a/drivers/net/ethernet/faraday/Kconfig b/drivers/net/ethernet/faraday/Kconfig
index c699bd6bcbb9..474073c7f94d 100644
--- a/drivers/net/ethernet/faraday/Kconfig
+++ b/drivers/net/ethernet/faraday/Kconfig
@@ -31,6 +31,7 @@ config FTGMAC100
depends on ARM || COMPILE_TEST
depends on !64BIT || BROKEN
select PHYLIB
+ select FIXED_PHY
select MDIO_ASPEED if MACH_ASPEED_G6
select CRC32
help
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 17ec35e75a65..a98d5af3f9e3 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -1906,7 +1906,7 @@ static int ftgmac100_probe(struct platform_device *pdev)
goto err_phy_connect;
}
- phydev = fixed_phy_register(PHY_POLL, &ncsi_phy_status, np);
+ phydev = fixed_phy_register(&ncsi_phy_status, np);
if (IS_ERR(phydev)) {
dev_err(&pdev->dev, "failed to register fixed PHY device\n");
err = PTR_ERR(phydev);
diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
index ed18450fd2cc..6ac8547ef9b8 100644
--- a/drivers/net/ethernet/fealnx.c
+++ b/drivers/net/ethernet/fealnx.c
@@ -1074,7 +1074,7 @@ static void allocate_rx_buffers(struct net_device *dev)
static void netdev_timer(struct timer_list *t)
{
- struct netdev_private *np = from_timer(np, t, timer);
+ struct netdev_private *np = timer_container_of(np, t, timer);
struct net_device *dev = np->mii.dev;
void __iomem *ioaddr = np->mem;
int old_crvalue = np->crvalue;
@@ -1163,7 +1163,7 @@ static void enable_rxtx(struct net_device *dev)
static void reset_timer(struct timer_list *t)
{
- struct netdev_private *np = from_timer(np, t, reset_timer);
+ struct netdev_private *np = timer_container_of(np, t, reset_timer);
struct net_device *dev = np->mii.dev;
unsigned long flags;
@@ -1900,8 +1900,8 @@ static int netdev_close(struct net_device *dev)
/* Stop the chip's Tx and Rx processes. */
stop_nic_rxtx(ioaddr, 0);
- del_timer_sync(&np->timer);
- del_timer_sync(&np->reset_timer);
+ timer_delete_sync(&np->timer);
+ timer_delete_sync(&np->reset_timer);
free_irq(np->pci_dev->irq, dev);
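The fealnx hunks above are mechanical renames of the timer API: from_timer() becomes timer_container_of() and del_timer_sync() becomes timer_delete_sync(). A minimal illustrative sketch, not driver code:

#include <linux/timer.h>

struct my_priv {
	struct timer_list watchdog;
};

static void my_watchdog_cb(struct timer_list *t)
{
	/* recover the enclosing structure from the timer pointer */
	struct my_priv *p = timer_container_of(p, t, watchdog);

	/* periodic work, then optionally mod_timer(&p->watchdog, ...) */
}

static void my_stop(struct my_priv *p)
{
	/* waits for a running callback to finish before returning */
	timer_delete_sync(&p->watchdog);
}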
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index 75401d2a5fb4..bbef47c3480c 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -71,7 +71,6 @@ config FSL_XGMAC_MDIO
tristate "Freescale XGMAC MDIO"
select PHYLIB
depends on OF
- select MDIO_DEVRES
select OF_MDIO
help
This driver supports the MDIO bus on the Fman 10G Ethernet MACs, and
@@ -81,8 +80,7 @@ config UCC_GETH
tristate "Freescale QE Gigabit Ethernet"
depends on QUICC_ENGINE && PPC32
select FSL_PQ_MDIO
- select PHYLIB
- select FIXED_PHY
+ select PHYLINK
help
This driver supports the Gigabit Ethernet mode of the QUICC Engine,
which is available on some Freescale SOCs.
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index bf5baef5c3e0..23c23cca2620 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -2281,7 +2281,7 @@ static int dpaa_a050385_wa_xdpf(struct dpaa_priv *priv,
new_xdpf->len = xdpf->len;
new_xdpf->headroom = priv->tx_headroom;
new_xdpf->frame_sz = DPAA_BP_RAW_SIZE;
- new_xdpf->mem.type = MEM_TYPE_PAGE_ORDER0;
+ new_xdpf->mem_type = MEM_TYPE_PAGE_ORDER0;
/* Release the initial buffer */
xdp_return_frame_rx_napi(xdpf);
@@ -3089,15 +3089,25 @@ static int dpaa_xdp_xmit(struct net_device *net_dev, int n,
return nxmit;
}
-static int dpaa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+static int dpaa_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *config)
{
struct dpaa_priv *priv = netdev_priv(dev);
- struct hwtstamp_config config;
- if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
- return -EFAULT;
+ config->tx_type = priv->tx_tstamp ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ config->rx_filter = priv->rx_tstamp ? HWTSTAMP_FILTER_ALL :
+ HWTSTAMP_FILTER_NONE;
- switch (config.tx_type) {
+ return 0;
+}
+
+static int dpaa_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ struct dpaa_priv *priv = netdev_priv(dev);
+
+ switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
/* Couldn't disable rx/tx timestamping separately.
* Do nothing here.
@@ -3112,7 +3122,7 @@ static int dpaa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
return -ERANGE;
}
- if (config.rx_filter == HWTSTAMP_FILTER_NONE) {
+ if (config->rx_filter == HWTSTAMP_FILTER_NONE) {
/* Couldn't disable rx/tx timestamping separately.
* Do nothing here.
*/
@@ -3121,28 +3131,17 @@ static int dpaa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
priv->mac_dev->set_tstamp(priv->mac_dev->fman_mac, true);
priv->rx_tstamp = true;
/* TS is set for all frame types, not only those requested */
- config.rx_filter = HWTSTAMP_FILTER_ALL;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
}
- return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ return 0;
}
static int dpaa_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd)
{
- int ret = -EINVAL;
struct dpaa_priv *priv = netdev_priv(net_dev);
- if (cmd == SIOCGMIIREG) {
- if (net_dev->phydev)
- return phylink_mii_ioctl(priv->mac_dev->phylink, rq,
- cmd);
- }
-
- if (cmd == SIOCSHWTSTAMP)
- return dpaa_ts_ioctl(net_dev, rq, cmd);
-
- return ret;
+ return phylink_mii_ioctl(priv->mac_dev->phylink, rq, cmd);
}
static const struct net_device_ops dpaa_ops = {
@@ -3160,6 +3159,8 @@ static const struct net_device_ops dpaa_ops = {
.ndo_change_mtu = dpaa_change_mtu,
.ndo_bpf = dpaa_xdp,
.ndo_xdp_xmit = dpaa_xdp_xmit,
+ .ndo_hwtstamp_get = dpaa_hwtstamp_get,
+ .ndo_hwtstamp_set = dpaa_hwtstamp_set,
};
static int dpaa_napi_add(struct net_device *net_dev)
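dpaa (and dpaa2 below) move SIOCSHWTSTAMP/SIOCGHWTSTAMP handling out of ndo_eth_ioctl and into the dedicated hwtstamp callbacks. A hedged sketch of the generic shape of these hooks; the foo_* names are illustrative and the core handles the user-space copies:

#include <linux/netdevice.h>
#include <linux/net_tstamp.h>

static int foo_hwtstamp_get(struct net_device *dev,
			    struct kernel_hwtstamp_config *cfg)
{
	/* report the currently programmed timestamping state */
	cfg->tx_type = HWTSTAMP_TX_OFF;
	cfg->rx_filter = HWTSTAMP_FILTER_NONE;
	return 0;
}

static int foo_hwtstamp_set(struct net_device *dev,
			    struct kernel_hwtstamp_config *cfg,
			    struct netlink_ext_ack *extack)
{
	if (cfg->tx_type != HWTSTAMP_TX_OFF &&
	    cfg->tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;
	/* program the hardware, then echo back what was actually enabled */
	return 0;
}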
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 29886a8ba73f..b82f121cadad 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -2585,40 +2585,52 @@ static int dpaa2_eth_set_features(struct net_device *net_dev,
return 0;
}
-static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+static int dpaa2_eth_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct dpaa2_eth_priv *priv = netdev_priv(dev);
- struct hwtstamp_config config;
if (!dpaa2_ptp)
return -EINVAL;
- if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
- return -EFAULT;
-
- switch (config.tx_type) {
+ switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
case HWTSTAMP_TX_ON:
case HWTSTAMP_TX_ONESTEP_SYNC:
- priv->tx_tstamp_type = config.tx_type;
+ priv->tx_tstamp_type = config->tx_type;
break;
default:
return -ERANGE;
}
- if (config.rx_filter == HWTSTAMP_FILTER_NONE) {
+ if (config->rx_filter == HWTSTAMP_FILTER_NONE) {
priv->rx_tstamp = false;
} else {
priv->rx_tstamp = true;
/* TS is set for all frame types, not only those requested */
- config.rx_filter = HWTSTAMP_FILTER_ALL;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
}
if (priv->tx_tstamp_type == HWTSTAMP_TX_ONESTEP_SYNC)
dpaa2_ptp_onestep_reg_update_method(priv);
- return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ return 0;
+}
+
+static int dpaa2_eth_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *config)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(dev);
+
+ if (!dpaa2_ptp)
+ return -EINVAL;
+
+ config->tx_type = priv->tx_tstamp_type;
+ config->rx_filter = priv->rx_tstamp ? HWTSTAMP_FILTER_ALL :
+ HWTSTAMP_FILTER_NONE;
+
+ return 0;
}
static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
@@ -2626,9 +2638,6 @@ static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
struct dpaa2_eth_priv *priv = netdev_priv(dev);
int err;
- if (cmd == SIOCSHWTSTAMP)
- return dpaa2_eth_ts_ioctl(dev, rq, cmd);
-
mutex_lock(&priv->mac_lock);
if (dpaa2_eth_is_type_phy(priv)) {
@@ -3034,7 +3043,9 @@ static const struct net_device_ops dpaa2_eth_ops = {
.ndo_xsk_wakeup = dpaa2_xsk_wakeup,
.ndo_setup_tc = dpaa2_eth_setup_tc,
.ndo_vlan_rx_add_vid = dpaa2_eth_rx_add_vid,
- .ndo_vlan_rx_kill_vid = dpaa2_eth_rx_kill_vid
+ .ndo_vlan_rx_kill_vid = dpaa2_eth_rx_kill_vid,
+ .ndo_hwtstamp_get = dpaa2_eth_hwtstamp_get,
+ .ndo_hwtstamp_set = dpaa2_eth_hwtstamp_set,
};
static void dpaa2_eth_cdan_cb(struct dpaa2_io_notification_ctx *ctx)
@@ -3928,6 +3939,7 @@ static int dpaa2_eth_setup_rx_flow(struct dpaa2_eth_priv *priv,
MEM_TYPE_PAGE_ORDER0, NULL);
if (err) {
dev_err(dev, "xdp_rxq_info_reg_mem_model failed\n");
+ xdp_rxq_info_unreg(&fq->channel->xdp_rxq);
return err;
}
@@ -4421,17 +4433,25 @@ static int dpaa2_eth_bind_dpni(struct dpaa2_eth_priv *priv)
return -EINVAL;
}
if (err)
- return err;
+ goto out;
}
err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token,
DPNI_QUEUE_TX, &priv->tx_qdid);
if (err) {
dev_err(dev, "dpni_get_qdid() failed\n");
- return err;
+ goto out;
}
return 0;
+
+out:
+ while (i--) {
+ if (priv->fq[i].type == DPAA2_RX_FQ &&
+ xdp_rxq_info_is_reg(&priv->fq[i].channel->xdp_rxq))
+ xdp_rxq_info_unreg(&priv->fq[i].channel->xdp_rxq);
+ }
+ return err;
}
/* Allocate rings for storing incoming frame descriptors */
@@ -4814,6 +4834,17 @@ static void dpaa2_eth_del_ch_napi(struct dpaa2_eth_priv *priv)
}
}
+static void dpaa2_eth_free_rx_xdp_rxq(struct dpaa2_eth_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->num_fqs; i++) {
+ if (priv->fq[i].type == DPAA2_RX_FQ &&
+ xdp_rxq_info_is_reg(&priv->fq[i].channel->xdp_rxq))
+ xdp_rxq_info_unreg(&priv->fq[i].channel->xdp_rxq);
+ }
+}
+
static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
{
struct device *dev;
@@ -5017,6 +5048,7 @@ err_alloc_percpu_extras:
free_percpu(priv->percpu_stats);
err_alloc_percpu_stats:
dpaa2_eth_del_ch_napi(priv);
+ dpaa2_eth_free_rx_xdp_rxq(priv);
err_bind:
dpaa2_eth_free_dpbps(priv);
err_dpbp_setup:
@@ -5069,6 +5101,7 @@ static void dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
free_percpu(priv->percpu_extras);
dpaa2_eth_del_ch_napi(priv);
+ dpaa2_eth_free_rx_xdp_rxq(priv);
dpaa2_eth_free_dpbps(priv);
dpaa2_eth_free_dpio(priv);
dpaa2_eth_free_dpni(priv);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
index a293b08f36d4..147a93bf9fa9 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
@@ -780,13 +780,14 @@ struct ethsw_dump_ctx {
static int dpaa2_switch_fdb_dump_nl(struct fdb_dump_entry *entry,
struct ethsw_dump_ctx *dump)
{
+ struct ndo_fdb_dump_context *ctx = (void *)dump->cb->ctx;
int is_dynamic = entry->type & DPSW_FDB_ENTRY_DINAMIC;
u32 portid = NETLINK_CB(dump->cb->skb).portid;
u32 seq = dump->cb->nlh->nlmsg_seq;
struct nlmsghdr *nlh;
struct ndmsg *ndm;
- if (dump->idx < dump->cb->args[2])
+ if (dump->idx < ctx->fdb_idx)
goto skip;
nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
diff --git a/drivers/net/ethernet/freescale/enetc/Kconfig b/drivers/net/ethernet/freescale/enetc/Kconfig
index 6c2779047dcd..54b0f0a5a6bb 100644
--- a/drivers/net/ethernet/freescale/enetc/Kconfig
+++ b/drivers/net/ethernet/freescale/enetc/Kconfig
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
config FSL_ENETC_CORE
tristate
+ select NXP_NETC_LIB if NXP_NTMP
help
This module supports common functionality between the PF and VF
drivers for the NXP ENETC controller.
@@ -15,10 +16,19 @@ config NXP_ENETC_PF_COMMON
If compiled as module (M), the module name is nxp-enetc-pf-common.
+config NXP_NETC_LIB
+ tristate
+ help
+ This module provides functionality shared by both ENETC and the NETC
+ Switch, such as the NETC Table Management Protocol (NTMP) 2.0 and
+ common tc flower and debugfs interfaces.
+
+config NXP_NTMP
+ bool
+
config FSL_ENETC
tristate "ENETC PF driver"
depends on PCI_MSI
- select MDIO_DEVRES
select FSL_ENETC_CORE
select FSL_ENETC_IERB
select FSL_ENETC_MDIO
@@ -36,10 +46,10 @@ config FSL_ENETC
config NXP_ENETC4
tristate "ENETC4 PF driver"
depends on PCI_MSI
- select MDIO_DEVRES
select FSL_ENETC_CORE
select FSL_ENETC_MDIO
select NXP_ENETC_PF_COMMON
+ select NXP_NTMP
select PHYLINK
select DIMLIB
help
@@ -73,7 +83,7 @@ config FSL_ENETC_IERB
config FSL_ENETC_MDIO
tristate "ENETC MDIO driver"
- depends on PCI && MDIO_DEVRES && MDIO_BUS
+ depends on PCI && PHYLIB
help
This driver supports NXP ENETC Central MDIO controller as a PCIe
physical function (PF) device.
diff --git a/drivers/net/ethernet/freescale/enetc/Makefile b/drivers/net/ethernet/freescale/enetc/Makefile
index 6fd27ee4fcd1..f1c5ad45fd76 100644
--- a/drivers/net/ethernet/freescale/enetc/Makefile
+++ b/drivers/net/ethernet/freescale/enetc/Makefile
@@ -6,6 +6,9 @@ fsl-enetc-core-y := enetc.o enetc_cbdr.o enetc_ethtool.o
obj-$(CONFIG_NXP_ENETC_PF_COMMON) += nxp-enetc-pf-common.o
nxp-enetc-pf-common-y := enetc_pf_common.o
+obj-$(CONFIG_NXP_NETC_LIB) += nxp-netc-lib.o
+nxp-netc-lib-y := ntmp.o
+
obj-$(CONFIG_FSL_ENETC) += fsl-enetc.o
fsl-enetc-y := enetc_pf.o
fsl-enetc-$(CONFIG_PCI_IOV) += enetc_msg.o
@@ -13,6 +16,7 @@ fsl-enetc-$(CONFIG_FSL_ENETC_QOS) += enetc_qos.o
obj-$(CONFIG_NXP_ENETC4) += nxp-enetc4.o
nxp-enetc4-y := enetc4_pf.o
+nxp-enetc4-$(CONFIG_DEBUG_FS) += enetc4_debugfs.o
obj-$(CONFIG_FSL_ENETC_VF) += fsl-enetc-vf.o
fsl-enetc-vf-y := enetc_vf.o
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 35634c516e26..dcc3fbac3481 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -29,10 +29,49 @@ EXPORT_SYMBOL_GPL(enetc_port_mac_wr);
static void enetc_change_preemptible_tcs(struct enetc_ndev_priv *priv,
u8 preemptible_tcs)
{
+ if (!(priv->si->hw_features & ENETC_SI_F_QBU))
+ return;
+
priv->preemptible_tcs = preemptible_tcs;
enetc_mm_commit_preemptible_tcs(priv);
}
+static int enetc_mac_addr_hash_idx(const u8 *addr)
+{
+ u64 fold = __swab64(ether_addr_to_u64(addr)) >> 16;
+ u64 mask = 0;
+ int res = 0;
+ int i;
+
+ for (i = 0; i < 8; i++)
+ mask |= BIT_ULL(i * 6);
+
+ for (i = 0; i < 6; i++)
+ res |= (hweight64(fold & (mask << i)) & 0x1) << i;
+
+ return res;
+}
+
+void enetc_add_mac_addr_ht_filter(struct enetc_mac_filter *filter,
+ const unsigned char *addr)
+{
+ int idx = enetc_mac_addr_hash_idx(addr);
+
+ /* add hash table entry */
+ __set_bit(idx, filter->mac_hash_table);
+ filter->mac_addr_cnt++;
+}
+EXPORT_SYMBOL_GPL(enetc_add_mac_addr_ht_filter);
+
+void enetc_reset_mac_addr_filter(struct enetc_mac_filter *filter)
+{
+ filter->mac_addr_cnt = 0;
+
+ bitmap_zero(filter->mac_hash_table,
+ ENETC_MADDR_HASH_TBL_SZ);
+}
+EXPORT_SYMBOL_GPL(enetc_reset_mac_addr_filter);
+
static int enetc_num_stack_tx_queues(struct enetc_ndev_priv *priv)
{
int num_tx_rings = priv->num_tx_rings;
@@ -143,6 +182,45 @@ static int enetc_ptp_parse(struct sk_buff *skb, u8 *udp,
return 0;
}
+static bool enetc_tx_csum_offload_check(struct sk_buff *skb)
+{
+ switch (skb->csum_offset) {
+ case offsetof(struct tcphdr, check):
+ case offsetof(struct udphdr, check):
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool enetc_skb_is_ipv6(struct sk_buff *skb)
+{
+ return vlan_get_protocol(skb) == htons(ETH_P_IPV6);
+}
+
+static bool enetc_skb_is_tcp(struct sk_buff *skb)
+{
+ return skb->csum_offset == offsetof(struct tcphdr, check);
+}
+
+/**
+ * enetc_unwind_tx_frame() - Unwind the DMA mappings of a multi-buffer Tx frame
+ * @tx_ring: Pointer to the Tx ring on which the buffer descriptors are located
+ * @count: Number of Tx buffer descriptors which need to be unmapped
+ * @i: Index of the last successfully mapped Tx buffer descriptor
+ */
+static void enetc_unwind_tx_frame(struct enetc_bdr *tx_ring, int count, int i)
+{
+ while (count--) {
+ struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i];
+
+ enetc_free_tx_frame(tx_ring, tx_swbd);
+ if (i == 0)
+ i = tx_ring->bd_count;
+ i--;
+ }
+}
+
static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
{
bool do_vlan, do_onestep_tstamp = false, do_twostep_tstamp = false;
@@ -160,6 +238,29 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
dma_addr_t dma;
u8 flags = 0;
+ enetc_clear_tx_bd(&temp_bd);
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ /* Can not support TSD and checksum offload at the same time */
+ if (priv->active_offloads & ENETC_F_TXCSUM &&
+ enetc_tx_csum_offload_check(skb) && !tx_ring->tsd_enable) {
+ temp_bd.l3_aux0 = FIELD_PREP(ENETC_TX_BD_L3_START,
+ skb_network_offset(skb));
+ temp_bd.l3_aux1 = FIELD_PREP(ENETC_TX_BD_L3_HDR_LEN,
+ skb_network_header_len(skb) / 4);
+ temp_bd.l3_aux1 |= FIELD_PREP(ENETC_TX_BD_L3T,
+ enetc_skb_is_ipv6(skb));
+ if (enetc_skb_is_tcp(skb))
+ temp_bd.l4_aux = FIELD_PREP(ENETC_TX_BD_L4T,
+ ENETC_TXBD_L4T_TCP);
+ else
+ temp_bd.l4_aux = FIELD_PREP(ENETC_TX_BD_L4T,
+ ENETC_TXBD_L4T_UDP);
+ flags |= ENETC_TXBD_FLAGS_CSUM_LSO | ENETC_TXBD_FLAGS_L4CS;
+ } else if (skb_checksum_help(skb)) {
+ return 0;
+ }
+ }
+
i = tx_ring->next_to_use;
txbd = ENETC_TXBD(*tx_ring, i);
prefetchw(txbd);
@@ -170,7 +271,6 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
temp_bd.addr = cpu_to_le64(dma);
temp_bd.buf_len = cpu_to_le16(len);
- temp_bd.lstatus = 0;
tx_swbd = &tx_ring->tx_swbd[i];
tx_swbd->dma = dma;
@@ -233,9 +333,11 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
}
if (do_onestep_tstamp) {
- u32 lo, hi, val;
- u64 sec, nsec;
+ __be32 new_sec_l, new_nsec;
+ u32 lo, hi, nsec, val;
+ __be16 new_sec_h;
u8 *data;
+ u64 sec;
lo = enetc_rd_hot(hw, ENETC_SICTR0);
hi = enetc_rd_hot(hw, ENETC_SICTR1);
@@ -249,13 +351,38 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
/* Update originTimestamp field of Sync packet
* - 48 bits seconds field
* - 32 bits nanoseconds field
+ *
+ * In addition, the UDP checksum needs to be updated
+ * by software after the originTimestamp field has been
+ * changed, otherwise the hardware will calculate a wrong
+ * checksum when it updates the correction field and
+ * writes it back into the packet.
*/
data = skb_mac_header(skb);
- *(__be16 *)(data + offset2) =
- htons((sec >> 32) & 0xffff);
- *(__be32 *)(data + offset2 + 2) =
- htonl(sec & 0xffffffff);
- *(__be32 *)(data + offset2 + 6) = htonl(nsec);
+ new_sec_h = htons((sec >> 32) & 0xffff);
+ new_sec_l = htonl(sec & 0xffffffff);
+ new_nsec = htonl(nsec);
+ if (udp) {
+ struct udphdr *uh = udp_hdr(skb);
+ __be32 old_sec_l, old_nsec;
+ __be16 old_sec_h;
+
+ old_sec_h = *(__be16 *)(data + offset2);
+ inet_proto_csum_replace2(&uh->check, skb, old_sec_h,
+ new_sec_h, false);
+
+ old_sec_l = *(__be32 *)(data + offset2 + 2);
+ inet_proto_csum_replace4(&uh->check, skb, old_sec_l,
+ new_sec_l, false);
+
+ old_nsec = *(__be32 *)(data + offset2 + 6);
+ inet_proto_csum_replace4(&uh->check, skb, old_nsec,
+ new_nsec, false);
+ }
+
+ *(__be16 *)(data + offset2) = new_sec_h;
+ *(__be32 *)(data + offset2 + 2) = new_sec_l;
+ *(__be32 *)(data + offset2 + 6) = new_nsec;
/* Configure single-step register */
val = ENETC_PM0_SINGLE_STEP_EN;
@@ -326,25 +453,20 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
dma_err:
dev_err(tx_ring->dev, "DMA map error");
- do {
- tx_swbd = &tx_ring->tx_swbd[i];
- enetc_free_tx_frame(tx_ring, tx_swbd);
- if (i == 0)
- i = tx_ring->bd_count;
- i--;
- } while (count--);
+ enetc_unwind_tx_frame(tx_ring, count, i);
return 0;
}
-static void enetc_map_tx_tso_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb,
- struct enetc_tx_swbd *tx_swbd,
- union enetc_tx_bd *txbd, int *i, int hdr_len,
- int data_len)
+static int enetc_map_tx_tso_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb,
+ struct enetc_tx_swbd *tx_swbd,
+ union enetc_tx_bd *txbd, int *i, int hdr_len,
+ int data_len)
{
union enetc_tx_bd txbd_tmp;
u8 flags = 0, e_flags = 0;
dma_addr_t addr;
+ int count = 1;
enetc_clear_tx_bd(&txbd_tmp);
addr = tx_ring->tso_headers_dma + *i * TSO_HEADER_SIZE;
@@ -387,7 +509,10 @@ static void enetc_map_tx_tso_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb,
/* Write the BD */
txbd_tmp.ext.e_flags = e_flags;
*txbd = txbd_tmp;
+ count++;
}
+
+ return count;
}
static int enetc_map_tx_tso_data(struct enetc_bdr *tx_ring, struct sk_buff *skb,
@@ -486,8 +611,233 @@ static void enetc_tso_complete_csum(struct enetc_bdr *tx_ring, struct tso_t *tso
}
}
+static int enetc_lso_count_descs(const struct sk_buff *skb)
+{
+ /* 4 BDs: 1 BD for LSO header + 1 BD for extended BD + 1 BD
+ * for linear area data but not include LSO header, namely
+ * skb_headlen(skb) - lso_hdr_len (it may be 0, but that's
+ * okay, we only need to consider the worst case). And 1 BD
+ * for gap.
+ */
+ return skb_shinfo(skb)->nr_frags + 4;
+}
+
+static int enetc_lso_get_hdr_len(const struct sk_buff *skb)
+{
+ int hdr_len, tlen;
+
+ tlen = skb_is_gso_tcp(skb) ? tcp_hdrlen(skb) : sizeof(struct udphdr);
+ hdr_len = skb_transport_offset(skb) + tlen;
+
+ return hdr_len;
+}
+
+static void enetc_lso_start(struct sk_buff *skb, struct enetc_lso_t *lso)
+{
+ lso->lso_seg_size = skb_shinfo(skb)->gso_size;
+ lso->ipv6 = enetc_skb_is_ipv6(skb);
+ lso->tcp = skb_is_gso_tcp(skb);
+ lso->l3_hdr_len = skb_network_header_len(skb);
+ lso->l3_start = skb_network_offset(skb);
+ lso->hdr_len = enetc_lso_get_hdr_len(skb);
+ lso->total_len = skb->len - lso->hdr_len;
+}
+
+static void enetc_lso_map_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb,
+ int *i, struct enetc_lso_t *lso)
+{
+ union enetc_tx_bd txbd_tmp, *txbd;
+ struct enetc_tx_swbd *tx_swbd;
+ u16 frm_len, frm_len_ext;
+ u8 flags, e_flags = 0;
+ dma_addr_t addr;
+ char *hdr;
+
+ /* Get the first BD of the LSO BDs chain */
+ txbd = ENETC_TXBD(*tx_ring, *i);
+ tx_swbd = &tx_ring->tx_swbd[*i];
+ prefetchw(txbd);
+
+ /* Prepare LSO header: MAC + IP + TCP/UDP */
+ hdr = tx_ring->tso_headers + *i * TSO_HEADER_SIZE;
+ memcpy(hdr, skb->data, lso->hdr_len);
+ addr = tx_ring->tso_headers_dma + *i * TSO_HEADER_SIZE;
+
+ /* {frm_len_ext, frm_len} indicates the total length of the
+ * large transmit data unit. frm_len contains the 16 least
+ * significant bits and frm_len_ext contains the 4 most
+ * significant bits.
+ */
+ frm_len = lso->total_len & 0xffff;
+ frm_len_ext = (lso->total_len >> 16) & 0xf;
+
+ /* Set the flags of the first BD */
+ flags = ENETC_TXBD_FLAGS_EX | ENETC_TXBD_FLAGS_CSUM_LSO |
+ ENETC_TXBD_FLAGS_LSO | ENETC_TXBD_FLAGS_L4CS;
+
+ enetc_clear_tx_bd(&txbd_tmp);
+ txbd_tmp.addr = cpu_to_le64(addr);
+ txbd_tmp.hdr_len = cpu_to_le16(lso->hdr_len);
+
+ /* first BD needs frm_len and offload flags set */
+ txbd_tmp.frm_len = cpu_to_le16(frm_len);
+ txbd_tmp.flags = flags;
+
+ txbd_tmp.l3_aux0 = FIELD_PREP(ENETC_TX_BD_L3_START, lso->l3_start);
+ /* l3_hdr_size in 32-bits (4 bytes) */
+ txbd_tmp.l3_aux1 = FIELD_PREP(ENETC_TX_BD_L3_HDR_LEN,
+ lso->l3_hdr_len / 4);
+ if (lso->ipv6)
+ txbd_tmp.l3_aux1 |= ENETC_TX_BD_L3T;
+ else
+ txbd_tmp.l3_aux0 |= ENETC_TX_BD_IPCS;
+
+ txbd_tmp.l4_aux = FIELD_PREP(ENETC_TX_BD_L4T, lso->tcp ?
+ ENETC_TXBD_L4T_TCP : ENETC_TXBD_L4T_UDP);
+
+ /* For the LSO header we do not set the dma address since
+ * we do not want it unmapped when we do cleanup. We still
+ * set len so that we count the bytes sent.
+ */
+ tx_swbd->len = lso->hdr_len;
+ tx_swbd->do_twostep_tstamp = false;
+ tx_swbd->check_wb = false;
+
+ /* Actually write the header in the BD */
+ *txbd = txbd_tmp;
+
+ /* Get the next BD, and the next BD is extended BD */
+ enetc_bdr_idx_inc(tx_ring, i);
+ txbd = ENETC_TXBD(*tx_ring, *i);
+ tx_swbd = &tx_ring->tx_swbd[*i];
+ prefetchw(txbd);
+
+ enetc_clear_tx_bd(&txbd_tmp);
+ if (skb_vlan_tag_present(skb)) {
+ /* Setup the VLAN fields */
+ txbd_tmp.ext.vid = cpu_to_le16(skb_vlan_tag_get(skb));
+ txbd_tmp.ext.tpid = ENETC_TPID_8021Q;
+ e_flags = ENETC_TXBD_E_FLAGS_VLAN_INS;
+ }
+
+ /* Write the BD */
+ txbd_tmp.ext.e_flags = e_flags;
+ txbd_tmp.ext.lso_sg_size = cpu_to_le16(lso->lso_seg_size);
+ txbd_tmp.ext.frm_len_ext = cpu_to_le16(frm_len_ext);
+ *txbd = txbd_tmp;
+}
+
+static int enetc_lso_map_data(struct enetc_bdr *tx_ring, struct sk_buff *skb,
+ int *i, struct enetc_lso_t *lso, int *count)
+{
+ union enetc_tx_bd txbd_tmp, *txbd = NULL;
+ struct enetc_tx_swbd *tx_swbd;
+ skb_frag_t *frag;
+ dma_addr_t dma;
+ u8 flags = 0;
+ int len, f;
+
+ len = skb_headlen(skb) - lso->hdr_len;
+ if (len > 0) {
+ dma = dma_map_single(tx_ring->dev, skb->data + lso->hdr_len,
+ len, DMA_TO_DEVICE);
+ if (dma_mapping_error(tx_ring->dev, dma))
+ return -ENOMEM;
+
+ enetc_bdr_idx_inc(tx_ring, i);
+ txbd = ENETC_TXBD(*tx_ring, *i);
+ tx_swbd = &tx_ring->tx_swbd[*i];
+ prefetchw(txbd);
+ *count += 1;
+
+ enetc_clear_tx_bd(&txbd_tmp);
+ txbd_tmp.addr = cpu_to_le64(dma);
+ txbd_tmp.buf_len = cpu_to_le16(len);
+
+ tx_swbd->dma = dma;
+ tx_swbd->len = len;
+ tx_swbd->is_dma_page = 0;
+ tx_swbd->dir = DMA_TO_DEVICE;
+ }
+
+ frag = &skb_shinfo(skb)->frags[0];
+ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++, frag++) {
+ if (txbd)
+ *txbd = txbd_tmp;
+
+ len = skb_frag_size(frag);
+ dma = skb_frag_dma_map(tx_ring->dev, frag);
+ if (dma_mapping_error(tx_ring->dev, dma))
+ return -ENOMEM;
+
+ /* Get the next BD */
+ enetc_bdr_idx_inc(tx_ring, i);
+ txbd = ENETC_TXBD(*tx_ring, *i);
+ tx_swbd = &tx_ring->tx_swbd[*i];
+ prefetchw(txbd);
+ *count += 1;
+
+ enetc_clear_tx_bd(&txbd_tmp);
+ txbd_tmp.addr = cpu_to_le64(dma);
+ txbd_tmp.buf_len = cpu_to_le16(len);
+
+ tx_swbd->dma = dma;
+ tx_swbd->len = len;
+ tx_swbd->is_dma_page = 1;
+ tx_swbd->dir = DMA_TO_DEVICE;
+ }
+
+ /* Last BD needs 'F' bit set */
+ flags |= ENETC_TXBD_FLAGS_F;
+ txbd_tmp.flags = flags;
+ *txbd = txbd_tmp;
+
+ tx_swbd->is_eof = 1;
+ tx_swbd->skb = skb;
+
+ return 0;
+}
+
+static int enetc_lso_hw_offload(struct enetc_bdr *tx_ring, struct sk_buff *skb)
+{
+ struct enetc_tx_swbd *tx_swbd;
+ struct enetc_lso_t lso = {0};
+ int err, i, count = 0;
+
+ /* Initialize the LSO handler */
+ enetc_lso_start(skb, &lso);
+ i = tx_ring->next_to_use;
+
+ enetc_lso_map_hdr(tx_ring, skb, &i, &lso);
+ /* First BD plus an extension BD */
+ count += 2;
+
+ err = enetc_lso_map_data(tx_ring, skb, &i, &lso, &count);
+ if (err)
+ goto dma_err;
+
+ /* Go to the next BD */
+ enetc_bdr_idx_inc(tx_ring, &i);
+ tx_ring->next_to_use = i;
+ enetc_update_tx_ring_tail(tx_ring);
+
+ return count;
+
+dma_err:
+ do {
+ tx_swbd = &tx_ring->tx_swbd[i];
+ enetc_free_tx_frame(tx_ring, tx_swbd);
+ if (i == 0)
+ i = tx_ring->bd_count;
+ i--;
+ } while (--count);
+
+ return 0;
+}
+
static int enetc_map_tx_tso_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
{
+ struct enetc_ndev_priv *priv = netdev_priv(tx_ring->ndev);
int hdr_len, total_len, data_len;
struct enetc_tx_swbd *tx_swbd;
union enetc_tx_bd *txbd;
@@ -519,9 +869,9 @@ static int enetc_map_tx_tso_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb
/* compute the csum over the L4 header */
csum = enetc_tso_hdr_csum(&tso, skb, hdr, hdr_len, &pos);
- enetc_map_tx_tso_hdr(tx_ring, skb, tx_swbd, txbd, &i, hdr_len, data_len);
+ count += enetc_map_tx_tso_hdr(tx_ring, skb, tx_swbd, txbd,
+ &i, hdr_len, data_len);
bd_data_num = 0;
- count++;
while (data_len > 0) {
int size;
@@ -545,15 +895,20 @@ static int enetc_map_tx_tso_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb
err = enetc_map_tx_tso_data(tx_ring, skb, tx_swbd, txbd,
tso.data, size,
size == data_len);
- if (err)
+ if (err) {
+ if (i == 0)
+ i = tx_ring->bd_count;
+ i--;
+
goto err_map_data;
+ }
data_len -= size;
count++;
bd_data_num++;
tso_build_data(skb, &tso, size);
- if (unlikely(bd_data_num >= ENETC_MAX_SKB_FRAGS && data_len))
+ if (unlikely(bd_data_num >= priv->max_frags && data_len))
goto err_chained_bd;
}
@@ -575,13 +930,7 @@ err_map_data:
dev_err(tx_ring->dev, "DMA map error");
err_chained_bd:
- do {
- tx_swbd = &tx_ring->tx_swbd[i];
- enetc_free_tx_frame(tx_ring, tx_swbd);
- if (i == 0)
- i = tx_ring->bd_count;
- i--;
- } while (count--);
+ enetc_unwind_tx_frame(tx_ring, count, i);
return 0;
}
@@ -591,7 +940,7 @@ static netdev_tx_t enetc_start_xmit(struct sk_buff *skb,
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct enetc_bdr *tx_ring;
- int count, err;
+ int count;
/* Queue one-step Sync packet if already locked */
if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
@@ -605,16 +954,28 @@ static netdev_tx_t enetc_start_xmit(struct sk_buff *skb,
tx_ring = priv->tx_ring[skb->queue_mapping];
if (skb_is_gso(skb)) {
- if (enetc_bd_unused(tx_ring) < tso_count_descs(skb)) {
- netif_stop_subqueue(ndev, tx_ring->index);
- return NETDEV_TX_BUSY;
- }
+ /* LSO data unit lengths of up to 256KB are supported */
+ if (priv->active_offloads & ENETC_F_LSO &&
+ (skb->len - enetc_lso_get_hdr_len(skb)) <=
+ ENETC_LSO_MAX_DATA_LEN) {
+ if (enetc_bd_unused(tx_ring) < enetc_lso_count_descs(skb)) {
+ netif_stop_subqueue(ndev, tx_ring->index);
+ return NETDEV_TX_BUSY;
+ }
- enetc_lock_mdio();
- count = enetc_map_tx_tso_buffs(tx_ring, skb);
- enetc_unlock_mdio();
+ count = enetc_lso_hw_offload(tx_ring, skb);
+ } else {
+ if (enetc_bd_unused(tx_ring) < tso_count_descs(skb)) {
+ netif_stop_subqueue(ndev, tx_ring->index);
+ return NETDEV_TX_BUSY;
+ }
+
+ enetc_lock_mdio();
+ count = enetc_map_tx_tso_buffs(tx_ring, skb);
+ enetc_unlock_mdio();
+ }
} else {
- if (unlikely(skb_shinfo(skb)->nr_frags > ENETC_MAX_SKB_FRAGS))
+ if (unlikely(skb_shinfo(skb)->nr_frags > priv->max_frags))
if (unlikely(skb_linearize(skb)))
goto drop_packet_err;
@@ -624,11 +985,6 @@ static netdev_tx_t enetc_start_xmit(struct sk_buff *skb,
return NETDEV_TX_BUSY;
}
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- err = skb_checksum_help(skb);
- if (err)
- goto drop_packet_err;
- }
enetc_lock_mdio();
count = enetc_map_tx_buffs(tx_ring, skb);
enetc_unlock_mdio();
@@ -637,7 +993,7 @@ static netdev_tx_t enetc_start_xmit(struct sk_buff *skb,
if (unlikely(!count))
goto drop_packet_err;
- if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_MAX_NEEDED)
+ if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_MAX_NEEDED(priv->max_frags))
netif_stop_subqueue(ndev, tx_ring->index);
return NETDEV_TX_OK;
@@ -905,7 +1261,8 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
if (unlikely(tx_frm_cnt && netif_carrier_ok(ndev) &&
__netif_subqueue_stopped(ndev, tx_ring->index) &&
!test_bit(ENETC_TX_DOWN, &priv->flags) &&
- (enetc_bd_unused(tx_ring) >= ENETC_TXBDS_MAX_NEEDED))) {
+ (enetc_bd_unused(tx_ring) >=
+ ENETC_TXBDS_MAX_NEEDED(priv->max_frags)))) {
netif_wake_subqueue(ndev, tx_ring->index);
}
@@ -1529,6 +1886,16 @@ static void enetc_xdp_drop(struct enetc_bdr *rx_ring, int rx_ring_first,
}
}
+static void enetc_bulk_flip_buff(struct enetc_bdr *rx_ring, int rx_ring_first,
+ int rx_ring_last)
+{
+ while (rx_ring_first != rx_ring_last) {
+ enetc_flip_rx_buff(rx_ring,
+ &rx_ring->rx_swbd[rx_ring_first]);
+ enetc_bdr_idx_inc(rx_ring, &rx_ring_first);
+ }
+}
+
static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
struct napi_struct *napi, int work_limit,
struct bpf_prog *prog)
@@ -1547,11 +1914,10 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
while (likely(rx_frm_cnt < work_limit)) {
union enetc_rx_bd *rxbd, *orig_rxbd;
- int orig_i, orig_cleaned_cnt;
struct xdp_buff xdp_buff;
struct sk_buff *skb;
+ int orig_i, err;
u32 bd_status;
- int err;
rxbd = enetc_rxbd(rx_ring, i);
bd_status = le32_to_cpu(rxbd->r.lstatus);
@@ -1566,7 +1932,6 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
break;
orig_rxbd = rxbd;
- orig_cleaned_cnt = cleaned_cnt;
orig_i = i;
enetc_build_xdp_buff(rx_ring, bd_status, &rxbd, &i,
@@ -1594,15 +1959,21 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
rx_ring->stats.xdp_drops++;
break;
case XDP_PASS:
- rxbd = orig_rxbd;
- cleaned_cnt = orig_cleaned_cnt;
- i = orig_i;
-
- skb = enetc_build_skb(rx_ring, bd_status, &rxbd,
- &i, &cleaned_cnt,
- ENETC_RXB_DMA_SIZE_XDP);
- if (unlikely(!skb))
+ skb = xdp_build_skb_from_buff(&xdp_buff);
+ /* Probably under memory pressure, stop NAPI */
+ if (unlikely(!skb)) {
+ enetc_xdp_drop(rx_ring, orig_i, i);
+ rx_ring->stats.xdp_drops++;
goto out;
+ }
+
+ enetc_get_offloads(rx_ring, orig_rxbd, skb);
+
+ /* These buffers are about to be owned by the stack.
+ * Update our buffer cache (the rx_swbd array elements)
+ * with their other page halves.
+ */
+ enetc_bulk_flip_buff(rx_ring, orig_i, i);
napi_gro_receive(napi, skb);
break;
@@ -1622,7 +1993,7 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
enetc_xdp_drop(rx_ring, orig_i, i);
tx_ring->stats.xdp_tx_drops++;
} else {
- tx_ring->stats.xdp_tx += xdp_tx_bd_cnt;
+ tx_ring->stats.xdp_tx++;
rx_ring->xdp.xdp_tx_in_flight += xdp_tx_bd_cnt;
xdp_tx_frm_cnt++;
/* The XDP_TX enqueue was successful, so we
@@ -1644,11 +2015,7 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
enetc_xdp_drop(rx_ring, orig_i, i);
rx_ring->stats.xdp_redirect_failures++;
} else {
- while (orig_i != i) {
- enetc_flip_rx_buff(rx_ring,
- &rx_ring->rx_swbd[orig_i]);
- enetc_bdr_idx_inc(rx_ring, &orig_i);
- }
+ enetc_bulk_flip_buff(rx_ring, orig_i, i);
xdp_redirect_frm_cnt++;
rx_ring->stats.xdp_redirect++;
}
@@ -1757,14 +2124,8 @@ void enetc_get_si_caps(struct enetc_si *si)
si->num_rss = ENETC_SIRSSCAPR_GET_NUM_RSS(rss);
}
- if (val & ENETC_SIPCAPR0_QBV)
- si->hw_features |= ENETC_SI_F_QBV;
-
- if (val & ENETC_SIPCAPR0_QBU)
- si->hw_features |= ENETC_SI_F_QBU;
-
- if (val & ENETC_SIPCAPR0_PSFP)
- si->hw_features |= ENETC_SI_F_PSFP;
+ if (val & ENETC_SIPCAPR0_LSO)
+ si->hw_features |= ENETC_SI_F_LSO;
}
EXPORT_SYMBOL_GPL(enetc_get_si_caps);
@@ -2054,13 +2415,35 @@ static int enetc_setup_default_rss_table(struct enetc_si *si, int num_groups)
for (i = 0; i < si->num_rss; i++)
rss_table[i] = i % num_groups;
- enetc_set_rss_table(si, rss_table, si->num_rss);
+ si->ops->set_rss_table(si, rss_table, si->num_rss);
kfree(rss_table);
return 0;
}
+static void enetc_set_lso_flags_mask(struct enetc_hw *hw)
+{
+ enetc_wr(hw, ENETC4_SILSOSFMR0,
+ SILSOSFMR0_VAL_SET(ENETC4_TCP_NL_SEG_FLAGS_DMASK,
+ ENETC4_TCP_NL_SEG_FLAGS_DMASK));
+ enetc_wr(hw, ENETC4_SILSOSFMR1, 0);
+}
+
+static void enetc_set_rss(struct net_device *ndev, int en)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+ u32 reg;
+
+ enetc_wr(hw, ENETC_SIRBGCR, priv->num_rx_rings);
+
+ reg = enetc_rd(hw, ENETC_SIMR);
+ reg &= ~ENETC_SIMR_RSSE;
+ reg |= (en) ? ENETC_SIMR_RSSE : 0;
+ enetc_wr(hw, ENETC_SIMR, reg);
+}
+
int enetc_configure_si(struct enetc_ndev_priv *priv)
{
struct enetc_si *si = priv->si;
@@ -2074,13 +2457,16 @@ int enetc_configure_si(struct enetc_ndev_priv *priv)
/* enable SI */
enetc_wr(hw, ENETC_SIMR, ENETC_SIMR_EN);
- /* TODO: RSS support for i.MX95 will be supported later, and the
- * is_enetc_rev1() condition will be removed
- */
- if (si->num_rss && is_enetc_rev1(si)) {
+ if (si->hw_features & ENETC_SI_F_LSO)
+ enetc_set_lso_flags_mask(hw);
+
+ if (si->num_rss) {
err = enetc_setup_default_rss_table(si, priv->num_rx_rings);
if (err)
return err;
+
+ if (priv->ndev->features & NETIF_F_RXHASH)
+ enetc_set_rss(priv->ndev, true);
}
return 0;
@@ -2873,22 +3259,6 @@ struct net_device_stats *enetc_get_stats(struct net_device *ndev)
}
EXPORT_SYMBOL_GPL(enetc_get_stats);
-static int enetc_set_rss(struct net_device *ndev, int en)
-{
- struct enetc_ndev_priv *priv = netdev_priv(ndev);
- struct enetc_hw *hw = &priv->si->hw;
- u32 reg;
-
- enetc_wr(hw, ENETC_SIRBGCR, priv->num_rx_rings);
-
- reg = enetc_rd(hw, ENETC_SIMR);
- reg &= ~ENETC_SIMR_RSSE;
- reg |= (en) ? ENETC_SIMR_RSSE : 0;
- enetc_wr(hw, ENETC_SIMR, reg);
-
- return 0;
-}
-
static void enetc_enable_rxvlan(struct net_device *ndev, bool en)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
@@ -2926,16 +3296,17 @@ void enetc_set_features(struct net_device *ndev, netdev_features_t features)
}
EXPORT_SYMBOL_GPL(enetc_set_features);
-static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr)
+int enetc_hwtstamp_set(struct net_device *ndev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
int err, new_offloads = priv->active_offloads;
- struct hwtstamp_config config;
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
+ if (!IS_ENABLED(CONFIG_FSL_ENETC_PTP_CLOCK))
+ return -EOPNOTSUPP;
- switch (config.tx_type) {
+ switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
new_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
break;
@@ -2944,6 +3315,9 @@ static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr)
new_offloads |= ENETC_F_TX_TSTAMP;
break;
case HWTSTAMP_TX_ONESTEP_SYNC:
+ if (!enetc_si_is_pf(priv->si))
+ return -EOPNOTSUPP;
+
new_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
new_offloads |= ENETC_F_TX_ONESTEP_SYNC_TSTAMP;
break;
@@ -2951,13 +3325,13 @@ static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr)
return -ERANGE;
}
- switch (config.rx_filter) {
+ switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
new_offloads &= ~ENETC_F_RX_TSTAMP;
break;
default:
new_offloads |= ENETC_F_RX_TSTAMP;
- config.rx_filter = HWTSTAMP_FILTER_ALL;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
}
if ((new_offloads ^ priv->active_offloads) & ENETC_F_RX_TSTAMP) {
@@ -2970,42 +3344,36 @@ static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr)
priv->active_offloads = new_offloads;
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ return 0;
}
+EXPORT_SYMBOL_GPL(enetc_hwtstamp_set);
-static int enetc_hwtstamp_get(struct net_device *ndev, struct ifreq *ifr)
+int enetc_hwtstamp_get(struct net_device *ndev,
+ struct kernel_hwtstamp_config *config)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
- struct hwtstamp_config config;
- config.flags = 0;
+ if (!IS_ENABLED(CONFIG_FSL_ENETC_PTP_CLOCK))
+ return -EOPNOTSUPP;
if (priv->active_offloads & ENETC_F_TX_ONESTEP_SYNC_TSTAMP)
- config.tx_type = HWTSTAMP_TX_ONESTEP_SYNC;
+ config->tx_type = HWTSTAMP_TX_ONESTEP_SYNC;
else if (priv->active_offloads & ENETC_F_TX_TSTAMP)
- config.tx_type = HWTSTAMP_TX_ON;
+ config->tx_type = HWTSTAMP_TX_ON;
else
- config.tx_type = HWTSTAMP_TX_OFF;
+ config->tx_type = HWTSTAMP_TX_OFF;
- config.rx_filter = (priv->active_offloads & ENETC_F_RX_TSTAMP) ?
- HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
+ config->rx_filter = (priv->active_offloads & ENETC_F_RX_TSTAMP) ?
+ HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ return 0;
}
+EXPORT_SYMBOL_GPL(enetc_hwtstamp_get);
int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
- if (IS_ENABLED(CONFIG_FSL_ENETC_PTP_CLOCK)) {
- if (cmd == SIOCSHWTSTAMP)
- return enetc_hwtstamp_set(ndev, rq);
- if (cmd == SIOCGHWTSTAMP)
- return enetc_hwtstamp_get(ndev, rq);
- }
-
if (!priv->phylink)
return -EOPNOTSUPP;
@@ -3033,7 +3401,8 @@ static int enetc_int_vector_init(struct enetc_ndev_priv *priv, int i,
bdr->buffer_offset = ENETC_RXB_PAD;
priv->rx_ring[i] = bdr;
- err = xdp_rxq_info_reg(&bdr->xdp.rxq, priv->ndev, i, 0);
+ err = __xdp_rxq_info_reg(&bdr->xdp.rxq, priv->ndev, i, 0,
+ ENETC_RXB_DMA_SIZE_XDP);
if (err)
goto free_vector;
@@ -3275,17 +3644,21 @@ EXPORT_SYMBOL_GPL(enetc_pci_remove);
static const struct enetc_drvdata enetc_pf_data = {
.sysclk_freq = ENETC_CLK_400M,
.pmac_offset = ENETC_PMAC_OFFSET,
+ .max_frags = ENETC_MAX_SKB_FRAGS,
.eth_ops = &enetc_pf_ethtool_ops,
};
static const struct enetc_drvdata enetc4_pf_data = {
.sysclk_freq = ENETC_CLK_333M,
+ .tx_csum = true,
+ .max_frags = ENETC4_MAX_SKB_FRAGS,
.pmac_offset = ENETC4_PMAC_OFFSET,
.eth_ops = &enetc4_pf_ethtool_ops,
};
static const struct enetc_drvdata enetc_vf_data = {
.sysclk_freq = ENETC_CLK_400M,
+ .max_frags = ENETC_MAX_SKB_FRAGS,
.eth_ops = &enetc_vf_ethtool_ops,
};
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
index 72fa03dbc2dd..872d2cbd088b 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h
@@ -8,6 +8,7 @@
#include <linux/dma-mapping.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
+#include <linux/fsl/ntmp.h>
#include <linux/if_vlan.h>
#include <linux/phylink.h>
#include <linux/dim.h>
@@ -22,6 +23,18 @@
#define ENETC_CBD_DATA_MEM_ALIGN 64
+#define ENETC_MADDR_HASH_TBL_SZ 64
+
+enum enetc_mac_addr_type {UC, MC, MADDR_TYPE};
+
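+/* Depending on how the filter is used, this holds either a single
+ * exact-match MAC address or a 64-bit hash-filter bitmap.
+ */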
+struct enetc_mac_filter {
+ union {
+ char mac_addr[ETH_ALEN];
+ DECLARE_BITMAP(mac_hash_table, ENETC_MADDR_HASH_TBL_SZ);
+ };
+ int mac_addr_cnt;
+};
+
struct enetc_tx_swbd {
union {
struct sk_buff *skb;
@@ -41,6 +54,18 @@ struct enetc_tx_swbd {
u8 qbv_en:1;
};
+struct enetc_lso_t {
+ bool ipv6;
+ bool tcp;
+ u8 l3_hdr_len;
+ u8 hdr_len; /* LSO header length */
+ u8 l3_start;
+ u16 lso_seg_size;
+ int total_len; /* total data length, not including the LSO header */
+};
+
+#define ENETC_LSO_MAX_DATA_LEN SZ_256K
+
#define ENETC_RX_MAXFRM_SIZE ENETC_MAC_MAXFRM_SIZE
#define ENETC_RXB_TRUESIZE 2048 /* PAGE_SIZE >> 1 */
#define ENETC_RXB_PAD NET_SKB_PAD /* add extra space if needed */
@@ -59,9 +84,16 @@ struct enetc_rx_swbd {
/* ENETC overhead: optional extension BD + 1 BD gap */
#define ENETC_TXBDS_NEEDED(val) ((val) + 2)
-/* max # of chained Tx BDs is 15, including head and extension BD */
+/* For LS1028A, max # of chained Tx BDs is 15, including head and
+ * extension BD.
+ */
#define ENETC_MAX_SKB_FRAGS 13
-#define ENETC_TXBDS_MAX_NEEDED ENETC_TXBDS_NEEDED(ENETC_MAX_SKB_FRAGS + 1)
+/* For ENETC v4 and later versions, the max # of chained Tx BDs is 63,
+ * including the head and extension BD. Since MAX_SKB_FRAGS ranges from
+ * 17 to 45 and therefore always fits, set ENETC4_MAX_SKB_FRAGS to
+ * MAX_SKB_FRAGS.
+ */
+#define ENETC4_MAX_SKB_FRAGS MAX_SKB_FRAGS
+#define ENETC_TXBDS_MAX_NEEDED(x) ENETC_TXBDS_NEEDED((x) + 1)
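+/* e.g. ENETC_TXBDS_MAX_NEEDED(ENETC_MAX_SKB_FRAGS) = 13 fragment BDs +
+ * 1 head BD + 2 (extension BD + gap) = 16 BDs per frame
+ */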
struct enetc_ring_stats {
unsigned int packets;
@@ -231,9 +263,12 @@ enum enetc_errata {
#define ENETC_SI_F_PSFP BIT(0)
#define ENETC_SI_F_QBV BIT(1)
#define ENETC_SI_F_QBU BIT(2)
+#define ENETC_SI_F_LSO BIT(3)
struct enetc_drvdata {
u32 pmac_offset; /* Only valid for PSI which supports 802.1Qbu */
+ u8 tx_csum:1;
+ u8 max_frags;
u64 sysclk_freq;
const struct ethtool_ops *eth_ops;
};
@@ -244,6 +279,19 @@ struct enetc_platform_info {
const struct enetc_drvdata *data;
};
+struct enetc_si;
+
+/*
+ * This structure defines some common hooks for the ENETC PSI and VSI.
+ * In addition, since the VSI only uses struct enetc_si as its private
+ * driver data, this structure also defines some hooks specific to the
+ * VSI. VSI-specific hooks use the 'vf_*()' naming format.
+ */
+struct enetc_si_ops {
+ int (*get_rss_table)(struct enetc_si *si, u32 *table, int count);
+ int (*set_rss_table)(struct enetc_si *si, const u32 *table, int count);
+};
+
/* PCI IEP device data */
struct enetc_si {
struct pci_dev *pdev;
@@ -252,7 +300,10 @@ struct enetc_si {
struct net_device *ndev; /* back ref. */
- struct enetc_cbdr cbd_ring;
+ union {
+ struct enetc_cbdr cbd_ring; /* Only ENETC 1.0 */
+ struct ntmp_user ntmp_user; /* ENETC 4.1 and later */
+ };
int num_rx_rings; /* how many rings are available in the SI */
int num_tx_rings;
@@ -262,6 +313,11 @@ struct enetc_si {
u16 revision;
int hw_features;
const struct enetc_drvdata *drvdata;
+ const struct enetc_si_ops *ops;
+
+ struct workqueue_struct *workqueue;
+ struct work_struct rx_mode_task;
+ struct dentry *debugfs_root;
};
#define ENETC_SI_ALIGN 32
@@ -341,6 +397,8 @@ enum enetc_active_offloads {
ENETC_F_QBV = BIT(9),
ENETC_F_QCI = BIT(10),
ENETC_F_QBU = BIT(11),
+ ENETC_F_TXCSUM = BIT(12),
+ ENETC_F_LSO = BIT(13),
};
enum enetc_flags_bit {
@@ -375,6 +433,7 @@ struct enetc_ndev_priv {
u16 msg_enable;
u8 preemptible_tcs;
+ u8 max_frags; /* The maximum number of BDs for fragments */
enum enetc_active_offloads active_offloads;
@@ -441,6 +500,9 @@ int enetc_alloc_si_resources(struct enetc_ndev_priv *priv);
void enetc_free_si_resources(struct enetc_ndev_priv *priv);
int enetc_configure_si(struct enetc_ndev_priv *priv);
int enetc_get_driver_data(struct enetc_si *si);
+void enetc_add_mac_addr_ht_filter(struct enetc_mac_filter *filter,
+ const unsigned char *addr);
+void enetc_reset_mac_addr_filter(struct enetc_mac_filter *filter);
int enetc_open(struct net_device *ndev);
int enetc_close(struct net_device *ndev);
@@ -456,6 +518,12 @@ int enetc_setup_bpf(struct net_device *ndev, struct netdev_bpf *bpf);
int enetc_xdp_xmit(struct net_device *ndev, int num_frames,
struct xdp_frame **frames, u32 flags);
+int enetc_hwtstamp_get(struct net_device *ndev,
+ struct kernel_hwtstamp_config *config);
+int enetc_hwtstamp_set(struct net_device *ndev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
+
/* ethtool */
extern const struct ethtool_ops enetc_pf_ethtool_ops;
extern const struct ethtool_ops enetc4_pf_ethtool_ops;
@@ -468,15 +536,19 @@ void enetc_mm_commit_preemptible_tcs(struct enetc_ndev_priv *priv);
int enetc_setup_cbdr(struct device *dev, struct enetc_hw *hw, int bd_count,
struct enetc_cbdr *cbdr);
void enetc_teardown_cbdr(struct enetc_cbdr *cbdr);
+int enetc4_setup_cbdr(struct enetc_si *si);
+void enetc4_teardown_cbdr(struct enetc_si *si);
int enetc_set_mac_flt_entry(struct enetc_si *si, int index,
char *mac_addr, int si_map);
int enetc_clear_mac_flt_entry(struct enetc_si *si, int index);
int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse,
int index);
-void enetc_set_rss_key(struct enetc_hw *hw, const u8 *bytes);
+void enetc_set_rss_key(struct enetc_si *si, const u8 *bytes);
int enetc_get_rss_table(struct enetc_si *si, u32 *table, int count);
int enetc_set_rss_table(struct enetc_si *si, const u32 *table, int count);
int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd);
+int enetc4_get_rss_table(struct enetc_si *si, u32 *table, int count);
+int enetc4_set_rss_table(struct enetc_si *si, const u32 *table, int count);
static inline void *enetc_cbd_alloc_data_mem(struct enetc_si *si,
struct enetc_cbd *cbd,
diff --git a/drivers/net/ethernet/freescale/enetc/enetc4_debugfs.c b/drivers/net/ethernet/freescale/enetc/enetc4_debugfs.c
new file mode 100644
index 000000000000..1b1591dce73d
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc4_debugfs.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright 2025 NXP */
+
+#include <linux/device.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/string_choices.h>
+
+#include "enetc_pf.h"
+#include "enetc4_debugfs.h"
+
+static void enetc_show_si_mac_hash_filter(struct seq_file *s, int i)
+{
+ struct enetc_si *si = s->private;
+ struct enetc_hw *hw = &si->hw;
+ u32 hash_h, hash_l;
+
+ hash_l = enetc_port_rd(hw, ENETC4_PSIUMHFR0(i));
+ hash_h = enetc_port_rd(hw, ENETC4_PSIUMHFR1(i));
+ seq_printf(s, "SI %d unicast MAC hash filter: 0x%08x%08x\n",
+ i, hash_h, hash_l);
+
+ hash_l = enetc_port_rd(hw, ENETC4_PSIMMHFR0(i));
+ hash_h = enetc_port_rd(hw, ENETC4_PSIMMHFR1(i));
+ seq_printf(s, "SI %d multicast MAC hash filter: 0x%08x%08x\n",
+ i, hash_h, hash_l);
+}
+
+static int enetc_mac_filter_show(struct seq_file *s, void *data)
+{
+ struct enetc_si *si = s->private;
+ struct enetc_hw *hw = &si->hw;
+ struct maft_entry_data maft;
+ struct enetc_pf *pf;
+ int i, err, num_si;
+ u32 val;
+
+ pf = enetc_si_priv(si);
+ num_si = pf->caps.num_vsi + 1;
+
+ val = enetc_port_rd(hw, ENETC4_PSIPMMR);
+ for (i = 0; i < num_si; i++) {
+ seq_printf(s, "SI %d Unicast Promiscuous mode: %s\n", i,
+ str_enabled_disabled(PSIPMMR_SI_MAC_UP(i) & val));
+ seq_printf(s, "SI %d Multicast Promiscuous mode: %s\n", i,
+ str_enabled_disabled(PSIPMMR_SI_MAC_MP(i) & val));
+ }
+
+ /* MAC hash filter table */
+ for (i = 0; i < num_si; i++)
+ enetc_show_si_mac_hash_filter(s, i);
+
+ if (!pf->num_mfe)
+ return 0;
+
+ /* MAC address filter table */
+ seq_puts(s, "MAC address filter table\n");
+ for (i = 0; i < pf->num_mfe; i++) {
+ memset(&maft, 0, sizeof(maft));
+ err = ntmp_maft_query_entry(&si->ntmp_user, i, &maft);
+ if (err)
+ return err;
+
+ seq_printf(s, "Entry %d, MAC: %pM, SI bitmap: 0x%04x\n", i,
+ maft.keye.mac_addr, le16_to_cpu(maft.cfge.si_bitmap));
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(enetc_mac_filter);
+
+void enetc_create_debugfs(struct enetc_si *si)
+{
+ struct net_device *ndev = si->ndev;
+ struct dentry *root;
+
+ root = debugfs_create_dir(netdev_name(ndev), NULL);
+ if (IS_ERR(root))
+ return;
+
+ si->debugfs_root = root;
+
+ debugfs_create_file("mac_filter", 0444, root, si, &enetc_mac_filter_fops);
+}
+
+void enetc_remove_debugfs(struct enetc_si *si)
+{
+ debugfs_remove(si->debugfs_root);
+ si->debugfs_root = NULL;
+}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc4_debugfs.h b/drivers/net/ethernet/freescale/enetc/enetc4_debugfs.h
new file mode 100644
index 000000000000..96caca35f79d
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc4_debugfs.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2025 NXP */
+
+#ifndef __ENETC4_DEBUGFS_H
+#define __ENETC4_DEBUGFS_H
+
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+void enetc_create_debugfs(struct enetc_si *si);
+void enetc_remove_debugfs(struct enetc_si *si);
+#else
+static inline void enetc_create_debugfs(struct enetc_si *si)
+{
+}
+
+static inline void enetc_remove_debugfs(struct enetc_si *si)
+{
+}
+#endif
+
+#endif
diff --git a/drivers/net/ethernet/freescale/enetc/enetc4_hw.h b/drivers/net/ethernet/freescale/enetc/enetc4_hw.h
index 26b220677448..aa25b445d301 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc4_hw.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc4_hw.h
@@ -12,6 +12,29 @@
#define NXP_ENETC_VENDOR_ID 0x1131
#define NXP_ENETC_PF_DEV_ID 0xe101
+/**********************Station interface registers************************/
+/* Station interface LSO segmentation flag mask register 0/1 */
+#define ENETC4_SILSOSFMR0 0x1300
+#define SILSOSFMR0_TCP_MID_SEG GENMASK(27, 16)
+#define SILSOSFMR0_TCP_1ST_SEG GENMASK(11, 0)
+#define SILSOSFMR0_VAL_SET(first, mid) (FIELD_PREP(SILSOSFMR0_TCP_MID_SEG, mid) | \
+ FIELD_PREP(SILSOSFMR0_TCP_1ST_SEG, first))
+
+#define ENETC4_SILSOSFMR1 0x1304
+#define SILSOSFMR1_TCP_LAST_SEG GENMASK(11, 0)
+#define ENETC4_TCP_FLAGS_FIN BIT(0)
+#define ENETC4_TCP_FLAGS_SYN BIT(1)
+#define ENETC4_TCP_FLAGS_RST BIT(2)
+#define ENETC4_TCP_FLAGS_PSH BIT(3)
+#define ENETC4_TCP_FLAGS_ACK BIT(4)
+#define ENETC4_TCP_FLAGS_URG BIT(5)
+#define ENETC4_TCP_FLAGS_ECE BIT(6)
+#define ENETC4_TCP_FLAGS_CWR BIT(7)
+#define ENETC4_TCP_FLAGS_NS BIT(8)
+/* According to tso_build_hdr(), clear all special flags for non-last segments. */
+#define ENETC4_TCP_NL_SEG_FLAGS_DMASK (ENETC4_TCP_FLAGS_FIN | \
+ ENETC4_TCP_FLAGS_RST | ENETC4_TCP_FLAGS_PSH)
+
/***************************ENETC port registers**************************/
#define ENETC4_ECAPR0 0x0
#define ECAPR0_RFS BIT(2)
@@ -76,6 +99,18 @@
#define ENETC4_PSICFGR2(a) ((a) * 0x80 + 0x2018)
#define PSICFGR2_NUM_MSIX GENMASK(5, 0)
+/* Port station interface a unicast MAC hash filter register 0/1 */
+#define ENETC4_PSIUMHFR0(a) ((a) * 0x80 + 0x2050)
+#define ENETC4_PSIUMHFR1(a) ((a) * 0x80 + 0x2054)
+
+/* Port station interface a multicast MAC hash filter register 0/1 */
+#define ENETC4_PSIMMHFR0(a) ((a) * 0x80 + 0x2058)
+#define ENETC4_PSIMMHFR1(a) ((a) * 0x80 + 0x205c)
+
+/* Port station interface a VLAN hash filter register 0/1 */
+#define ENETC4_PSIVHFR0(a) ((a) * 0x80 + 0x2060)
+#define ENETC4_PSIVHFR1(a) ((a) * 0x80 + 0x2064)
+
#define ENETC4_PMCAPR 0x4004
#define PMCAPR_HD BIT(8)
#define PMCAPR_FP GENMASK(10, 9)
diff --git a/drivers/net/ethernet/freescale/enetc/enetc4_pf.c b/drivers/net/ethernet/freescale/enetc/enetc4_pf.c
index fc41078c4f5d..b3dc1afeefd1 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc4_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc4_pf.c
@@ -8,9 +8,19 @@
#include <linux/unaligned.h>
#include "enetc_pf_common.h"
+#include "enetc4_debugfs.h"
#define ENETC_SI_MAX_RING_NUM 8
+#define ENETC_MAC_FILTER_TYPE_UC BIT(0)
+#define ENETC_MAC_FILTER_TYPE_MC BIT(1)
+#define ENETC_MAC_FILTER_TYPE_ALL (ENETC_MAC_FILTER_TYPE_UC | \
+ ENETC_MAC_FILTER_TYPE_MC)
+
+struct enetc_mac_addr {
+ u8 addr[ETH_ALEN];
+};
+
static void enetc4_get_port_caps(struct enetc_pf *pf)
{
struct enetc_hw *hw = &pf->si->hw;
@@ -26,6 +36,9 @@ static void enetc4_get_port_caps(struct enetc_pf *pf)
val = enetc_port_rd(hw, ENETC4_PMCAPR);
pf->caps.half_duplex = (val & PMCAPR_HD) ? 1 : 0;
+
+ val = enetc_port_rd(hw, ENETC4_PSIMAFCAPR);
+ pf->caps.mac_filter_num = val & PSIMAFCAPR_NUM_MAC_AFTE;
}
static void enetc4_pf_set_si_primary_mac(struct enetc_hw *hw, int si,
@@ -56,6 +69,200 @@ static void enetc4_pf_get_si_primary_mac(struct enetc_hw *hw, int si,
put_unaligned_le16(lower, addr + 4);
}
+static void enetc4_pf_set_si_mac_promisc(struct enetc_hw *hw, int si,
+ bool uc_promisc, bool mc_promisc)
+{
+ u32 val = enetc_port_rd(hw, ENETC4_PSIPMMR);
+
+ if (uc_promisc)
+ val |= PSIPMMR_SI_MAC_UP(si);
+ else
+ val &= ~PSIPMMR_SI_MAC_UP(si);
+
+ if (mc_promisc)
+ val |= PSIPMMR_SI_MAC_MP(si);
+ else
+ val &= ~PSIPMMR_SI_MAC_MP(si);
+
+ enetc_port_wr(hw, ENETC4_PSIPMMR, val);
+}
+
+static void enetc4_pf_set_si_uc_hash_filter(struct enetc_hw *hw, int si,
+ u64 hash)
+{
+ enetc_port_wr(hw, ENETC4_PSIUMHFR0(si), lower_32_bits(hash));
+ enetc_port_wr(hw, ENETC4_PSIUMHFR1(si), upper_32_bits(hash));
+}
+
+static void enetc4_pf_set_si_mc_hash_filter(struct enetc_hw *hw, int si,
+ u64 hash)
+{
+ enetc_port_wr(hw, ENETC4_PSIMMHFR0(si), lower_32_bits(hash));
+ enetc_port_wr(hw, ENETC4_PSIMMHFR1(si), upper_32_bits(hash));
+}
+
+static void enetc4_pf_set_loopback(struct net_device *ndev, bool en)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_si *si = priv->si;
+ u32 val;
+
+ val = enetc_port_mac_rd(si, ENETC4_PM_CMD_CFG(0));
+ val = u32_replace_bits(val, en ? 1 : 0, PM_CMD_CFG_LOOP_EN);
+ /* Default to MAC-level loopback mode when loopback is enabled. */
+ val = u32_replace_bits(val, en ? LPBCK_MODE_MAC_LEVEL : 0,
+ PM_CMD_CFG_LPBK_MODE);
+
+ enetc_port_mac_wr(si, ENETC4_PM_CMD_CFG(0), val);
+}
+
+static void enetc4_pf_clear_maft_entries(struct enetc_pf *pf)
+{
+ int i;
+
+ for (i = 0; i < pf->num_mfe; i++)
+ ntmp_maft_delete_entry(&pf->si->ntmp_user, i);
+
+ pf->num_mfe = 0;
+}
+
+static int enetc4_pf_add_maft_entries(struct enetc_pf *pf,
+ struct enetc_mac_addr *mac,
+ int mac_cnt)
+{
+ struct maft_entry_data maft = {};
+ u16 si_bit = BIT(0);
+ int i, err;
+
+ maft.cfge.si_bitmap = cpu_to_le16(si_bit);
+ for (i = 0; i < mac_cnt; i++) {
+ ether_addr_copy(maft.keye.mac_addr, mac[i].addr);
+ err = ntmp_maft_add_entry(&pf->si->ntmp_user, i, &maft);
+ if (unlikely(err)) {
+ pf->num_mfe = i;
+ goto clear_maft_entries;
+ }
+ }
+
+ pf->num_mfe = mac_cnt;
+
+ return 0;
+
+clear_maft_entries:
+ enetc4_pf_clear_maft_entries(pf);
+
+ return err;
+}
+
+static int enetc4_pf_set_uc_exact_filter(struct enetc_pf *pf)
+{
+ int max_num_mfe = pf->caps.mac_filter_num;
+ struct enetc_mac_filter mac_filter = {};
+ struct net_device *ndev = pf->si->ndev;
+ struct enetc_hw *hw = &pf->si->hw;
+ struct enetc_mac_addr *mac_tbl;
+ struct netdev_hw_addr *ha;
+ int i = 0, err;
+ int mac_cnt;
+
+ netif_addr_lock_bh(ndev);
+
+ mac_cnt = netdev_uc_count(ndev);
+ if (!mac_cnt) {
+ netif_addr_unlock_bh(ndev);
+ /* clear both MAC hash and exact filters */
+ enetc4_pf_set_si_uc_hash_filter(hw, 0, 0);
+ enetc4_pf_clear_maft_entries(pf);
+
+ return 0;
+ }
+
+ if (mac_cnt > max_num_mfe) {
+ err = -ENOSPC;
+ goto unlock_netif_addr;
+ }
+
+ mac_tbl = kcalloc(mac_cnt, sizeof(*mac_tbl), GFP_ATOMIC);
+ if (!mac_tbl) {
+ err = -ENOMEM;
+ goto unlock_netif_addr;
+ }
+
+ netdev_for_each_uc_addr(ha, ndev) {
+ enetc_add_mac_addr_ht_filter(&mac_filter, ha->addr);
+ ether_addr_copy(mac_tbl[i++].addr, ha->addr);
+ }
+
+ netif_addr_unlock_bh(ndev);
+
+ /* Install a temporary unicast hash filter to avoid Rx loss while
+ * the MAC address filter table is being updated.
+ */
+ enetc4_pf_set_si_uc_hash_filter(hw, 0, *mac_filter.mac_hash_table);
+ enetc4_pf_clear_maft_entries(pf);
+
+ if (!enetc4_pf_add_maft_entries(pf, mac_tbl, i))
+ enetc4_pf_set_si_uc_hash_filter(hw, 0, 0);
+
+ kfree(mac_tbl);
+
+ return 0;
+
+unlock_netif_addr:
+ netif_addr_unlock_bh(ndev);
+
+ return err;
+}
+
+static void enetc4_pf_set_mac_hash_filter(struct enetc_pf *pf, int type)
+{
+ struct net_device *ndev = pf->si->ndev;
+ struct enetc_mac_filter *mac_filter;
+ struct enetc_hw *hw = &pf->si->hw;
+ struct netdev_hw_addr *ha;
+
+ netif_addr_lock_bh(ndev);
+ if (type & ENETC_MAC_FILTER_TYPE_UC) {
+ mac_filter = &pf->mac_filter[UC];
+ enetc_reset_mac_addr_filter(mac_filter);
+ netdev_for_each_uc_addr(ha, ndev)
+ enetc_add_mac_addr_ht_filter(mac_filter, ha->addr);
+
+ enetc4_pf_set_si_uc_hash_filter(hw, 0,
+ *mac_filter->mac_hash_table);
+ }
+
+ if (type & ENETC_MAC_FILTER_TYPE_MC) {
+ mac_filter = &pf->mac_filter[MC];
+ enetc_reset_mac_addr_filter(mac_filter);
+ netdev_for_each_mc_addr(ha, ndev)
+ enetc_add_mac_addr_ht_filter(mac_filter, ha->addr);
+
+ enetc4_pf_set_si_mc_hash_filter(hw, 0,
+ *mac_filter->mac_hash_table);
+ }
+ netif_addr_unlock_bh(ndev);
+}
+
+static void enetc4_pf_set_mac_filter(struct enetc_pf *pf, int type)
+{
+ /* The MAC address filter table (MAFT) currently has only 4 entries,
+ * and the default network configuration already installs multiple
+ * multicast addresses, so the MAFT is used for unicast filtering
+ * only. If the number of unicast addresses exceeds the table
+ * capacity, the MAC hash filter is used instead.
+ */
+ if (type & ENETC_MAC_FILTER_TYPE_UC && enetc4_pf_set_uc_exact_filter(pf)) {
+ /* Fall back to the MAC hash filter */
+ enetc4_pf_set_mac_hash_filter(pf, ENETC_MAC_FILTER_TYPE_UC);
+ /* Clear the old MAC exact filter */
+ enetc4_pf_clear_maft_entries(pf);
+ }
+
+ if (type & ENETC_MAC_FILTER_TYPE_MC)
+ enetc4_pf_set_mac_hash_filter(pf, ENETC_MAC_FILTER_TYPE_MC);
+}
+
static const struct enetc_pf_ops enetc4_pf_ops = {
.set_si_primary_mac = enetc4_pf_set_si_primary_mac,
.get_si_primary_mac = enetc4_pf_get_si_primary_mac,
@@ -226,24 +433,6 @@ static void enetc4_set_trx_frame_size(struct enetc_pf *pf)
enetc4_pf_reset_tc_msdu(&si->hw);
}
-static void enetc4_set_rss_key(struct enetc_hw *hw, const u8 *bytes)
-{
- int i;
-
- for (i = 0; i < ENETC_RSSHASH_KEY_SIZE / 4; i++)
- enetc_port_wr(hw, ENETC4_PRSSKR(i), ((u32 *)bytes)[i]);
-}
-
-static void enetc4_set_default_rss_key(struct enetc_pf *pf)
-{
- u8 hash_key[ENETC_RSSHASH_KEY_SIZE] = {0};
- struct enetc_hw *hw = &pf->si->hw;
-
- /* set up hash key */
- get_random_bytes(hash_key, ENETC_RSSHASH_KEY_SIZE);
- enetc4_set_rss_key(hw, hash_key);
-}
-
static void enetc4_enable_trx(struct enetc_pf *pf)
{
struct enetc_hw *hw = &pf->si->hw;
@@ -256,10 +445,25 @@ static void enetc4_configure_port(struct enetc_pf *pf)
{
enetc4_configure_port_si(pf);
enetc4_set_trx_frame_size(pf);
- enetc4_set_default_rss_key(pf);
+ enetc_set_default_rss_key(pf);
enetc4_enable_trx(pf);
}
+static int enetc4_init_ntmp_user(struct enetc_si *si)
+{
+ struct ntmp_user *user = &si->ntmp_user;
+
+ /* For ENETC 4.1, all table versions are 0 */
+ memset(&user->tbl, 0, sizeof(user->tbl));
+
+ return enetc4_setup_cbdr(si);
+}
+
+static void enetc4_free_ntmp_user(struct enetc_si *si)
+{
+ enetc4_teardown_cbdr(si);
+}
+
static int enetc4_pf_init(struct enetc_pf *pf)
{
struct device *dev = &pf->si->pdev->dev;
@@ -272,17 +476,99 @@ static int enetc4_pf_init(struct enetc_pf *pf)
return err;
}
+ err = enetc4_init_ntmp_user(pf->si);
+ if (err) {
+ dev_err(dev, "Failed to init CBDR\n");
+ return err;
+ }
+
enetc4_configure_port(pf);
return 0;
}
+static void enetc4_pf_free(struct enetc_pf *pf)
+{
+ enetc4_free_ntmp_user(pf->si);
+}
+
+static void enetc4_psi_do_set_rx_mode(struct work_struct *work)
+{
+ struct enetc_si *si = container_of(work, struct enetc_si, rx_mode_task);
+ struct enetc_pf *pf = enetc_si_priv(si);
+ struct net_device *ndev = si->ndev;
+ struct enetc_hw *hw = &si->hw;
+ bool uc_promisc = false;
+ bool mc_promisc = false;
+ int type = 0;
+
+ rtnl_lock();
+
+ if (ndev->flags & IFF_PROMISC) {
+ uc_promisc = true;
+ mc_promisc = true;
+ } else if (ndev->flags & IFF_ALLMULTI) {
+ mc_promisc = true;
+ type = ENETC_MAC_FILTER_TYPE_UC;
+ } else {
+ type = ENETC_MAC_FILTER_TYPE_ALL;
+ }
+
+ enetc4_pf_set_si_mac_promisc(hw, 0, uc_promisc, mc_promisc);
+
+ if (uc_promisc) {
+ enetc4_pf_set_si_uc_hash_filter(hw, 0, 0);
+ enetc4_pf_clear_maft_entries(pf);
+ }
+
+ if (mc_promisc)
+ enetc4_pf_set_si_mc_hash_filter(hw, 0, 0);
+
+ /* Set new MAC filter */
+ enetc4_pf_set_mac_filter(pf, type);
+
+ rtnl_unlock();
+}
+
+static void enetc4_pf_set_rx_mode(struct net_device *ndev)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_si *si = priv->si;
+
+ queue_work(si->workqueue, &si->rx_mode_task);
+}
+
+static int enetc4_pf_set_features(struct net_device *ndev,
+ netdev_features_t features)
+{
+ netdev_features_t changed = ndev->features ^ features;
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+
+ if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
+ bool promisc_en = !(features & NETIF_F_HW_VLAN_CTAG_FILTER);
+
+ enetc4_pf_set_si_vlan_promisc(hw, 0, promisc_en);
+ }
+
+ if (changed & NETIF_F_LOOPBACK)
+ enetc4_pf_set_loopback(ndev, !!(features & NETIF_F_LOOPBACK));
+
+ enetc_set_features(ndev, features);
+
+ return 0;
+}
+
static const struct net_device_ops enetc4_ndev_ops = {
.ndo_open = enetc_open,
.ndo_stop = enetc_close,
.ndo_start_xmit = enetc_xmit,
.ndo_get_stats = enetc_get_stats,
.ndo_set_mac_address = enetc_pf_set_mac_addr,
+ .ndo_set_rx_mode = enetc4_pf_set_rx_mode,
+ .ndo_set_features = enetc4_pf_set_features,
+ .ndo_vlan_rx_add_vid = enetc_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = enetc_vlan_rx_del_vid,
};
static struct phylink_pcs *
@@ -617,6 +903,19 @@ static void enetc4_link_deinit(struct enetc_ndev_priv *priv)
enetc_mdiobus_destroy(pf);
}
+static int enetc4_psi_wq_task_init(struct enetc_si *si)
+{
+ char wq_name[24];
+
+ INIT_WORK(&si->rx_mode_task, enetc4_psi_do_set_rx_mode);
+ snprintf(wq_name, sizeof(wq_name), "enetc-%s", pci_name(si->pdev));
+ si->workqueue = create_singlethread_workqueue(wq_name);
+ if (!si->workqueue)
+ return -ENOMEM;
+
+ return 0;
+}
+
static int enetc4_pf_netdev_create(struct enetc_si *si)
{
struct device *dev = &si->pdev->dev;
@@ -657,6 +956,12 @@ static int enetc4_pf_netdev_create(struct enetc_si *si)
if (err)
goto err_link_init;
+ err = enetc4_psi_wq_task_init(si);
+ if (err) {
+ dev_err(dev, "Failed to init workqueue\n");
+ goto err_wq_init;
+ }
+
err = register_netdev(ndev);
if (err) {
dev_err(dev, "Failed to register netdev\n");
@@ -666,13 +971,14 @@ static int enetc4_pf_netdev_create(struct enetc_si *si)
return 0;
err_reg_netdev:
+ destroy_workqueue(si->workqueue);
+err_wq_init:
enetc4_link_deinit(priv);
err_link_init:
enetc_free_msix(priv);
err_alloc_msix:
err_config_si:
err_clk_get:
- mutex_destroy(&priv->mm_lock);
free_netdev(ndev);
return err;
@@ -684,10 +990,18 @@ static void enetc4_pf_netdev_destroy(struct enetc_si *si)
struct net_device *ndev = si->ndev;
unregister_netdev(ndev);
+ cancel_work(&si->rx_mode_task);
+ destroy_workqueue(si->workqueue);
+ enetc4_link_deinit(priv);
enetc_free_msix(priv);
free_netdev(ndev);
}
+static const struct enetc_si_ops enetc4_psi_ops = {
+ .get_rss_table = enetc4_get_rss_table,
+ .set_rss_table = enetc4_set_rss_table,
+};
+
static int enetc4_pf_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -712,6 +1026,7 @@ static int enetc4_pf_probe(struct pci_dev *pdev,
"Couldn't map PF only space\n");
si->revision = enetc_get_ip_revision(&si->hw);
+ si->ops = &enetc4_psi_ops;
err = enetc_get_driver_data(si);
if (err)
return dev_err_probe(dev, err,
@@ -728,14 +1043,28 @@ static int enetc4_pf_probe(struct pci_dev *pdev,
enetc_get_si_caps(si);
- return enetc4_pf_netdev_create(si);
+ err = enetc4_pf_netdev_create(si);
+ if (err)
+ goto err_netdev_create;
+
+ enetc_create_debugfs(si);
+
+ return 0;
+
+err_netdev_create:
+ enetc4_pf_free(pf);
+
+ return err;
}
static void enetc4_pf_remove(struct pci_dev *pdev)
{
struct enetc_si *si = pci_get_drvdata(pdev);
+ struct enetc_pf *pf = enetc_si_priv(si);
+ enetc_remove_debugfs(si);
enetc4_pf_netdev_destroy(si);
+ enetc4_pf_free(pf);
}
static const struct pci_device_id enetc4_pf_id_table[] = {
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c b/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
index 20bfdf7fb4b4..3d5f31879d5c 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
@@ -60,6 +60,44 @@ void enetc_teardown_cbdr(struct enetc_cbdr *cbdr)
}
EXPORT_SYMBOL_GPL(enetc_teardown_cbdr);
+int enetc4_setup_cbdr(struct enetc_si *si)
+{
+ struct ntmp_user *user = &si->ntmp_user;
+ struct device *dev = &si->pdev->dev;
+ struct enetc_hw *hw = &si->hw;
+ struct netc_cbdr_regs regs;
+
+ user->cbdr_num = 1;
+ user->dev = dev;
+ user->ring = devm_kcalloc(dev, user->cbdr_num,
+ sizeof(struct netc_cbdr), GFP_KERNEL);
+ if (!user->ring)
+ return -ENOMEM;
+
+ /* set CBDR cache attributes */
+ enetc_wr(hw, ENETC_SICAR2,
+ ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);
+
+ regs.pir = hw->reg + ENETC_SICBDRPIR;
+ regs.cir = hw->reg + ENETC_SICBDRCIR;
+ regs.mr = hw->reg + ENETC_SICBDRMR;
+ regs.bar0 = hw->reg + ENETC_SICBDRBAR0;
+ regs.bar1 = hw->reg + ENETC_SICBDRBAR1;
+ regs.lenr = hw->reg + ENETC_SICBDRLENR;
+
+ return ntmp_init_cbdr(user->ring, dev, &regs);
+}
+EXPORT_SYMBOL_GPL(enetc4_setup_cbdr);
+
+void enetc4_teardown_cbdr(struct enetc_si *si)
+{
+ struct ntmp_user *user = &si->ntmp_user;
+
+ ntmp_free_cbdr(user->ring);
+ user->dev = NULL;
+}
+EXPORT_SYMBOL_GPL(enetc4_teardown_cbdr);
+
static void enetc_clean_cbdr(struct enetc_cbdr *ring)
{
struct enetc_cbd *dest_cbd;
@@ -256,3 +294,15 @@ int enetc_set_rss_table(struct enetc_si *si, const u32 *table, int count)
return enetc_cmd_rss_table(si, (u32 *)table, count, false);
}
EXPORT_SYMBOL_GPL(enetc_set_rss_table);
+
+int enetc4_get_rss_table(struct enetc_si *si, u32 *table, int count)
+{
+ return ntmp_rsst_query_entry(&si->ntmp_user, table, count);
+}
+EXPORT_SYMBOL_GPL(enetc4_get_rss_table);
+
+int enetc4_set_rss_table(struct enetc_si *si, const u32 *table, int count)
+{
+ return ntmp_rsst_update_entry(&si->ntmp_user, table, count);
+}
+EXPORT_SYMBOL_GPL(enetc4_set_rss_table);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
index bf34b5bb1e35..d38cd36be4a6 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
@@ -141,7 +141,7 @@ static const struct {
static const struct {
int reg;
- char name[ETH_GSTRING_LEN];
+ char name[ETH_GSTRING_LEN] __nonstring;
} enetc_port_counters[] = {
{ ENETC_PM_REOCT(0), "MAC rx ethernet octets" },
{ ENETC_PM_RALN(0), "MAC rx alignment errors" },
@@ -264,7 +264,7 @@ static void enetc_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
break;
for (i = 0; i < ARRAY_SIZE(enetc_port_counters); i++)
- ethtool_puts(&data, enetc_port_counters[i].name);
+ ethtool_cpy(&data, enetc_port_counters[i].name);
break;
}
@@ -625,6 +625,29 @@ static int enetc_get_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *rxnfc,
return 0;
}
+/* i.MX95 ENETC does not support the RFS table, but the ingress port
+ * filter table can be used to implement a Wake-on-LAN filter or to drop
+ * matched flows, so the implementation differs from enetc_get_rxnfc()
+ * and enetc_set_rxnfc(). Therefore, add enetc4_get_rxnfc() for the
+ * ENETC v4 PF.
+ */
+static int enetc4_get_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *rxnfc,
+ u32 *rule_locs)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+
+ switch (rxnfc->cmd) {
+ case ETHTOOL_GRXRINGS:
+ rxnfc->data = priv->num_rx_rings;
+ break;
+ case ETHTOOL_GRXFH:
+ return enetc_get_rsshash(rxnfc);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
static int enetc_set_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *rxnfc)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
@@ -677,36 +700,53 @@ static u32 enetc_get_rxfh_indir_size(struct net_device *ndev)
return priv->si->num_rss;
}
+static int enetc_get_rss_key_base(struct enetc_si *si)
+{
+ if (is_enetc_rev1(si))
+ return ENETC_PRSSK(0);
+
+ return ENETC4_PRSSKR(0);
+}
+
+static void enetc_get_rss_key(struct enetc_si *si, const u8 *key)
+{
+ int base = enetc_get_rss_key_base(si);
+ struct enetc_hw *hw = &si->hw;
+ int i;
+
+ for (i = 0; i < ENETC_RSSHASH_KEY_SIZE / 4; i++)
+ ((u32 *)key)[i] = enetc_port_rd(hw, base + i * 4);
+}
+
static int enetc_get_rxfh(struct net_device *ndev,
struct ethtool_rxfh_param *rxfh)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
- struct enetc_hw *hw = &priv->si->hw;
- int err = 0, i;
+ struct enetc_si *si = priv->si;
+ int err = 0;
/* return hash function */
rxfh->hfunc = ETH_RSS_HASH_TOP;
/* return hash key */
- if (rxfh->key && hw->port)
- for (i = 0; i < ENETC_RSSHASH_KEY_SIZE / 4; i++)
- ((u32 *)rxfh->key)[i] = enetc_port_rd(hw,
- ENETC_PRSSK(i));
+ if (rxfh->key && enetc_si_is_pf(si))
+ enetc_get_rss_key(si, rxfh->key);
/* return RSS table */
if (rxfh->indir)
- err = enetc_get_rss_table(priv->si, rxfh->indir,
- priv->si->num_rss);
+ err = si->ops->get_rss_table(si, rxfh->indir, si->num_rss);
return err;
}
-void enetc_set_rss_key(struct enetc_hw *hw, const u8 *bytes)
+void enetc_set_rss_key(struct enetc_si *si, const u8 *bytes)
{
+ int base = enetc_get_rss_key_base(si);
+ struct enetc_hw *hw = &si->hw;
int i;
for (i = 0; i < ENETC_RSSHASH_KEY_SIZE / 4; i++)
- enetc_port_wr(hw, ENETC_PRSSK(i), ((u32 *)bytes)[i]);
+ enetc_port_wr(hw, base + i * 4, ((u32 *)bytes)[i]);
}
EXPORT_SYMBOL_GPL(enetc_set_rss_key);
@@ -715,17 +755,16 @@ static int enetc_set_rxfh(struct net_device *ndev,
struct netlink_ext_ack *extack)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
- struct enetc_hw *hw = &priv->si->hw;
+ struct enetc_si *si = priv->si;
int err = 0;
/* set hash key, if PF */
- if (rxfh->key && hw->port)
- enetc_set_rss_key(hw, rxfh->key);
+ if (rxfh->key && enetc_si_is_pf(si))
+ enetc_set_rss_key(si, rxfh->key);
/* set RSS table */
if (rxfh->indir)
- err = enetc_set_rss_table(priv->si, rxfh->indir,
- priv->si->num_rss);
+ err = si->ops->set_rss_table(si, rxfh->indir, si->num_rss);
return err;
}
@@ -832,6 +871,7 @@ static int enetc_set_coalesce(struct net_device *ndev,
static int enetc_get_ts_info(struct net_device *ndev,
struct kernel_ethtool_ts_info *info)
{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
int *phc_idx;
phc_idx = symbol_get(enetc_phc_index);
@@ -852,8 +892,10 @@ static int enetc_get_ts_info(struct net_device *ndev,
SOF_TIMESTAMPING_TX_SOFTWARE;
info->tx_types = (1 << HWTSTAMP_TX_OFF) |
- (1 << HWTSTAMP_TX_ON) |
- (1 << HWTSTAMP_TX_ONESTEP_SYNC);
+ (1 << HWTSTAMP_TX_ON);
+
+ if (enetc_si_is_pf(priv->si))
+ info->tx_types |= (1 << HWTSTAMP_TX_ONESTEP_SYNC);
info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_ALL);
@@ -1237,6 +1279,11 @@ const struct ethtool_ops enetc4_pf_ethtool_ops = {
.set_wol = enetc_set_wol,
.get_pauseparam = enetc_get_pauseparam,
.set_pauseparam = enetc_set_pauseparam,
+ .get_rxnfc = enetc4_get_rxnfc,
+ .get_rxfh_key_size = enetc_get_rxfh_key_size,
+ .get_rxfh_indir_size = enetc_get_rxfh_indir_size,
+ .get_rxfh = enetc_get_rxfh,
+ .set_rxfh = enetc_set_rxfh,
};
void enetc_set_ethtool_ops(struct net_device *ndev)
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
index 7c3285584f8a..53e8d18c7a34 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
@@ -23,11 +23,9 @@
#define ENETC_SICTR0 0x18
#define ENETC_SICTR1 0x1c
#define ENETC_SIPCAPR0 0x20
-#define ENETC_SIPCAPR0_PSFP BIT(9)
#define ENETC_SIPCAPR0_RSS BIT(8)
-#define ENETC_SIPCAPR0_QBV BIT(4)
-#define ENETC_SIPCAPR0_QBU BIT(3)
#define ENETC_SIPCAPR0_RFS BIT(2)
+#define ENETC_SIPCAPR0_LSO BIT(1)
#define ENETC_SIPCAPR1 0x24
#define ENETC_SITGTGR 0x30
#define ENETC_SIRBGCR 0x38
@@ -194,6 +192,9 @@ enum enetc_bdr_type {TX, RX};
#define ENETC_PCAPR0 0x0900
#define ENETC_PCAPR0_RXBDR(val) ((val) >> 24)
#define ENETC_PCAPR0_TXBDR(val) (((val) >> 16) & 0xff)
+#define ENETC_PCAPR0_PSFP BIT(9)
+#define ENETC_PCAPR0_QBV BIT(4)
+#define ENETC_PCAPR0_QBU BIT(3)
#define ENETC_PCAPR1 0x0904
#define ENETC_PSICFGR0(n) (0x0940 + (n) * 0xc) /* n = SI index */
#define ENETC_PSICFGR0_SET_TXBDR(val) ((val) & 0xff)
@@ -506,7 +507,7 @@ static inline u64 _enetc_rd_reg64(void __iomem *reg)
tmp = ioread32(reg + 4);
} while (high != tmp);
- return le64_to_cpu((__le64)high << 32 | low);
+ return (u64)high << 32 | low;
}
#endif
@@ -554,11 +555,23 @@ static inline u64 _enetc_rd_reg64_wa(void __iomem *reg)
union enetc_tx_bd {
struct {
__le64 addr;
- __le16 buf_len;
+ union {
+ __le16 buf_len;
+ __le16 hdr_len; /* For LSO, ENETC 4.1 and later */
+ };
__le16 frm_len;
union {
struct {
- u8 reserved[3];
+ u8 l3_aux0;
+#define ENETC_TX_BD_L3_START GENMASK(6, 0)
+#define ENETC_TX_BD_IPCS BIT(7)
+ u8 l3_aux1;
+#define ENETC_TX_BD_L3_HDR_LEN GENMASK(6, 0)
+#define ENETC_TX_BD_L3T BIT(7)
+ u8 l4_aux;
+#define ENETC_TX_BD_L4T GENMASK(7, 5)
+#define ENETC_TXBD_L4T_UDP 1
+#define ENETC_TXBD_L4T_TCP 2
u8 flags;
}; /* default layout */
__le32 txstart;
@@ -569,23 +582,27 @@ union enetc_tx_bd {
__le32 tstamp;
__le16 tpid;
__le16 vid;
- u8 reserved[6];
+ __le16 lso_sg_size; /* For ENETC 4.1 and later */
+ __le16 frm_len_ext; /* For ENETC 4.1 and later */
+ u8 reserved[2];
u8 e_flags;
u8 flags;
} ext; /* Tx BD extension */
struct {
__le32 tstamp;
- u8 reserved[10];
+ u8 reserved[8];
+ __le16 lso_err_count; /* For ENETC 4.1 and later */
u8 status;
u8 flags;
} wb; /* writeback descriptor */
};
enum enetc_txbd_flags {
- ENETC_TXBD_FLAGS_RES0 = BIT(0), /* reserved */
+ ENETC_TXBD_FLAGS_L4CS = BIT(0), /* For ENETC 4.1 and later */
ENETC_TXBD_FLAGS_TSE = BIT(1),
+ ENETC_TXBD_FLAGS_LSO = BIT(1), /* For ENETC 4.1 and later */
ENETC_TXBD_FLAGS_W = BIT(2),
- ENETC_TXBD_FLAGS_RES3 = BIT(3), /* reserved */
+ ENETC_TXBD_FLAGS_CSUM_LSO = BIT(3), /* For ENETC 4.1 and later */
ENETC_TXBD_FLAGS_TXSTART = BIT(4),
ENETC_TXBD_FLAGS_EX = BIT(6),
ENETC_TXBD_FLAGS_F = BIT(7)
@@ -654,6 +671,8 @@ union enetc_rx_bd {
#define ENETC_CBD_FLAGS_SF BIT(7) /* short format */
#define ENETC_CBD_STATUS_MASK 0xf
+#define ENETC_TPID_8021Q 0
+
struct enetc_cmd_rfse {
u8 smac_h[6];
u8 smac_m[6];
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index c47b4a743d93..f63a29e2e031 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -72,30 +72,6 @@ static void enetc_set_isol_vlan(struct enetc_hw *hw, int si, u16 vlan, u8 qos)
enetc_port_wr(hw, ENETC_PSIVLANR(si), val);
}
-static int enetc_mac_addr_hash_idx(const u8 *addr)
-{
- u64 fold = __swab64(ether_addr_to_u64(addr)) >> 16;
- u64 mask = 0;
- int res = 0;
- int i;
-
- for (i = 0; i < 8; i++)
- mask |= BIT_ULL(i * 6);
-
- for (i = 0; i < 6; i++)
- res |= (hweight64(fold & (mask << i)) & 0x1) << i;
-
- return res;
-}
-
-static void enetc_reset_mac_addr_filter(struct enetc_mac_filter *filter)
-{
- filter->mac_addr_cnt = 0;
-
- bitmap_zero(filter->mac_hash_table,
- ENETC_MADDR_HASH_TBL_SZ);
-}
-
static void enetc_add_mac_addr_em_filter(struct enetc_mac_filter *filter,
const unsigned char *addr)
{
@@ -104,16 +80,6 @@ static void enetc_add_mac_addr_em_filter(struct enetc_mac_filter *filter,
filter->mac_addr_cnt++;
}
-static void enetc_add_mac_addr_ht_filter(struct enetc_mac_filter *filter,
- const unsigned char *addr)
-{
- int idx = enetc_mac_addr_hash_idx(addr);
-
- /* add hash table entry */
- __set_bit(idx, filter->mac_hash_table);
- filter->mac_addr_cnt++;
-}
-
static void enetc_clear_mac_ht_flt(struct enetc_si *si, int si_idx, int type)
{
bool err = si->errata & ENETC_ERR_UCMCSWP;
@@ -250,67 +216,6 @@ static void enetc_pf_set_rx_mode(struct net_device *ndev)
enetc_port_wr(hw, ENETC_PSIPMR, psipmr);
}
-static void enetc_set_vlan_ht_filter(struct enetc_hw *hw, int si_idx,
- unsigned long hash)
-{
- enetc_port_wr(hw, ENETC_PSIVHFR0(si_idx), lower_32_bits(hash));
- enetc_port_wr(hw, ENETC_PSIVHFR1(si_idx), upper_32_bits(hash));
-}
-
-static int enetc_vid_hash_idx(unsigned int vid)
-{
- int res = 0;
- int i;
-
- for (i = 0; i < 6; i++)
- res |= (hweight8(vid & (BIT(i) | BIT(i + 6))) & 0x1) << i;
-
- return res;
-}
-
-static void enetc_sync_vlan_ht_filter(struct enetc_pf *pf, bool rehash)
-{
- int i;
-
- if (rehash) {
- bitmap_zero(pf->vlan_ht_filter, ENETC_VLAN_HT_SIZE);
-
- for_each_set_bit(i, pf->active_vlans, VLAN_N_VID) {
- int hidx = enetc_vid_hash_idx(i);
-
- __set_bit(hidx, pf->vlan_ht_filter);
- }
- }
-
- enetc_set_vlan_ht_filter(&pf->si->hw, 0, *pf->vlan_ht_filter);
-}
-
-static int enetc_vlan_rx_add_vid(struct net_device *ndev, __be16 prot, u16 vid)
-{
- struct enetc_ndev_priv *priv = netdev_priv(ndev);
- struct enetc_pf *pf = enetc_si_priv(priv->si);
- int idx;
-
- __set_bit(vid, pf->active_vlans);
-
- idx = enetc_vid_hash_idx(vid);
- if (!__test_and_set_bit(idx, pf->vlan_ht_filter))
- enetc_sync_vlan_ht_filter(pf, false);
-
- return 0;
-}
-
-static int enetc_vlan_rx_del_vid(struct net_device *ndev, __be16 prot, u16 vid)
-{
- struct enetc_ndev_priv *priv = netdev_priv(ndev);
- struct enetc_pf *pf = enetc_si_priv(priv->si);
-
- __clear_bit(vid, pf->active_vlans);
- enetc_sync_vlan_ht_filter(pf, true);
-
- return 0;
-}
-
static void enetc_set_loopback(struct net_device *ndev, bool en)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
@@ -409,6 +314,23 @@ static void enetc_port_assign_rfs_entries(struct enetc_si *si)
enetc_port_wr(hw, ENETC_PRFSMR, ENETC_PRFSMR_RFSE);
}
+static void enetc_port_get_caps(struct enetc_si *si)
+{
+ struct enetc_hw *hw = &si->hw;
+ u32 val;
+
+ val = enetc_port_rd(hw, ENETC_PCAPR0);
+
+ if (val & ENETC_PCAPR0_QBV)
+ si->hw_features |= ENETC_SI_F_QBV;
+
+ if (val & ENETC_PCAPR0_QBU)
+ si->hw_features |= ENETC_SI_F_QBU;
+
+ if (val & ENETC_PCAPR0_PSFP)
+ si->hw_features |= ENETC_SI_F_PSFP;
+}
+
static void enetc_port_si_configure(struct enetc_si *si)
{
struct enetc_pf *pf = enetc_si_priv(si);
@@ -416,6 +338,8 @@ static void enetc_port_si_configure(struct enetc_si *si)
int num_rings, i;
u32 val;
+ enetc_port_get_caps(si);
+
val = enetc_port_rd(hw, ENETC_PCAPR0);
num_rings = min(ENETC_PCAPR0_RXBDR(val), ENETC_PCAPR0_TXBDR(val));
@@ -530,7 +454,6 @@ static void enetc_mac_enable(struct enetc_si *si, bool en)
static void enetc_configure_port(struct enetc_pf *pf)
{
- u8 hash_key[ENETC_RSSHASH_KEY_SIZE];
struct enetc_hw *hw = &pf->si->hw;
enetc_configure_port_mac(pf->si);
@@ -538,8 +461,7 @@ static void enetc_configure_port(struct enetc_pf *pf)
enetc_port_si_configure(pf->si);
/* set up hash key */
- get_random_bytes(hash_key, ENETC_RSSHASH_KEY_SIZE);
- enetc_set_rss_key(hw, hash_key);
+ enetc_set_default_rss_key(pf);
/* split up RFS entries */
enetc_port_assign_rfs_entries(pf->si);
@@ -709,6 +631,8 @@ static const struct net_device_ops enetc_ndev_ops = {
.ndo_setup_tc = enetc_pf_setup_tc,
.ndo_bpf = enetc_setup_bpf,
.ndo_xdp_xmit = enetc_xdp_xmit,
+ .ndo_hwtstamp_get = enetc_hwtstamp_get,
+ .ndo_hwtstamp_set = enetc_hwtstamp_set,
};
static struct phylink_pcs *
@@ -920,6 +844,11 @@ static int enetc_pf_register_with_ierb(struct pci_dev *pdev)
return enetc_ierb_register_pf(ierb_pdev, pdev);
}
+static const struct enetc_si_ops enetc_psi_ops = {
+ .get_rss_table = enetc_get_rss_table,
+ .set_rss_table = enetc_set_rss_table,
+};
+
static struct enetc_si *enetc_psi_create(struct pci_dev *pdev)
{
struct enetc_si *si;
@@ -939,6 +868,7 @@ static struct enetc_si *enetc_psi_create(struct pci_dev *pdev)
}
si->revision = enetc_get_ip_revision(&si->hw);
+ si->ops = &enetc_psi_ops;
err = enetc_get_driver_data(si);
if (err) {
dev_err(&pdev->dev, "Could not get PF driver data\n");
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.h b/drivers/net/ethernet/freescale/enetc/enetc_pf.h
index a26a12863855..ae407e9e9ee7 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.h
@@ -5,19 +5,8 @@
#include <linux/phylink.h>
#define ENETC_PF_NUM_RINGS 8
-
-enum enetc_mac_addr_type {UC, MC, MADDR_TYPE};
#define ENETC_MAX_NUM_MAC_FLT ((ENETC_MAX_NUM_VFS + 1) * MADDR_TYPE)
-#define ENETC_MADDR_HASH_TBL_SZ 64
-struct enetc_mac_filter {
- union {
- char mac_addr[ETH_ALEN];
- DECLARE_BITMAP(mac_hash_table, ENETC_MADDR_HASH_TBL_SZ);
- };
- int mac_addr_cnt;
-};
-
#define ENETC_VLAN_HT_SIZE 64
enum enetc_vf_flags {
@@ -34,6 +23,7 @@ struct enetc_port_caps {
int num_msix;
int num_rx_bdr;
int num_tx_bdr;
+ int mac_filter_num;
};
struct enetc_pf;
@@ -71,6 +61,8 @@ struct enetc_pf {
struct enetc_port_caps caps;
const struct enetc_pf_ops *ops;
+
+ int num_mfe; /* number of MAC address filter table entries */
};
#define phylink_to_enetc_pf(config) \
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf_common.c b/drivers/net/ethernet/freescale/enetc/enetc_pf_common.c
index 0eecfc833164..edf14a95cab7 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf_common.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf_common.c
@@ -101,6 +101,7 @@ void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
priv->msg_enable = (NETIF_MSG_WOL << 1) - 1;
priv->sysclk_freq = si->drvdata->sysclk_freq;
+ priv->max_frags = si->drvdata->max_frags;
ndev->netdev_ops = ndev_ops;
enetc_set_ethtool_ops(ndev);
ndev->watchdog_timeo = 5 * HZ;
@@ -109,24 +110,32 @@ void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
ndev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_LOOPBACK |
- NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6;
+ NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_L4;
ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6;
+ NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_L4;
ndev->vlan_features = NETIF_F_SG | NETIF_F_HW_CSUM |
NETIF_F_TSO | NETIF_F_TSO6;
ndev->priv_flags |= IFF_UNICAST_FLT;
- /* TODO: currently, i.MX95 ENETC driver does not support advanced features */
- if (!is_enetc_rev1(si)) {
- ndev->hw_features &= ~(NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_LOOPBACK);
- goto end;
- }
+ if (si->drvdata->tx_csum)
+ priv->active_offloads |= ENETC_F_TXCSUM;
- if (si->num_rss)
+ if (si->hw_features & ENETC_SI_F_LSO)
+ priv->active_offloads |= ENETC_F_LSO;
+
+ if (si->num_rss) {
ndev->hw_features |= NETIF_F_RXHASH;
+ ndev->features |= NETIF_F_RXHASH;
+ }
+
+ /* TODO: currently, i.MX95 ENETC driver does not support advanced features */
+ if (!is_enetc_rev1(si))
+ goto end;
ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
NETDEV_XDP_ACT_NDO_XMIT | NETDEV_XDP_ACT_RX_SG |
@@ -332,5 +341,86 @@ void enetc_phylink_destroy(struct enetc_ndev_priv *priv)
}
EXPORT_SYMBOL_GPL(enetc_phylink_destroy);
+void enetc_set_default_rss_key(struct enetc_pf *pf)
+{
+ u8 hash_key[ENETC_RSSHASH_KEY_SIZE] = {0};
+
+ /* set up hash key */
+ get_random_bytes(hash_key, ENETC_RSSHASH_KEY_SIZE);
+ enetc_set_rss_key(pf->si, hash_key);
+}
+EXPORT_SYMBOL_GPL(enetc_set_default_rss_key);
+
+static int enetc_vid_hash_idx(unsigned int vid)
+{
+ int res = 0;
+ int i;
+
+ for (i = 0; i < 6; i++)
+ res |= (hweight8(vid & (BIT(i) | BIT(i + 6))) & 0x1) << i;
+
+ return res;
+}
+
+static void enetc_refresh_vlan_ht_filter(struct enetc_pf *pf)
+{
+ int i;
+
+ bitmap_zero(pf->vlan_ht_filter, ENETC_VLAN_HT_SIZE);
+ for_each_set_bit(i, pf->active_vlans, VLAN_N_VID) {
+ int hidx = enetc_vid_hash_idx(i);
+
+ __set_bit(hidx, pf->vlan_ht_filter);
+ }
+}
+
+static void enetc_set_si_vlan_ht_filter(struct enetc_si *si,
+ int si_id, u64 hash)
+{
+ struct enetc_hw *hw = &si->hw;
+ int high_reg_off, low_reg_off;
+
+ if (is_enetc_rev1(si)) {
+ low_reg_off = ENETC_PSIVHFR0(si_id);
+ high_reg_off = ENETC_PSIVHFR1(si_id);
+ } else {
+ low_reg_off = ENETC4_PSIVHFR0(si_id);
+ high_reg_off = ENETC4_PSIVHFR1(si_id);
+ }
+
+ enetc_port_wr(hw, low_reg_off, lower_32_bits(hash));
+ enetc_port_wr(hw, high_reg_off, upper_32_bits(hash));
+}
+
+int enetc_vlan_rx_add_vid(struct net_device *ndev, __be16 prot, u16 vid)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_pf *pf = enetc_si_priv(priv->si);
+ int idx;
+
+ __set_bit(vid, pf->active_vlans);
+
+ idx = enetc_vid_hash_idx(vid);
+ if (!__test_and_set_bit(idx, pf->vlan_ht_filter))
+ enetc_set_si_vlan_ht_filter(pf->si, 0, *pf->vlan_ht_filter);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(enetc_vlan_rx_add_vid);
+
+int enetc_vlan_rx_del_vid(struct net_device *ndev, __be16 prot, u16 vid)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_pf *pf = enetc_si_priv(priv->si);
+
+ if (__test_and_clear_bit(vid, pf->active_vlans)) {
+ enetc_refresh_vlan_ht_filter(pf);
+ enetc_set_si_vlan_ht_filter(pf->si, 0, *pf->vlan_ht_filter);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(enetc_vlan_rx_del_vid);
+
MODULE_DESCRIPTION("NXP ENETC PF common functionality driver");
MODULE_LICENSE("Dual BSD/GPL");
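Aside (illustrative, not part of the patch): the 6-bit index that enetc_vid_hash_idx() derives for the 64-entry VLAN hash filter is simply the XOR of the low and high halves of the 12-bit VID. A minimal userspace sketch, with hypothetical names, to see the mapping:

#include <stdio.h>

/* Same mapping as the hweight8()-based loop in enetc_vid_hash_idx():
 * bit i of the index is vid[i] ^ vid[i + 6], i.e. an XOR fold of the
 * 12-bit VID into 6 bits.
 */
static unsigned int vid_hash_idx(unsigned int vid)
{
	return (vid ^ (vid >> 6)) & 0x3f;
}

int main(void)
{
	/* VID 100 = 0b000001100100 -> 000001 ^ 100100 = 100101 = 0x25 */
	printf("hash(100) = 0x%02x\n", vid_hash_idx(100));
	return 0;
}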
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf_common.h b/drivers/net/ethernet/freescale/enetc/enetc_pf_common.h
index 48f55ee743ad..96d4840a3107 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf_common.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf_common.h
@@ -12,6 +12,9 @@ void enetc_mdiobus_destroy(struct enetc_pf *pf);
int enetc_phylink_create(struct enetc_ndev_priv *priv, struct device_node *node,
const struct phylink_mac_ops *ops);
void enetc_phylink_destroy(struct enetc_ndev_priv *priv);
+void enetc_set_default_rss_key(struct enetc_pf *pf);
+int enetc_vlan_rx_add_vid(struct net_device *ndev, __be16 prot, u16 vid);
+int enetc_vlan_rx_del_vid(struct net_device *ndev, __be16 prot, u16 vid);
static inline u16 enetc_get_ip_revision(struct enetc_hw *hw)
{
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
index a5f8ce576b6e..6c4b374bcb0e 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_vf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
@@ -121,6 +121,8 @@ static const struct net_device_ops enetc_ndev_ops = {
.ndo_set_features = enetc_vf_set_features,
.ndo_eth_ioctl = enetc_ioctl,
.ndo_setup_tc = enetc_vf_setup_tc,
+ .ndo_hwtstamp_get = enetc_hwtstamp_get,
+ .ndo_hwtstamp_set = enetc_hwtstamp_set,
};
static void enetc_vf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
@@ -136,6 +138,7 @@ static void enetc_vf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
priv->sysclk_freq = si->drvdata->sysclk_freq;
+ priv->max_frags = si->drvdata->max_frags;
ndev->netdev_ops = ndev_ops;
enetc_set_ethtool_ops(ndev);
ndev->watchdog_timeo = 5 * HZ;
@@ -144,21 +147,30 @@ static void enetc_vf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
ndev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6;
+ NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_L4;
ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6;
+ NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_L4;
ndev->vlan_features = NETIF_F_SG | NETIF_F_HW_CSUM |
NETIF_F_TSO | NETIF_F_TSO6;
- if (si->num_rss)
+ if (si->num_rss) {
ndev->hw_features |= NETIF_F_RXHASH;
+ ndev->features |= NETIF_F_RXHASH;
+ }
/* pick up primary MAC address from SI */
enetc_load_primary_mac_addr(&si->hw, ndev);
}
+static const struct enetc_si_ops enetc_vsi_ops = {
+ .get_rss_table = enetc_get_rss_table,
+ .set_rss_table = enetc_set_rss_table,
+};
+
static int enetc_vf_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -173,6 +185,7 @@ static int enetc_vf_probe(struct pci_dev *pdev,
si = pci_get_drvdata(pdev);
si->revision = ENETC_REV_1_0;
+ si->ops = &enetc_vsi_ops;
err = enetc_get_driver_data(si);
if (err) {
dev_err_probe(&pdev->dev, err,
diff --git a/drivers/net/ethernet/freescale/enetc/ntmp.c b/drivers/net/ethernet/freescale/enetc/ntmp.c
new file mode 100644
index 000000000000..ba32c1bbd9e1
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/ntmp.c
@@ -0,0 +1,462 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * NETC NTMP (NETC Table Management Protocol) 2.0 Library
+ * Copyright 2025 NXP
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/fsl/netc_global.h>
+#include <linux/iopoll.h>
+
+#include "ntmp_private.h"
+
+#define NETC_CBDR_TIMEOUT 1000 /* us */
+#define NETC_CBDR_DELAY_US 10
+#define NETC_CBDR_MR_EN BIT(31)
+
+#define NTMP_BASE_ADDR_ALIGN 128
+#define NTMP_DATA_ADDR_ALIGN 32
+
+/* Define NTMP Table ID */
+#define NTMP_MAFT_ID 1
+#define NTMP_RSST_ID 3
+
+/* Generic Update Actions for most tables */
+#define NTMP_GEN_UA_CFGEU BIT(0)
+#define NTMP_GEN_UA_STSEU BIT(1)
+
+#define NTMP_ENTRY_ID_SIZE 4
+#define RSST_ENTRY_NUM 64
+#define RSST_STSE_DATA_SIZE(n) ((n) * 8)
+#define RSST_CFGE_DATA_SIZE(n) (n)
+
+int ntmp_init_cbdr(struct netc_cbdr *cbdr, struct device *dev,
+ const struct netc_cbdr_regs *regs)
+{
+ int cbd_num = NETC_CBDR_BD_NUM;
+ size_t size;
+
+ size = cbd_num * sizeof(union netc_cbd) + NTMP_BASE_ADDR_ALIGN;
+ cbdr->addr_base = dma_alloc_coherent(dev, size, &cbdr->dma_base,
+ GFP_KERNEL);
+ if (!cbdr->addr_base)
+ return -ENOMEM;
+
+ cbdr->dma_size = size;
+ cbdr->bd_num = cbd_num;
+ cbdr->regs = *regs;
+ cbdr->dev = dev;
+
+ /* The base address of the Control BD Ring must be 128-byte aligned */
+ cbdr->dma_base_align = ALIGN(cbdr->dma_base, NTMP_BASE_ADDR_ALIGN);
+ cbdr->addr_base_align = PTR_ALIGN(cbdr->addr_base,
+ NTMP_BASE_ADDR_ALIGN);
+
+ cbdr->next_to_clean = 0;
+ cbdr->next_to_use = 0;
+ spin_lock_init(&cbdr->ring_lock);
+
+ /* Step 1: Configure the base address of the Control BD Ring */
+ netc_write(cbdr->regs.bar0, lower_32_bits(cbdr->dma_base_align));
+ netc_write(cbdr->regs.bar1, upper_32_bits(cbdr->dma_base_align));
+
+ /* Step 2: Configure the producer index register */
+ netc_write(cbdr->regs.pir, cbdr->next_to_clean);
+
+ /* Step 3: Configure the consumer index register */
+ netc_write(cbdr->regs.cir, cbdr->next_to_use);
+
+ /* Step 4: Configure the number of BDs of the Control BD Ring */
+ netc_write(cbdr->regs.lenr, cbdr->bd_num);
+
+ /* Step 5: Enable the Control BD Ring */
+ netc_write(cbdr->regs.mr, NETC_CBDR_MR_EN);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ntmp_init_cbdr);
+
+void ntmp_free_cbdr(struct netc_cbdr *cbdr)
+{
+ /* Disable the Control BD Ring */
+ netc_write(cbdr->regs.mr, 0);
+ dma_free_coherent(cbdr->dev, cbdr->dma_size, cbdr->addr_base,
+ cbdr->dma_base);
+ memset(cbdr, 0, sizeof(*cbdr));
+}
+EXPORT_SYMBOL_GPL(ntmp_free_cbdr);
+
+static int ntmp_get_free_cbd_num(struct netc_cbdr *cbdr)
+{
+ return (cbdr->next_to_clean - cbdr->next_to_use - 1 +
+ cbdr->bd_num) % cbdr->bd_num;
+}
+
+static union netc_cbd *ntmp_get_cbd(struct netc_cbdr *cbdr, int index)
+{
+ return &((union netc_cbd *)(cbdr->addr_base_align))[index];
+}
+
+static void ntmp_clean_cbdr(struct netc_cbdr *cbdr)
+{
+ union netc_cbd *cbd;
+ int i;
+
+ i = cbdr->next_to_clean;
+ while (netc_read(cbdr->regs.cir) != i) {
+ cbd = ntmp_get_cbd(cbdr, i);
+ memset(cbd, 0, sizeof(*cbd));
+ i = (i + 1) % cbdr->bd_num;
+ }
+
+ cbdr->next_to_clean = i;
+}
+
+static int netc_xmit_ntmp_cmd(struct ntmp_user *user, union netc_cbd *cbd)
+{
+ union netc_cbd *cur_cbd;
+ struct netc_cbdr *cbdr;
+ int i, err;
+ u16 status;
+ u32 val;
+
+ /* Currently only i.MX95 ENETC is supported, and it only has one
+ * command BD ring
+ */
+ cbdr = &user->ring[0];
+
+ spin_lock_bh(&cbdr->ring_lock);
+
+ if (unlikely(!ntmp_get_free_cbd_num(cbdr)))
+ ntmp_clean_cbdr(cbdr);
+
+ i = cbdr->next_to_use;
+ cur_cbd = ntmp_get_cbd(cbdr, i);
+ *cur_cbd = *cbd;
+ dma_wmb();
+
+ /* Update producer index of both software and hardware */
+ i = (i + 1) % cbdr->bd_num;
+ cbdr->next_to_use = i;
+ netc_write(cbdr->regs.pir, i);
+
+ err = read_poll_timeout_atomic(netc_read, val, val == i,
+ NETC_CBDR_DELAY_US, NETC_CBDR_TIMEOUT,
+ true, cbdr->regs.cir);
+ if (unlikely(err))
+ goto cbdr_unlock;
+
+ dma_rmb();
+ /* Get the writeback command BD, because the caller may need
+ * to check some other fields of the response header.
+ */
+ *cbd = *cur_cbd;
+
+ /* Check the writeback error status */
+ status = le16_to_cpu(cbd->resp_hdr.error_rr) & NTMP_RESP_ERROR;
+ if (unlikely(status)) {
+ err = -EIO;
+ dev_err(user->dev, "Command BD error: 0x%04x\n", status);
+ }
+
+ ntmp_clean_cbdr(cbdr);
+ dma_wmb();
+
+cbdr_unlock:
+ spin_unlock_bh(&cbdr->ring_lock);
+
+ return err;
+}
+
+static int ntmp_alloc_data_mem(struct ntmp_dma_buf *data, void **buf_align)
+{
+ void *buf;
+
+ buf = dma_alloc_coherent(data->dev, data->size + NTMP_DATA_ADDR_ALIGN,
+ &data->dma, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ data->buf = buf;
+ *buf_align = PTR_ALIGN(buf, NTMP_DATA_ADDR_ALIGN);
+
+ return 0;
+}
+
+static void ntmp_free_data_mem(struct ntmp_dma_buf *data)
+{
+ dma_free_coherent(data->dev, data->size + NTMP_DATA_ADDR_ALIGN,
+ data->buf, data->dma);
+}
+
+static void ntmp_fill_request_hdr(union netc_cbd *cbd, dma_addr_t dma,
+ int len, int table_id, int cmd,
+ int access_method)
+{
+ dma_addr_t dma_align;
+
+ memset(cbd, 0, sizeof(*cbd));
+ dma_align = ALIGN(dma, NTMP_DATA_ADDR_ALIGN);
+ cbd->req_hdr.addr = cpu_to_le64(dma_align);
+ cbd->req_hdr.len = cpu_to_le32(len);
+ cbd->req_hdr.cmd = cmd;
+ cbd->req_hdr.access_method = FIELD_PREP(NTMP_ACCESS_METHOD,
+ access_method);
+ cbd->req_hdr.table_id = table_id;
+ cbd->req_hdr.ver_cci_rr = FIELD_PREP(NTMP_HDR_VERSION,
+ NTMP_HDR_VER2);
+ /* For NTMP version 2.0 or later */
+ cbd->req_hdr.npf = cpu_to_le32(NTMP_NPF);
+}
+
+static void ntmp_fill_crd(struct ntmp_cmn_req_data *crd, u8 tblv,
+ u8 qa, u16 ua)
+{
+ crd->update_act = cpu_to_le16(ua);
+ crd->tblv_qact = NTMP_TBLV_QACT(tblv, qa);
+}
+
+static void ntmp_fill_crd_eid(struct ntmp_req_by_eid *rbe, u8 tblv,
+ u8 qa, u16 ua, u32 entry_id)
+{
+ ntmp_fill_crd(&rbe->crd, tblv, qa, ua);
+ rbe->entry_id = cpu_to_le32(entry_id);
+}
+
+static const char *ntmp_table_name(int tbl_id)
+{
+ switch (tbl_id) {
+ case NTMP_MAFT_ID:
+ return "MAC Address Filter Table";
+ case NTMP_RSST_ID:
+ return "RSS Table";
+ default:
+ return "Unknown Table";
+ }
+}
+
+static int ntmp_delete_entry_by_id(struct ntmp_user *user, int tbl_id,
+ u8 tbl_ver, u32 entry_id, u32 req_len,
+ u32 resp_len)
+{
+ struct ntmp_dma_buf data = {
+ .dev = user->dev,
+ .size = max(req_len, resp_len),
+ };
+ struct ntmp_req_by_eid *req;
+ union netc_cbd cbd;
+ int err;
+
+ err = ntmp_alloc_data_mem(&data, (void **)&req);
+ if (err)
+ return err;
+
+ ntmp_fill_crd_eid(req, tbl_ver, 0, 0, entry_id);
+ ntmp_fill_request_hdr(&cbd, data.dma, NTMP_LEN(req_len, resp_len),
+ tbl_id, NTMP_CMD_DELETE, NTMP_AM_ENTRY_ID);
+
+ err = netc_xmit_ntmp_cmd(user, &cbd);
+ if (err)
+ dev_err(user->dev,
+ "Failed to delete entry 0x%x of %s, err: %pe\n",
+ entry_id, ntmp_table_name(tbl_id), ERR_PTR(err));
+
+ ntmp_free_data_mem(&data);
+
+ return err;
+}
+
+static int ntmp_query_entry_by_id(struct ntmp_user *user, int tbl_id,
+ u32 len, struct ntmp_req_by_eid *req,
+ dma_addr_t dma, bool compare_eid)
+{
+ struct ntmp_cmn_resp_query *resp;
+ int cmd = NTMP_CMD_QUERY;
+ union netc_cbd cbd;
+ u32 entry_id;
+ int err;
+
+ entry_id = le32_to_cpu(req->entry_id);
+ if (le16_to_cpu(req->crd.update_act))
+ cmd = NTMP_CMD_QU;
+
+ /* Request header */
+ ntmp_fill_request_hdr(&cbd, dma, len, tbl_id, cmd, NTMP_AM_ENTRY_ID);
+ err = netc_xmit_ntmp_cmd(user, &cbd);
+ if (err) {
+ dev_err(user->dev,
+ "Failed to query entry 0x%x of %s, err: %pe\n",
+ entry_id, ntmp_table_name(tbl_id), ERR_PTR(err));
+ return err;
+ }
+
+ /* For a few tables, the first field of the response data is not
+ * entry_id, so return success directly.
+ */
+ if (!compare_eid)
+ return 0;
+
+ resp = (struct ntmp_cmn_resp_query *)req;
+ if (unlikely(le32_to_cpu(resp->entry_id) != entry_id)) {
+ dev_err(user->dev,
+ "%s: query EID 0x%x doesn't match response EID 0x%x\n",
+ ntmp_table_name(tbl_id), entry_id, le32_to_cpu(resp->entry_id));
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int ntmp_maft_add_entry(struct ntmp_user *user, u32 entry_id,
+ struct maft_entry_data *maft)
+{
+ struct ntmp_dma_buf data = {
+ .dev = user->dev,
+ .size = sizeof(struct maft_req_add),
+ };
+ struct maft_req_add *req;
+ union netc_cbd cbd;
+ int err;
+
+ err = ntmp_alloc_data_mem(&data, (void **)&req);
+ if (err)
+ return err;
+
+ /* Set mac address filter table request data buffer */
+ ntmp_fill_crd_eid(&req->rbe, user->tbl.maft_ver, 0, 0, entry_id);
+ req->keye = maft->keye;
+ req->cfge = maft->cfge;
+
+ ntmp_fill_request_hdr(&cbd, data.dma, NTMP_LEN(data.size, 0),
+ NTMP_MAFT_ID, NTMP_CMD_ADD, NTMP_AM_ENTRY_ID);
+ err = netc_xmit_ntmp_cmd(user, &cbd);
+ if (err)
+ dev_err(user->dev, "Failed to add MAFT entry 0x%x, err: %pe\n",
+ entry_id, ERR_PTR(err));
+
+ ntmp_free_data_mem(&data);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(ntmp_maft_add_entry);
+
+int ntmp_maft_query_entry(struct ntmp_user *user, u32 entry_id,
+ struct maft_entry_data *maft)
+{
+ struct ntmp_dma_buf data = {
+ .dev = user->dev,
+ .size = sizeof(struct maft_resp_query),
+ };
+ struct maft_resp_query *resp;
+ struct ntmp_req_by_eid *req;
+ int err;
+
+ err = ntmp_alloc_data_mem(&data, (void **)&req);
+ if (err)
+ return err;
+
+ ntmp_fill_crd_eid(req, user->tbl.maft_ver, 0, 0, entry_id);
+ err = ntmp_query_entry_by_id(user, NTMP_MAFT_ID,
+ NTMP_LEN(sizeof(*req), data.size),
+ req, data.dma, true);
+ if (err)
+ goto end;
+
+ resp = (struct maft_resp_query *)req;
+ maft->keye = resp->keye;
+ maft->cfge = resp->cfge;
+
+end:
+ ntmp_free_data_mem(&data);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(ntmp_maft_query_entry);
+
+int ntmp_maft_delete_entry(struct ntmp_user *user, u32 entry_id)
+{
+ return ntmp_delete_entry_by_id(user, NTMP_MAFT_ID, user->tbl.maft_ver,
+ entry_id, NTMP_EID_REQ_LEN, 0);
+}
+EXPORT_SYMBOL_GPL(ntmp_maft_delete_entry);
+
+int ntmp_rsst_update_entry(struct ntmp_user *user, const u32 *table,
+ int count)
+{
+ struct ntmp_dma_buf data = {.dev = user->dev};
+ struct rsst_req_update *req;
+ union netc_cbd cbd;
+ int err, i;
+
+ if (count != RSST_ENTRY_NUM)
+ /* HW only takes in a full 64 entry table */
+ return -EINVAL;
+
+ data.size = struct_size(req, groups, count);
+ err = ntmp_alloc_data_mem(&data, (void **)&req);
+ if (err)
+ return err;
+
+ /* Set the request data buffer */
+ ntmp_fill_crd_eid(&req->rbe, user->tbl.rsst_ver, 0,
+ NTMP_GEN_UA_CFGEU | NTMP_GEN_UA_STSEU, 0);
+ for (i = 0; i < count; i++)
+ req->groups[i] = (u8)(table[i]);
+
+ ntmp_fill_request_hdr(&cbd, data.dma, NTMP_LEN(data.size, 0),
+ NTMP_RSST_ID, NTMP_CMD_UPDATE, NTMP_AM_ENTRY_ID);
+
+ err = netc_xmit_ntmp_cmd(user, &cbd);
+ if (err)
+ dev_err(user->dev, "Failed to update RSST entry, err: %pe\n",
+ ERR_PTR(err));
+
+ ntmp_free_data_mem(&data);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(ntmp_rsst_update_entry);
+
+int ntmp_rsst_query_entry(struct ntmp_user *user, u32 *table, int count)
+{
+ struct ntmp_dma_buf data = {.dev = user->dev};
+ struct ntmp_req_by_eid *req;
+ union netc_cbd cbd;
+ int err, i;
+ u8 *group;
+
+ if (count != RSST_ENTRY_NUM)
+ /* HW only takes in a full 64 entry table */
+ return -EINVAL;
+
+ data.size = NTMP_ENTRY_ID_SIZE + RSST_STSE_DATA_SIZE(count) +
+ RSST_CFGE_DATA_SIZE(count);
+ err = ntmp_alloc_data_mem(&data, (void **)&req);
+ if (err)
+ return err;
+
+ /* Set the request data buffer */
+ ntmp_fill_crd_eid(req, user->tbl.rsst_ver, 0, 0, 0);
+ ntmp_fill_request_hdr(&cbd, data.dma, NTMP_LEN(sizeof(*req), data.size),
+ NTMP_RSST_ID, NTMP_CMD_QUERY, NTMP_AM_ENTRY_ID);
+ err = netc_xmit_ntmp_cmd(user, &cbd);
+ if (err) {
+ dev_err(user->dev, "Failed to query RSST entry, err: %pe\n",
+ ERR_PTR(err));
+ goto end;
+ }
+
+ group = (u8 *)req;
+ group += NTMP_ENTRY_ID_SIZE + RSST_STSE_DATA_SIZE(count);
+ for (i = 0; i < count; i++)
+ table[i] = group[i];
+
+end:
+ ntmp_free_data_mem(&data);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(ntmp_rsst_query_entry);
+
+MODULE_DESCRIPTION("NXP NETC Library");
+MODULE_LICENSE("Dual BSD/GPL");
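Aside (illustrative, not part of the patch): the free-slot arithmetic in ntmp_get_free_cbd_num() deliberately leaves one BD unused so that a full ring and an empty ring remain distinguishable. A standalone sketch of the same formula:

#include <stdio.h>

#define BD_NUM 256	/* mirrors NETC_CBDR_BD_NUM */

/* Free slots between producer (next_to_use) and consumer (next_to_clean),
 * keeping one slot reserved.
 */
static int free_cbds(int next_to_clean, int next_to_use)
{
	return (next_to_clean - next_to_use - 1 + BD_NUM) % BD_NUM;
}

int main(void)
{
	printf("%d\n", free_cbds(0, 0));	/* empty ring: 255 free, one slot reserved */
	printf("%d\n", free_cbds(10, 9));	/* producer right behind consumer: 0 free */
	printf("%d\n", free_cbds(0, 200));	/* 55 free */
	return 0;
}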
diff --git a/drivers/net/ethernet/freescale/enetc/ntmp_private.h b/drivers/net/ethernet/freescale/enetc/ntmp_private.h
new file mode 100644
index 000000000000..34394e40fddd
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/ntmp_private.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * NTMP table request and response data buffer formats
+ * Copyright 2025 NXP
+ */
+
+#ifndef __NTMP_PRIVATE_H
+#define __NTMP_PRIVATE_H
+
+#include <linux/bitfield.h>
+#include <linux/fsl/ntmp.h>
+
+#define NTMP_EID_REQ_LEN 8
+#define NETC_CBDR_BD_NUM 256
+
+union netc_cbd {
+ struct {
+ __le64 addr;
+ __le32 len;
+#define NTMP_RESP_LEN GENMASK(19, 0)
+#define NTMP_REQ_LEN GENMASK(31, 20)
+#define NTMP_LEN(req, resp) (FIELD_PREP(NTMP_REQ_LEN, (req)) | \
+ ((resp) & NTMP_RESP_LEN))
+ u8 cmd;
+#define NTMP_CMD_DELETE BIT(0)
+#define NTMP_CMD_UPDATE BIT(1)
+#define NTMP_CMD_QUERY BIT(2)
+#define NTMP_CMD_ADD BIT(3)
+#define NTMP_CMD_QU (NTMP_CMD_QUERY | NTMP_CMD_UPDATE)
+ u8 access_method;
+#define NTMP_ACCESS_METHOD GENMASK(7, 4)
+#define NTMP_AM_ENTRY_ID 0
+#define NTMP_AM_EXACT_KEY 1
+#define NTMP_AM_SEARCH 2
+#define NTMP_AM_TERNARY_KEY 3
+ u8 table_id;
+ u8 ver_cci_rr;
+#define NTMP_HDR_VERSION GENMASK(5, 0)
+#define NTMP_HDR_VER2 2
+#define NTMP_CCI BIT(6)
+#define NTMP_RR BIT(7)
+ __le32 resv[3];
+ __le32 npf;
+#define NTMP_NPF BIT(15)
+ } req_hdr; /* NTMP Request Message Header Format */
+
+ struct {
+ __le32 resv0[3];
+ __le16 num_matched;
+ __le16 error_rr;
+#define NTMP_RESP_ERROR GENMASK(11, 0)
+#define NTMP_RESP_RR BIT(15)
+ __le32 resv1[4];
+ } resp_hdr; /* NTMP Response Message Header Format */
+};
+
+struct ntmp_dma_buf {
+ struct device *dev;
+ size_t size;
+ void *buf;
+ dma_addr_t dma;
+};
+
+struct ntmp_cmn_req_data {
+ __le16 update_act;
+ u8 dbg_opt;
+ u8 tblv_qact;
+#define NTMP_QUERY_ACT GENMASK(3, 0)
+#define NTMP_TBL_VER GENMASK(7, 4)
+#define NTMP_TBLV_QACT(v, a) (FIELD_PREP(NTMP_TBL_VER, (v)) | \
+ ((a) & NTMP_QUERY_ACT))
+};
+
+struct ntmp_cmn_resp_query {
+ __le32 entry_id;
+};
+
+/* Generic structure for request data by entry ID */
+struct ntmp_req_by_eid {
+ struct ntmp_cmn_req_data crd;
+ __le32 entry_id;
+};
+
+/* MAC Address Filter Table Request Data Buffer Format of Add action */
+struct maft_req_add {
+ struct ntmp_req_by_eid rbe;
+ struct maft_keye_data keye;
+ struct maft_cfge_data cfge;
+};
+
+/* MAC Address Filter Table Response Data Buffer Format of Query action */
+struct maft_resp_query {
+ __le32 entry_id;
+ struct maft_keye_data keye;
+ struct maft_cfge_data cfge;
+};
+
+/* RSS Table Request Data Buffer Format of Update action */
+struct rsst_req_update {
+ struct ntmp_req_by_eid rbe;
+ u8 groups[];
+};
+
+#endif
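Aside (illustrative, not from the patch): NTMP_LEN() above packs the request length into bits 31:20 and the response length into bits 19:0 of the command BD's 'len' word. A quick standalone check of that packing:

#include <stdint.h>
#include <stdio.h>

/* Same layout as NTMP_REQ_LEN (bits 31:20) and NTMP_RESP_LEN (bits 19:0) */
static uint32_t ntmp_len(uint32_t req, uint32_t resp)
{
	return ((req & 0xfff) << 20) | (resp & 0xfffff);
}

int main(void)
{
	/* e.g. an 8-byte request header with a 24-byte response buffer */
	printf("0x%08x\n", ntmp_len(8, 24));	/* prints 0x00800018 */
	return 0;
}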
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 1cca0425d493..c81f2ea588f2 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -671,8 +671,6 @@ struct fec_enet_private {
unsigned int tx_time_itr;
unsigned int itr_clk_rate;
- /* tx lpi eee mode */
- struct ethtool_keee eee;
unsigned int clk_ref_rate;
/* ptp clock period in ns*/
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 1b55047c0237..17e9bddb9ddd 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -714,7 +714,12 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
txq->bd.cur = bdp;
/* Trigger transmission start */
- writel(0, txq->bd.reg_desc_active);
+ if (!(fep->quirks & FEC_QUIRK_ERR007885) ||
+ !readl(txq->bd.reg_desc_active) ||
+ !readl(txq->bd.reg_desc_active) ||
+ !readl(txq->bd.reg_desc_active) ||
+ !readl(txq->bd.reg_desc_active))
+ writel(0, txq->bd.reg_desc_active);
return 0;
}
@@ -840,6 +845,8 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
struct fec_enet_private *fep = netdev_priv(ndev);
int hdr_len, total_len, data_left;
struct bufdesc *bdp = txq->bd.cur;
+ struct bufdesc *tmp_bdp;
+ struct bufdesc_ex *ebdp;
struct tso_t tso;
unsigned int index = 0;
int ret;
@@ -913,7 +920,34 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
return 0;
err_release:
- /* TODO: Release all used data descriptors for TSO */
+ /* Release all used data descriptors for TSO */
+ tmp_bdp = txq->bd.cur;
+
+ while (tmp_bdp != bdp) {
+ /* Unmap data buffers */
+ if (tmp_bdp->cbd_bufaddr &&
+ !IS_TSO_HEADER(txq, fec32_to_cpu(tmp_bdp->cbd_bufaddr)))
+ dma_unmap_single(&fep->pdev->dev,
+ fec32_to_cpu(tmp_bdp->cbd_bufaddr),
+ fec16_to_cpu(tmp_bdp->cbd_datlen),
+ DMA_TO_DEVICE);
+
+ /* Clear standard buffer descriptor fields */
+ tmp_bdp->cbd_sc = 0;
+ tmp_bdp->cbd_datlen = 0;
+ tmp_bdp->cbd_bufaddr = 0;
+
+ /* Handle extended descriptor if enabled */
+ if (fep->bufdesc_ex) {
+ ebdp = (struct bufdesc_ex *)tmp_bdp;
+ ebdp->cbd_esc = 0;
+ }
+
+ tmp_bdp = fec_enet_get_nextdesc(tmp_bdp, &txq->bd);
+ }
+
+ dev_kfree_skb_any(skb);
+
return ret;
}
@@ -1064,6 +1098,29 @@ static void fec_enet_enable_ring(struct net_device *ndev)
}
}
+/* Whack a reset. We should wait for this.
+ * On the i.MX6SX SoC, the ENET block sits on the AXI bus, so we disable
+ * the MAC instead of resetting it.
+ */
+static void fec_ctrl_reset(struct fec_enet_private *fep, bool allow_wol)
+{
+ u32 val;
+
+ if (!allow_wol || !(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
+ if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES ||
+ ((fep->quirks & FEC_QUIRK_NO_HARD_RESET) && fep->link)) {
+ writel(0, fep->hwp + FEC_ECNTRL);
+ } else {
+ writel(FEC_ECR_RESET, fep->hwp + FEC_ECNTRL);
+ udelay(10);
+ }
+ } else {
+ val = readl(fep->hwp + FEC_ECNTRL);
+ val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
+ writel(val, fep->hwp + FEC_ECNTRL);
+ }
+}
+
/*
* This function is called to start or restart the FEC during a link
* change, transmit timeout, or to reconfigure the FEC. The network
@@ -1080,17 +1137,7 @@ fec_restart(struct net_device *ndev)
if (fep->bufdesc_ex)
fec_ptp_save_state(fep);
- /* Whack a reset. We should wait for this.
- * For i.MX6SX SOC, enet use AXI bus, we use disable MAC
- * instead of reset MAC itself.
- */
- if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES ||
- ((fep->quirks & FEC_QUIRK_NO_HARD_RESET) && fep->link)) {
- writel(0, fep->hwp + FEC_ECNTRL);
- } else {
- writel(1, fep->hwp + FEC_ECNTRL);
- udelay(10);
- }
+ fec_ctrl_reset(fep, false);
/*
* enet-mac reset will reset mac address registers too,
@@ -1344,22 +1391,7 @@ fec_stop(struct net_device *ndev)
if (fep->bufdesc_ex)
fec_ptp_save_state(fep);
- /* Whack a reset. We should wait for this.
- * For i.MX6SX SOC, enet use AXI bus, we use disable MAC
- * instead of reset MAC itself.
- */
- if (!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
- if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) {
- writel(0, fep->hwp + FEC_ECNTRL);
- } else {
- writel(FEC_ECR_RESET, fep->hwp + FEC_ECNTRL);
- udelay(10);
- }
- } else {
- val = readl(fep->hwp + FEC_ECNTRL);
- val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
- writel(val, fep->hwp + FEC_ECNTRL);
- }
+ fec_ctrl_reset(fep, true);
writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
@@ -1591,19 +1623,22 @@ static void fec_enet_tx(struct net_device *ndev, int budget)
fec_enet_tx_queue(ndev, i, budget);
}
-static void fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq,
+static int fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq,
struct bufdesc *bdp, int index)
{
struct page *new_page;
dma_addr_t phys_addr;
new_page = page_pool_dev_alloc_pages(rxq->page_pool);
- WARN_ON(!new_page);
- rxq->rx_skb_info[index].page = new_page;
+ if (unlikely(!new_page))
+ return -ENOMEM;
+ rxq->rx_skb_info[index].page = new_page;
rxq->rx_skb_info[index].offset = FEC_ENET_XDP_HEADROOM;
phys_addr = page_pool_get_dma_addr(new_page) + FEC_ENET_XDP_HEADROOM;
bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);
+
+ return 0;
}
static u32
@@ -1698,6 +1733,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
int cpu = smp_processor_id();
struct xdp_buff xdp;
struct page *page;
+ __fec32 cbd_bufaddr;
u32 sub_len = 4;
#if !defined(CONFIG_M5272)
@@ -1766,12 +1802,17 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
index = fec_enet_get_bd_index(bdp, &rxq->bd);
page = rxq->rx_skb_info[index].page;
+ cbd_bufaddr = bdp->cbd_bufaddr;
+ if (fec_enet_update_cbd(rxq, bdp, index)) {
+ ndev->stats.rx_dropped++;
+ goto rx_processing_done;
+ }
+
dma_sync_single_for_cpu(&fep->pdev->dev,
- fec32_to_cpu(bdp->cbd_bufaddr),
+ fec32_to_cpu(cbd_bufaddr),
pkt_len,
DMA_FROM_DEVICE);
prefetch(page_address(page));
- fec_enet_update_cbd(rxq, bdp, index);
if (xdp_prog) {
xdp_buff_clear_frags_flag(&xdp);
@@ -2045,14 +2086,14 @@ static int fec_enet_us_to_tx_cycle(struct net_device *ndev, int us)
return us * (fep->clk_ref_rate / 1000) / 1000;
}
-static int fec_enet_eee_mode_set(struct net_device *ndev, bool enable)
+static int fec_enet_eee_mode_set(struct net_device *ndev, u32 lpi_timer,
+ bool enable)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- struct ethtool_keee *p = &fep->eee;
unsigned int sleep_cycle, wake_cycle;
if (enable) {
- sleep_cycle = fec_enet_us_to_tx_cycle(ndev, p->tx_lpi_timer);
+ sleep_cycle = fec_enet_us_to_tx_cycle(ndev, lpi_timer);
wake_cycle = sleep_cycle;
} else {
sleep_cycle = 0;
@@ -2105,7 +2146,9 @@ static void fec_enet_adjust_link(struct net_device *ndev)
napi_enable(&fep->napi);
}
if (fep->quirks & FEC_QUIRK_HAS_EEE)
- fec_enet_eee_mode_set(ndev, phy_dev->enable_tx_lpi);
+ fec_enet_eee_mode_set(ndev,
+ phy_dev->eee_cfg.tx_lpi_timer,
+ phy_dev->enable_tx_lpi);
} else {
if (fep->link) {
netif_stop_queue(ndev);
@@ -3181,7 +3224,6 @@ static int
fec_enet_get_eee(struct net_device *ndev, struct ethtool_keee *edata)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- struct ethtool_keee *p = &fep->eee;
if (!(fep->quirks & FEC_QUIRK_HAS_EEE))
return -EOPNOTSUPP;
@@ -3189,8 +3231,6 @@ fec_enet_get_eee(struct net_device *ndev, struct ethtool_keee *edata)
if (!netif_running(ndev))
return -ENETDOWN;
- edata->tx_lpi_timer = p->tx_lpi_timer;
-
return phy_ethtool_get_eee(ndev->phydev, edata);
}
@@ -3198,7 +3238,6 @@ static int
fec_enet_set_eee(struct net_device *ndev, struct ethtool_keee *edata)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- struct ethtool_keee *p = &fep->eee;
if (!(fep->quirks & FEC_QUIRK_HAS_EEE))
return -EOPNOTSUPP;
@@ -3206,8 +3245,6 @@ fec_enet_set_eee(struct net_device *ndev, struct ethtool_keee *edata)
if (!netif_running(ndev))
return -ENETDOWN;
- p->tx_lpi_timer = edata->tx_lpi_timer;
-
return phy_ethtool_set_eee(ndev->phydev, edata);
}
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index 7f6b57432071..876d90832596 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -30,7 +30,6 @@
#include <linux/phy.h>
#include <linux/fec.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/of_net.h>
#include "fec.h"
@@ -739,8 +738,8 @@ void fec_ptp_init(struct platform_device *pdev, int irq_idx)
INIT_DELAYED_WORK(&fep->time_keep, fec_time_keep);
- hrtimer_init(&fep->perout_timer, CLOCK_REALTIME, HRTIMER_MODE_REL);
- fep->perout_timer.function = fec_ptp_pps_perout_handler;
+ hrtimer_setup(&fep->perout_timer, fec_ptp_pps_perout_handler, CLOCK_REALTIME,
+ HRTIMER_MODE_REL);
irq = platform_get_irq_byname_optional(pdev, "pps");
if (irq < 0)
diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
index fb416d60dcd7..11887458f050 100644
--- a/drivers/net/ethernet/freescale/fman/fman.c
+++ b/drivers/net/ethernet/freescale/fman/fman.c
@@ -2690,13 +2690,12 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
{
struct fman *fman;
struct device_node *fm_node, *muram_node;
+ void __iomem *base_addr;
struct resource *res;
u32 val, range[2];
int err, irq;
struct clk *clk;
u32 clk_rate;
- phys_addr_t phys_base_addr;
- resource_size_t mem_size;
fman = kzalloc(sizeof(*fman), GFP_KERNEL);
if (!fman)
@@ -2724,18 +2723,6 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
goto fman_node_put;
fman->dts_params.err_irq = err;
- /* Get the FM address */
- res = platform_get_resource(of_dev, IORESOURCE_MEM, 0);
- if (!res) {
- err = -EINVAL;
- dev_err(&of_dev->dev, "%s: Can't get FMan memory resource\n",
- __func__);
- goto fman_node_put;
- }
-
- phys_base_addr = res->start;
- mem_size = resource_size(res);
-
clk = of_clk_get(fm_node, 0);
if (IS_ERR(clk)) {
err = PTR_ERR(clk);
@@ -2803,24 +2790,16 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
}
}
- fman->dts_params.res =
- devm_request_mem_region(&of_dev->dev, phys_base_addr,
- mem_size, "fman");
- if (!fman->dts_params.res) {
- err = -EBUSY;
- dev_err(&of_dev->dev, "%s: request_mem_region() failed\n",
- __func__);
- goto fman_free;
- }
-
- fman->dts_params.base_addr =
- devm_ioremap(&of_dev->dev, phys_base_addr, mem_size);
- if (!fman->dts_params.base_addr) {
- err = -ENOMEM;
+ base_addr = devm_platform_get_and_ioremap_resource(of_dev, 0, &res);
+ if (IS_ERR(base_addr)) {
+ err = PTR_ERR(base_addr);
dev_err(&of_dev->dev, "%s: devm_ioremap() failed\n", __func__);
goto fman_free;
}
+ fman->dts_params.base_addr = base_addr;
+ fman->dts_params.res = res;
+
fman->dev = &of_dev->dev;
err = of_platform_populate(fm_node, NULL, NULL, &of_dev->dev);
diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
index 85617bb94959..51402dff72c5 100644
--- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
@@ -755,12 +755,12 @@ static struct fman_mac *pcs_to_dtsec(struct phylink_pcs *pcs)
return container_of(pcs, struct fman_mac, pcs);
}
-static void dtsec_pcs_get_state(struct phylink_pcs *pcs,
+static void dtsec_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
struct phylink_link_state *state)
{
struct fman_mac *dtsec = pcs_to_dtsec(pcs);
- phylink_mii_c22_pcs_get_state(dtsec->tbidev, state);
+ phylink_mii_c22_pcs_get_state(dtsec->tbidev, neg_mode, state);
}
static int dtsec_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
@@ -1446,7 +1446,6 @@ int dtsec_initialization(struct mac_device *mac_dev,
goto _return_fm_mac_free;
}
dtsec->pcs.ops = &dtsec_pcs_ops;
- dtsec->pcs.neg_mode = true;
dtsec->pcs.poll = true;
supported = mac_dev->phylink_config.supported_interfaces;
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 435138f4699d..bcbcad613512 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1647,20 +1647,11 @@ static void gfar_configure_serdes(struct net_device *dev)
*/
static int init_phy(struct net_device *dev)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
struct gfar_private *priv = netdev_priv(dev);
phy_interface_t interface = priv->interface;
struct phy_device *phydev;
struct ethtool_keee edata;
- linkmode_set_bit_array(phy_10_100_features_array,
- ARRAY_SIZE(phy_10_100_features_array),
- mask);
- linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask);
- linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask);
- if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mask);
-
priv->oldlink = 0;
priv->oldspeed = 0;
priv->oldduplex = -1;
@@ -1675,9 +1666,8 @@ static int init_phy(struct net_device *dev)
if (interface == PHY_INTERFACE_MODE_SGMII)
gfar_configure_serdes(dev);
- /* Remove any features not supported by the controller */
- linkmode_and(phydev->supported, phydev->supported, mask);
- linkmode_copy(phydev->advertising, phydev->supported);
+ if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT))
+ phy_set_max_speed(phydev, SPEED_100);
/* Add support for flow control */
phy_support_asym_pause(phydev);
@@ -2071,15 +2061,13 @@ static void gfar_timeout(struct net_device *dev, unsigned int txqueue)
schedule_work(&priv->reset_task);
}
-static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
+static int gfar_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
- struct hwtstamp_config config;
struct gfar_private *priv = netdev_priv(netdev);
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
- switch (config.tx_type) {
+ switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
priv->hwts_tx_en = 0;
break;
@@ -2092,7 +2080,7 @@ static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
return -ERANGE;
}
- switch (config.rx_filter) {
+ switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
if (priv->hwts_rx_en) {
priv->hwts_rx_en = 0;
@@ -2106,44 +2094,23 @@ static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
priv->hwts_rx_en = 1;
reset_gfar(netdev);
}
- config.rx_filter = HWTSTAMP_FILTER_ALL;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
break;
}
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ return 0;
}
-static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
+static int gfar_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
{
- struct hwtstamp_config config;
struct gfar_private *priv = netdev_priv(netdev);
- config.flags = 0;
- config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
- config.rx_filter = (priv->hwts_rx_en ?
- HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);
+ config->tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ config->rx_filter = priv->hwts_rx_en ? HWTSTAMP_FILTER_ALL :
+ HWTSTAMP_FILTER_NONE;
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
-}
-
-static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- struct phy_device *phydev = dev->phydev;
-
- if (!netif_running(dev))
- return -EINVAL;
-
- if (cmd == SIOCSHWTSTAMP)
- return gfar_hwtstamp_set(dev, rq);
- if (cmd == SIOCGHWTSTAMP)
- return gfar_hwtstamp_get(dev, rq);
-
- if (!phydev)
- return -ENODEV;
-
- return phy_mii_ioctl(phydev, rq, cmd);
+ return 0;
}
/* Interrupt Handler for Transmit complete */
@@ -3184,7 +3151,7 @@ static const struct net_device_ops gfar_netdev_ops = {
.ndo_set_features = gfar_set_features,
.ndo_set_rx_mode = gfar_set_multi,
.ndo_tx_timeout = gfar_timeout,
- .ndo_eth_ioctl = gfar_ioctl,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_get_stats64 = gfar_get_stats64,
.ndo_change_carrier = fixed_phy_change_carrier,
.ndo_set_mac_address = gfar_set_mac_addr,
@@ -3192,6 +3159,8 @@ static const struct net_device_ops gfar_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = gfar_netpoll,
#endif
+ .ndo_hwtstamp_get = gfar_hwtstamp_get,
+ .ndo_hwtstamp_set = gfar_hwtstamp_set,
};
/* Set up the ethernet device structure, private data,
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 6663c1768089..affd5a6c44e7 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -26,7 +26,7 @@
#include <linux/dma-mapping.h>
#include <linux/mii.h>
#include <linux/phy.h>
-#include <linux/phy_fixed.h>
+#include <linux/phylink.h>
#include <linux/workqueue.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -34,6 +34,7 @@
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/platform_device.h>
+#include <linux/rtnetlink.h>
#include <linux/uaccess.h>
#include <asm/irq.h>
@@ -132,7 +133,6 @@ static const struct ucc_geth_info ugeth_primary_info = {
.transmitFlowControl = 1,
.maxGroupAddrInHash = 4,
.maxIndAddrInHash = 4,
- .prel = 7,
.maxFrameLength = 1518+16, /* Add extra bytes for VLANs etc. */
.minFrameLength = 64,
.maxD1Length = 1520+16, /* Add extra bytes for VLANs etc. */
@@ -1205,34 +1205,6 @@ static int init_mac_station_addr_regs(u8 address_byte_0,
return 0;
}
-static int init_check_frame_length_mode(int length_check,
- u32 __iomem *maccfg2_register)
-{
- u32 value = 0;
-
- value = in_be32(maccfg2_register);
-
- if (length_check)
- value |= MACCFG2_LC;
- else
- value &= ~MACCFG2_LC;
-
- out_be32(maccfg2_register, value);
- return 0;
-}
-
-static int init_preamble_length(u8 preamble_length,
- u32 __iomem *maccfg2_register)
-{
- if ((preamble_length < 3) || (preamble_length > 7))
- return -EINVAL;
-
- clrsetbits_be32(maccfg2_register, MACCFG2_PREL_MASK,
- preamble_length << MACCFG2_PREL_SHIFT);
-
- return 0;
-}
-
static int init_rx_parameters(int reject_broadcast,
int receive_short_frames,
int promiscuous, u32 __iomem *upsmr_register)
@@ -1287,94 +1259,11 @@ static int init_min_frame_len(u16 min_frame_length,
return 0;
}
-static int adjust_enet_interface(struct ucc_geth_private *ugeth)
+static bool phy_interface_mode_is_reduced(phy_interface_t interface)
{
- struct ucc_geth_info *ug_info;
- struct ucc_geth __iomem *ug_regs;
- struct ucc_fast __iomem *uf_regs;
- int ret_val;
- u32 upsmr, maccfg2;
- u16 value;
-
- ugeth_vdbg("%s: IN", __func__);
-
- ug_info = ugeth->ug_info;
- ug_regs = ugeth->ug_regs;
- uf_regs = ugeth->uccf->uf_regs;
-
- /* Set MACCFG2 */
- maccfg2 = in_be32(&ug_regs->maccfg2);
- maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK;
- if ((ugeth->max_speed == SPEED_10) ||
- (ugeth->max_speed == SPEED_100))
- maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
- else if (ugeth->max_speed == SPEED_1000)
- maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
- maccfg2 |= ug_info->padAndCrc;
- out_be32(&ug_regs->maccfg2, maccfg2);
-
- /* Set UPSMR */
- upsmr = in_be32(&uf_regs->upsmr);
- upsmr &= ~(UCC_GETH_UPSMR_RPM | UCC_GETH_UPSMR_R10M |
- UCC_GETH_UPSMR_TBIM | UCC_GETH_UPSMR_RMM);
- if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) ||
- (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) ||
- (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) ||
- (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
- (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) ||
- (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
- if (ugeth->phy_interface != PHY_INTERFACE_MODE_RMII)
- upsmr |= UCC_GETH_UPSMR_RPM;
- switch (ugeth->max_speed) {
- case SPEED_10:
- upsmr |= UCC_GETH_UPSMR_R10M;
- fallthrough;
- case SPEED_100:
- if (ugeth->phy_interface != PHY_INTERFACE_MODE_RTBI)
- upsmr |= UCC_GETH_UPSMR_RMM;
- }
- }
- if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) ||
- (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
- upsmr |= UCC_GETH_UPSMR_TBIM;
- }
- if (ugeth->phy_interface == PHY_INTERFACE_MODE_SGMII)
- upsmr |= UCC_GETH_UPSMR_SGMM;
-
- out_be32(&uf_regs->upsmr, upsmr);
-
- /* Disable autonegotiation in tbi mode, because by default it
- comes up in autonegotiation mode. */
- /* Note that this depends on proper setting in utbipar register. */
- if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) ||
- (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
- struct ucc_geth_info *ug_info = ugeth->ug_info;
- struct phy_device *tbiphy;
-
- if (!ug_info->tbi_node)
- pr_warn("TBI mode requires that the device tree specify a tbi-handle\n");
-
- tbiphy = of_phy_find_device(ug_info->tbi_node);
- if (!tbiphy)
- pr_warn("Could not get TBI device\n");
-
- value = phy_read(tbiphy, ENET_TBI_MII_CR);
- value &= ~0x1000; /* Turn off autonegotiation */
- phy_write(tbiphy, ENET_TBI_MII_CR, value);
-
- put_device(&tbiphy->mdio.dev);
- }
-
- init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2);
-
- ret_val = init_preamble_length(ug_info->prel, &ug_regs->maccfg2);
- if (ret_val != 0) {
- if (netif_msg_probe(ugeth))
- pr_err("Preamble length must be between 3 and 7 inclusive\n");
- return ret_val;
- }
-
- return 0;
+ return phy_interface_mode_is_rgmii(interface) ||
+ interface == PHY_INTERFACE_MODE_RMII ||
+ interface == PHY_INTERFACE_MODE_RTBI;
}
static int ugeth_graceful_stop_tx(struct ucc_geth_private *ugeth)
@@ -1545,108 +1434,7 @@ static void ugeth_activate(struct ucc_geth_private *ugeth)
/* allow to xmit again */
netif_tx_wake_all_queues(ugeth->ndev);
- __netdev_watchdog_up(ugeth->ndev);
-}
-
-/* Called every time the controller might need to be made
- * aware of new link state. The PHY code conveys this
- * information through variables in the ugeth structure, and this
- * function converts those variables into the appropriate
- * register values, and can bring down the device if needed.
- */
-
-static void adjust_link(struct net_device *dev)
-{
- struct ucc_geth_private *ugeth = netdev_priv(dev);
- struct ucc_geth __iomem *ug_regs;
- struct ucc_fast __iomem *uf_regs;
- struct phy_device *phydev = ugeth->phydev;
- int new_state = 0;
-
- ug_regs = ugeth->ug_regs;
- uf_regs = ugeth->uccf->uf_regs;
-
- if (phydev->link) {
- u32 tempval = in_be32(&ug_regs->maccfg2);
- u32 upsmr = in_be32(&uf_regs->upsmr);
- /* Now we make sure that we can be in full duplex mode.
- * If not, we operate in half-duplex mode. */
- if (phydev->duplex != ugeth->oldduplex) {
- new_state = 1;
- if (!(phydev->duplex))
- tempval &= ~(MACCFG2_FDX);
- else
- tempval |= MACCFG2_FDX;
- ugeth->oldduplex = phydev->duplex;
- }
-
- if (phydev->speed != ugeth->oldspeed) {
- new_state = 1;
- switch (phydev->speed) {
- case SPEED_1000:
- tempval = ((tempval &
- ~(MACCFG2_INTERFACE_MODE_MASK)) |
- MACCFG2_INTERFACE_MODE_BYTE);
- break;
- case SPEED_100:
- case SPEED_10:
- tempval = ((tempval &
- ~(MACCFG2_INTERFACE_MODE_MASK)) |
- MACCFG2_INTERFACE_MODE_NIBBLE);
- /* if reduced mode, re-set UPSMR.R10M */
- if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) ||
- (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) ||
- (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) ||
- (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
- (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) ||
- (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
- if (phydev->speed == SPEED_10)
- upsmr |= UCC_GETH_UPSMR_R10M;
- else
- upsmr &= ~UCC_GETH_UPSMR_R10M;
- }
- break;
- default:
- if (netif_msg_link(ugeth))
- pr_warn(
- "%s: Ack! Speed (%d) is not 10/100/1000!",
- dev->name, phydev->speed);
- break;
- }
- ugeth->oldspeed = phydev->speed;
- }
-
- if (!ugeth->oldlink) {
- new_state = 1;
- ugeth->oldlink = 1;
- }
-
- if (new_state) {
- /*
- * To change the MAC configuration we need to disable
- * the controller. To do so, we have to either grab
- * ugeth->lock, which is a bad idea since 'graceful
- * stop' commands might take quite a while, or we can
- * quiesce driver's activity.
- */
- ugeth_quiesce(ugeth);
- ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
-
- out_be32(&ug_regs->maccfg2, tempval);
- out_be32(&uf_regs->upsmr, upsmr);
-
- ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
- ugeth_activate(ugeth);
- }
- } else if (ugeth->oldlink) {
- new_state = 1;
- ugeth->oldlink = 0;
- ugeth->oldspeed = 0;
- ugeth->oldduplex = -1;
- }
-
- if (new_state && netif_msg_link(ugeth))
- phy_print_status(phydev);
+ netdev_watchdog_up(ugeth->ndev);
}
/* Initialize TBI PHY interface for communicating with the
@@ -1664,8 +1452,7 @@ static void uec_configure_serdes(struct net_device *dev)
struct phy_device *tbiphy;
if (!ug_info->tbi_node) {
- dev_warn(&dev->dev, "SGMII mode requires that the device "
- "tree specify a tbi-handle\n");
+ dev_warn(&dev->dev, "SGMII mode requires that the device tree specify a tbi-handle\n");
return;
}
@@ -1696,34 +1483,145 @@ static void uec_configure_serdes(struct net_device *dev)
put_device(&tbiphy->mdio.dev);
}
-/* Configure the PHY for dev.
- * returns 0 if success. -1 if failure
- */
-static int init_phy(struct net_device *dev)
+static void ugeth_mac_link_up(struct phylink_config *config, struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex, bool tx_pause, bool rx_pause)
{
- struct ucc_geth_private *priv = netdev_priv(dev);
- struct ucc_geth_info *ug_info = priv->ug_info;
- struct phy_device *phydev;
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct ucc_geth_private *ugeth = netdev_priv(ndev);
+ struct ucc_geth_info *ug_info = ugeth->ug_info;
+ struct ucc_geth __iomem *ug_regs = ugeth->ug_regs;
+ struct ucc_fast __iomem *uf_regs = ugeth->uccf->uf_regs;
+ u32 old_maccfg2, maccfg2 = in_be32(&ug_regs->maccfg2);
+ u32 old_upsmr, upsmr = in_be32(&uf_regs->upsmr);
- priv->oldlink = 0;
- priv->oldspeed = 0;
- priv->oldduplex = -1;
+ old_maccfg2 = maccfg2;
+ old_upsmr = upsmr;
- phydev = of_phy_connect(dev, ug_info->phy_node, &adjust_link, 0,
- priv->phy_interface);
- if (!phydev) {
- dev_err(&dev->dev, "Could not attach to PHY\n");
- return -ENODEV;
+ /* No length check */
+ maccfg2 &= ~MACCFG2_LC;
+ maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK;
+ upsmr &= ~(UCC_GETH_UPSMR_RPM | UCC_GETH_UPSMR_R10M |
+ UCC_GETH_UPSMR_TBIM | UCC_GETH_UPSMR_RMM);
+
+ if (speed == SPEED_10 || speed == SPEED_100)
+ maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
+ else if (speed == SPEED_1000)
+ maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
+
+ maccfg2 |= ug_info->padAndCrc;
+
+ if (phy_interface_mode_is_reduced(interface)) {
+
+ if (interface != PHY_INTERFACE_MODE_RMII)
+ upsmr |= UCC_GETH_UPSMR_RPM;
+
+ switch (speed) {
+ case SPEED_10:
+ upsmr |= UCC_GETH_UPSMR_R10M;
+ fallthrough;
+ case SPEED_100:
+ if (interface != PHY_INTERFACE_MODE_RTBI)
+ upsmr |= UCC_GETH_UPSMR_RMM;
+ }
}
- if (priv->phy_interface == PHY_INTERFACE_MODE_SGMII)
- uec_configure_serdes(dev);
+ if (interface == PHY_INTERFACE_MODE_TBI ||
+ interface == PHY_INTERFACE_MODE_RTBI)
+ upsmr |= UCC_GETH_UPSMR_TBIM;
- phy_set_max_speed(phydev, priv->max_speed);
+ if (interface == PHY_INTERFACE_MODE_SGMII)
+ upsmr |= UCC_GETH_UPSMR_SGMM;
- priv->phydev = phydev;
+ if (duplex == DUPLEX_HALF)
+ maccfg2 &= ~(MACCFG2_FDX);
+ else
+ maccfg2 |= MACCFG2_FDX;
- return 0;
+ if (maccfg2 != old_maccfg2 || upsmr != old_upsmr) {
+ /*
+ * To change the MAC configuration we need to disable
+ * the controller. To do so, we have to either grab
+ * ugeth->lock, which is a bad idea since 'graceful
+ * stop' commands might take quite a while, or we can
+ * quiesce driver's activity.
+ */
+ ugeth_quiesce(ugeth);
+ ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
+
+ out_be32(&ug_regs->maccfg2, maccfg2);
+ out_be32(&uf_regs->upsmr, upsmr);
+
+ ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
+ ugeth_activate(ugeth);
+ }
+
+ if (interface == PHY_INTERFACE_MODE_SGMII)
+ uec_configure_serdes(ndev);
+
+ if (!phylink_autoneg_inband(mode)) {
+ ug_info->aufc = 0;
+ ug_info->receiveFlowControl = rx_pause;
+ ug_info->transmitFlowControl = tx_pause;
+
+ init_flow_control_params(ug_info->aufc,
+ ug_info->receiveFlowControl,
+ ug_info->transmitFlowControl,
+ ug_info->pausePeriod,
+ ug_info->extensionField,
+ &ugeth->uccf->uf_regs->upsmr,
+ &ugeth->ug_regs->uempr,
+ &ugeth->ug_regs->maccfg1);
+ }
+
+ ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
+}
+
+static void ugeth_mac_link_down(struct phylink_config *config,
+ unsigned int mode, phy_interface_t interface)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct ucc_geth_private *ugeth = netdev_priv(ndev);
+
+ ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
+}
+
+static void ugeth_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct ucc_geth_private *ugeth = netdev_priv(ndev);
+ struct ucc_geth_info *ug_info = ugeth->ug_info;
+ u16 value;
+
+ if (state->interface == PHY_INTERFACE_MODE_TBI ||
+ state->interface == PHY_INTERFACE_MODE_RTBI) {
+ struct phy_device *tbiphy;
+
+ if (!ug_info->tbi_node)
+ pr_warn("TBI mode requires that the device tree specify a tbi-handle\n");
+
+ tbiphy = of_phy_find_device(ug_info->tbi_node);
+ if (!tbiphy)
+ pr_warn("Could not get TBI device\n");
+
+ value = phy_read(tbiphy, ENET_TBI_MII_CR);
+ value &= ~0x1000; /* Turn off autonegotiation */
+ phy_write(tbiphy, ENET_TBI_MII_CR, value);
+
+ put_device(&tbiphy->mdio.dev);
+ }
+
+ if (phylink_autoneg_inband(mode)) {
+ ug_info->aufc = 1;
+
+ init_flow_control_params(ug_info->aufc, 1, 1,
+ ug_info->pausePeriod,
+ ug_info->extensionField,
+ &ugeth->uccf->uf_regs->upsmr,
+ &ugeth->ug_regs->uempr,
+ &ugeth->ug_regs->maccfg1);
+ }
}
static void ugeth_dump_regs(struct ucc_geth_private *ugeth)
@@ -1995,7 +1893,6 @@ static void ucc_geth_set_multi(struct net_device *dev)
static void ucc_geth_stop(struct ucc_geth_private *ugeth)
{
struct ucc_geth __iomem *ug_regs = ugeth->ug_regs;
- struct phy_device *phydev = ugeth->phydev;
ugeth_vdbg("%s: IN", __func__);
@@ -2004,7 +1901,7 @@ static void ucc_geth_stop(struct ucc_geth_private *ugeth)
* Must be done before disabling the controller
* or deadlock may happen.
*/
- phy_stop(phydev);
+ phylink_stop(ugeth->phylink);
/* Disable the controller */
ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
@@ -3246,12 +3143,6 @@ static int ucc_geth_init_mac(struct ucc_geth_private *ugeth)
goto err;
}
- err = adjust_enet_interface(ugeth);
- if (err) {
- netif_err(ugeth, ifup, dev, "Cannot configure net device, aborting\n");
- goto err;
- }
-
/* Set MACSTNADDR1, MACSTNADDR2 */
/* For more details see the hardware spec. */
init_mac_station_addr_regs(dev->dev_addr[0],
@@ -3263,12 +3154,6 @@ static int ucc_geth_init_mac(struct ucc_geth_private *ugeth)
&ugeth->ug_regs->macstnaddr1,
&ugeth->ug_regs->macstnaddr2);
- err = ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
- if (err) {
- netif_err(ugeth, ifup, dev, "Cannot enable net device, aborting\n");
- goto err;
- }
-
return 0;
err:
ucc_geth_stop(ugeth);
@@ -3291,10 +3176,10 @@ static int ucc_geth_open(struct net_device *dev)
return -EINVAL;
}
- err = init_phy(dev);
+ err = phylink_of_phy_connect(ugeth->phylink, ugeth->dev->of_node, 0);
if (err) {
- netif_err(ugeth, ifup, dev, "Cannot initialize PHY, aborting\n");
- return err;
+ dev_err(&dev->dev, "Could not attach to PHY\n");
+ return -ENODEV;
}
err = ucc_geth_init_mac(ugeth);
@@ -3310,13 +3195,13 @@ static int ucc_geth_open(struct net_device *dev)
goto err;
}
- phy_start(ugeth->phydev);
+ phylink_start(ugeth->phylink);
napi_enable(&ugeth->napi);
netdev_reset_queue(dev);
netif_start_queue(dev);
device_set_wakeup_capable(&dev->dev,
- qe_alive_during_sleep() || ugeth->phydev->irq);
+ qe_alive_during_sleep() || dev->phydev->irq);
device_set_wakeup_enable(&dev->dev, ugeth->wol_en);
return err;
@@ -3337,8 +3222,7 @@ static int ucc_geth_close(struct net_device *dev)
cancel_work_sync(&ugeth->timeout_work);
ucc_geth_stop(ugeth);
- phy_disconnect(ugeth->phydev);
- ugeth->phydev = NULL;
+ phylink_disconnect_phy(ugeth->phylink);
free_irq(ugeth->ug_info->uf_info.irq, ugeth->ndev);
@@ -3372,7 +3256,7 @@ static void ucc_geth_timeout_work(struct work_struct *work)
ucc_geth_stop(ugeth);
ucc_geth_init_mac(ugeth);
/* Must start PHY here */
- phy_start(ugeth->phydev);
+ phylink_start(ugeth->phylink);
netif_tx_start_all_queues(dev);
}
@@ -3397,6 +3281,7 @@ static int ucc_geth_suspend(struct platform_device *ofdev, pm_message_t state)
{
struct net_device *ndev = platform_get_drvdata(ofdev);
struct ucc_geth_private *ugeth = netdev_priv(ndev);
+ bool mac_wol = false;
if (!netif_running(ndev))
return 0;
@@ -3410,14 +3295,17 @@ static int ucc_geth_suspend(struct platform_device *ofdev, pm_message_t state)
*/
ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
- if (ugeth->wol_en & WAKE_MAGIC) {
+ if (ugeth->wol_en & WAKE_MAGIC && !ugeth->phy_wol_en) {
setbits32(ugeth->uccf->p_uccm, UCC_GETH_UCCE_MPD);
setbits32(&ugeth->ug_regs->maccfg2, MACCFG2_MPE);
ucc_fast_enable(ugeth->uccf, COMM_DIR_RX_AND_TX);
- } else if (!(ugeth->wol_en & WAKE_PHY)) {
- phy_stop(ugeth->phydev);
+ mac_wol = true;
}
+ rtnl_lock();
+ phylink_suspend(ugeth->phylink, mac_wol);
+ rtnl_unlock();
+
return 0;
}
@@ -3451,12 +3339,9 @@ static int ucc_geth_resume(struct platform_device *ofdev)
}
}
- ugeth->oldlink = 0;
- ugeth->oldspeed = 0;
- ugeth->oldduplex = -1;
-
- phy_stop(ugeth->phydev);
- phy_start(ugeth->phydev);
+ rtnl_lock();
+ phylink_resume(ugeth->phylink);
+ rtnl_unlock();
napi_enable(&ugeth->napi);
netif_device_attach(ndev);
@@ -3469,32 +3354,6 @@ static int ucc_geth_resume(struct platform_device *ofdev)
#define ucc_geth_resume NULL
#endif
-static phy_interface_t to_phy_interface(const char *phy_connection_type)
-{
- if (strcasecmp(phy_connection_type, "mii") == 0)
- return PHY_INTERFACE_MODE_MII;
- if (strcasecmp(phy_connection_type, "gmii") == 0)
- return PHY_INTERFACE_MODE_GMII;
- if (strcasecmp(phy_connection_type, "tbi") == 0)
- return PHY_INTERFACE_MODE_TBI;
- if (strcasecmp(phy_connection_type, "rmii") == 0)
- return PHY_INTERFACE_MODE_RMII;
- if (strcasecmp(phy_connection_type, "rgmii") == 0)
- return PHY_INTERFACE_MODE_RGMII;
- if (strcasecmp(phy_connection_type, "rgmii-id") == 0)
- return PHY_INTERFACE_MODE_RGMII_ID;
- if (strcasecmp(phy_connection_type, "rgmii-txid") == 0)
- return PHY_INTERFACE_MODE_RGMII_TXID;
- if (strcasecmp(phy_connection_type, "rgmii-rxid") == 0)
- return PHY_INTERFACE_MODE_RGMII_RXID;
- if (strcasecmp(phy_connection_type, "rtbi") == 0)
- return PHY_INTERFACE_MODE_RTBI;
- if (strcasecmp(phy_connection_type, "sgmii") == 0)
- return PHY_INTERFACE_MODE_SGMII;
-
- return PHY_INTERFACE_MODE_MII;
-}
-
static int ucc_geth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct ucc_geth_private *ugeth = netdev_priv(dev);
@@ -3502,10 +3361,7 @@ static int ucc_geth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
if (!netif_running(dev))
return -EINVAL;
- if (!ugeth->phydev)
- return -ENODEV;
-
- return phy_mii_ioctl(ugeth->phydev, rq, cmd);
+ return phylink_mii_ioctl(ugeth->phylink, rq, cmd);
}
static const struct net_device_ops ucc_geth_netdev_ops = {
@@ -3513,7 +3369,6 @@ static const struct net_device_ops ucc_geth_netdev_ops = {
.ndo_stop = ucc_geth_close,
.ndo_start_xmit = ucc_geth_start_xmit,
.ndo_validate_addr = eth_validate_addr,
- .ndo_change_carrier = fixed_phy_change_carrier,
.ndo_set_mac_address = ucc_geth_set_mac_addr,
.ndo_set_rx_mode = ucc_geth_set_multi,
.ndo_tx_timeout = ucc_geth_timeout,
@@ -3553,6 +3408,12 @@ static int ucc_geth_parse_clock(struct device_node *np, const char *which,
return 0;
}
+static const struct phylink_mac_ops ugeth_mac_ops = {
+ .mac_link_up = ugeth_mac_link_up,
+ .mac_link_down = ugeth_mac_link_down,
+ .mac_config = ugeth_mac_config,
+};
+
static int ucc_geth_probe(struct platform_device* ofdev)
{
struct device *device = &ofdev->dev;
@@ -3560,23 +3421,12 @@ static int ucc_geth_probe(struct platform_device* ofdev)
struct net_device *dev = NULL;
struct ucc_geth_private *ugeth = NULL;
struct ucc_geth_info *ug_info;
+ struct device_node *phy_node;
+ struct phylink *phylink;
struct resource res;
- int err, ucc_num, max_speed = 0;
+ int err, ucc_num;
const unsigned int *prop;
phy_interface_t phy_interface;
- static const int enet_to_speed[] = {
- SPEED_10, SPEED_10, SPEED_10,
- SPEED_100, SPEED_100, SPEED_100,
- SPEED_1000, SPEED_1000, SPEED_1000, SPEED_1000,
- };
- static const phy_interface_t enet_to_phy_interface[] = {
- PHY_INTERFACE_MODE_MII, PHY_INTERFACE_MODE_RMII,
- PHY_INTERFACE_MODE_RGMII, PHY_INTERFACE_MODE_MII,
- PHY_INTERFACE_MODE_RMII, PHY_INTERFACE_MODE_RGMII,
- PHY_INTERFACE_MODE_GMII, PHY_INTERFACE_MODE_RGMII,
- PHY_INTERFACE_MODE_TBI, PHY_INTERFACE_MODE_RTBI,
- PHY_INTERFACE_MODE_SGMII,
- };
ugeth_vdbg("%s: IN", __func__);
@@ -3612,57 +3462,35 @@ static int ucc_geth_probe(struct platform_device* ofdev)
ug_info->uf_info.regs = res.start;
ug_info->uf_info.irq = irq_of_parse_and_map(np, 0);
- ug_info->phy_node = of_parse_phandle(np, "phy-handle", 0);
- if (!ug_info->phy_node && of_phy_is_fixed_link(np)) {
- /*
- * In the case of a fixed PHY, the DT node associated
- * to the PHY is the Ethernet MAC DT node.
- */
- err = of_phy_register_fixed_link(np);
- if (err)
- return err;
- ug_info->phy_node = of_node_get(np);
- }
-
/* Find the TBI PHY node. If it's not there, we don't support SGMII */
ug_info->tbi_node = of_parse_phandle(np, "tbi-handle", 0);
- /* get the phy interface type, or default to MII */
- prop = of_get_property(np, "phy-connection-type", NULL);
- if (!prop) {
- /* handle interface property present in old trees */
- prop = of_get_property(ug_info->phy_node, "interface", NULL);
- if (prop != NULL) {
- phy_interface = enet_to_phy_interface[*prop];
- max_speed = enet_to_speed[*prop];
- } else
- phy_interface = PHY_INTERFACE_MODE_MII;
- } else {
- phy_interface = to_phy_interface((const char *)prop);
- }
-
- /* get speed, or derive from PHY interface */
- if (max_speed == 0)
- switch (phy_interface) {
- case PHY_INTERFACE_MODE_GMII:
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
- case PHY_INTERFACE_MODE_TBI:
- case PHY_INTERFACE_MODE_RTBI:
- case PHY_INTERFACE_MODE_SGMII:
- max_speed = SPEED_1000;
- break;
- default:
- max_speed = SPEED_100;
- break;
+ phy_node = of_parse_phandle(np, "phy-handle", 0);
+ if (phy_node) {
+ prop = of_get_property(phy_node, "interface", NULL);
+ if (prop) {
+ dev_err(&ofdev->dev,
+ "Device-tree property 'interface' is no longer supported. Please use 'phy-connection-type' instead.");
+ of_node_put(phy_node);
+ err = -EINVAL;
+ goto err_put_tbi;
}
+ of_node_put(phy_node);
+ }
+
+ err = of_get_phy_mode(np, &phy_interface);
+ if (err) {
+ dev_err(&ofdev->dev, "Invalid phy-connection-type");
+ goto err_put_tbi;
+ }
- if (max_speed == SPEED_1000) {
+ if (phy_interface == PHY_INTERFACE_MODE_GMII ||
+ phy_interface_mode_is_rgmii(phy_interface) ||
+ phy_interface == PHY_INTERFACE_MODE_TBI ||
+ phy_interface == PHY_INTERFACE_MODE_RTBI ||
+ phy_interface == PHY_INTERFACE_MODE_SGMII) {
unsigned int snums = qe_get_num_of_snums();
- /* configure muram FIFOs for gigabit operation */
ug_info->uf_info.urfs = UCC_GETH_URFS_GIGA_INIT;
ug_info->uf_info.urfet = UCC_GETH_URFET_GIGA_INIT;
ug_info->uf_info.urfset = UCC_GETH_URFSET_GIGA_INIT;
@@ -3691,7 +3519,7 @@ static int ucc_geth_probe(struct platform_device* ofdev)
dev = devm_alloc_etherdev(&ofdev->dev, sizeof(*ugeth));
if (!dev) {
err = -ENOMEM;
- goto err_deregister_fixed_link;
+ goto err_put_tbi;
}
ugeth = netdev_priv(dev);
@@ -3718,23 +3546,50 @@ static int ucc_geth_probe(struct platform_device* ofdev)
dev->max_mtu = 1518;
ugeth->msg_enable = netif_msg_init(debug.msg_enable, UGETH_MSG_DEFAULT);
- ugeth->phy_interface = phy_interface;
- ugeth->max_speed = max_speed;
- /* Carrier starts down, phylib will bring it up */
- netif_carrier_off(dev);
+ ugeth->phylink_config.dev = &dev->dev;
+ ugeth->phylink_config.type = PHYLINK_NETDEV;
+
+ ugeth->phylink_config.mac_capabilities =
+ MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD;
+
+ __set_bit(PHY_INTERFACE_MODE_MII,
+ ugeth->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_RMII,
+ ugeth->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ ugeth->phylink_config.supported_interfaces);
+ phy_interface_set_rgmii(ugeth->phylink_config.supported_interfaces);
+
+ if (ug_info->tbi_node) {
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ ugeth->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_TBI,
+ ugeth->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_RTBI,
+ ugeth->phylink_config.supported_interfaces);
+ }
+
+ phylink = phylink_create(&ugeth->phylink_config, dev_fwnode(&dev->dev),
+ phy_interface, &ugeth_mac_ops);
+ if (IS_ERR(phylink)) {
+ err = PTR_ERR(phylink);
+ goto err_put_tbi;
+ }
+
+ ugeth->phylink = phylink;
err = devm_register_netdev(&ofdev->dev, dev);
if (err) {
if (netif_msg_probe(ugeth))
pr_err("%s: Cannot register net device, aborting\n",
dev->name);
- goto err_deregister_fixed_link;
+ goto err_destroy_phylink;
}
err = of_get_ethdev_address(np, dev);
if (err == -EPROBE_DEFER)
- goto err_deregister_fixed_link;
+ goto err_destroy_phylink;
ugeth->ug_info = ug_info;
ugeth->dev = device;
@@ -3743,11 +3598,11 @@ static int ucc_geth_probe(struct platform_device* ofdev)
return 0;
-err_deregister_fixed_link:
- if (of_phy_is_fixed_link(np))
- of_phy_deregister_fixed_link(np);
+err_destroy_phylink:
+ phylink_destroy(phylink);
+err_put_tbi:
of_node_put(ug_info->tbi_node);
- of_node_put(ug_info->phy_node);
+
return err;
}
@@ -3755,13 +3610,10 @@ static void ucc_geth_remove(struct platform_device* ofdev)
{
struct net_device *dev = platform_get_drvdata(ofdev);
struct ucc_geth_private *ugeth = netdev_priv(dev);
- struct device_node *np = ofdev->dev.of_node;
ucc_geth_memclean(ugeth);
- if (of_phy_is_fixed_link(np))
- of_phy_deregister_fixed_link(np);
+ phylink_destroy(ugeth->phylink);
of_node_put(ugeth->ug_info->tbi_node);
- of_node_put(ugeth->ug_info->phy_node);
}
static const struct of_device_id ucc_geth_match[] = {
diff --git a/drivers/net/ethernet/freescale/ucc_geth.h b/drivers/net/ethernet/freescale/ucc_geth.h
index 4294ed096ebb..84f92f6384e7 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.h
+++ b/drivers/net/ethernet/freescale/ucc_geth.h
@@ -16,6 +16,7 @@
#include <linux/kernel.h>
#include <linux/list.h>
+#include <linux/phylink.h>
#include <linux/if_ether.h>
#include <soc/fsl/qe/immap_qe.h>
@@ -889,8 +890,6 @@ struct ucc_geth_hardware_statistics {
addresses */
#define TX_TIMEOUT (1*HZ)
-#define PHY_INIT_TIMEOUT 100000
-#define PHY_CHANGE_TIME 2
/* Fast Ethernet (10/100 Mbps) */
#define UCC_GETH_URFS_INIT 512 /* Rx virtual FIFO size
@@ -921,7 +920,8 @@ struct ucc_geth_hardware_statistics {
#define UCC_GETH_UPSMR_INIT UCC_GETH_UPSMR_RES1
#define UCC_GETH_MACCFG1_INIT 0
-#define UCC_GETH_MACCFG2_INIT (MACCFG2_RESERVED_1)
+#define UCC_GETH_MACCFG2_INIT (MACCFG2_RESERVED_1 | \
+ (7 << MACCFG2_PREL_SHIFT))
/* Ethernet Address Type. */
enum enet_addr_type {
@@ -1073,6 +1073,9 @@ struct ucc_geth_tad_params {
u16 vid;
};
+struct phylink;
+struct phylink_config;
+
/* GETH protocol initialization structure */
struct ucc_geth_info {
struct ucc_fast_info uf_info;
@@ -1088,7 +1091,6 @@ struct ucc_geth_info {
u8 miminumInterFrameGapEnforcement;
u8 backToBackInterFrameGap;
int ipAddressAlignment;
- int lengthCheckRx;
u32 mblinterval;
u16 nortsrbytetime;
u8 fracsiz;
@@ -1114,7 +1116,6 @@ struct ucc_geth_info {
int transmitFlowControl;
u8 maxGroupAddrInHash;
u8 maxIndAddrInHash;
- u8 prel;
u16 maxFrameLength;
u16 minFrameLength;
u16 maxD1Length;
@@ -1125,7 +1126,6 @@ struct ucc_geth_info {
u32 eventRegMask;
u16 pausePeriod;
u16 extensionField;
- struct device_node *phy_node;
struct device_node *tbi_node;
u8 weightfactor[NUM_TX_QUEUES];
u8 interruptcoalescingmaxvalue[NUM_RX_QUEUES];
@@ -1210,14 +1210,12 @@ struct ucc_geth_private {
u16 skb_dirtytx[NUM_TX_QUEUES];
struct ugeth_mii_info *mii_info;
- struct phy_device *phydev;
- phy_interface_t phy_interface;
- int max_speed;
uint32_t msg_enable;
- int oldspeed;
- int oldduplex;
- int oldlink;
- int wol_en;
+ u32 wol_en;
+ u32 phy_wol_en;
+
+ struct phylink *phylink;
+ struct phylink_config phylink_config;
struct device_node *node;
};
diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
index 699f346faf5c..1fb49e5a414a 100644
--- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
+++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
@@ -104,14 +104,8 @@ static int
uec_get_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd)
{
struct ucc_geth_private *ugeth = netdev_priv(netdev);
- struct phy_device *phydev = ugeth->phydev;
- if (!phydev)
- return -ENODEV;
-
- phy_ethtool_ksettings_get(phydev, cmd);
-
- return 0;
+ return phylink_ethtool_ksettings_get(ugeth->phylink, cmd);
}
static int
@@ -119,12 +113,8 @@ uec_set_ksettings(struct net_device *netdev,
const struct ethtool_link_ksettings *cmd)
{
struct ucc_geth_private *ugeth = netdev_priv(netdev);
- struct phy_device *phydev = ugeth->phydev;
- if (!phydev)
- return -ENODEV;
-
- return phy_ethtool_ksettings_set(phydev, cmd);
+ return phylink_ethtool_ksettings_set(ugeth->phylink, cmd);
}
static void
@@ -133,12 +123,7 @@ uec_get_pauseparam(struct net_device *netdev,
{
struct ucc_geth_private *ugeth = netdev_priv(netdev);
- pause->autoneg = ugeth->phydev->autoneg;
-
- if (ugeth->ug_info->receiveFlowControl)
- pause->rx_pause = 1;
- if (ugeth->ug_info->transmitFlowControl)
- pause->tx_pause = 1;
+ return phylink_ethtool_get_pauseparam(ugeth->phylink, pause);
}
static int
@@ -146,30 +131,11 @@ uec_set_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
struct ucc_geth_private *ugeth = netdev_priv(netdev);
- int ret = 0;
ugeth->ug_info->receiveFlowControl = pause->rx_pause;
ugeth->ug_info->transmitFlowControl = pause->tx_pause;
- if (ugeth->phydev->autoneg) {
- if (netif_running(netdev)) {
- /* FIXME: automatically restart */
- netdev_info(netdev, "Please re-open the interface\n");
- }
- } else {
- struct ucc_geth_info *ug_info = ugeth->ug_info;
-
- ret = init_flow_control_params(ug_info->aufc,
- ug_info->receiveFlowControl,
- ug_info->transmitFlowControl,
- ug_info->pausePeriod,
- ug_info->extensionField,
- &ugeth->uccf->uf_regs->upsmr,
- &ugeth->ug_regs->uempr,
- &ugeth->ug_regs->maccfg1);
- }
-
- return ret;
+ return phylink_ethtool_set_pauseparam(ugeth->phylink, pause);
}
static uint32_t
@@ -343,28 +309,42 @@ uec_get_drvinfo(struct net_device *netdev,
static void uec_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct ucc_geth_private *ugeth = netdev_priv(netdev);
- struct phy_device *phydev = ugeth->phydev;
- if (phydev && phydev->irq)
- wol->supported |= WAKE_PHY;
+ phylink_ethtool_get_wol(ugeth->phylink, wol);
+
if (qe_alive_during_sleep())
wol->supported |= WAKE_MAGIC;
- wol->wolopts = ugeth->wol_en;
+ wol->wolopts |= ugeth->wol_en;
}
static int uec_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct ucc_geth_private *ugeth = netdev_priv(netdev);
- struct phy_device *phydev = ugeth->phydev;
+ int ret = 0;
- if (wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
- return -EINVAL;
- else if (wol->wolopts & WAKE_PHY && (!phydev || !phydev->irq))
+ ret = phylink_ethtool_set_wol(ugeth->phylink, wol);
+ if (ret == -EOPNOTSUPP) {
+ ugeth->phy_wol_en = 0;
+ } else if (ret) {
+ return ret;
+ } else {
+ ugeth->phy_wol_en = wol->wolopts;
+ goto out;
+ }
+
+	/* If the PHY isn't handling the WoL and the MAC is asked for more than
+	 * WAKE_MAGIC, error out
+ */
+ if (!ugeth->phy_wol_en &&
+ wol->wolopts & ~WAKE_MAGIC)
return -EINVAL;
- else if (wol->wolopts & WAKE_MAGIC && !qe_alive_during_sleep())
+
+ if (wol->wolopts & WAKE_MAGIC &&
+ !qe_alive_during_sleep())
return -EINVAL;
+out:
ugeth->wol_en = wol->wolopts;
device_set_wakeup_enable(&netdev->dev, ugeth->wol_en);
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index dd92949bb214..2fab38c8ee78 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -59,6 +59,8 @@
#define GVE_MAX_RX_BUFFER_SIZE 4096
+#define GVE_XDP_RX_BUFFER_SIZE_DQO 4096
+
#define GVE_DEFAULT_RX_BUFFER_OFFSET 2048
#define GVE_PAGE_POOL_SIZE_MULTIPLIER 4
@@ -68,6 +70,9 @@
#define GVE_FLOW_RULE_IDS_CACHE_SIZE \
(GVE_ADMINQ_BUFFER_SIZE / sizeof(((struct gve_adminq_queried_flow_rule *)0)->location))
+#define GVE_RSS_KEY_SIZE 40
+#define GVE_RSS_INDIR_SIZE 128
+
#define GVE_XDP_ACTIONS 5
#define GVE_GQ_TX_MIN_PKT_DESC_BYTES 182
@@ -102,7 +107,13 @@ struct gve_rx_desc_queue {
/* The page info for a single slot in the RX data queue */
struct gve_rx_slot_page_info {
- struct page *page;
+ /* netmem is used for DQO RDA mode
+ * page is used in all other modes
+ */
+ union {
+ struct page *page;
+ netmem_ref netmem;
+ };
void *page_address;
u32 page_offset; /* offset to write to in page */
unsigned int buf_size;
@@ -218,6 +229,11 @@ struct gve_rx_cnts {
/* Contains datapath state used to represent an RX queue. */
struct gve_rx_ring {
struct gve_priv *gve;
+
+ u16 packet_buffer_size; /* Size of buffer posted to NIC */
+ u16 packet_buffer_truesize; /* Total size of RX buffer */
+ u16 rx_headroom;
+
union {
/* GQI fields */
struct {
@@ -226,7 +242,6 @@ struct gve_rx_ring {
/* threshold for posting new buffs and descs */
u32 db_threshold;
- u16 packet_buffer_size;
u32 qpl_copy_pool_mask;
u32 qpl_copy_pool_head;
@@ -604,8 +619,6 @@ struct gve_tx_ring {
dma_addr_t complq_bus_dqo; /* dma address of the dqo.compl_ring */
struct u64_stats_sync statss; /* sync stats for 32bit archs */
struct xsk_buff_pool *xsk_pool;
- u32 xdp_xsk_wakeup;
- u32 xdp_xsk_done;
u64 xdp_xsk_sent;
u64 xdp_xmit;
u64 xdp_xmit_errors;
@@ -624,10 +637,18 @@ struct gve_notify_block {
u32 irq;
};
-/* Tracks allowed and current queue settings */
-struct gve_queue_config {
+/* Tracks allowed and current rx queue settings */
+struct gve_rx_queue_config {
u16 max_queues;
- u16 num_queues; /* current */
+ u16 num_queues;
+ u16 packet_buffer_size;
+};
+
+/* Tracks allowed and current tx queue settings */
+struct gve_tx_queue_config {
+ u16 max_queues;
+ u16 num_queues; /* number of TX queues, excluding XDP queues */
+ u16 num_xdp_queues;
};
/* Tracks the available and used qpl IDs */
@@ -651,11 +672,11 @@ struct gve_ptype_lut {
/* Parameters for allocating resources for tx queues */
struct gve_tx_alloc_rings_cfg {
- struct gve_queue_config *qcfg;
+ struct gve_tx_queue_config *qcfg;
+
+ u16 num_xdp_rings;
u16 ring_size;
- u16 start_idx;
- u16 num_rings;
bool raw_addressing;
/* Allocated resources are returned here */
@@ -665,13 +686,15 @@ struct gve_tx_alloc_rings_cfg {
/* Parameters for allocating resources for rx queues */
struct gve_rx_alloc_rings_cfg {
/* tx config is also needed to determine QPL ids */
- struct gve_queue_config *qcfg;
- struct gve_queue_config *qcfg_tx;
+ struct gve_rx_queue_config *qcfg_rx;
+ struct gve_tx_queue_config *qcfg_tx;
u16 ring_size;
u16 packet_buffer_size;
bool raw_addressing;
bool enable_header_split;
+ bool reset_rss;
+ bool xdp;
/* Allocated resources are returned here */
struct gve_rx_ring *rx;
@@ -722,6 +745,11 @@ struct gve_flow_rules_cache {
u32 rule_ids_cache_num;
};
+struct gve_rss_config {
+ u8 *hash_key;
+ u32 *hash_lut;
+};
+
struct gve_priv {
struct net_device *dev;
struct gve_tx_ring *tx; /* array of tx_cfg.num_queues */
@@ -751,9 +779,8 @@ struct gve_priv {
u32 rx_copybreak; /* copy packets smaller than this */
u16 default_num_queues; /* default num queues to set up */
- u16 num_xdp_queues;
- struct gve_queue_config tx_cfg;
- struct gve_queue_config rx_cfg;
+ struct gve_tx_queue_config tx_cfg;
+ struct gve_rx_queue_config rx_cfg;
u32 num_ntfy_blks; /* split between TX and RX so must be even */
struct gve_registers __iomem *reg_bar0; /* see gve_register.h */
@@ -823,7 +850,6 @@ struct gve_priv {
struct gve_ptype_lut *ptype_lut_dqo;
/* Must be a power of two. */
- u16 data_buffer_size_dqo;
u16 max_rx_buffer_size; /* device limit */
enum gve_queue_format queue_format;
@@ -842,6 +868,8 @@ struct gve_priv {
u16 rss_key_size;
u16 rss_lut_size;
+ bool cache_rss_config;
+ struct gve_rss_config rss_config;
};
enum gve_service_task_flags_bit {
@@ -1024,27 +1052,16 @@ static inline bool gve_is_qpl(struct gve_priv *priv)
}
/* Returns the number of tx queue page lists */
-static inline u32 gve_num_tx_qpls(const struct gve_queue_config *tx_cfg,
- int num_xdp_queues,
+static inline u32 gve_num_tx_qpls(const struct gve_tx_queue_config *tx_cfg,
bool is_qpl)
{
if (!is_qpl)
return 0;
- return tx_cfg->num_queues + num_xdp_queues;
-}
-
-/* Returns the number of XDP tx queue page lists
- */
-static inline u32 gve_num_xdp_qpls(struct gve_priv *priv)
-{
- if (priv->queue_format != GVE_GQI_QPL_FORMAT)
- return 0;
-
- return priv->num_xdp_queues;
+ return tx_cfg->num_queues + tx_cfg->num_xdp_queues;
}
/* Returns the number of rx queue page lists */
-static inline u32 gve_num_rx_qpls(const struct gve_queue_config *rx_cfg,
+static inline u32 gve_num_rx_qpls(const struct gve_rx_queue_config *rx_cfg,
bool is_qpl)
{
if (!is_qpl)
@@ -1062,7 +1079,8 @@ static inline u32 gve_rx_qpl_id(struct gve_priv *priv, int rx_qid)
return priv->tx_cfg.max_queues + rx_qid;
}
-static inline u32 gve_get_rx_qpl_id(const struct gve_queue_config *tx_cfg, int rx_qid)
+static inline u32 gve_get_rx_qpl_id(const struct gve_tx_queue_config *tx_cfg,
+ int rx_qid)
{
return tx_cfg->max_queues + rx_qid;
}
@@ -1072,7 +1090,7 @@ static inline u32 gve_tx_start_qpl_id(struct gve_priv *priv)
return gve_tx_qpl_id(priv, 0);
}
-static inline u32 gve_rx_start_qpl_id(const struct gve_queue_config *tx_cfg)
+static inline u32 gve_rx_start_qpl_id(const struct gve_tx_queue_config *tx_cfg)
{
return gve_get_rx_qpl_id(tx_cfg, 0);
}
@@ -1103,7 +1121,7 @@ static inline bool gve_is_gqi(struct gve_priv *priv)
static inline u32 gve_num_tx_queues(struct gve_priv *priv)
{
- return priv->tx_cfg.num_queues + priv->num_xdp_queues;
+ return priv->tx_cfg.num_queues + priv->tx_cfg.num_xdp_queues;
}
static inline u32 gve_xdp_tx_queue_id(struct gve_priv *priv, u32 queue_id)
@@ -1116,6 +1134,16 @@ static inline u32 gve_xdp_tx_start_queue_id(struct gve_priv *priv)
return gve_xdp_tx_queue_id(priv, 0);
}
+static inline bool gve_supports_xdp_xmit(struct gve_priv *priv)
+{
+ switch (priv->queue_format) {
+ case GVE_GQI_QPL_FORMAT:
+ return true;
+ default:
+ return false;
+ }
+}
+
/* gqi napi handler defined in gve_main.c */
int gve_napi_poll(struct napi_struct *napi, int budget);
@@ -1140,6 +1168,7 @@ int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx,
void gve_xdp_tx_flush(struct gve_priv *priv, u32 xdp_qid);
bool gve_tx_poll(struct gve_notify_block *block, int budget);
bool gve_xdp_poll(struct gve_notify_block *block, int budget);
+int gve_xsk_tx_poll(struct gve_notify_block *block, int budget);
int gve_tx_alloc_rings_gqi(struct gve_priv *priv,
struct gve_tx_alloc_rings_cfg *cfg);
void gve_tx_free_rings_gqi(struct gve_priv *priv,
@@ -1196,7 +1225,8 @@ void gve_free_buffer(struct gve_rx_ring *rx,
struct gve_rx_buf_state_dqo *buf_state);
int gve_alloc_buffer(struct gve_rx_ring *rx, struct gve_rx_desc_dqo *desc);
struct page_pool *gve_rx_create_page_pool(struct gve_priv *priv,
- struct gve_rx_ring *rx);
+ struct gve_rx_ring *rx,
+ bool xdp);
/* Reset */
void gve_schedule_reset(struct gve_priv *priv);
@@ -1208,14 +1238,17 @@ int gve_adjust_config(struct gve_priv *priv,
struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
int gve_adjust_queues(struct gve_priv *priv,
- struct gve_queue_config new_rx_config,
- struct gve_queue_config new_tx_config);
+ struct gve_rx_queue_config new_rx_config,
+ struct gve_tx_queue_config new_tx_config,
+ bool reset_rss);
/* flow steering rule */
int gve_get_flow_rule_entry(struct gve_priv *priv, struct ethtool_rxnfc *cmd);
int gve_get_flow_rule_ids(struct gve_priv *priv, struct ethtool_rxnfc *cmd, u32 *rule_locs);
int gve_add_flow_rule(struct gve_priv *priv, struct ethtool_rxnfc *cmd);
int gve_del_flow_rule(struct gve_priv *priv, struct ethtool_rxnfc *cmd);
int gve_flow_rules_reset(struct gve_priv *priv);
+/* RSS config */
+int gve_init_rss_config(struct gve_priv *priv, u16 num_queues);
/* report stats handling */
void gve_handle_report_stats(struct gve_priv *priv);
/* exported by ethtool.c */
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
index 060e0e674938..3e8fc33cc11f 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.c
+++ b/drivers/net/ethernet/google/gve/gve_adminq.c
@@ -731,6 +731,7 @@ static void gve_adminq_get_create_rx_queue_cmd(struct gve_priv *priv,
.ntfy_id = cpu_to_be32(rx->ntfy_id),
.queue_resources_addr = cpu_to_be64(rx->q_resources_bus),
.rx_ring_size = cpu_to_be16(priv->rx_desc_cnt),
+ .packet_buffer_size = cpu_to_be16(rx->packet_buffer_size),
};
if (gve_is_gqi(priv)) {
@@ -743,7 +744,6 @@ static void gve_adminq_get_create_rx_queue_cmd(struct gve_priv *priv,
cpu_to_be64(rx->data.data_bus);
cmd->create_rx_queue.index = cpu_to_be32(queue_index);
cmd->create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
- cmd->create_rx_queue.packet_buffer_size = cpu_to_be16(rx->packet_buffer_size);
} else {
u32 qpl_id = 0;
@@ -756,8 +756,6 @@ static void gve_adminq_get_create_rx_queue_cmd(struct gve_priv *priv,
cpu_to_be64(rx->dqo.complq.bus);
cmd->create_rx_queue.rx_data_ring_addr =
cpu_to_be64(rx->dqo.bufq.bus);
- cmd->create_rx_queue.packet_buffer_size =
- cpu_to_be16(priv->data_buffer_size_dqo);
cmd->create_rx_queue.rx_buff_ring_size =
cpu_to_be16(priv->rx_desc_cnt);
cmd->create_rx_queue.enable_rsc =
@@ -885,6 +883,15 @@ static void gve_set_default_desc_cnt(struct gve_priv *priv,
priv->min_rx_desc_cnt = priv->rx_desc_cnt;
}
+static void gve_set_default_rss_sizes(struct gve_priv *priv)
+{
+ if (!gve_is_gqi(priv)) {
+ priv->rss_key_size = GVE_RSS_KEY_SIZE;
+ priv->rss_lut_size = GVE_RSS_INDIR_SIZE;
+ priv->cache_rss_config = true;
+ }
+}
+
static void gve_enable_supported_features(struct gve_priv *priv,
u32 supported_features_mask,
const struct gve_device_option_jumbo_frames
@@ -968,6 +975,10 @@ static void gve_enable_supported_features(struct gve_priv *priv,
be16_to_cpu(dev_op_rss_config->hash_key_size);
priv->rss_lut_size =
be16_to_cpu(dev_op_rss_config->hash_lut_size);
+ priv->cache_rss_config = false;
+ dev_dbg(&priv->pdev->dev,
+ "RSS device option enabled with key size of %u, lut size of %u.\n",
+ priv->rss_key_size, priv->rss_lut_size);
}
}
@@ -1052,6 +1063,8 @@ int gve_adminq_describe_device(struct gve_priv *priv)
/* set default descriptor counts */
gve_set_default_desc_cnt(priv, descriptor);
+ gve_set_default_rss_sizes(priv);
+
/* DQO supports LRO. */
if (!gve_is_gqi(priv))
priv->dev->hw_features |= NETIF_F_LRO;
@@ -1128,20 +1141,6 @@ int gve_adminq_unregister_page_list(struct gve_priv *priv, u32 page_list_id)
return gve_adminq_execute_cmd(priv, &cmd);
}
-int gve_adminq_set_mtu(struct gve_priv *priv, u64 mtu)
-{
- union gve_adminq_command cmd;
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.opcode = cpu_to_be32(GVE_ADMINQ_SET_DRIVER_PARAMETER);
- cmd.set_driver_param = (struct gve_adminq_set_driver_parameter) {
- .parameter_type = cpu_to_be32(GVE_SET_PARAM_MTU),
- .parameter_value = cpu_to_be64(mtu),
- };
-
- return gve_adminq_execute_cmd(priv, &cmd);
-}
-
int gve_adminq_report_stats(struct gve_priv *priv, u64 stats_report_len,
dma_addr_t stats_report_addr, u64 interval)
{
@@ -1290,8 +1289,9 @@ int gve_adminq_reset_flow_rules(struct gve_priv *priv)
int gve_adminq_configure_rss(struct gve_priv *priv, struct ethtool_rxfh_param *rxfh)
{
+ const u32 *hash_lut_to_config = NULL;
+ const u8 *hash_key_to_config = NULL;
dma_addr_t lut_bus = 0, key_bus = 0;
- u16 key_size = 0, lut_size = 0;
union gve_adminq_command cmd;
__be32 *lut = NULL;
u8 hash_alg = 0;
@@ -1301,7 +1301,7 @@ int gve_adminq_configure_rss(struct gve_priv *priv, struct ethtool_rxfh_param *r
switch (rxfh->hfunc) {
case ETH_RSS_HASH_NO_CHANGE:
- break;
+ fallthrough;
case ETH_RSS_HASH_TOP:
hash_alg = ETH_RSS_HASH_TOP;
break;
@@ -1310,27 +1310,46 @@ int gve_adminq_configure_rss(struct gve_priv *priv, struct ethtool_rxfh_param *r
}
if (rxfh->indir) {
- lut_size = priv->rss_lut_size;
+ if (rxfh->indir_size != priv->rss_lut_size)
+ return -EINVAL;
+
+ hash_lut_to_config = rxfh->indir;
+ } else if (priv->cache_rss_config) {
+ hash_lut_to_config = priv->rss_config.hash_lut;
+ }
+
+ if (hash_lut_to_config) {
lut = dma_alloc_coherent(&priv->pdev->dev,
- lut_size * sizeof(*lut),
+ priv->rss_lut_size * sizeof(*lut),
&lut_bus, GFP_KERNEL);
if (!lut)
return -ENOMEM;
for (i = 0; i < priv->rss_lut_size; i++)
- lut[i] = cpu_to_be32(rxfh->indir[i]);
+ lut[i] = cpu_to_be32(hash_lut_to_config[i]);
}
if (rxfh->key) {
- key_size = priv->rss_key_size;
+ if (rxfh->key_size != priv->rss_key_size) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ hash_key_to_config = rxfh->key;
+ } else if (priv->cache_rss_config) {
+ hash_key_to_config = priv->rss_config.hash_key;
+ }
+
+ if (hash_key_to_config) {
key = dma_alloc_coherent(&priv->pdev->dev,
- key_size, &key_bus, GFP_KERNEL);
+ priv->rss_key_size,
+ &key_bus, GFP_KERNEL);
if (!key) {
err = -ENOMEM;
goto out;
}
- memcpy(key, rxfh->key, key_size);
+ memcpy(key, hash_key_to_config, priv->rss_key_size);
}
/* Zero-valued fields in the cmd.configure_rss instruct the device to
@@ -1344,8 +1363,10 @@ int gve_adminq_configure_rss(struct gve_priv *priv, struct ethtool_rxfh_param *r
BIT(GVE_RSS_HASH_TCPV6) |
BIT(GVE_RSS_HASH_UDPV6)),
.hash_alg = hash_alg,
- .hash_key_size = cpu_to_be16(key_size),
- .hash_lut_size = cpu_to_be16(lut_size),
+ .hash_key_size =
+ cpu_to_be16((key_bus) ? priv->rss_key_size : 0),
+ .hash_lut_size =
+ cpu_to_be16((lut_bus) ? priv->rss_lut_size : 0),
.hash_key_addr = cpu_to_be64(key_bus),
.hash_lut_addr = cpu_to_be64(lut_bus),
};
@@ -1355,11 +1376,11 @@ int gve_adminq_configure_rss(struct gve_priv *priv, struct ethtool_rxfh_param *r
out:
if (lut)
dma_free_coherent(&priv->pdev->dev,
- lut_size * sizeof(*lut),
+ priv->rss_lut_size * sizeof(*lut),
lut, lut_bus);
if (key)
dma_free_coherent(&priv->pdev->dev,
- key_size, key, key_bus);
+ priv->rss_key_size, key, key_bus);
return err;
}
@@ -1463,12 +1484,15 @@ static int gve_adminq_process_rss_query(struct gve_priv *priv,
rxfh->hfunc = descriptor->hash_alg;
rss_info_addr = (void *)(descriptor + 1);
- if (rxfh->key)
+ if (rxfh->key) {
+ rxfh->key_size = priv->rss_key_size;
memcpy(rxfh->key, rss_info_addr, priv->rss_key_size);
+ }
rss_info_addr += priv->rss_key_size;
lut = (__be32 *)rss_info_addr;
if (rxfh->indir) {
+ rxfh->indir_size = priv->rss_lut_size;
for (i = 0; i < priv->rss_lut_size; i++)
rxfh->indir[i] = be32_to_cpu(lut[i]);
}
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.h b/drivers/net/ethernet/google/gve/gve_adminq.h
index 863683de9694..228217458275 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.h
+++ b/drivers/net/ethernet/google/gve/gve_adminq.h
@@ -612,7 +612,6 @@ int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 queue_id);
int gve_adminq_register_page_list(struct gve_priv *priv,
struct gve_queue_page_list *qpl);
int gve_adminq_unregister_page_list(struct gve_priv *priv, u32 page_list_id);
-int gve_adminq_set_mtu(struct gve_priv *priv, u64 mtu);
int gve_adminq_report_stats(struct gve_priv *priv, u64 stats_report_len,
dma_addr_t stats_report_addr, u64 interval);
int gve_adminq_verify_driver_compatibility(struct gve_priv *priv,
diff --git a/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c b/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c
index 403f0f335ba6..a71883e1d920 100644
--- a/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c
@@ -139,7 +139,8 @@ int gve_alloc_qpl_page_dqo(struct gve_rx_ring *rx,
buf_state->page_info.page_offset = 0;
buf_state->page_info.page_address =
page_address(buf_state->page_info.page);
- buf_state->page_info.buf_size = priv->data_buffer_size_dqo;
+ buf_state->page_info.buf_size = rx->packet_buffer_truesize;
+ buf_state->page_info.pad = rx->rx_headroom;
buf_state->last_single_ref_offset = 0;
/* The page already has 1 ref. */
@@ -162,7 +163,7 @@ void gve_free_qpl_page_dqo(struct gve_rx_buf_state_dqo *buf_state)
void gve_try_recycle_buf(struct gve_priv *priv, struct gve_rx_ring *rx,
struct gve_rx_buf_state_dqo *buf_state)
{
- const u16 data_buffer_size = priv->data_buffer_size_dqo;
+ const u16 data_buffer_size = rx->packet_buffer_truesize;
int pagecount;
/* Can't reuse if we only fit one buffer per page */
@@ -205,38 +206,40 @@ void gve_free_to_page_pool(struct gve_rx_ring *rx,
struct gve_rx_buf_state_dqo *buf_state,
bool allow_direct)
{
- struct page *page = buf_state->page_info.page;
+ netmem_ref netmem = buf_state->page_info.netmem;
- if (!page)
+ if (!netmem)
return;
- page_pool_put_full_page(page->pp, page, allow_direct);
- buf_state->page_info.page = NULL;
+ page_pool_put_full_netmem(netmem_get_pp(netmem), netmem, allow_direct);
+ buf_state->page_info.netmem = 0;
}
static int gve_alloc_from_page_pool(struct gve_rx_ring *rx,
struct gve_rx_buf_state_dqo *buf_state)
{
- struct gve_priv *priv = rx->gve;
- struct page *page;
+ netmem_ref netmem;
- buf_state->page_info.buf_size = priv->data_buffer_size_dqo;
- page = page_pool_alloc(rx->dqo.page_pool,
- &buf_state->page_info.page_offset,
- &buf_state->page_info.buf_size, GFP_ATOMIC);
+ buf_state->page_info.buf_size = rx->packet_buffer_truesize;
+ netmem = page_pool_alloc_netmem(rx->dqo.page_pool,
+ &buf_state->page_info.page_offset,
+ &buf_state->page_info.buf_size,
+ GFP_ATOMIC);
- if (!page)
+ if (!netmem)
return -ENOMEM;
- buf_state->page_info.page = page;
- buf_state->page_info.page_address = page_address(page);
- buf_state->addr = page_pool_get_dma_addr(page);
+ buf_state->page_info.netmem = netmem;
+ buf_state->page_info.page_address = netmem_address(netmem);
+ buf_state->addr = page_pool_get_dma_addr_netmem(netmem);
+ buf_state->page_info.pad = rx->dqo.page_pool->p.offset;
return 0;
}
struct page_pool *gve_rx_create_page_pool(struct gve_priv *priv,
- struct gve_rx_ring *rx)
+ struct gve_rx_ring *rx,
+ bool xdp)
{
u32 ntfy_id = gve_rx_idx_to_ntfy(priv, rx->q_num);
struct page_pool_params pp = {
@@ -247,7 +250,8 @@ struct page_pool *gve_rx_create_page_pool(struct gve_priv *priv,
.netdev = priv->dev,
.napi = &priv->ntfy_blocks[ntfy_id].napi,
.max_len = PAGE_SIZE,
- .dma_dir = DMA_FROM_DEVICE,
+ .dma_dir = xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
+ .offset = xdp ? XDP_PACKET_HEADROOM : 0,
};
return page_pool_create(&pp);
@@ -269,7 +273,7 @@ void gve_reuse_buffer(struct gve_rx_ring *rx,
struct gve_rx_buf_state_dqo *buf_state)
{
if (rx->dqo.page_pool) {
- buf_state->page_info.page = NULL;
+ buf_state->page_info.netmem = 0;
gve_free_buf_state(rx, buf_state);
} else {
gve_dec_pagecnt_bias(&buf_state->page_info);
@@ -301,7 +305,8 @@ int gve_alloc_buffer(struct gve_rx_ring *rx, struct gve_rx_desc_dqo *desc)
}
desc->buf_id = cpu_to_le16(buf_state - rx->dqo.buf_states);
desc->buf_addr = cpu_to_le64(buf_state->addr +
- buf_state->page_info.page_offset);
+ buf_state->page_info.page_offset +
+ buf_state->page_info.pad);
return 0;
diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
index bdfc6e77b2af..3c1da0cf3f61 100644
--- a/drivers/net/ethernet/google/gve/gve_ethtool.c
+++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
@@ -63,11 +63,11 @@ static const char gve_gstrings_rx_stats[][ETH_GSTRING_LEN] = {
static const char gve_gstrings_tx_stats[][ETH_GSTRING_LEN] = {
"tx_posted_desc[%u]", "tx_completed_desc[%u]", "tx_consumed_desc[%u]", "tx_bytes[%u]",
"tx_wake[%u]", "tx_stop[%u]", "tx_event_counter[%u]",
- "tx_dma_mapping_error[%u]", "tx_xsk_wakeup[%u]",
- "tx_xsk_done[%u]", "tx_xsk_sent[%u]", "tx_xdp_xmit[%u]", "tx_xdp_xmit_errors[%u]"
+ "tx_dma_mapping_error[%u]",
+ "tx_xsk_sent[%u]", "tx_xdp_xmit[%u]", "tx_xdp_xmit_errors[%u]"
};
-static const char gve_gstrings_adminq_stats[][ETH_GSTRING_LEN] = {
+static const char gve_gstrings_adminq_stats[][ETH_GSTRING_LEN] __nonstring_array = {
"adminq_prod_cnt", "adminq_cmd_fail", "adminq_timeouts",
"adminq_describe_device_cnt", "adminq_cfg_device_resources_cnt",
"adminq_register_page_list_cnt", "adminq_unregister_page_list_cnt",
@@ -113,7 +113,7 @@ static void gve_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
i);
for (i = 0; i < ARRAY_SIZE(gve_gstrings_adminq_stats); i++)
- ethtool_puts(&s, gve_gstrings_adminq_stats[i]);
+ ethtool_cpy(&s, gve_gstrings_adminq_stats[i]);
break;
@@ -392,7 +392,9 @@ gve_get_ethtool_stats(struct net_device *netdev,
*/
data[i++] = 0;
data[i++] = 0;
- data[i++] = tx->dqo_tx.tail - tx->dqo_tx.head;
+ data[i++] =
+ (tx->dqo_tx.tail - tx->dqo_tx.head) &
+ tx->mask;
}
do {
start =
@@ -417,9 +419,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
data[i++] = value;
}
}
- /* XDP xsk counters */
- data[i++] = tx->xdp_xsk_wakeup;
- data[i++] = tx->xdp_xsk_done;
+ /* XDP counters */
do {
start = u64_stats_fetch_begin(&priv->tx[ring].statss);
data[i] = tx->xdp_xsk_sent;
@@ -477,11 +477,12 @@ static int gve_set_channels(struct net_device *netdev,
struct ethtool_channels *cmd)
{
struct gve_priv *priv = netdev_priv(netdev);
- struct gve_queue_config new_tx_cfg = priv->tx_cfg;
- struct gve_queue_config new_rx_cfg = priv->rx_cfg;
+ struct gve_tx_queue_config new_tx_cfg = priv->tx_cfg;
+ struct gve_rx_queue_config new_rx_cfg = priv->rx_cfg;
struct ethtool_channels old_settings;
int new_tx = cmd->tx_count;
int new_rx = cmd->rx_count;
+ bool reset_rss = false;
gve_get_channels(netdev, &old_settings);
@@ -492,22 +493,27 @@ static int gve_set_channels(struct net_device *netdev,
if (!new_rx || !new_tx)
return -EINVAL;
- if (priv->num_xdp_queues &&
- (new_tx != new_rx || (2 * new_tx > priv->tx_cfg.max_queues))) {
- dev_err(&priv->pdev->dev, "XDP load failed: The number of configured RX queues should be equal to the number of configured TX queues and the number of configured RX/TX queues should be less than or equal to half the maximum number of RX/TX queues");
- return -EINVAL;
- }
+ if (priv->xdp_prog) {
+ if (new_tx != new_rx ||
+ (2 * new_tx > priv->tx_cfg.max_queues)) {
+ dev_err(&priv->pdev->dev, "The number of configured RX queues should be equal to the number of configured TX queues and the number of configured RX/TX queues should be less than or equal to half the maximum number of RX/TX queues when XDP program is installed");
+ return -EINVAL;
+ }
- if (!netif_running(netdev)) {
- priv->tx_cfg.num_queues = new_tx;
- priv->rx_cfg.num_queues = new_rx;
- return 0;
+ /* One XDP TX queue per RX queue. */
+ new_tx_cfg.num_xdp_queues = new_rx;
+ } else {
+ new_tx_cfg.num_xdp_queues = 0;
}
+ if (new_rx != priv->rx_cfg.num_queues &&
+ priv->cache_rss_config && !netif_is_rxfh_configured(netdev))
+ reset_rss = true;
+
new_tx_cfg.num_queues = new_tx;
new_rx_cfg.num_queues = new_rx;
- return gve_adjust_queues(priv, new_rx_cfg, new_tx_cfg);
+ return gve_adjust_queues(priv, new_rx_cfg, new_tx_cfg, reset_rss);
}
static void gve_get_ringparam(struct net_device *netdev,
@@ -643,8 +649,7 @@ static int gve_set_tunable(struct net_device *netdev,
switch (etuna->id) {
case ETHTOOL_RX_COPYBREAK:
{
- u32 max_copybreak = gve_is_gqi(priv) ?
- GVE_DEFAULT_RX_BUFFER_SIZE : priv->data_buffer_size_dqo;
+ u32 max_copybreak = priv->rx_cfg.packet_buffer_size;
len = *(u32 *)value;
if (len > max_copybreak)
@@ -700,7 +705,7 @@ static int gve_set_priv_flags(struct net_device *netdev, u32 flags)
memset(priv->stats_report->stats, 0, (tx_stats_num + rx_stats_num) *
sizeof(struct stats));
- del_timer_sync(&priv->stats_report_timer);
+ timer_delete_sync(&priv->stats_report_timer);
}
return 0;
}
@@ -855,6 +860,25 @@ static u32 gve_get_rxfh_indir_size(struct net_device *netdev)
return priv->rss_lut_size;
}
+static void gve_get_rss_config_cache(struct gve_priv *priv,
+ struct ethtool_rxfh_param *rxfh)
+{
+ struct gve_rss_config *rss_config = &priv->rss_config;
+
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
+
+ if (rxfh->key) {
+ rxfh->key_size = priv->rss_key_size;
+ memcpy(rxfh->key, rss_config->hash_key, priv->rss_key_size);
+ }
+
+ if (rxfh->indir) {
+ rxfh->indir_size = priv->rss_lut_size;
+ memcpy(rxfh->indir, rss_config->hash_lut,
+ priv->rss_lut_size * sizeof(*rxfh->indir));
+ }
+}
+
static int gve_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh)
{
struct gve_priv *priv = netdev_priv(netdev);
@@ -862,18 +886,46 @@ static int gve_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rx
if (!priv->rss_key_size || !priv->rss_lut_size)
return -EOPNOTSUPP;
+ if (priv->cache_rss_config) {
+ gve_get_rss_config_cache(priv, rxfh);
+ return 0;
+ }
+
return gve_adminq_query_rss_config(priv, rxfh);
}
+static void gve_set_rss_config_cache(struct gve_priv *priv,
+ struct ethtool_rxfh_param *rxfh)
+{
+ struct gve_rss_config *rss_config = &priv->rss_config;
+
+ if (rxfh->key)
+ memcpy(rss_config->hash_key, rxfh->key, priv->rss_key_size);
+
+ if (rxfh->indir)
+ memcpy(rss_config->hash_lut, rxfh->indir,
+ priv->rss_lut_size * sizeof(*rxfh->indir));
+}
+
static int gve_set_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
{
struct gve_priv *priv = netdev_priv(netdev);
+ int err;
if (!priv->rss_key_size || !priv->rss_lut_size)
return -EOPNOTSUPP;
- return gve_adminq_configure_rss(priv, rxfh);
+ err = gve_adminq_configure_rss(priv, rxfh);
+ if (err) {
+		NL_SET_ERR_MSG_MOD(extack, "Failed to configure RSS config");
+ return err;
+ }
+
+ if (priv->cache_rss_config)
+ gve_set_rss_config_cache(priv, rxfh);
+
+ return 0;
}
const struct ethtool_ops gve_ethtool_ops = {
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index e171ca248f9a..dc35a23ec47f 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -184,6 +184,43 @@ static void gve_free_flow_rule_caches(struct gve_priv *priv)
flow_rules_cache->rules_cache = NULL;
}
+static int gve_alloc_rss_config_cache(struct gve_priv *priv)
+{
+ struct gve_rss_config *rss_config = &priv->rss_config;
+
+ if (!priv->cache_rss_config)
+ return 0;
+
+ rss_config->hash_key = kcalloc(priv->rss_key_size,
+ sizeof(rss_config->hash_key[0]),
+ GFP_KERNEL);
+ if (!rss_config->hash_key)
+ return -ENOMEM;
+
+ rss_config->hash_lut = kcalloc(priv->rss_lut_size,
+ sizeof(rss_config->hash_lut[0]),
+ GFP_KERNEL);
+ if (!rss_config->hash_lut)
+ goto free_rss_key_cache;
+
+ return 0;
+
+free_rss_key_cache:
+ kfree(rss_config->hash_key);
+ rss_config->hash_key = NULL;
+ return -ENOMEM;
+}
+
+static void gve_free_rss_config_cache(struct gve_priv *priv)
+{
+ struct gve_rss_config *rss_config = &priv->rss_config;
+
+ kfree(rss_config->hash_key);
+ kfree(rss_config->hash_lut);
+
+ memset(rss_config, 0, sizeof(*rss_config));
+}
+
static int gve_alloc_counter_array(struct gve_priv *priv)
{
priv->counter_array =
@@ -231,7 +268,8 @@ static void gve_stats_report_schedule(struct gve_priv *priv)
static void gve_stats_report_timer(struct timer_list *t)
{
- struct gve_priv *priv = from_timer(priv, t, stats_report_timer);
+ struct gve_priv *priv = timer_container_of(priv, t,
+ stats_report_timer);
mod_timer(&priv->stats_report_timer,
round_jiffies(jiffies +
@@ -265,7 +303,7 @@ static void gve_free_stats_report(struct gve_priv *priv)
if (!priv->stats_report)
return;
- del_timer_sync(&priv->stats_report_timer);
+ timer_delete_sync(&priv->stats_report_timer);
dma_free_coherent(&priv->pdev->dev, priv->stats_report_len,
priv->stats_report, priv->stats_report_bus);
priv->stats_report = NULL;
@@ -333,6 +371,14 @@ int gve_napi_poll(struct napi_struct *napi, int budget)
if (block->rx) {
work_done = gve_rx_poll(block, budget);
+
+		/* Poll XSK TX as part of RX NAPI. Set up re-poll based on max of
+ * TX and RX work done.
+ */
+ if (priv->xdp_prog)
+ work_done = max_t(int, work_done,
+ gve_xsk_tx_poll(block, budget));
+
reschedule |= work_done == budget;
}
@@ -567,9 +613,12 @@ static int gve_setup_device_resources(struct gve_priv *priv)
err = gve_alloc_flow_rule_caches(priv);
if (err)
return err;
- err = gve_alloc_counter_array(priv);
+ err = gve_alloc_rss_config_cache(priv);
if (err)
goto abort_with_flow_rule_caches;
+ err = gve_alloc_counter_array(priv);
+ if (err)
+ goto abort_with_rss_config_cache;
err = gve_alloc_notify_blocks(priv);
if (err)
goto abort_with_counter;
@@ -603,6 +652,12 @@ static int gve_setup_device_resources(struct gve_priv *priv)
}
}
+ err = gve_init_rss_config(priv, priv->rx_cfg.num_queues);
+ if (err) {
+ dev_err(&priv->pdev->dev, "Failed to init RSS config");
+ goto abort_with_ptype_lut;
+ }
+
err = gve_adminq_report_stats(priv, priv->stats_report_len,
priv->stats_report_bus,
GVE_STATS_REPORT_TIMER_PERIOD);
@@ -621,6 +676,8 @@ abort_with_ntfy_blocks:
gve_free_notify_blocks(priv);
abort_with_counter:
gve_free_counter_array(priv);
+abort_with_rss_config_cache:
+ gve_free_rss_config_cache(priv);
abort_with_flow_rule_caches:
gve_free_flow_rule_caches(priv);
@@ -661,6 +718,7 @@ static void gve_teardown_device_resources(struct gve_priv *priv)
priv->ptype_lut_dqo = NULL;
gve_free_flow_rule_caches(priv);
+ gve_free_rss_config_cache(priv);
gve_free_counter_array(priv);
gve_free_notify_blocks(priv);
gve_free_stats_report(priv);
@@ -738,30 +796,13 @@ static struct gve_queue_page_list *gve_rx_get_qpl(struct gve_priv *priv, int idx
return rx->dqo.qpl;
}
-static int gve_register_xdp_qpls(struct gve_priv *priv)
-{
- int start_id;
- int err;
- int i;
-
- start_id = gve_xdp_tx_start_queue_id(priv);
- for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++) {
- err = gve_register_qpl(priv, gve_tx_get_qpl(priv, i));
- /* This failure will trigger a reset - no need to clean up */
- if (err)
- return err;
- }
- return 0;
-}
-
static int gve_register_qpls(struct gve_priv *priv)
{
int num_tx_qpls, num_rx_qpls;
int err;
int i;
- num_tx_qpls = gve_num_tx_qpls(&priv->tx_cfg, gve_num_xdp_qpls(priv),
- gve_is_qpl(priv));
+ num_tx_qpls = gve_num_tx_qpls(&priv->tx_cfg, gve_is_qpl(priv));
num_rx_qpls = gve_num_rx_qpls(&priv->rx_cfg, gve_is_qpl(priv));
for (i = 0; i < num_tx_qpls; i++) {
@@ -779,30 +820,13 @@ static int gve_register_qpls(struct gve_priv *priv)
return 0;
}
-static int gve_unregister_xdp_qpls(struct gve_priv *priv)
-{
- int start_id;
- int err;
- int i;
-
- start_id = gve_xdp_tx_start_queue_id(priv);
- for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++) {
- err = gve_unregister_qpl(priv, gve_tx_get_qpl(priv, i));
- /* This failure will trigger a reset - no need to clean */
- if (err)
- return err;
- }
- return 0;
-}
-
static int gve_unregister_qpls(struct gve_priv *priv)
{
int num_tx_qpls, num_rx_qpls;
int err;
int i;
- num_tx_qpls = gve_num_tx_qpls(&priv->tx_cfg, gve_num_xdp_qpls(priv),
- gve_is_qpl(priv));
+ num_tx_qpls = gve_num_tx_qpls(&priv->tx_cfg, gve_is_qpl(priv));
num_rx_qpls = gve_num_rx_qpls(&priv->rx_cfg, gve_is_qpl(priv));
for (i = 0; i < num_tx_qpls; i++) {
@@ -821,27 +845,6 @@ static int gve_unregister_qpls(struct gve_priv *priv)
return 0;
}
-static int gve_create_xdp_rings(struct gve_priv *priv)
-{
- int err;
-
- err = gve_adminq_create_tx_queues(priv,
- gve_xdp_tx_start_queue_id(priv),
- priv->num_xdp_queues);
- if (err) {
- netif_err(priv, drv, priv->dev, "failed to create %d XDP tx queues\n",
- priv->num_xdp_queues);
- /* This failure will trigger a reset - no need to clean
- * up
- */
- return err;
- }
- netif_dbg(priv, drv, priv->dev, "created %d XDP tx queues\n",
- priv->num_xdp_queues);
-
- return 0;
-}
-
static int gve_create_rings(struct gve_priv *priv)
{
int num_tx_queues = gve_num_tx_queues(priv);
@@ -897,7 +900,7 @@ static void init_xdp_sync_stats(struct gve_priv *priv)
int i;
/* Init stats */
- for (i = start_id; i < start_id + priv->num_xdp_queues; i++) {
+ for (i = start_id; i < start_id + priv->tx_cfg.num_xdp_queues; i++) {
int ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
u64_stats_init(&priv->tx[i].statss);
@@ -925,19 +928,18 @@ static void gve_tx_get_curr_alloc_cfg(struct gve_priv *priv,
cfg->qcfg = &priv->tx_cfg;
cfg->raw_addressing = !gve_is_qpl(priv);
cfg->ring_size = priv->tx_desc_cnt;
- cfg->start_idx = 0;
- cfg->num_rings = gve_num_tx_queues(priv);
+ cfg->num_xdp_rings = cfg->qcfg->num_xdp_queues;
cfg->tx = priv->tx;
}
-static void gve_tx_stop_rings(struct gve_priv *priv, int start_id, int num_rings)
+static void gve_tx_stop_rings(struct gve_priv *priv, int num_rings)
{
int i;
if (!priv->tx)
return;
- for (i = start_id; i < start_id + num_rings; i++) {
+ for (i = 0; i < num_rings; i++) {
if (gve_is_gqi(priv))
gve_tx_stop_ring_gqi(priv, i);
else
@@ -945,12 +947,11 @@ static void gve_tx_stop_rings(struct gve_priv *priv, int start_id, int num_rings
}
}
-static void gve_tx_start_rings(struct gve_priv *priv, int start_id,
- int num_rings)
+static void gve_tx_start_rings(struct gve_priv *priv, int num_rings)
{
int i;
- for (i = start_id; i < start_id + num_rings; i++) {
+ for (i = 0; i < num_rings; i++) {
if (gve_is_gqi(priv))
gve_tx_start_ring_gqi(priv, i);
else
@@ -958,28 +959,6 @@ static void gve_tx_start_rings(struct gve_priv *priv, int start_id,
}
}
-static int gve_alloc_xdp_rings(struct gve_priv *priv)
-{
- struct gve_tx_alloc_rings_cfg cfg = {0};
- int err = 0;
-
- if (!priv->num_xdp_queues)
- return 0;
-
- gve_tx_get_curr_alloc_cfg(priv, &cfg);
- cfg.start_idx = gve_xdp_tx_start_queue_id(priv);
- cfg.num_rings = priv->num_xdp_queues;
-
- err = gve_tx_alloc_rings_gqi(priv, &cfg);
- if (err)
- return err;
-
- gve_tx_start_rings(priv, cfg.start_idx, cfg.num_rings);
- init_xdp_sync_stats(priv);
-
- return 0;
-}
-
static int gve_queues_mem_alloc(struct gve_priv *priv,
struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
@@ -1010,26 +989,6 @@ free_tx:
return err;
}
-static int gve_destroy_xdp_rings(struct gve_priv *priv)
-{
- int start_id;
- int err;
-
- start_id = gve_xdp_tx_start_queue_id(priv);
- err = gve_adminq_destroy_tx_queues(priv,
- start_id,
- priv->num_xdp_queues);
- if (err) {
- netif_err(priv, drv, priv->dev,
- "failed to destroy XDP queues\n");
- /* This failure will trigger a reset - no need to clean up */
- return err;
- }
- netif_dbg(priv, drv, priv->dev, "destroyed XDP queues\n");
-
- return 0;
-}
-
static int gve_destroy_rings(struct gve_priv *priv)
{
int num_tx_queues = gve_num_tx_queues(priv);
@@ -1054,20 +1013,6 @@ static int gve_destroy_rings(struct gve_priv *priv)
return 0;
}
-static void gve_free_xdp_rings(struct gve_priv *priv)
-{
- struct gve_tx_alloc_rings_cfg cfg = {0};
-
- gve_tx_get_curr_alloc_cfg(priv, &cfg);
- cfg.start_idx = gve_xdp_tx_start_queue_id(priv);
- cfg.num_rings = priv->num_xdp_queues;
-
- if (priv->tx) {
- gve_tx_stop_rings(priv, cfg.start_idx, cfg.num_rings);
- gve_tx_free_rings_gqi(priv, &cfg);
- }
-}
-
static void gve_queues_mem_free(struct gve_priv *priv,
struct gve_tx_alloc_rings_cfg *tx_cfg,
struct gve_rx_alloc_rings_cfg *rx_cfg)
@@ -1194,7 +1139,7 @@ static int gve_reg_xdp_info(struct gve_priv *priv, struct net_device *dev)
int i, j;
u32 tx_qid;
- if (!priv->num_xdp_queues)
+ if (!priv->tx_cfg.num_xdp_queues)
return 0;
for (i = 0; i < priv->rx_cfg.num_queues; i++) {
@@ -1205,8 +1150,14 @@ static int gve_reg_xdp_info(struct gve_priv *priv, struct net_device *dev)
napi->napi_id);
if (err)
goto err;
- err = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq,
- MEM_TYPE_PAGE_SHARED, NULL);
+ if (gve_is_qpl(priv))
+ err = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq,
+ MEM_TYPE_PAGE_SHARED,
+ NULL);
+ else
+ err = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq,
+ MEM_TYPE_PAGE_POOL,
+ rx->dqo.page_pool);
if (err)
goto err;
rx->xsk_pool = xsk_get_pool_from_qid(dev, i);
@@ -1224,7 +1175,7 @@ static int gve_reg_xdp_info(struct gve_priv *priv, struct net_device *dev)
}
}
- for (i = 0; i < priv->num_xdp_queues; i++) {
+ for (i = 0; i < priv->tx_cfg.num_xdp_queues; i++) {
tx_qid = gve_xdp_tx_queue_id(priv, i);
priv->tx[tx_qid].xsk_pool = xsk_get_pool_from_qid(dev, i);
}
@@ -1245,7 +1196,7 @@ static void gve_unreg_xdp_info(struct gve_priv *priv)
{
int i, tx_qid;
- if (!priv->num_xdp_queues)
+ if (!priv->tx_cfg.num_xdp_queues || !priv->rx || !priv->tx)
return;
for (i = 0; i < priv->rx_cfg.num_queues; i++) {
@@ -1258,7 +1209,7 @@ static void gve_unreg_xdp_info(struct gve_priv *priv)
}
}
- for (i = 0; i < priv->num_xdp_queues; i++) {
+ for (i = 0; i < priv->tx_cfg.num_xdp_queues; i++) {
tx_qid = gve_xdp_tx_queue_id(priv, i);
priv->tx[tx_qid].xsk_pool = NULL;
}
@@ -1275,15 +1226,14 @@ static void gve_drain_page_cache(struct gve_priv *priv)
static void gve_rx_get_curr_alloc_cfg(struct gve_priv *priv,
struct gve_rx_alloc_rings_cfg *cfg)
{
- cfg->qcfg = &priv->rx_cfg;
+ cfg->qcfg_rx = &priv->rx_cfg;
cfg->qcfg_tx = &priv->tx_cfg;
cfg->raw_addressing = !gve_is_qpl(priv);
cfg->enable_header_split = priv->header_split_enabled;
cfg->ring_size = priv->rx_desc_cnt;
- cfg->packet_buffer_size = gve_is_gqi(priv) ?
- GVE_DEFAULT_RX_BUFFER_SIZE :
- priv->data_buffer_size_dqo;
+ cfg->packet_buffer_size = priv->rx_cfg.packet_buffer_size;
cfg->rx = priv->rx;
+ cfg->xdp = !!cfg->qcfg_tx->num_xdp_queues;
}
void gve_get_curr_alloc_cfgs(struct gve_priv *priv,
@@ -1356,17 +1306,13 @@ static int gve_queues_start(struct gve_priv *priv,
/* Record new configs into priv */
priv->tx_cfg = *tx_alloc_cfg->qcfg;
- priv->rx_cfg = *rx_alloc_cfg->qcfg;
+ priv->tx_cfg.num_xdp_queues = tx_alloc_cfg->num_xdp_rings;
+ priv->rx_cfg = *rx_alloc_cfg->qcfg_rx;
priv->tx_desc_cnt = tx_alloc_cfg->ring_size;
priv->rx_desc_cnt = rx_alloc_cfg->ring_size;
- if (priv->xdp_prog)
- priv->num_xdp_queues = priv->rx_cfg.num_queues;
- else
- priv->num_xdp_queues = 0;
-
- gve_tx_start_rings(priv, 0, tx_alloc_cfg->num_rings);
- gve_rx_start_rings(priv, rx_alloc_cfg->qcfg->num_queues);
+ gve_tx_start_rings(priv, gve_num_tx_queues(priv));
+ gve_rx_start_rings(priv, rx_alloc_cfg->qcfg_rx->num_queues);
gve_init_sync_stats(priv);
err = netif_set_real_num_tx_queues(dev, priv->tx_cfg.num_queues);
@@ -1380,12 +1326,18 @@ static int gve_queues_start(struct gve_priv *priv,
if (err)
goto stop_and_free_rings;
+ if (rx_alloc_cfg->reset_rss) {
+ err = gve_init_rss_config(priv, priv->rx_cfg.num_queues);
+ if (err)
+ goto reset;
+ }
+
err = gve_register_qpls(priv);
if (err)
goto reset;
priv->header_split_enabled = rx_alloc_cfg->enable_header_split;
- priv->data_buffer_size_dqo = rx_alloc_cfg->packet_buffer_size;
+ priv->rx_cfg.packet_buffer_size = rx_alloc_cfg->packet_buffer_size;
err = gve_create_rings(priv);
if (err)
@@ -1412,7 +1364,7 @@ reset:
/* return the original error */
return err;
stop_and_free_rings:
- gve_tx_stop_rings(priv, 0, gve_num_tx_queues(priv));
+ gve_tx_stop_rings(priv, gve_num_tx_queues(priv));
gve_rx_stop_rings(priv, priv->rx_cfg.num_queues);
gve_queues_mem_remove(priv);
return err;
@@ -1457,11 +1409,11 @@ static int gve_queues_stop(struct gve_priv *priv)
goto err;
gve_clear_device_rings_ok(priv);
}
- del_timer_sync(&priv->stats_report_timer);
+ timer_delete_sync(&priv->stats_report_timer);
gve_unreg_xdp_info(priv);
- gve_tx_stop_rings(priv, 0, gve_num_tx_queues(priv));
+ gve_tx_stop_rings(priv, gve_num_tx_queues(priv));
gve_rx_stop_rings(priv, priv->rx_cfg.num_queues);
priv->interface_down_cnt++;
@@ -1491,56 +1443,6 @@ static int gve_close(struct net_device *dev)
return 0;
}
-static int gve_remove_xdp_queues(struct gve_priv *priv)
-{
- int err;
-
- err = gve_destroy_xdp_rings(priv);
- if (err)
- return err;
-
- err = gve_unregister_xdp_qpls(priv);
- if (err)
- return err;
-
- gve_unreg_xdp_info(priv);
- gve_free_xdp_rings(priv);
-
- priv->num_xdp_queues = 0;
- return 0;
-}
-
-static int gve_add_xdp_queues(struct gve_priv *priv)
-{
- int err;
-
- priv->num_xdp_queues = priv->rx_cfg.num_queues;
-
- err = gve_alloc_xdp_rings(priv);
- if (err)
- goto err;
-
- err = gve_reg_xdp_info(priv, priv->dev);
- if (err)
- goto free_xdp_rings;
-
- err = gve_register_xdp_qpls(priv);
- if (err)
- goto free_xdp_rings;
-
- err = gve_create_xdp_rings(priv);
- if (err)
- goto free_xdp_rings;
-
- return 0;
-
-free_xdp_rings:
- gve_free_xdp_rings(priv);
-err:
- priv->num_xdp_queues = 0;
- return err;
-}
-
static void gve_handle_link_status(struct gve_priv *priv, bool link_status)
{
if (!gve_get_napi_enabled(priv))
@@ -1558,6 +1460,19 @@ static void gve_handle_link_status(struct gve_priv *priv, bool link_status)
}
}
+static int gve_configure_rings_xdp(struct gve_priv *priv,
+ u16 num_xdp_rings)
+{
+ struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
+ struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
+
+ gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
+ tx_alloc_cfg.num_xdp_rings = num_xdp_rings;
+
+ rx_alloc_cfg.xdp = !!num_xdp_rings;
+ return gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
+}
+
static int gve_set_xdp(struct gve_priv *priv, struct bpf_prog *prog,
struct netlink_ext_ack *extack)
{
@@ -1570,29 +1485,26 @@ static int gve_set_xdp(struct gve_priv *priv, struct bpf_prog *prog,
WRITE_ONCE(priv->xdp_prog, prog);
if (old_prog)
bpf_prog_put(old_prog);
+
+ /* Update priv XDP queue configuration */
+ priv->tx_cfg.num_xdp_queues = priv->xdp_prog ?
+ priv->rx_cfg.num_queues : 0;
return 0;
}
- gve_turndown(priv);
- if (!old_prog && prog) {
- // Allocate XDP TX queues if an XDP program is
- // being installed
- err = gve_add_xdp_queues(priv);
- if (err)
- goto out;
- } else if (old_prog && !prog) {
- // Remove XDP TX queues if an XDP program is
- // being uninstalled
- err = gve_remove_xdp_queues(priv);
- if (err)
- goto out;
- }
+ if (!old_prog && prog)
+ err = gve_configure_rings_xdp(priv, priv->rx_cfg.num_queues);
+ else if (old_prog && !prog)
+ err = gve_configure_rings_xdp(priv, 0);
+
+ if (err)
+ goto out;
+
WRITE_ONCE(priv->xdp_prog, prog);
if (old_prog)
bpf_prog_put(old_prog);
out:
- gve_turnup(priv);
status = ioread32be(&priv->reg_bar0->device_status);
gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status);
return err;
@@ -1623,8 +1535,8 @@ static int gve_xsk_pool_enable(struct net_device *dev,
if (err)
return err;
- /* If XDP prog is not installed, return */
- if (!priv->xdp_prog)
+ /* If XDP prog is not installed or interface is down, return. */
+ if (!priv->xdp_prog || !netif_running(dev))
return 0;
rx = &priv->rx[qid];
@@ -1669,21 +1581,16 @@ static int gve_xsk_pool_disable(struct net_device *dev,
if (qid >= priv->rx_cfg.num_queues)
return -EINVAL;
- /* If XDP prog is not installed, unmap DMA and return */
- if (!priv->xdp_prog)
- goto done;
-
- tx_qid = gve_xdp_tx_queue_id(priv, qid);
- if (!netif_running(dev)) {
- priv->rx[qid].xsk_pool = NULL;
- xdp_rxq_info_unreg(&priv->rx[qid].xsk_rxq);
- priv->tx[tx_qid].xsk_pool = NULL;
+ /* If XDP prog is not installed or interface is down, unmap DMA and
+ * return.
+ */
+ if (!priv->xdp_prog || !netif_running(dev))
goto done;
- }
napi_rx = &priv->ntfy_blocks[priv->rx[qid].ntfy_id].napi;
napi_disable(napi_rx); /* make sure current rx poll is done */
+ tx_qid = gve_xdp_tx_queue_id(priv, qid);
napi_tx = &priv->ntfy_blocks[priv->tx[tx_qid].ntfy_id].napi;
napi_disable(napi_tx); /* make sure current tx poll is done */
@@ -1709,24 +1616,20 @@ done:
static int gve_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
{
struct gve_priv *priv = netdev_priv(dev);
- int tx_queue_id = gve_xdp_tx_queue_id(priv, queue_id);
+ struct napi_struct *napi;
+
+ if (!gve_get_napi_enabled(priv))
+ return -ENETDOWN;
if (queue_id >= priv->rx_cfg.num_queues || !priv->xdp_prog)
return -EINVAL;
- if (flags & XDP_WAKEUP_TX) {
- struct gve_tx_ring *tx = &priv->tx[tx_queue_id];
- struct napi_struct *napi =
- &priv->ntfy_blocks[tx->ntfy_id].napi;
-
- if (!napi_if_scheduled_mark_missed(napi)) {
- /* Call local_bh_enable to trigger SoftIRQ processing */
- local_bh_disable();
- napi_schedule(napi);
- local_bh_enable();
- }
-
- tx->xdp_xsk_wakeup++;
+ napi = &priv->ntfy_blocks[gve_rx_idx_to_ntfy(priv, queue_id)].napi;
+ if (!napi_if_scheduled_mark_missed(napi)) {
+ /* Call local_bh_enable to trigger SoftIRQ processing */
+ local_bh_disable();
+ napi_schedule(napi);
+ local_bh_enable();
}
return 0;
@@ -1735,6 +1638,7 @@ static int gve_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
static int verify_xdp_configuration(struct net_device *dev)
{
struct gve_priv *priv = netdev_priv(dev);
+ u16 max_xdp_mtu;
if (dev->features & NETIF_F_LRO) {
netdev_warn(dev, "XDP is not supported when LRO is on.\n");
@@ -1747,7 +1651,11 @@ static int verify_xdp_configuration(struct net_device *dev)
return -EOPNOTSUPP;
}
- if (dev->mtu > GVE_DEFAULT_RX_BUFFER_SIZE - sizeof(struct ethhdr) - GVE_RX_PAD) {
+ max_xdp_mtu = priv->rx_cfg.packet_buffer_size - sizeof(struct ethhdr);
+ if (priv->queue_format == GVE_GQI_QPL_FORMAT)
+ max_xdp_mtu -= GVE_RX_PAD;
+
+ if (dev->mtu > max_xdp_mtu) {
netdev_warn(dev, "XDP is not supported for mtu %d.\n",
dev->mtu);
return -EOPNOTSUPP;
@@ -1785,6 +1693,26 @@ static int gve_xdp(struct net_device *dev, struct netdev_bpf *xdp)
}
}
+int gve_init_rss_config(struct gve_priv *priv, u16 num_queues)
+{
+ struct gve_rss_config *rss_config = &priv->rss_config;
+ struct ethtool_rxfh_param rxfh = {0};
+ u16 i;
+
+ if (!priv->cache_rss_config)
+ return 0;
+
+ for (i = 0; i < priv->rss_lut_size; i++)
+ rss_config->hash_lut[i] =
+ ethtool_rxfh_indir_default(i, num_queues);
+
+ netdev_rss_key_fill(rss_config->hash_key, priv->rss_key_size);
+
+ rxfh.hfunc = ETH_RSS_HASH_TOP;
+
+ return gve_adminq_configure_rss(priv, &rxfh);
+}
+
int gve_flow_rules_reset(struct gve_priv *priv)
{
if (!priv->max_flow_rules)
@@ -1832,8 +1760,9 @@ int gve_adjust_config(struct gve_priv *priv,
}
int gve_adjust_queues(struct gve_priv *priv,
- struct gve_queue_config new_rx_config,
- struct gve_queue_config new_tx_config)
+ struct gve_rx_queue_config new_rx_config,
+ struct gve_tx_queue_config new_tx_config,
+ bool reset_rss)
{
struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
@@ -1844,14 +1773,19 @@ int gve_adjust_queues(struct gve_priv *priv,
/* Relay the new config from ethtool */
tx_alloc_cfg.qcfg = &new_tx_config;
rx_alloc_cfg.qcfg_tx = &new_tx_config;
- rx_alloc_cfg.qcfg = &new_rx_config;
- tx_alloc_cfg.num_rings = new_tx_config.num_queues;
+ rx_alloc_cfg.qcfg_rx = &new_rx_config;
+ rx_alloc_cfg.reset_rss = reset_rss;
if (netif_running(priv->dev)) {
err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
return err;
}
/* Set the config for the next up. */
+ if (reset_rss) {
+ err = gve_init_rss_config(priv, new_rx_config.num_queues);
+ if (err)
+ return err;
+ }
priv->tx_cfg = new_tx_config;
priv->rx_cfg = new_rx_config;
@@ -1880,7 +1814,7 @@ static void gve_turndown(struct gve_priv *priv)
netif_queue_set_napi(priv->dev, idx,
NETDEV_QUEUE_TYPE_TX, NULL);
- napi_disable(&block->napi);
+ napi_disable_locked(&block->napi);
}
for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
@@ -1891,14 +1825,19 @@ static void gve_turndown(struct gve_priv *priv)
netif_queue_set_napi(priv->dev, idx, NETDEV_QUEUE_TYPE_RX,
NULL);
- napi_disable(&block->napi);
+ napi_disable_locked(&block->napi);
}
/* Stop tx queues */
netif_tx_disable(priv->dev);
+ xdp_features_clear_redirect_target_locked(priv->dev);
+
gve_clear_napi_enabled(priv);
gve_clear_report_stats(priv);
+
+ /* Make sure that all traffic is finished processing. */
+ synchronize_net();
}
static void gve_turnup(struct gve_priv *priv)
@@ -1916,7 +1855,7 @@ static void gve_turnup(struct gve_priv *priv)
if (!gve_tx_was_added_to_block(priv, idx))
continue;
- napi_enable(&block->napi);
+ napi_enable_locked(&block->napi);
if (idx < priv->tx_cfg.num_queues)
netif_queue_set_napi(priv->dev, idx,
@@ -1944,7 +1883,7 @@ static void gve_turnup(struct gve_priv *priv)
if (!gve_rx_was_added_to_block(priv, idx))
continue;
- napi_enable(&block->napi);
+ napi_enable_locked(&block->napi);
netif_queue_set_napi(priv->dev, idx, NETDEV_QUEUE_TYPE_RX,
&block->napi);
@@ -1963,6 +1902,9 @@ static void gve_turnup(struct gve_priv *priv)
napi_schedule(&block->napi);
}
+ if (priv->tx_cfg.num_xdp_queues && gve_supports_xdp_xmit(priv))
+ xdp_features_set_redirect_target_locked(priv->dev, false);
+
gve_set_napi_enabled(priv);
}
@@ -2136,7 +2078,9 @@ static void gve_handle_reset(struct gve_priv *priv)
if (gve_get_do_reset(priv)) {
rtnl_lock();
+ netdev_lock(priv->dev);
gve_reset(priv, false);
+ netdev_unlock(priv->dev);
rtnl_unlock();
}
}
@@ -2210,7 +2154,7 @@ void gve_handle_report_stats(struct gve_priv *priv)
};
stats[stats_idx++] = (struct stats) {
.stat_name = cpu_to_be32(RX_BUFFERS_POSTED),
- .value = cpu_to_be64(priv->rx[0].fill_cnt),
+ .value = cpu_to_be64(priv->rx[idx].fill_cnt),
.queue_id = cpu_to_be32(idx),
};
}
@@ -2232,14 +2176,17 @@ static void gve_service_task(struct work_struct *work)
static void gve_set_netdev_xdp_features(struct gve_priv *priv)
{
+ xdp_features_t xdp_features;
+
if (priv->queue_format == GVE_GQI_QPL_FORMAT) {
- priv->dev->xdp_features = NETDEV_XDP_ACT_BASIC;
- priv->dev->xdp_features |= NETDEV_XDP_ACT_REDIRECT;
- priv->dev->xdp_features |= NETDEV_XDP_ACT_NDO_XMIT;
- priv->dev->xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY;
+ xdp_features = NETDEV_XDP_ACT_BASIC;
+ xdp_features |= NETDEV_XDP_ACT_REDIRECT;
+ xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY;
} else {
- priv->dev->xdp_features = 0;
+ xdp_features = 0;
}
+
+ xdp_set_features_flag_locked(priv->dev, xdp_features);
}
static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
@@ -2313,6 +2260,7 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
priv->rx_cfg.num_queues = min_t(int, priv->default_num_queues,
priv->rx_cfg.num_queues);
}
+ priv->tx_cfg.num_xdp_queues = 0;
dev_info(&priv->pdev->dev, "TX queues %d, RX queues %d\n",
priv->tx_cfg.num_queues, priv->rx_cfg.num_queues);
@@ -2693,7 +2641,7 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
priv->service_task_flags = 0x0;
priv->state_flags = 0x0;
priv->ethtool_flags = 0x0;
- priv->data_buffer_size_dqo = GVE_DEFAULT_RX_BUFFER_SIZE;
+ priv->rx_cfg.packet_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE;
priv->max_rx_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE;
gve_set_probe_in_progress(priv);
@@ -2712,6 +2660,9 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto abort_with_wq;
+ if (!gve_is_gqi(priv) && !gve_is_qpl(priv))
+ dev->netmem_tx = true;
+
err = register_netdev(dev);
if (err)
goto abort_with_gve_init;
@@ -2769,6 +2720,7 @@ static void gve_shutdown(struct pci_dev *pdev)
bool was_up = netif_running(priv->dev);
rtnl_lock();
+ netdev_lock(netdev);
if (was_up && gve_close(priv->dev)) {
/* If the dev was up, attempt to close, if close fails, reset */
gve_reset_and_teardown(priv, was_up);
@@ -2776,6 +2728,7 @@ static void gve_shutdown(struct pci_dev *pdev)
/* If the dev wasn't up or close worked, finish tearing down */
gve_teardown_priv_resources(priv);
}
+ netdev_unlock(netdev);
rtnl_unlock();
}
@@ -2788,6 +2741,7 @@ static int gve_suspend(struct pci_dev *pdev, pm_message_t state)
priv->suspend_cnt++;
rtnl_lock();
+ netdev_lock(netdev);
if (was_up && gve_close(priv->dev)) {
/* If the dev was up, attempt to close, if close fails, reset */
gve_reset_and_teardown(priv, was_up);
@@ -2796,6 +2750,7 @@ static int gve_suspend(struct pci_dev *pdev, pm_message_t state)
gve_teardown_priv_resources(priv);
}
priv->up_before_suspend = was_up;
+ netdev_unlock(netdev);
rtnl_unlock();
return 0;
}
@@ -2808,7 +2763,9 @@ static int gve_resume(struct pci_dev *pdev)
priv->resume_cnt++;
rtnl_lock();
+ netdev_lock(netdev);
err = gve_reset_recovery(priv, priv->up_before_suspend);
+ netdev_unlock(netdev);
rtnl_unlock();
return err;
}
diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
index acb73d4d0de6..90e875c1832f 100644
--- a/drivers/net/ethernet/google/gve/gve_rx.c
+++ b/drivers/net/ethernet/google/gve/gve_rx.c
@@ -141,12 +141,15 @@ void gve_rx_free_ring_gqi(struct gve_priv *priv, struct gve_rx_ring *rx,
netif_dbg(priv, drv, priv->dev, "freed rx ring %d\n", idx);
}
-static void gve_setup_rx_buffer(struct gve_rx_slot_page_info *page_info,
- dma_addr_t addr, struct page *page, __be64 *slot_addr)
+static void gve_setup_rx_buffer(struct gve_rx_ring *rx,
+ struct gve_rx_slot_page_info *page_info,
+ dma_addr_t addr, struct page *page,
+ __be64 *slot_addr)
{
page_info->page = page;
page_info->page_offset = 0;
page_info->page_address = page_address(page);
+ page_info->buf_size = rx->packet_buffer_size;
*slot_addr = cpu_to_be64(addr);
/* The page already has 1 ref */
page_ref_add(page, INT_MAX - 1);
@@ -171,7 +174,7 @@ static int gve_rx_alloc_buffer(struct gve_priv *priv, struct device *dev,
return err;
}
- gve_setup_rx_buffer(page_info, dma, page, &data_slot->addr);
+ gve_setup_rx_buffer(rx, page_info, dma, page, &data_slot->addr);
return 0;
}
@@ -199,7 +202,8 @@ static int gve_rx_prefill_pages(struct gve_rx_ring *rx,
struct page *page = rx->data.qpl->pages[i];
dma_addr_t addr = i * PAGE_SIZE;
- gve_setup_rx_buffer(&rx->data.page_info[i], addr, page,
+ gve_setup_rx_buffer(rx, &rx->data.page_info[i], addr,
+ page,
&rx->data.data_ring[i].qpl_offset);
continue;
}
@@ -222,6 +226,7 @@ static int gve_rx_prefill_pages(struct gve_rx_ring *rx,
rx->qpl_copy_pool[j].page = page;
rx->qpl_copy_pool[j].page_offset = 0;
rx->qpl_copy_pool[j].page_address = page_address(page);
+ rx->qpl_copy_pool[j].buf_size = rx->packet_buffer_size;
/* The page already has 1 ref. */
page_ref_add(page, INT_MAX - 1);
@@ -283,6 +288,7 @@ int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
rx->gve = priv;
rx->q_num = idx;
+ rx->packet_buffer_size = cfg->packet_buffer_size;
rx->mask = slots - 1;
rx->data.raw_addressing = cfg->raw_addressing;
@@ -351,7 +357,6 @@ int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
rx->db_threshold = slots / 2;
gve_rx_init_ring_state_gqi(rx);
- rx->packet_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE;
gve_rx_ctx_clear(&rx->ctx);
return 0;
@@ -385,12 +390,12 @@ int gve_rx_alloc_rings_gqi(struct gve_priv *priv,
int err = 0;
int i, j;
- rx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_rx_ring),
+ rx = kvcalloc(cfg->qcfg_rx->max_queues, sizeof(struct gve_rx_ring),
GFP_KERNEL);
if (!rx)
return -ENOMEM;
- for (i = 0; i < cfg->qcfg->num_queues; i++) {
+ for (i = 0; i < cfg->qcfg_rx->num_queues; i++) {
err = gve_rx_alloc_ring_gqi(priv, cfg, &rx[i], i);
if (err) {
netif_err(priv, drv, priv->dev,
@@ -419,7 +424,7 @@ void gve_rx_free_rings_gqi(struct gve_priv *priv,
if (!rx)
return;
- for (i = 0; i < cfg->qcfg->num_queues; i++)
+ for (i = 0; i < cfg->qcfg_rx->num_queues; i++)
gve_rx_free_ring_gqi(priv, &rx[i], cfg);
kvfree(rx);
@@ -590,7 +595,7 @@ static struct sk_buff *gve_rx_copy_to_pool(struct gve_rx_ring *rx,
copy_page_info->pad = page_info->pad;
skb = gve_rx_add_frags(napi, copy_page_info,
- rx->packet_buffer_size, len, ctx);
+ copy_page_info->buf_size, len, ctx);
if (unlikely(!skb))
return NULL;
@@ -630,7 +635,8 @@ gve_rx_qpl(struct device *dev, struct net_device *netdev,
* device.
*/
if (page_info->can_flip) {
- skb = gve_rx_add_frags(napi, page_info, rx->packet_buffer_size, len, ctx);
+ skb = gve_rx_add_frags(napi, page_info, page_info->buf_size,
+ len, ctx);
/* No point in recycling if we didn't get the skb */
if (skb) {
/* Make sure that the page isn't freed. */
@@ -680,7 +686,7 @@ static struct sk_buff *gve_rx_skb(struct gve_priv *priv, struct gve_rx_ring *rx,
skb = gve_rx_raw_addressing(&priv->pdev->dev, netdev,
page_info, len, napi,
data_slot,
- rx->packet_buffer_size, ctx);
+ page_info->buf_size, ctx);
} else {
skb = gve_rx_qpl(&priv->pdev->dev, netdev, rx,
page_info, len, napi, data_slot);
@@ -855,7 +861,7 @@ static void gve_rx(struct gve_rx_ring *rx, netdev_features_t feat,
void *old_data;
int xdp_act;
- xdp_init_buff(&xdp, rx->packet_buffer_size, &rx->xdp_rxq);
+ xdp_init_buff(&xdp, page_info->buf_size, &rx->xdp_rxq);
xdp_prepare_buff(&xdp, page_info->page_address +
page_info->page_offset, GVE_RX_PAD,
len, false);
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
index 8ac0047f1ada..dcb0545baa50 100644
--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -109,10 +109,13 @@ static void gve_rx_reset_ring_dqo(struct gve_priv *priv, int idx)
void gve_rx_stop_ring_dqo(struct gve_priv *priv, int idx)
{
int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
+ struct gve_rx_ring *rx = &priv->rx[idx];
if (!gve_rx_was_added_to_block(priv, idx))
return;
+ if (rx->dqo.page_pool)
+ page_pool_disable_direct_recycling(rx->dqo.page_pool);
gve_remove_napi(priv, ntfy_idx);
gve_rx_remove_from_block(priv, idx);
gve_rx_reset_ring_dqo(priv, idx);
@@ -221,6 +224,15 @@ int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
memset(rx, 0, sizeof(*rx));
rx->gve = priv;
rx->q_num = idx;
+ rx->packet_buffer_size = cfg->packet_buffer_size;
+
+ if (cfg->xdp) {
+ rx->packet_buffer_truesize = GVE_XDP_RX_BUFFER_SIZE_DQO;
+ rx->rx_headroom = XDP_PACKET_HEADROOM;
+ } else {
+ rx->packet_buffer_truesize = rx->packet_buffer_size;
+ rx->rx_headroom = 0;
+ }
rx->dqo.num_buf_states = cfg->raw_addressing ? buffer_queue_slots :
gve_get_rx_pages_per_qpl_dqo(cfg->ring_size);
@@ -251,7 +263,7 @@ int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
goto err;
if (cfg->raw_addressing) {
- pool = gve_rx_create_page_pool(priv, rx);
+ pool = gve_rx_create_page_pool(priv, rx, cfg->xdp);
if (IS_ERR(pool))
goto err;
@@ -297,12 +309,12 @@ int gve_rx_alloc_rings_dqo(struct gve_priv *priv,
int err;
int i;
- rx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_rx_ring),
+ rx = kvcalloc(cfg->qcfg_rx->max_queues, sizeof(struct gve_rx_ring),
GFP_KERNEL);
if (!rx)
return -ENOMEM;
- for (i = 0; i < cfg->qcfg->num_queues; i++) {
+ for (i = 0; i < cfg->qcfg_rx->num_queues; i++) {
err = gve_rx_alloc_ring_dqo(priv, cfg, &rx[i], i);
if (err) {
netif_err(priv, drv, priv->dev,
@@ -331,7 +343,7 @@ void gve_rx_free_rings_dqo(struct gve_priv *priv,
if (!rx)
return;
- for (i = 0; i < cfg->qcfg->num_queues; i++)
+ for (i = 0; i < cfg->qcfg_rx->num_queues; i++)
gve_rx_free_ring_dqo(priv, &rx[i], cfg);
kvfree(rx);
@@ -474,6 +486,25 @@ static int gve_rx_copy_ondemand(struct gve_rx_ring *rx,
return 0;
}
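+/* Add the received buffer to the skb as a frag, using the netmem variant
+ * when the ring is backed by a page pool.
+ */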
+static void gve_skb_add_rx_frag(struct gve_rx_ring *rx,
+ struct gve_rx_buf_state_dqo *buf_state,
+ int num_frags, u16 buf_len)
+{
+ if (rx->dqo.page_pool) {
+ skb_add_rx_frag_netmem(rx->ctx.skb_tail, num_frags,
+ buf_state->page_info.netmem,
+ buf_state->page_info.page_offset +
+ buf_state->page_info.pad, buf_len,
+ buf_state->page_info.buf_size);
+ } else {
+ skb_add_rx_frag(rx->ctx.skb_tail, num_frags,
+ buf_state->page_info.page,
+ buf_state->page_info.page_offset +
+ buf_state->page_info.pad, buf_len,
+ buf_state->page_info.buf_size);
+ }
+}
+
/* Chains multi skbs for single rx packet.
* Returns 0 if buffer is appended, -1 otherwise.
*/
@@ -511,14 +542,34 @@ static int gve_rx_append_frags(struct napi_struct *napi,
if (gve_rx_should_trigger_copy_ondemand(rx))
return gve_rx_copy_ondemand(rx, buf_state, buf_len);
- skb_add_rx_frag(rx->ctx.skb_tail, num_frags,
- buf_state->page_info.page,
- buf_state->page_info.page_offset,
- buf_len, buf_state->page_info.buf_size);
+ gve_skb_add_rx_frag(rx, buf_state, num_frags, buf_len);
gve_reuse_buffer(rx, buf_state);
return 0;
}
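+/* Count a non-XDP_PASS verdict (XDP_TX and XDP_REDIRECT are recorded as
+ * errors) and release the buffer back to the ring.
+ */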
+static void gve_xdp_done_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
+ struct xdp_buff *xdp, struct bpf_prog *xprog,
+ int xdp_act,
+ struct gve_rx_buf_state_dqo *buf_state)
+{
+ u64_stats_update_begin(&rx->statss);
+ switch (xdp_act) {
+ case XDP_ABORTED:
+ case XDP_DROP:
+ default:
+ rx->xdp_actions[xdp_act]++;
+ break;
+ case XDP_TX:
+ rx->xdp_tx_errors++;
+ break;
+ case XDP_REDIRECT:
+ rx->xdp_redirect_errors++;
+ break;
+ }
+ u64_stats_update_end(&rx->statss);
+ gve_free_buffer(rx, buf_state);
+}
+
/* Returns 0 if descriptor is completed successfully.
* Returns -EINVAL if descriptor is invalid.
* Returns -ENOMEM if data cannot be copied to skb.
@@ -533,6 +584,7 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
const bool hsplit = compl_desc->split_header;
struct gve_rx_buf_state_dqo *buf_state;
struct gve_priv *priv = rx->gve;
+ struct bpf_prog *xprog;
u16 buf_len;
u16 hdr_len;
@@ -559,7 +611,12 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
 	/* Page might not have been used for a while and was likely last written
* by a different thread.
*/
- prefetch(buf_state->page_info.page);
+ if (rx->dqo.page_pool) {
+ if (!netmem_is_net_iov(buf_state->page_info.netmem))
+ prefetch(netmem_to_page(buf_state->page_info.netmem));
+ } else {
+ prefetch(buf_state->page_info.page);
+ }
/* Copy the header into the skb in the case of header split */
if (hsplit) {
@@ -588,7 +645,8 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
/* Sync the portion of dma buffer for CPU to read. */
dma_sync_single_range_for_cpu(&priv->pdev->dev, buf_state->addr,
- buf_state->page_info.page_offset,
+ buf_state->page_info.page_offset +
+ buf_state->page_info.pad,
buf_len, DMA_FROM_DEVICE);
/* Append to current skb if one exists. */
@@ -600,6 +658,34 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
return 0;
}
+ xprog = READ_ONCE(priv->xdp_prog);
+ if (xprog) {
+ struct xdp_buff xdp;
+ void *old_data;
+ int xdp_act;
+
+ xdp_init_buff(&xdp, buf_state->page_info.buf_size,
+ &rx->xdp_rxq);
+ xdp_prepare_buff(&xdp,
+ buf_state->page_info.page_address +
+ buf_state->page_info.page_offset,
+ buf_state->page_info.pad,
+ buf_len, false);
+ old_data = xdp.data;
+ xdp_act = bpf_prog_run_xdp(xprog, &xdp);
+ buf_state->page_info.pad += xdp.data - old_data;
+ buf_len = xdp.data_end - xdp.data;
+ if (xdp_act != XDP_PASS) {
+ gve_xdp_done_dqo(priv, rx, &xdp, xprog, xdp_act,
+ buf_state);
+ return 0;
+ }
+
+ u64_stats_update_begin(&rx->statss);
+ rx->xdp_actions[XDP_PASS]++;
+ u64_stats_update_end(&rx->statss);
+ }
+
if (eop && buf_len <= priv->rx_copybreak) {
rx->ctx.skb_head = gve_rx_copy(priv->dev, napi,
&buf_state->page_info, buf_len);
@@ -630,9 +716,7 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
if (rx->dqo.page_pool)
skb_mark_for_recycle(rx->ctx.skb_head);
- skb_add_rx_frag(rx->ctx.skb_head, 0, buf_state->page_info.page,
- buf_state->page_info.page_offset, buf_len,
- buf_state->page_info.buf_size);
+ gve_skb_add_rx_frag(rx, buf_state, 0, buf_len);
gve_reuse_buffer(rx, buf_state);
return 0;
diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c
index e7fb7d6d283d..1b40bf0c811a 100644
--- a/drivers/net/ethernet/google/gve/gve_tx.c
+++ b/drivers/net/ethernet/google/gve/gve_tx.c
@@ -206,7 +206,10 @@ void gve_tx_stop_ring_gqi(struct gve_priv *priv, int idx)
return;
gve_remove_napi(priv, ntfy_idx);
- gve_clean_tx_done(priv, tx, priv->tx_desc_cnt, false);
+ if (tx->q_num < priv->tx_cfg.num_queues)
+ gve_clean_tx_done(priv, tx, priv->tx_desc_cnt, false);
+ else
+ gve_clean_xdp_done(priv, tx, priv->tx_desc_cnt);
netdev_tx_reset_queue(tx->netdev_txq);
gve_tx_remove_from_block(priv, idx);
}
@@ -331,27 +334,23 @@ int gve_tx_alloc_rings_gqi(struct gve_priv *priv,
struct gve_tx_alloc_rings_cfg *cfg)
{
struct gve_tx_ring *tx = cfg->tx;
+ int total_queues;
int err = 0;
int i, j;
- if (cfg->start_idx + cfg->num_rings > cfg->qcfg->max_queues) {
+ total_queues = cfg->qcfg->num_queues + cfg->num_xdp_rings;
+ if (total_queues > cfg->qcfg->max_queues) {
netif_err(priv, drv, priv->dev,
"Cannot alloc more than the max num of Tx rings\n");
return -EINVAL;
}
- if (cfg->start_idx == 0) {
- tx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_tx_ring),
- GFP_KERNEL);
- if (!tx)
- return -ENOMEM;
- } else if (!tx) {
- netif_err(priv, drv, priv->dev,
- "Cannot alloc tx rings from a nonzero start idx without tx array\n");
- return -EINVAL;
- }
+ tx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_tx_ring),
+ GFP_KERNEL);
+ if (!tx)
+ return -ENOMEM;
- for (i = cfg->start_idx; i < cfg->start_idx + cfg->num_rings; i++) {
+ for (i = 0; i < total_queues; i++) {
err = gve_tx_alloc_ring_gqi(priv, cfg, &tx[i], i);
if (err) {
netif_err(priv, drv, priv->dev,
@@ -367,8 +366,7 @@ int gve_tx_alloc_rings_gqi(struct gve_priv *priv,
cleanup:
for (j = 0; j < i; j++)
gve_tx_free_ring_gqi(priv, &tx[j], cfg);
- if (cfg->start_idx == 0)
- kvfree(tx);
+ kvfree(tx);
return err;
}
@@ -381,13 +379,11 @@ void gve_tx_free_rings_gqi(struct gve_priv *priv,
if (!tx)
return;
- for (i = cfg->start_idx; i < cfg->start_idx + cfg->num_rings; i++)
+ for (i = 0; i < cfg->qcfg->num_queues + cfg->qcfg->num_xdp_queues; i++)
gve_tx_free_ring_gqi(priv, &tx[i], cfg);
- if (cfg->start_idx == 0) {
- kvfree(tx);
- cfg->tx = NULL;
- }
+ kvfree(tx);
+ cfg->tx = NULL;
}
/* gve_tx_avail - Calculates the number of slots available in the ring
@@ -834,11 +830,14 @@ int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
struct gve_tx_ring *tx;
int i, err = 0, qid;
- if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK) || !priv->xdp_prog)
return -EINVAL;
+ if (!gve_get_napi_enabled(priv))
+ return -ENETDOWN;
+
qid = gve_xdp_tx_queue_id(priv,
- smp_processor_id() % priv->num_xdp_queues);
+ smp_processor_id() % priv->tx_cfg.num_xdp_queues);
tx = &priv->tx[qid];
@@ -953,13 +952,9 @@ static int gve_xsk_tx(struct gve_priv *priv, struct gve_tx_ring *tx,
spin_lock(&tx->xdp_lock);
while (sent < budget) {
- if (!gve_can_tx(tx, GVE_TX_START_THRESH))
- goto out;
-
- if (!xsk_tx_peek_desc(tx->xsk_pool, &desc)) {
- tx->xdp_xsk_done = tx->xdp_xsk_wakeup;
+ if (!gve_can_tx(tx, GVE_TX_START_THRESH) ||
+ !xsk_tx_peek_desc(tx->xsk_pool, &desc))
goto out;
- }
data = xsk_buff_raw_get_data(tx->xsk_pool, desc.addr);
nsegs = gve_tx_fill_xdp(priv, tx, data, desc.len, NULL, true);
@@ -975,33 +970,41 @@ out:
return sent;
}
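+/* Transmit pending AF_XDP descriptors on the XDP TX ring paired with this
+ * RX block's queue, and set the pool's need_wakeup flag if it is in use.
+ */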
+int gve_xsk_tx_poll(struct gve_notify_block *rx_block, int budget)
+{
+ struct gve_rx_ring *rx = rx_block->rx;
+ struct gve_priv *priv = rx->gve;
+ struct gve_tx_ring *tx;
+ int sent = 0;
+
+ tx = &priv->tx[gve_xdp_tx_queue_id(priv, rx->q_num)];
+ if (tx->xsk_pool) {
+ sent = gve_xsk_tx(priv, tx, budget);
+
+ u64_stats_update_begin(&tx->statss);
+ tx->xdp_xsk_sent += sent;
+ u64_stats_update_end(&tx->statss);
+ if (xsk_uses_need_wakeup(tx->xsk_pool))
+ xsk_set_tx_need_wakeup(tx->xsk_pool);
+ }
+
+ return sent;
+}
+
bool gve_xdp_poll(struct gve_notify_block *block, int budget)
{
struct gve_priv *priv = block->priv;
struct gve_tx_ring *tx = block->tx;
u32 nic_done;
- bool repoll;
u32 to_do;
/* Find out how much work there is to be done */
nic_done = gve_tx_load_event_counter(priv, tx);
to_do = min_t(u32, (nic_done - tx->done), budget);
gve_clean_xdp_done(priv, tx, to_do);
- repoll = nic_done != tx->done;
-
- if (tx->xsk_pool) {
- int sent = gve_xsk_tx(priv, tx, budget);
-
- u64_stats_update_begin(&tx->statss);
- tx->xdp_xsk_sent += sent;
- u64_stats_update_end(&tx->statss);
- repoll |= (sent == budget);
- if (xsk_uses_need_wakeup(tx->xsk_pool))
- xsk_set_tx_need_wakeup(tx->xsk_pool);
- }
/* If we still have work we want to repoll */
- return repoll;
+ return nic_done != tx->done;
}
bool gve_tx_poll(struct gve_notify_block *block, int budget)
diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
index f879426cb552..9d705d94b065 100644
--- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
@@ -379,27 +379,23 @@ int gve_tx_alloc_rings_dqo(struct gve_priv *priv,
struct gve_tx_alloc_rings_cfg *cfg)
{
struct gve_tx_ring *tx = cfg->tx;
+ int total_queues;
int err = 0;
int i, j;
- if (cfg->start_idx + cfg->num_rings > cfg->qcfg->max_queues) {
+ total_queues = cfg->qcfg->num_queues + cfg->num_xdp_rings;
+ if (total_queues > cfg->qcfg->max_queues) {
netif_err(priv, drv, priv->dev,
"Cannot alloc more than the max num of Tx rings\n");
return -EINVAL;
}
- if (cfg->start_idx == 0) {
- tx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_tx_ring),
- GFP_KERNEL);
- if (!tx)
- return -ENOMEM;
- } else if (!tx) {
- netif_err(priv, drv, priv->dev,
- "Cannot alloc tx rings from a nonzero start idx without tx array\n");
- return -EINVAL;
- }
+ tx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_tx_ring),
+ GFP_KERNEL);
+ if (!tx)
+ return -ENOMEM;
- for (i = cfg->start_idx; i < cfg->start_idx + cfg->num_rings; i++) {
+ for (i = 0; i < total_queues; i++) {
err = gve_tx_alloc_ring_dqo(priv, cfg, &tx[i], i);
if (err) {
netif_err(priv, drv, priv->dev,
@@ -415,8 +411,7 @@ int gve_tx_alloc_rings_dqo(struct gve_priv *priv,
err:
for (j = 0; j < i; j++)
gve_tx_free_ring_dqo(priv, &tx[j], cfg);
- if (cfg->start_idx == 0)
- kvfree(tx);
+ kvfree(tx);
return err;
}
@@ -429,13 +424,11 @@ void gve_tx_free_rings_dqo(struct gve_priv *priv,
if (!tx)
return;
- for (i = cfg->start_idx; i < cfg->start_idx + cfg->num_rings; i++)
+ for (i = 0; i < cfg->qcfg->num_queues + cfg->qcfg->num_xdp_queues; i++)
gve_tx_free_ring_dqo(priv, &tx[i], cfg);
- if (cfg->start_idx == 0) {
- kvfree(tx);
- cfg->tx = NULL;
- }
+ kvfree(tx);
+ cfg->tx = NULL;
}
/* Returns the number of slots available in the ring */
@@ -667,7 +660,8 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
goto err;
dma_unmap_len_set(pkt, len[pkt->num_bufs], len);
- dma_unmap_addr_set(pkt, dma[pkt->num_bufs], addr);
+ netmem_dma_unmap_addr_set(skb_frag_netmem(frag), pkt,
+ dma[pkt->num_bufs], addr);
++pkt->num_bufs;
gve_tx_fill_pkt_desc_dqo(tx, desc_idx, skb, len, addr,
@@ -770,6 +764,9 @@ static int gve_tx_add_skb_dqo(struct gve_tx_ring *tx,
s16 completion_tag;
pkt = gve_alloc_pending_packet(tx);
+ if (!pkt)
+ return -ENOMEM;
+
pkt->skb = skb;
completion_tag = pkt - tx->dqo.pending_packets;
@@ -1045,8 +1042,9 @@ static void gve_unmap_packet(struct device *dev,
dma_unmap_single(dev, dma_unmap_addr(pkt, dma[0]),
dma_unmap_len(pkt, len[0]), DMA_TO_DEVICE);
for (i = 1; i < pkt->num_bufs; i++) {
- dma_unmap_page(dev, dma_unmap_addr(pkt, dma[i]),
- dma_unmap_len(pkt, len[i]), DMA_TO_DEVICE);
+ netmem_dma_unmap_page_attrs(dev, dma_unmap_addr(pkt, dma[i]),
+ dma_unmap_len(pkt, len[i]),
+ DMA_TO_DEVICE, 0);
}
pkt->num_bufs = 0;
}
@@ -1146,8 +1144,7 @@ static void gve_handle_miss_completion(struct gve_priv *priv,
/* jiffies can wraparound but time comparisons can handle overflows. */
pending_packet->timeout_jiffies =
jiffies +
- msecs_to_jiffies(GVE_REINJECT_COMPL_TIMEOUT *
- MSEC_PER_SEC);
+ secs_to_jiffies(GVE_REINJECT_COMPL_TIMEOUT);
add_to_list(tx, &tx->dqo_compl.miss_completions, pending_packet);
*bytes += pending_packet->skb->len;
@@ -1191,8 +1188,7 @@ static void remove_miss_completions(struct gve_priv *priv,
pending_packet->state = GVE_PACKET_STATE_TIMED_OUT_COMPL;
pending_packet->timeout_jiffies =
jiffies +
- msecs_to_jiffies(GVE_DEALLOCATE_COMPL_TIMEOUT *
- MSEC_PER_SEC);
+ secs_to_jiffies(GVE_DEALLOCATE_COMPL_TIMEOUT);
/* Maintain pending packet in another list so the packet can be
* unallocated at a later time.
*/
diff --git a/drivers/net/ethernet/google/gve/gve_utils.c b/drivers/net/ethernet/google/gve/gve_utils.c
index 30fef100257e..ace9b8698021 100644
--- a/drivers/net/ethernet/google/gve/gve_utils.c
+++ b/drivers/net/ethernet/google/gve/gve_utils.c
@@ -110,13 +110,13 @@ void gve_add_napi(struct gve_priv *priv, int ntfy_idx,
{
struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
- netif_napi_add(priv->dev, &block->napi, gve_poll);
- netif_napi_set_irq(&block->napi, block->irq);
+ netif_napi_add_locked(priv->dev, &block->napi, gve_poll);
+ netif_napi_set_irq_locked(&block->napi, block->irq);
}
void gve_remove_napi(struct gve_priv *priv, int ntfy_idx)
{
struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
- netif_napi_del(&block->napi);
+ netif_napi_del_locked(&block->napi);
}
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/Makefile b/drivers/net/ethernet/hisilicon/hibmcge/Makefile
index ae58ac38c206..1a9da564b306 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/Makefile
+++ b/drivers/net/ethernet/hisilicon/hibmcge/Makefile
@@ -5,4 +5,5 @@
obj-$(CONFIG_HIBMCGE) += hibmcge.o
-hibmcge-objs = hbg_main.o hbg_hw.o hbg_mdio.o hbg_irq.o hbg_txrx.o hbg_ethtool.o
+hibmcge-objs = hbg_main.o hbg_hw.o hbg_mdio.o hbg_irq.o hbg_txrx.o hbg_ethtool.o \
+ hbg_debugfs.o hbg_err.o hbg_diagnose.o
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h
index 96daf058d387..7725cb0c5c8a 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h
@@ -4,6 +4,7 @@
#ifndef __HBG_COMMON_H
#define __HBG_COMMON_H
+#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include "hbg_reg.h"
@@ -33,6 +34,16 @@ enum hbg_tx_state {
enum hbg_nic_state {
HBG_NIC_STATE_EVENT_HANDLING = 0,
+ HBG_NIC_STATE_RESETTING,
+ HBG_NIC_STATE_RESET_FAIL,
+ HBG_NIC_STATE_NEED_RESET, /* trigger a reset in scheduled task */
+ HBG_NIC_STATE_NP_LINK_FAIL,
+};
+
+enum hbg_reset_type {
+ HBG_RESET_TYPE_NONE = 0,
+ HBG_RESET_TYPE_FLR,
+ HBG_RESET_TYPE_FUNCTION,
};
struct hbg_buffer {
@@ -72,6 +83,7 @@ enum hbg_hw_event_type {
HBG_HW_EVENT_NONE = 0,
HBG_HW_EVENT_INIT, /* driver is loading */
HBG_HW_EVENT_RESET,
+ HBG_HW_EVENT_CORE_RESET,
};
struct hbg_dev_specs {
@@ -84,6 +96,7 @@ struct hbg_dev_specs {
u32 vlan_layers;
u32 max_mtu;
u32 min_mtu;
+ u32 uc_mac_num;
u32 max_frame_len;
u32 rx_buf_size;
@@ -94,14 +107,17 @@ struct hbg_irq_info {
u32 mask;
bool re_enable;
bool need_print;
- u64 count;
+ bool need_reset;
- void (*irq_handle)(struct hbg_priv *priv, struct hbg_irq_info *info);
+ void (*irq_handle)(struct hbg_priv *priv,
+ const struct hbg_irq_info *info);
};
struct hbg_vector {
char name[HBG_VECTOR_NUM][32];
- struct hbg_irq_info *info_array;
+
+ u64 *stats_array;
+ const struct hbg_irq_info *info_array;
u32 info_array_len;
};
@@ -114,6 +130,134 @@ struct hbg_mac {
u32 duplex;
u32 autoneg;
u32 link_status;
+ u32 pause_autoneg;
+};
+
+struct hbg_mac_table_entry {
+ u8 addr[ETH_ALEN];
+};
+
+struct hbg_mac_filter {
+ struct hbg_mac_table_entry *mac_table;
+ u32 table_max_len;
+ bool enabled;
+};
+
+/* saved for restore after reset */
+struct hbg_user_def {
+ struct ethtool_pauseparam pause_param;
+};
+
+struct hbg_stats {
+ u64 rx_desc_drop;
+ u64 rx_desc_l2_err_cnt;
+ u64 rx_desc_pkt_len_err_cnt;
+ u64 rx_desc_l3l4_err_cnt;
+ u64 rx_desc_l3_wrong_head_cnt;
+ u64 rx_desc_l3_csum_err_cnt;
+ u64 rx_desc_l3_len_err_cnt;
+ u64 rx_desc_l3_zero_ttl_cnt;
+ u64 rx_desc_l3_other_cnt;
+ u64 rx_desc_l4_err_cnt;
+ u64 rx_desc_l4_wrong_head_cnt;
+ u64 rx_desc_l4_len_err_cnt;
+ u64 rx_desc_l4_csum_err_cnt;
+ u64 rx_desc_l4_zero_port_num_cnt;
+ u64 rx_desc_l4_other_cnt;
+ u64 rx_desc_frag_cnt;
+ u64 rx_desc_ip_ver_err_cnt;
+ u64 rx_desc_ipv4_pkt_cnt;
+ u64 rx_desc_ipv6_pkt_cnt;
+ u64 rx_desc_no_ip_pkt_cnt;
+ u64 rx_desc_ip_pkt_cnt;
+ u64 rx_desc_tcp_pkt_cnt;
+ u64 rx_desc_udp_pkt_cnt;
+ u64 rx_desc_vlan_pkt_cnt;
+ u64 rx_desc_icmp_pkt_cnt;
+ u64 rx_desc_arp_pkt_cnt;
+ u64 rx_desc_rarp_pkt_cnt;
+ u64 rx_desc_multicast_pkt_cnt;
+ u64 rx_desc_broadcast_pkt_cnt;
+ u64 rx_desc_ipsec_pkt_cnt;
+ u64 rx_desc_ip_opt_pkt_cnt;
+ u64 rx_desc_key_not_match_cnt;
+
+ u64 rx_octets_total_ok_cnt;
+ u64 rx_uc_pkt_cnt;
+ u64 rx_mc_pkt_cnt;
+ u64 rx_bc_pkt_cnt;
+ u64 rx_vlan_pkt_cnt;
+ u64 rx_octets_bad_cnt;
+ u64 rx_octets_total_filt_cnt;
+ u64 rx_filt_pkt_cnt;
+ u64 rx_trans_pkt_cnt;
+ u64 rx_framesize_64;
+ u64 rx_framesize_65_127;
+ u64 rx_framesize_128_255;
+ u64 rx_framesize_256_511;
+ u64 rx_framesize_512_1023;
+ u64 rx_framesize_1024_1518;
+ u64 rx_framesize_bt_1518;
+ u64 rx_fcs_error_cnt;
+ u64 rx_data_error_cnt;
+ u64 rx_align_error_cnt;
+ u64 rx_pause_macctl_frame_cnt;
+ u64 rx_unknown_macctl_frame_cnt;
+	/* crc ok, > max_frm_size, < 2 * max_frm_size */
+	u64 rx_frame_long_err_cnt;
+	/* crc fail, > max_frm_size, < 2 * max_frm_size */
+	u64 rx_jabber_err_cnt;
+	/* > 2 * max_frm_size */
+ u64 rx_frame_very_long_err_cnt;
+ /* < 64byte, >= short_runts_thr */
+ u64 rx_frame_runt_err_cnt;
+ /* < short_runts_thr */
+ u64 rx_frame_short_err_cnt;
+	/* PCU: dropped when the RX FIFO is full. */
+ u64 rx_overflow_cnt;
+ /* GMAC: the count of overflows of the RX FIFO */
+ u64 rx_overrun_cnt;
+ /* PCU: the count of buffer alloc errors in RX */
+ u64 rx_bufrq_err_cnt;
+ /* PCU: the count of write descriptor errors in RX */
+ u64 rx_we_err_cnt;
+ /* GMAC: the count of pkts that contain PAD but length is not 64 */
+ u64 rx_lengthfield_err_cnt;
+ u64 rx_fail_comma_cnt;
+
+ u64 rx_dma_err_cnt;
+ u64 rx_fifo_less_empty_thrsld_cnt;
+
+ u64 tx_octets_total_ok_cnt;
+ u64 tx_uc_pkt_cnt;
+ u64 tx_mc_pkt_cnt;
+ u64 tx_bc_pkt_cnt;
+ u64 tx_vlan_pkt_cnt;
+ u64 tx_octets_bad_cnt;
+ u64 tx_trans_pkt_cnt;
+ u64 tx_pause_frame_cnt;
+ u64 tx_framesize_64;
+ u64 tx_framesize_65_127;
+ u64 tx_framesize_128_255;
+ u64 tx_framesize_256_511;
+ u64 tx_framesize_512_1023;
+ u64 tx_framesize_1024_1518;
+ u64 tx_framesize_bt_1518;
+ /* GMAC: the count of times that frames fail to be transmitted
+ * due to internal errors.
+ */
+ u64 tx_underrun_err_cnt;
+ u64 tx_add_cs_fail_cnt;
+ /* PCU: the count of buffer free errors in TX */
+ u64 tx_bufrl_err_cnt;
+ u64 tx_crc_err_cnt;
+ u64 tx_drop_cnt;
+ u64 tx_excessive_length_drop_cnt;
+
+ u64 tx_timeout_cnt;
+ u64 tx_dma_err_cnt;
+
+ u64 np_link_fail_cnt;
};
struct hbg_priv {
@@ -126,6 +270,15 @@ struct hbg_priv {
struct hbg_vector vectors;
struct hbg_ring tx_ring;
struct hbg_ring rx_ring;
+ struct hbg_mac_filter filter;
+ enum hbg_reset_type reset_type;
+ struct hbg_user_def user_def;
+ struct hbg_stats stats;
+ unsigned long last_update_stats_time;
+ struct delayed_work service_task;
};
+void hbg_err_reset_task_schedule(struct hbg_priv *priv);
+void hbg_np_link_fail_task_schedule(struct hbg_priv *priv);
+
#endif
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.c
new file mode 100644
index 000000000000..01ad82d2f5cc
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.c
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2024 Hisilicon Limited.
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/etherdevice.h>
+#include <linux/seq_file.h>
+#include <linux/string_choices.h>
+#include "hbg_common.h"
+#include "hbg_debugfs.h"
+#include "hbg_hw.h"
+#include "hbg_irq.h"
+#include "hbg_txrx.h"
+
+static struct dentry *hbg_dbgfs_root;
+
+struct hbg_dbg_info {
+ const char *name;
+ int (*read)(struct seq_file *seq, void *data);
+};
+
+#define state_str_true_false(p, s) str_true_false(test_bit(s, &(p)->state))
+
+static void hbg_dbg_ring(struct hbg_priv *priv, struct hbg_ring *ring,
+ struct seq_file *s)
+{
+ u32 irq_mask = ring->dir == HBG_DIR_TX ? HBG_INT_MSK_TX_B :
+ HBG_INT_MSK_RX_B;
+
+ seq_printf(s, "ring used num: %u\n",
+ hbg_get_queue_used_num(ring));
+ seq_printf(s, "ring max num: %u\n", ring->len);
+ seq_printf(s, "ring head: %u, tail: %u\n", ring->head, ring->tail);
+ seq_printf(s, "fifo used num: %u\n",
+ hbg_hw_get_fifo_used_num(priv, ring->dir));
+ seq_printf(s, "fifo max num: %u\n",
+ hbg_get_spec_fifo_max_num(priv, ring->dir));
+ seq_printf(s, "irq enabled: %s\n",
+ str_true_false(hbg_hw_irq_is_enabled(priv, irq_mask)));
+}
+
+static int hbg_dbg_tx_ring(struct seq_file *s, void *unused)
+{
+ struct net_device *netdev = dev_get_drvdata(s->private);
+ struct hbg_priv *priv = netdev_priv(netdev);
+
+ hbg_dbg_ring(priv, &priv->tx_ring, s);
+ return 0;
+}
+
+static int hbg_dbg_rx_ring(struct seq_file *s, void *unused)
+{
+ struct net_device *netdev = dev_get_drvdata(s->private);
+ struct hbg_priv *priv = netdev_priv(netdev);
+
+ hbg_dbg_ring(priv, &priv->rx_ring, s);
+ return 0;
+}
+
+static int hbg_dbg_irq_info(struct seq_file *s, void *unused)
+{
+ struct net_device *netdev = dev_get_drvdata(s->private);
+ struct hbg_priv *priv = netdev_priv(netdev);
+ const struct hbg_irq_info *info;
+ u32 i;
+
+ for (i = 0; i < priv->vectors.info_array_len; i++) {
+ info = &priv->vectors.info_array[i];
+ seq_printf(s,
+ "%-20s: enabled: %-5s, reset: %-5s, logged: %-5s, count: %llu\n",
+ info->name,
+ str_true_false(hbg_hw_irq_is_enabled(priv,
+ info->mask)),
+ str_true_false(info->need_reset),
+ str_true_false(info->need_print),
+ priv->vectors.stats_array[i]);
+ }
+
+ return 0;
+}
+
+static int hbg_dbg_mac_table(struct seq_file *s, void *unused)
+{
+ struct net_device *netdev = dev_get_drvdata(s->private);
+ struct hbg_priv *priv = netdev_priv(netdev);
+ struct hbg_mac_filter *filter;
+ u32 i;
+
+ filter = &priv->filter;
+ seq_printf(s, "mac addr max count: %u\n", filter->table_max_len);
+ seq_printf(s, "filter enabled: %s\n", str_true_false(filter->enabled));
+
+ for (i = 0; i < filter->table_max_len; i++) {
+ if (is_zero_ether_addr(filter->mac_table[i].addr))
+ continue;
+
+ seq_printf(s, "[%u] %pM\n", i, filter->mac_table[i].addr);
+ }
+
+ return 0;
+}
+
+static const char * const reset_type_str[] = {"None", "FLR", "Function"};
+
+static int hbg_dbg_nic_state(struct seq_file *s, void *unused)
+{
+ struct net_device *netdev = dev_get_drvdata(s->private);
+ struct hbg_priv *priv = netdev_priv(netdev);
+ bool np_link_fail;
+
+ seq_printf(s, "event handling state: %s\n",
+ state_str_true_false(priv, HBG_NIC_STATE_EVENT_HANDLING));
+ seq_printf(s, "resetting state: %s\n",
+ state_str_true_false(priv, HBG_NIC_STATE_RESETTING));
+ seq_printf(s, "reset fail state: %s\n",
+ state_str_true_false(priv, HBG_NIC_STATE_RESET_FAIL));
+ seq_printf(s, "last reset type: %s\n",
+ reset_type_str[priv->reset_type]);
+ seq_printf(s, "need reset state: %s\n",
+ state_str_true_false(priv, HBG_NIC_STATE_NEED_RESET));
+
+ np_link_fail = !hbg_reg_read_field(priv, HBG_REG_AN_NEG_STATE_ADDR,
+ HBG_REG_AN_NEG_STATE_NP_LINK_OK_B);
+ seq_printf(s, "np_link fail state: %s\n", str_true_false(np_link_fail));
+
+ return 0;
+}
+
+static const struct hbg_dbg_info hbg_dbg_infos[] = {
+ { "tx_ring", hbg_dbg_tx_ring },
+ { "rx_ring", hbg_dbg_rx_ring },
+ { "irq_info", hbg_dbg_irq_info },
+ { "mac_table", hbg_dbg_mac_table },
+ { "nic_state", hbg_dbg_nic_state },
+};
+
+static void hbg_debugfs_uninit(void *data)
+{
+ debugfs_remove_recursive((struct dentry *)data);
+}
+
+void hbg_debugfs_init(struct hbg_priv *priv)
+{
+ const char *name = pci_name(priv->pdev);
+ struct device *dev = &priv->pdev->dev;
+ struct dentry *root;
+ u32 i;
+
+ root = debugfs_create_dir(name, hbg_dbgfs_root);
+
+ for (i = 0; i < ARRAY_SIZE(hbg_dbg_infos); i++)
+ debugfs_create_devm_seqfile(dev, hbg_dbg_infos[i].name,
+ root, hbg_dbg_infos[i].read);
+
+ /* Ignore the failure because debugfs is not a key feature. */
+ devm_add_action_or_reset(dev, hbg_debugfs_uninit, root);
+}
+
+void hbg_debugfs_register(void)
+{
+ hbg_dbgfs_root = debugfs_create_dir("hibmcge", NULL);
+}
+
+void hbg_debugfs_unregister(void)
+{
+ debugfs_remove_recursive(hbg_dbgfs_root);
+ hbg_dbgfs_root = NULL;
+}
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.h
new file mode 100644
index 000000000000..80670d66bbeb
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2024 Hisilicon Limited. */
+
+#ifndef __HBG_DEBUGFS_H
+#define __HBG_DEBUGFS_H
+
+void hbg_debugfs_register(void);
+void hbg_debugfs_unregister(void);
+
+void hbg_debugfs_init(struct hbg_priv *priv);
+
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.c
new file mode 100644
index 000000000000..f23fb5920c3c
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.c
@@ -0,0 +1,348 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2025 Hisilicon Limited.
+
+#include <linux/iopoll.h>
+#include <linux/phy.h>
+#include "hbg_common.h"
+#include "hbg_ethtool.h"
+#include "hbg_hw.h"
+#include "hbg_diagnose.h"
+
+#define HBG_MSG_DATA_MAX_NUM 64
+
+struct hbg_diagnose_message {
+ u32 opcode;
+ u32 status;
+ u32 data_num;
+ struct hbg_priv *priv;
+
+ u32 data[HBG_MSG_DATA_MAX_NUM];
+};
+
+#define HBG_HW_PUSH_WAIT_TIMEOUT_US (2 * 1000 * 1000)
+#define HBG_HW_PUSH_WAIT_INTERVAL_US (1 * 1000)
+
+enum hbg_push_cmd {
+ HBG_PUSH_CMD_IRQ = 0,
+ HBG_PUSH_CMD_STATS,
+ HBG_PUSH_CMD_LINK,
+};
+
+struct hbg_push_stats_info {
+	/* id is used to match the name of the current stats item,
+	 * and for pretty printing on the BMC.
+ */
+ u32 id;
+ u64 offset;
+};
+
+struct hbg_push_irq_info {
+	/* id is used to match the name of the current irq,
+	 * and for pretty printing on the BMC.
+ */
+ u32 id;
+ u32 mask;
+};
+
+#define HBG_PUSH_IRQ_I(name, id) {id, HBG_INT_MSK_##name##_B}
+static const struct hbg_push_irq_info hbg_push_irq_list[] = {
+ HBG_PUSH_IRQ_I(RX, 0),
+ HBG_PUSH_IRQ_I(TX, 1),
+ HBG_PUSH_IRQ_I(TX_PKT_CPL, 2),
+ HBG_PUSH_IRQ_I(MAC_MII_FIFO_ERR, 3),
+ HBG_PUSH_IRQ_I(MAC_PCS_RX_FIFO_ERR, 4),
+ HBG_PUSH_IRQ_I(MAC_PCS_TX_FIFO_ERR, 5),
+ HBG_PUSH_IRQ_I(MAC_APP_RX_FIFO_ERR, 6),
+ HBG_PUSH_IRQ_I(MAC_APP_TX_FIFO_ERR, 7),
+ HBG_PUSH_IRQ_I(SRAM_PARITY_ERR, 8),
+ HBG_PUSH_IRQ_I(TX_AHB_ERR, 9),
+ HBG_PUSH_IRQ_I(RX_BUF_AVL, 10),
+ HBG_PUSH_IRQ_I(REL_BUF_ERR, 11),
+ HBG_PUSH_IRQ_I(TXCFG_AVL, 12),
+ HBG_PUSH_IRQ_I(TX_DROP, 13),
+ HBG_PUSH_IRQ_I(RX_DROP, 14),
+ HBG_PUSH_IRQ_I(RX_AHB_ERR, 15),
+ HBG_PUSH_IRQ_I(MAC_FIFO_ERR, 16),
+ HBG_PUSH_IRQ_I(RBREQ_ERR, 17),
+ HBG_PUSH_IRQ_I(WE_ERR, 18),
+};
+
+#define HBG_PUSH_STATS_I(name, id) {id, HBG_STATS_FIELD_OFF(name)}
+static const struct hbg_push_stats_info hbg_push_stats_list[] = {
+ HBG_PUSH_STATS_I(rx_desc_drop, 0),
+ HBG_PUSH_STATS_I(rx_desc_l2_err_cnt, 1),
+ HBG_PUSH_STATS_I(rx_desc_pkt_len_err_cnt, 2),
+ HBG_PUSH_STATS_I(rx_desc_l3_wrong_head_cnt, 3),
+ HBG_PUSH_STATS_I(rx_desc_l3_csum_err_cnt, 4),
+ HBG_PUSH_STATS_I(rx_desc_l3_len_err_cnt, 5),
+ HBG_PUSH_STATS_I(rx_desc_l3_zero_ttl_cnt, 6),
+ HBG_PUSH_STATS_I(rx_desc_l3_other_cnt, 7),
+ HBG_PUSH_STATS_I(rx_desc_l4_err_cnt, 8),
+ HBG_PUSH_STATS_I(rx_desc_l4_wrong_head_cnt, 9),
+ HBG_PUSH_STATS_I(rx_desc_l4_len_err_cnt, 10),
+ HBG_PUSH_STATS_I(rx_desc_l4_csum_err_cnt, 11),
+ HBG_PUSH_STATS_I(rx_desc_l4_zero_port_num_cnt, 12),
+ HBG_PUSH_STATS_I(rx_desc_l4_other_cnt, 13),
+ HBG_PUSH_STATS_I(rx_desc_frag_cnt, 14),
+ HBG_PUSH_STATS_I(rx_desc_ip_ver_err_cnt, 15),
+ HBG_PUSH_STATS_I(rx_desc_ipv4_pkt_cnt, 16),
+ HBG_PUSH_STATS_I(rx_desc_ipv6_pkt_cnt, 17),
+ HBG_PUSH_STATS_I(rx_desc_no_ip_pkt_cnt, 18),
+ HBG_PUSH_STATS_I(rx_desc_ip_pkt_cnt, 19),
+ HBG_PUSH_STATS_I(rx_desc_tcp_pkt_cnt, 20),
+ HBG_PUSH_STATS_I(rx_desc_udp_pkt_cnt, 21),
+ HBG_PUSH_STATS_I(rx_desc_vlan_pkt_cnt, 22),
+ HBG_PUSH_STATS_I(rx_desc_icmp_pkt_cnt, 23),
+ HBG_PUSH_STATS_I(rx_desc_arp_pkt_cnt, 24),
+ HBG_PUSH_STATS_I(rx_desc_rarp_pkt_cnt, 25),
+ HBG_PUSH_STATS_I(rx_desc_multicast_pkt_cnt, 26),
+ HBG_PUSH_STATS_I(rx_desc_broadcast_pkt_cnt, 27),
+ HBG_PUSH_STATS_I(rx_desc_ipsec_pkt_cnt, 28),
+ HBG_PUSH_STATS_I(rx_desc_ip_opt_pkt_cnt, 29),
+ HBG_PUSH_STATS_I(rx_desc_key_not_match_cnt, 30),
+ HBG_PUSH_STATS_I(rx_octets_total_ok_cnt, 31),
+ HBG_PUSH_STATS_I(rx_uc_pkt_cnt, 32),
+ HBG_PUSH_STATS_I(rx_mc_pkt_cnt, 33),
+ HBG_PUSH_STATS_I(rx_bc_pkt_cnt, 34),
+ HBG_PUSH_STATS_I(rx_vlan_pkt_cnt, 35),
+ HBG_PUSH_STATS_I(rx_octets_bad_cnt, 36),
+ HBG_PUSH_STATS_I(rx_octets_total_filt_cnt, 37),
+ HBG_PUSH_STATS_I(rx_filt_pkt_cnt, 38),
+ HBG_PUSH_STATS_I(rx_trans_pkt_cnt, 39),
+ HBG_PUSH_STATS_I(rx_framesize_64, 40),
+ HBG_PUSH_STATS_I(rx_framesize_65_127, 41),
+ HBG_PUSH_STATS_I(rx_framesize_128_255, 42),
+ HBG_PUSH_STATS_I(rx_framesize_256_511, 43),
+ HBG_PUSH_STATS_I(rx_framesize_512_1023, 44),
+ HBG_PUSH_STATS_I(rx_framesize_1024_1518, 45),
+ HBG_PUSH_STATS_I(rx_framesize_bt_1518, 46),
+ HBG_PUSH_STATS_I(rx_fcs_error_cnt, 47),
+ HBG_PUSH_STATS_I(rx_data_error_cnt, 48),
+ HBG_PUSH_STATS_I(rx_align_error_cnt, 49),
+ HBG_PUSH_STATS_I(rx_frame_long_err_cnt, 50),
+ HBG_PUSH_STATS_I(rx_jabber_err_cnt, 51),
+ HBG_PUSH_STATS_I(rx_pause_macctl_frame_cnt, 52),
+ HBG_PUSH_STATS_I(rx_unknown_macctl_frame_cnt, 53),
+ HBG_PUSH_STATS_I(rx_frame_very_long_err_cnt, 54),
+ HBG_PUSH_STATS_I(rx_frame_runt_err_cnt, 55),
+ HBG_PUSH_STATS_I(rx_frame_short_err_cnt, 56),
+ HBG_PUSH_STATS_I(rx_overflow_cnt, 57),
+ HBG_PUSH_STATS_I(rx_bufrq_err_cnt, 58),
+ HBG_PUSH_STATS_I(rx_we_err_cnt, 59),
+ HBG_PUSH_STATS_I(rx_overrun_cnt, 60),
+ HBG_PUSH_STATS_I(rx_lengthfield_err_cnt, 61),
+ HBG_PUSH_STATS_I(rx_fail_comma_cnt, 62),
+ HBG_PUSH_STATS_I(rx_dma_err_cnt, 63),
+ HBG_PUSH_STATS_I(rx_fifo_less_empty_thrsld_cnt, 64),
+ HBG_PUSH_STATS_I(tx_octets_total_ok_cnt, 65),
+ HBG_PUSH_STATS_I(tx_uc_pkt_cnt, 66),
+ HBG_PUSH_STATS_I(tx_mc_pkt_cnt, 67),
+ HBG_PUSH_STATS_I(tx_bc_pkt_cnt, 68),
+ HBG_PUSH_STATS_I(tx_vlan_pkt_cnt, 69),
+ HBG_PUSH_STATS_I(tx_octets_bad_cnt, 70),
+ HBG_PUSH_STATS_I(tx_trans_pkt_cnt, 71),
+ HBG_PUSH_STATS_I(tx_pause_frame_cnt, 72),
+ HBG_PUSH_STATS_I(tx_framesize_64, 73),
+ HBG_PUSH_STATS_I(tx_framesize_65_127, 74),
+ HBG_PUSH_STATS_I(tx_framesize_128_255, 75),
+ HBG_PUSH_STATS_I(tx_framesize_256_511, 76),
+ HBG_PUSH_STATS_I(tx_framesize_512_1023, 77),
+ HBG_PUSH_STATS_I(tx_framesize_1024_1518, 78),
+ HBG_PUSH_STATS_I(tx_framesize_bt_1518, 79),
+ HBG_PUSH_STATS_I(tx_underrun_err_cnt, 80),
+ HBG_PUSH_STATS_I(tx_add_cs_fail_cnt, 81),
+ HBG_PUSH_STATS_I(tx_bufrl_err_cnt, 82),
+ HBG_PUSH_STATS_I(tx_crc_err_cnt, 83),
+ HBG_PUSH_STATS_I(tx_drop_cnt, 84),
+ HBG_PUSH_STATS_I(tx_excessive_length_drop_cnt, 85),
+ HBG_PUSH_STATS_I(tx_dma_err_cnt, 86),
+};
+
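+/* Write the message payload and header to the push registers, then poll
+ * until the status field is cleared or the wait times out.
+ */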
+static int hbg_push_msg_send(struct hbg_priv *priv,
+ struct hbg_diagnose_message *msg)
+{
+ u32 header = 0;
+ u32 i;
+
+ if (msg->data_num == 0)
+ return 0;
+
+ for (i = 0; i < msg->data_num && i < HBG_MSG_DATA_MAX_NUM; i++)
+ hbg_reg_write(priv,
+ HBG_REG_MSG_DATA_BASE_ADDR + i * sizeof(u32),
+ msg->data[i]);
+
+ hbg_field_modify(header, HBG_REG_MSG_HEADER_OPCODE_M, msg->opcode);
+ hbg_field_modify(header, HBG_REG_MSG_HEADER_DATA_NUM_M, msg->data_num);
+ hbg_field_modify(header, HBG_REG_MSG_HEADER_RESP_CODE_M, ETIMEDOUT);
+
+ /* start status */
+ hbg_field_modify(header, HBG_REG_MSG_HEADER_STATUS_M, 1);
+
+ /* write header msg to start push */
+ hbg_reg_write(priv, HBG_REG_MSG_HEADER_ADDR, header);
+
+ /* wait done */
+ readl_poll_timeout(priv->io_base + HBG_REG_MSG_HEADER_ADDR, header,
+ !FIELD_GET(HBG_REG_MSG_HEADER_STATUS_M, header),
+ HBG_HW_PUSH_WAIT_INTERVAL_US,
+ HBG_HW_PUSH_WAIT_TIMEOUT_US);
+
+ msg->status = FIELD_GET(HBG_REG_MSG_HEADER_STATUS_M, header);
+ return -(int)FIELD_GET(HBG_REG_MSG_HEADER_RESP_CODE_M, header);
+}
+
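+/* Split the data into HBG_MSG_DATA_MAX_NUM-sized chunks and send each chunk
+ * as a separate push message.
+ */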
+static int hbg_push_data(struct hbg_priv *priv,
+ u32 opcode, u32 *data, u32 data_num)
+{
+ struct hbg_diagnose_message msg = {0};
+ u32 data_left_num;
+ u32 i, j;
+ int ret;
+
+ msg.priv = priv;
+ msg.opcode = opcode;
+ for (i = 0; i < data_num / HBG_MSG_DATA_MAX_NUM + 1; i++) {
+ if (i * HBG_MSG_DATA_MAX_NUM >= data_num)
+ break;
+
+ data_left_num = data_num - i * HBG_MSG_DATA_MAX_NUM;
+ for (j = 0; j < data_left_num && j < HBG_MSG_DATA_MAX_NUM; j++)
+ msg.data[j] = data[i * HBG_MSG_DATA_MAX_NUM + j];
+
+ msg.data_num = j;
+ ret = hbg_push_msg_send(priv, &msg);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hbg_push_data_u64(struct hbg_priv *priv, u32 opcode,
+ u64 *data, u32 data_num)
+{
+ /* The length of u64 is twice that of u32,
+	 * so the data_num must be multiplied by 2.
+ */
+ return hbg_push_data(priv, opcode, (u32 *)data, data_num * 2);
+}
+
+static u64 hbg_get_irq_stats(struct hbg_vector *vectors, u32 mask)
+{
+ u32 i = 0;
+
+ for (i = 0; i < vectors->info_array_len; i++)
+ if (vectors->info_array[i].mask == mask)
+ return vectors->stats_array[i];
+
+ return 0;
+}
+
+static int hbg_push_irq_cnt(struct hbg_priv *priv)
+{
+	/* An id needs to be added for each data item.
+ * Therefore, the data_num must be multiplied by 2.
+ */
+ u32 data_num = ARRAY_SIZE(hbg_push_irq_list) * 2;
+ struct hbg_vector *vectors = &priv->vectors;
+ const struct hbg_push_irq_info *info;
+ u32 i, j = 0;
+ u64 *data;
+ int ret;
+
+ data = kcalloc(data_num, sizeof(u64), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+	/* An id needs to be added for each data item,
+	 * so i is advanced by 2 on each loop iteration.
+ */
+ for (i = 0; i < data_num; i += 2) {
+ info = &hbg_push_irq_list[j++];
+ data[i] = info->id;
+ data[i + 1] = hbg_get_irq_stats(vectors, info->mask);
+ }
+
+ ret = hbg_push_data_u64(priv, HBG_PUSH_CMD_IRQ, data, data_num);
+ kfree(data);
+ return ret;
+}
+
+static int hbg_push_link_status(struct hbg_priv *priv)
+{
+ u32 link_status[2];
+
+ /* phy link status */
+ link_status[0] = priv->mac.phydev->link;
+ /* mac link status */
+ link_status[1] = hbg_reg_read_field(priv, HBG_REG_AN_NEG_STATE_ADDR,
+ HBG_REG_AN_NEG_STATE_NP_LINK_OK_B);
+
+ return hbg_push_data(priv, HBG_PUSH_CMD_LINK,
+ link_status, ARRAY_SIZE(link_status));
+}
+
+static int hbg_push_stats(struct hbg_priv *priv)
+{
+	/* An id needs to be added for each data item.
+ * Therefore, the data_num must be multiplied by 2.
+ */
+ u64 data_num = ARRAY_SIZE(hbg_push_stats_list) * 2;
+ struct hbg_stats *stats = &priv->stats;
+ const struct hbg_push_stats_info *info;
+ u32 i, j = 0;
+ u64 *data;
+ int ret;
+
+ data = kcalloc(data_num, sizeof(u64), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+	/* An id needs to be added for each data item,
+	 * so i is advanced by 2 on each loop iteration.
+ */
+ for (i = 0; i < data_num; i += 2) {
+ info = &hbg_push_stats_list[j++];
+ data[i] = info->id;
+ data[i + 1] = HBG_STATS_R(stats, info->offset);
+ }
+
+ ret = hbg_push_data_u64(priv, HBG_PUSH_CMD_STATS, data, data_num);
+ kfree(data);
+ return ret;
+}
+
+void hbg_diagnose_message_push(struct hbg_priv *priv)
+{
+ int ret;
+
+ if (test_bit(HBG_NIC_STATE_RESETTING, &priv->state))
+ return;
+
+	/* a push is requested only when this register reads 1 */
+ if (hbg_reg_read(priv, HBG_REG_PUSH_REQ_ADDR) != 1)
+ return;
+
+ ret = hbg_push_irq_cnt(priv);
+ if (ret) {
+ dev_err(&priv->pdev->dev,
+ "failed to push irq cnt, ret = %d\n", ret);
+ goto push_done;
+ }
+
+ ret = hbg_push_link_status(priv);
+ if (ret) {
+ dev_err(&priv->pdev->dev,
+ "failed to push link status, ret = %d\n", ret);
+ goto push_done;
+ }
+
+ ret = hbg_push_stats(priv);
+ if (ret)
+ dev_err(&priv->pdev->dev,
+ "failed to push stats, ret = %d\n", ret);
+
+push_done:
+ hbg_reg_write(priv, HBG_REG_PUSH_REQ_ADDR, 0);
+}
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.h
new file mode 100644
index 000000000000..ba04c6d8c03d
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2025 Hisilicon Limited. */
+
+#ifndef __HBG_DIAGNOSE_H
+#define __HBG_DIAGNOSE_H
+
+#include "hbg_common.h"
+
+void hbg_diagnose_message_push(struct hbg_priv *priv);
+
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c
new file mode 100644
index 000000000000..ff3295b60a69
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c
@@ -0,0 +1,195 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2024 Hisilicon Limited.
+
+#include <linux/etherdevice.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/rtnetlink.h>
+#include "hbg_common.h"
+#include "hbg_err.h"
+#include "hbg_hw.h"
+
+static void hbg_restore_mac_table(struct hbg_priv *priv)
+{
+ struct hbg_mac_filter *filter = &priv->filter;
+ u64 addr;
+ u32 i;
+
+ for (i = 0; i < filter->table_max_len; i++)
+ if (!is_zero_ether_addr(filter->mac_table[i].addr)) {
+ addr = ether_addr_to_u64(filter->mac_table[i].addr);
+ hbg_hw_set_uc_addr(priv, addr, i);
+ }
+
+ hbg_hw_set_mac_filter_enable(priv, priv->filter.enabled);
+}
+
+static void hbg_restore_user_def_settings(struct hbg_priv *priv)
+{
+ /* The index of host mac is always 0. */
+ u64 rx_pause_addr = ether_addr_to_u64(priv->filter.mac_table[0].addr);
+ struct ethtool_pauseparam *pause_param = &priv->user_def.pause_param;
+
+ hbg_restore_mac_table(priv);
+ hbg_hw_set_mtu(priv, priv->netdev->mtu);
+ hbg_hw_set_pause_enable(priv, pause_param->tx_pause,
+ pause_param->rx_pause);
+ hbg_hw_set_rx_pause_mac_addr(priv, rx_pause_addr);
+}
+
+int hbg_rebuild(struct hbg_priv *priv)
+{
+ int ret;
+
+ ret = hbg_hw_init(priv);
+ if (ret)
+ return ret;
+
+ hbg_restore_user_def_settings(priv);
+ return 0;
+}
+
+static int hbg_reset_prepare(struct hbg_priv *priv, enum hbg_reset_type type)
+{
+ int ret;
+
+ ASSERT_RTNL();
+
+ if (netif_running(priv->netdev)) {
+ dev_warn(&priv->pdev->dev,
+ "failed to reset because port is up\n");
+ return -EBUSY;
+ }
+
+ netif_device_detach(priv->netdev);
+
+ priv->reset_type = type;
+ set_bit(HBG_NIC_STATE_RESETTING, &priv->state);
+ clear_bit(HBG_NIC_STATE_RESET_FAIL, &priv->state);
+ ret = hbg_hw_event_notify(priv, HBG_HW_EVENT_RESET);
+ if (ret) {
+ set_bit(HBG_NIC_STATE_RESET_FAIL, &priv->state);
+ clear_bit(HBG_NIC_STATE_RESETTING, &priv->state);
+ }
+
+ return ret;
+}
+
+static int hbg_reset_done(struct hbg_priv *priv, enum hbg_reset_type type)
+{
+ int ret;
+
+ if (!test_bit(HBG_NIC_STATE_RESETTING, &priv->state) ||
+ type != priv->reset_type)
+ return 0;
+
+ ASSERT_RTNL();
+
+ clear_bit(HBG_NIC_STATE_RESETTING, &priv->state);
+ ret = hbg_rebuild(priv);
+ if (ret) {
+ set_bit(HBG_NIC_STATE_RESET_FAIL, &priv->state);
+ dev_err(&priv->pdev->dev, "failed to rebuild after reset\n");
+ return ret;
+ }
+
+ netif_device_attach(priv->netdev);
+
+ dev_info(&priv->pdev->dev, "reset done\n");
+ return ret;
+}
+
+/* must be protected by rtnl lock */
+int hbg_reset(struct hbg_priv *priv)
+{
+ int ret;
+
+ ASSERT_RTNL();
+ ret = hbg_reset_prepare(priv, HBG_RESET_TYPE_FUNCTION);
+ if (ret)
+ return ret;
+
+ return hbg_reset_done(priv, HBG_RESET_TYPE_FUNCTION);
+}
+
+void hbg_err_reset(struct hbg_priv *priv)
+{
+ bool running;
+
+ rtnl_lock();
+ running = netif_running(priv->netdev);
+ if (running)
+ dev_close(priv->netdev);
+
+ if (hbg_reset(priv))
+ goto err_unlock;
+
+ if (running)
+ dev_open(priv->netdev, NULL);
+
+err_unlock:
+ rtnl_unlock();
+}
+
+static pci_ers_result_t hbg_pci_err_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+
+ netif_device_detach(netdev);
+
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ pci_disable_device(pdev);
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t hbg_pci_err_slot_reset(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct hbg_priv *priv = netdev_priv(netdev);
+
+ if (pci_enable_device(pdev)) {
+ dev_err(&pdev->dev,
+ "failed to re-enable PCI device after reset\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
+
+ hbg_err_reset(priv);
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void hbg_pci_err_reset_prepare(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct hbg_priv *priv = netdev_priv(netdev);
+
+ rtnl_lock();
+ hbg_reset_prepare(priv, HBG_RESET_TYPE_FLR);
+}
+
+static void hbg_pci_err_reset_done(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct hbg_priv *priv = netdev_priv(netdev);
+
+ hbg_reset_done(priv, HBG_RESET_TYPE_FLR);
+ rtnl_unlock();
+}
+
+static const struct pci_error_handlers hbg_pci_err_handler = {
+ .error_detected = hbg_pci_err_detected,
+ .slot_reset = hbg_pci_err_slot_reset,
+ .reset_prepare = hbg_pci_err_reset_prepare,
+ .reset_done = hbg_pci_err_reset_done,
+};
+
+void hbg_set_pci_err_handler(struct pci_driver *pdrv)
+{
+ pdrv->err_handler = &hbg_pci_err_handler;
+}
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.h
new file mode 100644
index 000000000000..fb9fbe7004e8
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2024 Hisilicon Limited. */
+
+#ifndef __HBG_ERR_H
+#define __HBG_ERR_H
+
+#include <linux/pci.h>
+
+void hbg_set_pci_err_handler(struct pci_driver *pdrv);
+int hbg_reset(struct hbg_priv *priv);
+int hbg_rebuild(struct hbg_priv *priv);
+void hbg_err_reset(struct hbg_priv *priv);
+
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c
index c3370114aef3..55520053270a 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c
@@ -3,12 +3,494 @@
#include <linux/ethtool.h>
#include <linux/phy.h>
+#include <linux/rtnetlink.h>
+#include "hbg_common.h"
+#include "hbg_err.h"
#include "hbg_ethtool.h"
+#include "hbg_hw.h"
+
+struct hbg_ethtool_stats {
+ char name[ETH_GSTRING_LEN];
+ unsigned long offset;
+ u32 reg; /* set to 0 if stats is not updated via dump reg */
+};
+
+#define HBG_STATS_I(stats) { #stats, HBG_STATS_FIELD_OFF(stats), 0}
+#define HBG_STATS_REG_I(stats, reg) { #stats, HBG_STATS_FIELD_OFF(stats), reg}
+
+static const struct hbg_ethtool_stats hbg_ethtool_stats_info[] = {
+ HBG_STATS_I(rx_desc_l2_err_cnt),
+ HBG_STATS_I(rx_desc_pkt_len_err_cnt),
+ HBG_STATS_I(rx_desc_l3_wrong_head_cnt),
+ HBG_STATS_I(rx_desc_l3_csum_err_cnt),
+ HBG_STATS_I(rx_desc_l3_len_err_cnt),
+ HBG_STATS_I(rx_desc_l3_zero_ttl_cnt),
+ HBG_STATS_I(rx_desc_l3_other_cnt),
+ HBG_STATS_I(rx_desc_l4_wrong_head_cnt),
+ HBG_STATS_I(rx_desc_l4_len_err_cnt),
+ HBG_STATS_I(rx_desc_l4_csum_err_cnt),
+ HBG_STATS_I(rx_desc_l4_zero_port_num_cnt),
+ HBG_STATS_I(rx_desc_l4_other_cnt),
+ HBG_STATS_I(rx_desc_ip_ver_err_cnt),
+ HBG_STATS_I(rx_desc_ipv4_pkt_cnt),
+ HBG_STATS_I(rx_desc_ipv6_pkt_cnt),
+ HBG_STATS_I(rx_desc_no_ip_pkt_cnt),
+ HBG_STATS_I(rx_desc_ip_pkt_cnt),
+ HBG_STATS_I(rx_desc_tcp_pkt_cnt),
+ HBG_STATS_I(rx_desc_udp_pkt_cnt),
+ HBG_STATS_I(rx_desc_vlan_pkt_cnt),
+ HBG_STATS_I(rx_desc_icmp_pkt_cnt),
+ HBG_STATS_I(rx_desc_arp_pkt_cnt),
+ HBG_STATS_I(rx_desc_rarp_pkt_cnt),
+ HBG_STATS_I(rx_desc_multicast_pkt_cnt),
+ HBG_STATS_I(rx_desc_broadcast_pkt_cnt),
+ HBG_STATS_I(rx_desc_ipsec_pkt_cnt),
+ HBG_STATS_I(rx_desc_ip_opt_pkt_cnt),
+ HBG_STATS_I(rx_desc_key_not_match_cnt),
+
+ HBG_STATS_REG_I(rx_octets_bad_cnt, HBG_REG_RX_OCTETS_BAD_ADDR),
+ HBG_STATS_REG_I(rx_octets_total_filt_cnt,
+ HBG_REG_RX_OCTETS_TOTAL_FILT_ADDR),
+ HBG_STATS_REG_I(rx_uc_pkt_cnt, HBG_REG_RX_UC_PKTS_ADDR),
+ HBG_STATS_REG_I(rx_vlan_pkt_cnt, HBG_REG_RX_TAGGED_ADDR),
+ HBG_STATS_REG_I(rx_filt_pkt_cnt, HBG_REG_RX_FILT_PKT_CNT_ADDR),
+ HBG_STATS_REG_I(rx_data_error_cnt, HBG_REG_RX_DATA_ERR_ADDR),
+ HBG_STATS_REG_I(rx_frame_long_err_cnt, HBG_REG_RX_LONG_ERRORS_ADDR),
+ HBG_STATS_REG_I(rx_jabber_err_cnt, HBG_REG_RX_JABBER_ERRORS_ADDR),
+ HBG_STATS_REG_I(rx_frame_very_long_err_cnt,
+ HBG_REG_RX_VERY_LONG_ERR_CNT_ADDR),
+ HBG_STATS_REG_I(rx_frame_runt_err_cnt, HBG_REG_RX_RUNT_ERR_CNT_ADDR),
+ HBG_STATS_REG_I(rx_frame_short_err_cnt, HBG_REG_RX_SHORT_ERR_CNT_ADDR),
+ HBG_STATS_REG_I(rx_overflow_cnt, HBG_REG_RX_OVER_FLOW_CNT_ADDR),
+ HBG_STATS_REG_I(rx_bufrq_err_cnt, HBG_REG_RX_BUFRQ_ERR_CNT_ADDR),
+ HBG_STATS_REG_I(rx_we_err_cnt, HBG_REG_RX_WE_ERR_CNT_ADDR),
+ HBG_STATS_REG_I(rx_overrun_cnt, HBG_REG_RX_OVERRUN_CNT_ADDR),
+ HBG_STATS_REG_I(rx_lengthfield_err_cnt,
+ HBG_REG_RX_LENGTHFIELD_ERR_CNT_ADDR),
+ HBG_STATS_REG_I(rx_fail_comma_cnt, HBG_REG_RX_FAIL_COMMA_CNT_ADDR),
+ HBG_STATS_I(rx_dma_err_cnt),
+ HBG_STATS_I(rx_fifo_less_empty_thrsld_cnt),
+
+ HBG_STATS_REG_I(tx_uc_pkt_cnt, HBG_REG_TX_UC_PKTS_ADDR),
+ HBG_STATS_REG_I(tx_vlan_pkt_cnt, HBG_REG_TX_TAGGED_ADDR),
+ HBG_STATS_REG_I(tx_octets_bad_cnt, HBG_REG_OCTETS_TRANSMITTED_BAD_ADDR),
+
+ HBG_STATS_REG_I(tx_underrun_err_cnt, HBG_REG_TX_UNDERRUN_ADDR),
+ HBG_STATS_REG_I(tx_add_cs_fail_cnt, HBG_REG_TX_CS_FAIL_CNT_ADDR),
+ HBG_STATS_REG_I(tx_bufrl_err_cnt, HBG_REG_TX_BUFRL_ERR_CNT_ADDR),
+ HBG_STATS_REG_I(tx_crc_err_cnt, HBG_REG_TX_CRC_ERROR_ADDR),
+ HBG_STATS_REG_I(tx_drop_cnt, HBG_REG_TX_DROP_CNT_ADDR),
+ HBG_STATS_REG_I(tx_excessive_length_drop_cnt,
+ HBG_REG_TX_EXCESSIVE_LENGTH_DROP_ADDR),
+ HBG_STATS_I(tx_dma_err_cnt),
+ HBG_STATS_I(tx_timeout_cnt),
+};
+
+static const struct hbg_ethtool_stats hbg_ethtool_rmon_stats_info[] = {
+ HBG_STATS_I(rx_desc_frag_cnt),
+ HBG_STATS_REG_I(rx_framesize_64, HBG_REG_RX_PKTS_64OCTETS_ADDR),
+ HBG_STATS_REG_I(rx_framesize_65_127,
+ HBG_REG_RX_PKTS_65TO127OCTETS_ADDR),
+ HBG_STATS_REG_I(rx_framesize_128_255,
+ HBG_REG_RX_PKTS_128TO255OCTETS_ADDR),
+ HBG_STATS_REG_I(rx_framesize_256_511,
+ HBG_REG_RX_PKTS_256TO511OCTETS_ADDR),
+ HBG_STATS_REG_I(rx_framesize_512_1023,
+ HBG_REG_RX_PKTS_512TO1023OCTETS_ADDR),
+ HBG_STATS_REG_I(rx_framesize_1024_1518,
+ HBG_REG_RX_PKTS_1024TO1518OCTETS_ADDR),
+ HBG_STATS_REG_I(rx_framesize_bt_1518,
+ HBG_REG_RX_PKTS_1519TOMAXOCTETS_ADDR),
+ HBG_STATS_REG_I(tx_framesize_64, HBG_REG_TX_PKTS_64OCTETS_ADDR),
+ HBG_STATS_REG_I(tx_framesize_65_127,
+ HBG_REG_TX_PKTS_65TO127OCTETS_ADDR),
+ HBG_STATS_REG_I(tx_framesize_128_255,
+ HBG_REG_TX_PKTS_128TO255OCTETS_ADDR),
+ HBG_STATS_REG_I(tx_framesize_256_511,
+ HBG_REG_TX_PKTS_256TO511OCTETS_ADDR),
+ HBG_STATS_REG_I(tx_framesize_512_1023,
+ HBG_REG_TX_PKTS_512TO1023OCTETS_ADDR),
+ HBG_STATS_REG_I(tx_framesize_1024_1518,
+ HBG_REG_TX_PKTS_1024TO1518OCTETS_ADDR),
+ HBG_STATS_REG_I(tx_framesize_bt_1518,
+ HBG_REG_TX_PKTS_1519TOMAXOCTETS_ADDR),
+};
+
+static const struct hbg_ethtool_stats hbg_ethtool_mac_stats_info[] = {
+ HBG_STATS_REG_I(rx_mc_pkt_cnt, HBG_REG_RX_MC_PKTS_ADDR),
+ HBG_STATS_REG_I(rx_bc_pkt_cnt, HBG_REG_RX_BC_PKTS_ADDR),
+ HBG_STATS_REG_I(rx_align_error_cnt, HBG_REG_RX_ALIGN_ERRORS_ADDR),
+ HBG_STATS_REG_I(rx_octets_total_ok_cnt,
+ HBG_REG_RX_OCTETS_TOTAL_OK_ADDR),
+ HBG_STATS_REG_I(rx_trans_pkt_cnt, HBG_REG_RX_TRANS_PKG_CNT_ADDR),
+ HBG_STATS_REG_I(rx_fcs_error_cnt, HBG_REG_RX_FCS_ERRORS_ADDR),
+ HBG_STATS_REG_I(tx_mc_pkt_cnt, HBG_REG_TX_MC_PKTS_ADDR),
+ HBG_STATS_REG_I(tx_bc_pkt_cnt, HBG_REG_TX_BC_PKTS_ADDR),
+ HBG_STATS_REG_I(tx_octets_total_ok_cnt,
+ HBG_REG_OCTETS_TRANSMITTED_OK_ADDR),
+ HBG_STATS_REG_I(tx_trans_pkt_cnt, HBG_REG_TX_TRANS_PKG_CNT_ADDR),
+};
+
+static const struct hbg_ethtool_stats hbg_ethtool_ctrl_stats_info[] = {
+ HBG_STATS_REG_I(rx_pause_macctl_frame_cnt,
+ HBG_REG_RX_PAUSE_MACCTL_FRAMCOUNTER_ADDR),
+ HBG_STATS_REG_I(tx_pause_frame_cnt, HBG_REG_TX_PAUSE_FRAMES_ADDR),
+ HBG_STATS_REG_I(rx_unknown_macctl_frame_cnt,
+ HBG_REG_RX_UNKNOWN_MACCTL_FRAMCOUNTER_ADDR),
+};
+
+enum hbg_reg_dump_type {
+ HBG_DUMP_REG_TYPE_SPEC = 0,
+ HBG_DUMP_REG_TYPE_MDIO,
+ HBG_DUMP_REG_TYPE_GMAC,
+ HBG_DUMP_REG_TYPE_PCU,
+};
+
+struct hbg_reg_info {
+ u32 type;
+ u32 offset;
+ u32 val;
+};
+
+#define HBG_DUMP_SPEC_I(offset) {HBG_DUMP_REG_TYPE_SPEC, offset, 0}
+#define HBG_DUMP_MDIO_I(offset) {HBG_DUMP_REG_TYPE_MDIO, offset, 0}
+#define HBG_DUMP_GMAC_I(offset) {HBG_DUMP_REG_TYPE_GMAC, offset, 0}
+#define HBG_DUMP_PCU_I(offset) {HBG_DUMP_REG_TYPE_PCU, offset, 0}
+
+static const struct hbg_reg_info hbg_dump_reg_infos[] = {
+ /* dev specs */
+ HBG_DUMP_SPEC_I(HBG_REG_SPEC_VALID_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_EVENT_REQ_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_MAC_ID_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_PHY_ID_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_MAC_ADDR_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_MAC_ADDR_HIGH_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_UC_MAC_NUM_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_MDIO_FREQ_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_MAX_MTU_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_MIN_MTU_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_TX_FIFO_NUM_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_RX_FIFO_NUM_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_VLAN_LAYERS_ADDR),
+
+ /* mdio */
+ HBG_DUMP_MDIO_I(HBG_REG_MDIO_COMMAND_ADDR),
+ HBG_DUMP_MDIO_I(HBG_REG_MDIO_ADDR_ADDR),
+ HBG_DUMP_MDIO_I(HBG_REG_MDIO_WDATA_ADDR),
+ HBG_DUMP_MDIO_I(HBG_REG_MDIO_RDATA_ADDR),
+ HBG_DUMP_MDIO_I(HBG_REG_MDIO_STA_ADDR),
+
+ /* gmac */
+ HBG_DUMP_GMAC_I(HBG_REG_DUPLEX_TYPE_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_FD_FC_TYPE_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_FC_TX_TIMER_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_FD_FC_ADDR_LOW_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_FD_FC_ADDR_HIGH_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_MAX_FRAME_SIZE_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_PORT_MODE_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_PORT_ENABLE_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_PAUSE_ENABLE_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_AN_NEG_STATE_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_TRANSMIT_CTRL_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_REC_FILT_CTRL_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_LINE_LOOP_BACK_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_CF_CRC_STRIP_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_MODE_CHANGE_EN_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_LOOP_REG_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_RECV_CTRL_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_VLAN_CODE_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_LOW_0_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_HIGH_0_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_LOW_1_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_HIGH_1_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_LOW_2_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_HIGH_2_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_LOW_3_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_HIGH_3_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_LOW_4_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_HIGH_4_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_LOW_5_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_HIGH_5_ADDR),
+
+ /* pcu */
+ HBG_DUMP_PCU_I(HBG_REG_TX_FIFO_THRSLD_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_RX_FIFO_THRSLD_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CFG_FIFO_THRSLD_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_INTRPT_MSK_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_INTRPT_STAT_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_INTRPT_CLR_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_TX_BUS_ERR_ADDR_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_RX_BUS_ERR_ADDR_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_MAX_FRAME_LEN_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_DEBUG_ST_MCH_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_FIFO_CURR_STATUS_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_FIFO_HIST_STATUS_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_CFF_DATA_NUM_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_TX_PAUSE_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_RX_CFF_ADDR_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_RX_BUF_SIZE_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_BUS_CTRL_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_RX_CTRL_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_RX_PKT_MODE_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_DBG_ST0_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_DBG_ST1_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_DBG_ST2_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_BUS_RST_EN_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_IND_TXINT_MSK_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_IND_TXINT_STAT_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_IND_TXINT_CLR_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_IND_RXINT_MSK_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_IND_RXINT_STAT_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_IND_RXINT_CLR_ADDR),
+};
+
+static const u32 hbg_dump_type_base_array[] = {
+ [HBG_DUMP_REG_TYPE_SPEC] = 0,
+ [HBG_DUMP_REG_TYPE_MDIO] = HBG_REG_MDIO_BASE,
+ [HBG_DUMP_REG_TYPE_GMAC] = HBG_REG_SGMII_BASE,
+ [HBG_DUMP_REG_TYPE_PCU] = HBG_REG_SGMII_BASE,
+};
+
+static int hbg_ethtool_get_regs_len(struct net_device *netdev)
+{
+ return ARRAY_SIZE(hbg_dump_reg_infos) * sizeof(struct hbg_reg_info);
+}
+
+static void hbg_ethtool_get_regs(struct net_device *netdev,
+ struct ethtool_regs *regs, void *data)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+ struct hbg_reg_info *info;
+ u32 i, offset = 0;
+
+ regs->version = 0;
+ for (i = 0; i < ARRAY_SIZE(hbg_dump_reg_infos); i++) {
+ info = data + offset;
+
+ *info = hbg_dump_reg_infos[i];
+ info->val = hbg_reg_read(priv, info->offset);
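+ /* report the offset relative to its block base (see hbg_dump_type_base_array) */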
+ info->offset -= hbg_dump_type_base_array[info->type];
+
+ offset += sizeof(*info);
+ }
+}
+
+static void hbg_ethtool_get_pauseparam(struct net_device *net_dev,
+ struct ethtool_pauseparam *param)
+{
+ struct hbg_priv *priv = netdev_priv(net_dev);
+
+ param->autoneg = priv->mac.pause_autoneg;
+ hbg_hw_get_pause_enable(priv, &param->tx_pause, &param->rx_pause);
+}
+
+static int hbg_ethtool_set_pauseparam(struct net_device *net_dev,
+ struct ethtool_pauseparam *param)
+{
+ struct hbg_priv *priv = netdev_priv(net_dev);
+
+ priv->mac.pause_autoneg = param->autoneg;
+ phy_set_asym_pause(priv->mac.phydev, param->rx_pause, param->tx_pause);
+
+ if (!param->autoneg)
+ hbg_hw_set_pause_enable(priv, param->tx_pause, param->rx_pause);
+
+ priv->user_def.pause_param = *param;
+ return 0;
+}
+
+static int hbg_ethtool_reset(struct net_device *netdev, u32 *flags)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+
+ if (*flags != ETH_RESET_DEDICATED)
+ return -EOPNOTSUPP;
+
+ *flags = 0;
+ return hbg_reset(priv);
+}
+
+static void hbg_update_stats_by_info(struct hbg_priv *priv,
+ const struct hbg_ethtool_stats *info,
+ u32 info_len)
+{
+ const struct hbg_ethtool_stats *stats;
+ u32 i;
+
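+ /* skip register reads while a reset is in progress */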
+ if (test_bit(HBG_NIC_STATE_RESETTING, &priv->state))
+ return;
+
+ for (i = 0; i < info_len; i++) {
+ stats = &info[i];
+ if (!stats->reg)
+ continue;
+
+ HBG_STATS_U(&priv->stats, stats->offset,
+ hbg_reg_read(priv, stats->reg));
+ }
+}
+
+void hbg_update_stats(struct hbg_priv *priv)
+{
+ hbg_update_stats_by_info(priv, hbg_ethtool_stats_info,
+ ARRAY_SIZE(hbg_ethtool_stats_info));
+ hbg_update_stats_by_info(priv, hbg_ethtool_rmon_stats_info,
+ ARRAY_SIZE(hbg_ethtool_rmon_stats_info));
+ hbg_update_stats_by_info(priv, hbg_ethtool_mac_stats_info,
+ ARRAY_SIZE(hbg_ethtool_mac_stats_info));
+ hbg_update_stats_by_info(priv, hbg_ethtool_ctrl_stats_info,
+ ARRAY_SIZE(hbg_ethtool_ctrl_stats_info));
+}
+
+static int hbg_ethtool_get_sset_count(struct net_device *netdev, int stringset)
+{
+ if (stringset != ETH_SS_STATS)
+ return -EOPNOTSUPP;
+
+ return ARRAY_SIZE(hbg_ethtool_stats_info);
+}
+
+static void hbg_ethtool_get_strings(struct net_device *netdev,
+ u32 stringset, u8 *data)
+{
+ u32 i;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(hbg_ethtool_stats_info); i++)
+ ethtool_puts(&data, hbg_ethtool_stats_info[i].name);
+}
+
+static void hbg_ethtool_get_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+ u32 i;
+
+ hbg_update_stats(priv);
+ for (i = 0; i < ARRAY_SIZE(hbg_ethtool_stats_info); i++)
+ *data++ = HBG_STATS_R(&priv->stats,
+ hbg_ethtool_stats_info[i].offset);
+}
+
+static void hbg_ethtool_get_pause_stats(struct net_device *netdev,
+ struct ethtool_pause_stats *epstats)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+ struct hbg_stats *stats = &priv->stats;
+
+ hbg_update_stats(priv);
+ epstats->rx_pause_frames = stats->rx_pause_macctl_frame_cnt;
+ epstats->tx_pause_frames = stats->tx_pause_frame_cnt;
+}
+
+static void hbg_ethtool_get_eth_mac_stats(struct net_device *netdev,
+ struct ethtool_eth_mac_stats *emstats)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+ struct hbg_stats *stats = &priv->stats;
+
+ hbg_update_stats(priv);
+ emstats->FramesTransmittedOK = stats->tx_trans_pkt_cnt;
+ emstats->FramesReceivedOK = stats->rx_trans_pkt_cnt;
+ emstats->FrameCheckSequenceErrors = stats->rx_fcs_error_cnt;
+ emstats->AlignmentErrors = stats->rx_align_error_cnt;
+ emstats->OctetsTransmittedOK = stats->tx_octets_total_ok_cnt;
+ emstats->OctetsReceivedOK = stats->rx_octets_total_ok_cnt;
+
+ emstats->MulticastFramesXmittedOK = stats->tx_mc_pkt_cnt;
+ emstats->BroadcastFramesXmittedOK = stats->tx_bc_pkt_cnt;
+ emstats->MulticastFramesReceivedOK = stats->rx_mc_pkt_cnt;
+ emstats->BroadcastFramesReceivedOK = stats->rx_bc_pkt_cnt;
+ emstats->InRangeLengthErrors = stats->rx_fcs_error_cnt +
+ stats->rx_jabber_err_cnt +
+ stats->rx_unknown_macctl_frame_cnt +
+ stats->rx_bufrq_err_cnt +
+ stats->rx_we_err_cnt;
+ emstats->OutOfRangeLengthField = stats->rx_frame_short_err_cnt +
+ stats->rx_frame_runt_err_cnt +
+ stats->rx_lengthfield_err_cnt +
+ stats->rx_frame_long_err_cnt +
+ stats->rx_frame_very_long_err_cnt;
+ emstats->FrameTooLongErrors = stats->rx_frame_long_err_cnt +
+ stats->rx_frame_very_long_err_cnt;
+}
+
+static void
+hbg_ethtool_get_eth_ctrl_stats(struct net_device *netdev,
+ struct ethtool_eth_ctrl_stats *ecstats)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+ struct hbg_stats *s = &priv->stats;
+
+ hbg_update_stats(priv);
+ ecstats->MACControlFramesTransmitted = s->tx_pause_frame_cnt;
+ ecstats->MACControlFramesReceived = s->rx_pause_macctl_frame_cnt;
+ ecstats->UnsupportedOpcodesReceived = s->rx_unknown_macctl_frame_cnt;
+}
+
+static const struct ethtool_rmon_hist_range hbg_rmon_ranges[] = {
+ { 0, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1518 },
+ { 1519, 4095 },
+};
+
+static void
+hbg_ethtool_get_rmon_stats(struct net_device *netdev,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+ struct hbg_stats *stats = &priv->stats;
+
+ hbg_update_stats(priv);
+ rmon_stats->undersize_pkts = stats->rx_frame_short_err_cnt +
+ stats->rx_frame_runt_err_cnt +
+ stats->rx_lengthfield_err_cnt;
+ rmon_stats->oversize_pkts = stats->rx_frame_long_err_cnt +
+ stats->rx_frame_very_long_err_cnt;
+ rmon_stats->fragments = stats->rx_desc_frag_cnt;
+ rmon_stats->hist[0] = stats->rx_framesize_64;
+ rmon_stats->hist[1] = stats->rx_framesize_65_127;
+ rmon_stats->hist[2] = stats->rx_framesize_128_255;
+ rmon_stats->hist[3] = stats->rx_framesize_256_511;
+ rmon_stats->hist[4] = stats->rx_framesize_512_1023;
+ rmon_stats->hist[5] = stats->rx_framesize_1024_1518;
+ rmon_stats->hist[6] = stats->rx_framesize_bt_1518;
+
+ rmon_stats->hist_tx[0] = stats->tx_framesize_64;
+ rmon_stats->hist_tx[1] = stats->tx_framesize_65_127;
+ rmon_stats->hist_tx[2] = stats->tx_framesize_128_255;
+ rmon_stats->hist_tx[3] = stats->tx_framesize_256_511;
+ rmon_stats->hist_tx[4] = stats->tx_framesize_512_1023;
+ rmon_stats->hist_tx[5] = stats->tx_framesize_1024_1518;
+ rmon_stats->hist_tx[6] = stats->tx_framesize_bt_1518;
+
+ *ranges = hbg_rmon_ranges;
+}
static const struct ethtool_ops hbg_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_regs_len = hbg_ethtool_get_regs_len,
+ .get_regs = hbg_ethtool_get_regs,
+ .get_pauseparam = hbg_ethtool_get_pauseparam,
+ .set_pauseparam = hbg_ethtool_set_pauseparam,
+ .reset = hbg_ethtool_reset,
+ .nway_reset = phy_ethtool_nway_reset,
+ .get_sset_count = hbg_ethtool_get_sset_count,
+ .get_strings = hbg_ethtool_get_strings,
+ .get_ethtool_stats = hbg_ethtool_get_stats,
+ .get_pause_stats = hbg_ethtool_get_pause_stats,
+ .get_eth_mac_stats = hbg_ethtool_get_eth_mac_stats,
+ .get_eth_ctrl_stats = hbg_ethtool_get_eth_ctrl_stats,
+ .get_rmon_stats = hbg_ethtool_get_rmon_stats,
};
void hbg_ethtool_set_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.h
index 628707ec2686..e173155b146a 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.h
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.h
@@ -6,6 +6,11 @@
#include <linux/netdevice.h>
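+/* stats fields in struct hbg_stats are u64 values addressed by byte offset:
+ * HBG_STATS_R() reads a field and HBG_STATS_U() accumulates a value into it.
+ */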
+#define HBG_STATS_FIELD_OFF(f) (offsetof(struct hbg_stats, f))
+#define HBG_STATS_R(p, offset) (*(u64 *)((u8 *)(p) + (offset)))
+#define HBG_STATS_U(p, offset, val) (HBG_STATS_R(p, offset) += (val))
+
void hbg_ethtool_set_ops(struct net_device *netdev);
+void hbg_update_stats(struct hbg_priv *priv);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c
index 05295c2ad439..9b65eef62b3f 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c
@@ -3,6 +3,7 @@
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
#include <linux/iopoll.h>
#include <linux/minmax.h>
#include "hbg_common.h"
@@ -67,6 +68,8 @@ static int hbg_hw_dev_specs_init(struct hbg_priv *priv)
specs->vlan_layers = hbg_reg_read(priv, HBG_REG_VLAN_LAYERS_ADDR);
specs->rx_fifo_num = hbg_reg_read(priv, HBG_REG_RX_FIFO_NUM_ADDR);
specs->tx_fifo_num = hbg_reg_read(priv, HBG_REG_TX_FIFO_NUM_ADDR);
+ specs->uc_mac_num = hbg_reg_read(priv, HBG_REG_UC_MAC_NUM_ADDR);
+
mac_addr = hbg_reg_read64(priv, HBG_REG_MAC_ADDR_ADDR);
u64_to_ether_addr(mac_addr, (u8 *)specs->mac_addr.sa_data);
@@ -135,9 +138,13 @@ void hbg_hw_irq_enable(struct hbg_priv *priv, u32 mask, bool enable)
hbg_reg_write(priv, HBG_REG_CF_INTRPT_MSK_ADDR, value);
}
-void hbg_hw_set_uc_addr(struct hbg_priv *priv, u64 mac_addr)
+void hbg_hw_set_uc_addr(struct hbg_priv *priv, u64 mac_addr, u32 index)
{
- hbg_reg_write64(priv, HBG_REG_STATION_ADDR_LOW_2_ADDR, mac_addr);
+ u32 addr;
+
+ /* each MAC address entry is a u64, so consecutive entries are 0x8 apart */
+ addr = HBG_REG_STATION_ADDR_LOW_2_ADDR + (index * 0x8);
+ hbg_reg_write64(priv, addr, mac_addr);
}
static void hbg_hw_set_pcu_max_frame_len(struct hbg_priv *priv,
@@ -161,8 +168,13 @@ static void hbg_hw_set_mac_max_frame_len(struct hbg_priv *priv,
void hbg_hw_set_mtu(struct hbg_priv *priv, u16 mtu)
{
- hbg_hw_set_pcu_max_frame_len(priv, mtu);
- hbg_hw_set_mac_max_frame_len(priv, mtu);
+ u32 frame_len;
+
+ frame_len = mtu + VLAN_HLEN * priv->dev_specs.vlan_layers +
+ ETH_HLEN + ETH_FCS_LEN;
+
+ hbg_hw_set_pcu_max_frame_len(priv, frame_len);
+ hbg_hw_set_mac_max_frame_len(priv, frame_len);
}
void hbg_hw_mac_enable(struct hbg_priv *priv, u32 enable)
@@ -201,10 +213,55 @@ void hbg_hw_fill_buffer(struct hbg_priv *priv, u32 buffer_dma_addr)
void hbg_hw_adjust_link(struct hbg_priv *priv, u32 speed, u32 duplex)
{
+ hbg_hw_mac_enable(priv, HBG_STATUS_DISABLE);
+
hbg_reg_write_field(priv, HBG_REG_PORT_MODE_ADDR,
HBG_REG_PORT_MODE_M, speed);
hbg_reg_write_field(priv, HBG_REG_DUPLEX_TYPE_ADDR,
HBG_REG_DUPLEX_B, duplex);
+
+ hbg_hw_event_notify(priv, HBG_HW_EVENT_CORE_RESET);
+
+ hbg_hw_mac_enable(priv, HBG_STATUS_ENABLE);
+
+ if (!hbg_reg_read_field(priv, HBG_REG_AN_NEG_STATE_ADDR,
+ HBG_REG_AN_NEG_STATE_NP_LINK_OK_B))
+ hbg_np_link_fail_task_schedule(priv);
+}
+
+/* only the unicast filter is supported */
+void hbg_hw_set_mac_filter_enable(struct hbg_priv *priv, u32 enable)
+{
+ hbg_reg_write_field(priv, HBG_REG_REC_FILT_CTRL_ADDR,
+ HBG_REG_REC_FILT_CTRL_UC_MATCH_EN_B, enable);
+
+ /* only uc filter is supported, so set all bits of mc mask reg to 1 */
+ hbg_reg_write64(priv, HBG_REG_STATION_ADDR_LOW_MSK_0, U64_MAX);
+ hbg_reg_write64(priv, HBG_REG_STATION_ADDR_LOW_MSK_1, U64_MAX);
+}
+
+void hbg_hw_set_pause_enable(struct hbg_priv *priv, u32 tx_en, u32 rx_en)
+{
+ hbg_reg_write_field(priv, HBG_REG_PAUSE_ENABLE_ADDR,
+ HBG_REG_PAUSE_ENABLE_TX_B, tx_en);
+ hbg_reg_write_field(priv, HBG_REG_PAUSE_ENABLE_ADDR,
+ HBG_REG_PAUSE_ENABLE_RX_B, rx_en);
+
+ hbg_reg_write_field(priv, HBG_REG_REC_FILT_CTRL_ADDR,
+ HBG_REG_REC_FILT_CTRL_PAUSE_FRM_PASS_B, rx_en);
+}
+
+void hbg_hw_get_pause_enable(struct hbg_priv *priv, u32 *tx_en, u32 *rx_en)
+{
+ *tx_en = hbg_reg_read_field(priv, HBG_REG_PAUSE_ENABLE_ADDR,
+ HBG_REG_PAUSE_ENABLE_TX_B);
+ *rx_en = hbg_reg_read_field(priv, HBG_REG_PAUSE_ENABLE_ADDR,
+ HBG_REG_PAUSE_ENABLE_RX_B);
+}
+
+void hbg_hw_set_rx_pause_mac_addr(struct hbg_priv *priv, u64 mac_addr)
+{
+ hbg_reg_write64(priv, HBG_REG_FD_FC_ADDR_LOW_ADDR, mac_addr);
}
static void hbg_hw_init_transmit_ctrl(struct hbg_priv *priv)
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.h
index 14fb39241c93..a4a049b5121d 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.h
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.h
@@ -51,9 +51,13 @@ bool hbg_hw_irq_is_enabled(struct hbg_priv *priv, u32 mask);
void hbg_hw_irq_enable(struct hbg_priv *priv, u32 mask, bool enable);
void hbg_hw_set_mtu(struct hbg_priv *priv, u16 mtu);
void hbg_hw_mac_enable(struct hbg_priv *priv, u32 enable);
-void hbg_hw_set_uc_addr(struct hbg_priv *priv, u64 mac_addr);
+void hbg_hw_set_uc_addr(struct hbg_priv *priv, u64 mac_addr, u32 index);
u32 hbg_hw_get_fifo_used_num(struct hbg_priv *priv, enum hbg_dir dir);
void hbg_hw_set_tx_desc(struct hbg_priv *priv, struct hbg_tx_desc *tx_desc);
void hbg_hw_fill_buffer(struct hbg_priv *priv, u32 buffer_dma_addr);
+void hbg_hw_set_mac_filter_enable(struct hbg_priv *priv, u32 enable);
+void hbg_hw_set_pause_enable(struct hbg_priv *priv, u32 tx_en, u32 rx_en);
+void hbg_hw_get_pause_enable(struct hbg_priv *priv, u32 *tx_en, u32 *rx_en);
+void hbg_hw_set_rx_pause_mac_addr(struct hbg_priv *priv, u64 mac_addr);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c
index 25dd25f096fe..8af0bc4cca21 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c
@@ -6,54 +6,65 @@
#include "hbg_hw.h"
static void hbg_irq_handle_err(struct hbg_priv *priv,
- struct hbg_irq_info *irq_info)
+ const struct hbg_irq_info *irq_info)
{
if (irq_info->need_print)
dev_err(&priv->pdev->dev,
"receive error interrupt: %s\n", irq_info->name);
+
+ if (irq_info->need_reset)
+ hbg_err_reset_task_schedule(priv);
}
static void hbg_irq_handle_tx(struct hbg_priv *priv,
- struct hbg_irq_info *irq_info)
+ const struct hbg_irq_info *irq_info)
{
napi_schedule(&priv->tx_ring.napi);
}
static void hbg_irq_handle_rx(struct hbg_priv *priv,
- struct hbg_irq_info *irq_info)
+ const struct hbg_irq_info *irq_info)
{
napi_schedule(&priv->rx_ring.napi);
}
-#define HBG_TXRX_IRQ_I(name, handle) \
- {#name, HBG_INT_MSK_##name##_B, false, false, 0, handle}
-#define HBG_ERR_IRQ_I(name, need_print) \
- {#name, HBG_INT_MSK_##name##_B, true, need_print, 0, hbg_irq_handle_err}
-
-static struct hbg_irq_info hbg_irqs[] = {
- HBG_TXRX_IRQ_I(RX, hbg_irq_handle_rx),
- HBG_TXRX_IRQ_I(TX, hbg_irq_handle_tx),
- HBG_ERR_IRQ_I(MAC_MII_FIFO_ERR, true),
- HBG_ERR_IRQ_I(MAC_PCS_RX_FIFO_ERR, true),
- HBG_ERR_IRQ_I(MAC_PCS_TX_FIFO_ERR, true),
- HBG_ERR_IRQ_I(MAC_APP_RX_FIFO_ERR, true),
- HBG_ERR_IRQ_I(MAC_APP_TX_FIFO_ERR, true),
- HBG_ERR_IRQ_I(SRAM_PARITY_ERR, true),
- HBG_ERR_IRQ_I(TX_AHB_ERR, true),
- HBG_ERR_IRQ_I(RX_BUF_AVL, false),
- HBG_ERR_IRQ_I(REL_BUF_ERR, true),
- HBG_ERR_IRQ_I(TXCFG_AVL, false),
- HBG_ERR_IRQ_I(TX_DROP, false),
- HBG_ERR_IRQ_I(RX_DROP, false),
- HBG_ERR_IRQ_I(RX_AHB_ERR, true),
- HBG_ERR_IRQ_I(MAC_FIFO_ERR, false),
- HBG_ERR_IRQ_I(RBREQ_ERR, false),
- HBG_ERR_IRQ_I(WE_ERR, false),
+static void hbg_irq_handle_rx_buf_val(struct hbg_priv *priv,
+ const struct hbg_irq_info *irq_info)
+{
+ priv->stats.rx_fifo_less_empty_thrsld_cnt++;
+}
+
+#define HBG_IRQ_I(name, handle) \
+ {#name, HBG_INT_MSK_##name##_B, false, false, false, handle}
+#define HBG_ERR_IRQ_I(name, need_print, need_reset) \
+ {#name, HBG_INT_MSK_##name##_B, true, need_print, \
+ need_reset, hbg_irq_handle_err}
+
+static const struct hbg_irq_info hbg_irqs[] = {
+ HBG_IRQ_I(RX, hbg_irq_handle_rx),
+ HBG_IRQ_I(TX, hbg_irq_handle_tx),
+ HBG_ERR_IRQ_I(TX_PKT_CPL, true, true),
+ HBG_ERR_IRQ_I(MAC_MII_FIFO_ERR, true, true),
+ HBG_ERR_IRQ_I(MAC_PCS_RX_FIFO_ERR, true, true),
+ HBG_ERR_IRQ_I(MAC_PCS_TX_FIFO_ERR, true, true),
+ HBG_ERR_IRQ_I(MAC_APP_RX_FIFO_ERR, true, true),
+ HBG_ERR_IRQ_I(MAC_APP_TX_FIFO_ERR, true, true),
+ HBG_ERR_IRQ_I(SRAM_PARITY_ERR, true, false),
+ HBG_ERR_IRQ_I(TX_AHB_ERR, true, true),
+ HBG_IRQ_I(RX_BUF_AVL, hbg_irq_handle_rx_buf_val),
+ HBG_ERR_IRQ_I(REL_BUF_ERR, true, false),
+ HBG_ERR_IRQ_I(TXCFG_AVL, false, false),
+ HBG_ERR_IRQ_I(TX_DROP, false, false),
+ HBG_ERR_IRQ_I(RX_DROP, false, false),
+ HBG_ERR_IRQ_I(RX_AHB_ERR, true, false),
+ HBG_ERR_IRQ_I(MAC_FIFO_ERR, true, true),
+ HBG_ERR_IRQ_I(RBREQ_ERR, true, true),
+ HBG_ERR_IRQ_I(WE_ERR, true, true),
};
static irqreturn_t hbg_irq_handle(int irq_num, void *p)
{
- struct hbg_irq_info *info;
+ const struct hbg_irq_info *info;
struct hbg_priv *priv = p;
u32 status;
u32 i;
@@ -68,7 +79,7 @@ static irqreturn_t hbg_irq_handle(int irq_num, void *p)
hbg_hw_irq_enable(priv, info->mask, false);
hbg_hw_irq_clear(priv, info->mask);
- info->count++;
+ priv->vectors.stats_array[i]++;
if (info->irq_handle)
info->irq_handle(priv, info);
@@ -121,6 +132,12 @@ int hbg_irq_init(struct hbg_priv *priv)
irq_names_map[i]);
}
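+ /* one counter per interrupt source, indexed like hbg_irqs[] */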
+ vectors->stats_array = devm_kcalloc(&priv->pdev->dev,
+ ARRAY_SIZE(hbg_irqs),
+ sizeof(u64), GFP_KERNEL);
+ if (!vectors->stats_array)
+ return -ENOMEM;
+
vectors->info_array = hbg_irqs;
vectors->info_array_len = ARRAY_SIZE(hbg_irqs);
return 0;
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c
index 75505fb5cc4a..2e64dc1ab355 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c
@@ -5,18 +5,23 @@
#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
+#include <linux/phy.h>
#include "hbg_common.h"
+#include "hbg_diagnose.h"
+#include "hbg_err.h"
#include "hbg_ethtool.h"
#include "hbg_hw.h"
#include "hbg_irq.h"
#include "hbg_mdio.h"
#include "hbg_txrx.h"
+#include "hbg_debugfs.h"
-static void hbg_change_mtu(struct hbg_priv *priv, int new_mtu);
+#define HBG_SUPPORT_FEATURES (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | \
+ NETIF_F_RXCSUM)
static void hbg_all_irq_enable(struct hbg_priv *priv, bool enabled)
{
- struct hbg_irq_info *info;
+ const struct hbg_irq_info *info;
u32 i;
for (i = 0; i < priv->vectors.info_array_len; i++) {
@@ -55,11 +60,7 @@ static int hbg_hw_txrx_clear(struct hbg_priv *priv)
return ret;
/* After reset, regs need to be reconfigured */
- hbg_hw_init(priv);
- hbg_hw_set_uc_addr(priv, ether_addr_to_u64(priv->netdev->dev_addr));
- hbg_change_mtu(priv, priv->netdev->mtu);
-
- return 0;
+ return hbg_rebuild(priv);
}
static int hbg_net_stop(struct net_device *netdev)
@@ -74,31 +75,127 @@ static int hbg_net_stop(struct net_device *netdev)
return hbg_hw_txrx_clear(priv);
}
+static void hbg_update_promisc_mode(struct net_device *netdev, bool overflow)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+
+ /* The MAC filter is enabled only when the table has not overflowed
+ * and IFF_PROMISC is not set on the netdev.
+ * Otherwise the filter is disabled.
+ */
+ priv->filter.enabled = !(overflow || (netdev->flags & IFF_PROMISC));
+ hbg_hw_set_mac_filter_enable(priv, priv->filter.enabled);
+}
+
+static void hbg_set_mac_to_mac_table(struct hbg_priv *priv,
+ u32 index, const u8 *addr)
+{
+ if (addr) {
+ ether_addr_copy(priv->filter.mac_table[index].addr, addr);
+ hbg_hw_set_uc_addr(priv, ether_addr_to_u64(addr), index);
+ } else {
+ eth_zero_addr(priv->filter.mac_table[index].addr);
+ hbg_hw_set_uc_addr(priv, 0, index);
+ }
+}
+
+static int hbg_get_index_from_mac_table(struct hbg_priv *priv,
+ const u8 *addr, u32 *index)
+{
+ u32 i;
+
+ for (i = 0; i < priv->filter.table_max_len; i++)
+ if (ether_addr_equal(priv->filter.mac_table[i].addr, addr)) {
+ *index = i;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int hbg_add_mac_to_filter(struct hbg_priv *priv, const u8 *addr)
+{
+ u32 index;
+
+ /* already exists */
+ if (!hbg_get_index_from_mac_table(priv, addr, &index))
+ return 0;
+
+ for (index = 0; index < priv->filter.table_max_len; index++)
+ if (is_zero_ether_addr(priv->filter.mac_table[index].addr)) {
+ hbg_set_mac_to_mac_table(priv, index, addr);
+ return 0;
+ }
+
+ return -ENOSPC;
+}
+
+static void hbg_del_mac_from_filter(struct hbg_priv *priv, const u8 *addr)
+{
+ u32 index;
+
+ /* not in the table */
+ if (hbg_get_index_from_mac_table(priv, addr, &index))
+ return;
+
+ hbg_set_mac_to_mac_table(priv, index, NULL);
+}
+
+static int hbg_uc_sync(struct net_device *netdev, const unsigned char *addr)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+
+ return hbg_add_mac_to_filter(priv, addr);
+}
+
+static int hbg_uc_unsync(struct net_device *netdev, const unsigned char *addr)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+
+ if (ether_addr_equal(netdev->dev_addr, (u8 *)addr))
+ return 0;
+
+ hbg_del_mac_from_filter(priv, addr);
+ return 0;
+}
+
+static void hbg_net_set_rx_mode(struct net_device *netdev)
+{
+ int ret;
+
+ ret = __dev_uc_sync(netdev, hbg_uc_sync, hbg_uc_unsync);
+
+ /* If ret != 0, overflow has occurred */
+ hbg_update_promisc_mode(netdev, !!ret);
+}
+
static int hbg_net_set_mac_address(struct net_device *netdev, void *addr)
{
struct hbg_priv *priv = netdev_priv(netdev);
u8 *mac_addr;
+ bool exists;
+ u32 index;
mac_addr = ((struct sockaddr *)addr)->sa_data;
if (!is_valid_ether_addr(mac_addr))
return -EADDRNOTAVAIL;
- hbg_hw_set_uc_addr(priv, ether_addr_to_u64(mac_addr));
- dev_addr_set(netdev, mac_addr);
+ /* The host MAC address always uses index 0.
+ * If the new MAC address already exists in the table,
+ * remove the existing entry and
+ * install the address at index 0.
+ */
+ exists = !hbg_get_index_from_mac_table(priv, mac_addr, &index);
+ hbg_set_mac_to_mac_table(priv, 0, mac_addr);
+ if (exists)
+ hbg_set_mac_to_mac_table(priv, index, NULL);
+ hbg_hw_set_rx_pause_mac_addr(priv, ether_addr_to_u64(mac_addr));
+ dev_addr_set(netdev, mac_addr);
return 0;
}
-static void hbg_change_mtu(struct hbg_priv *priv, int new_mtu)
-{
- u32 frame_len;
-
- frame_len = new_mtu + VLAN_HLEN * priv->dev_specs.vlan_layers +
- ETH_HLEN + ETH_FCS_LEN;
- hbg_hw_set_mtu(priv, frame_len);
-}
-
static int hbg_net_change_mtu(struct net_device *netdev, int new_mtu)
{
struct hbg_priv *priv = netdev_priv(netdev);
@@ -106,12 +203,12 @@ static int hbg_net_change_mtu(struct net_device *netdev, int new_mtu)
if (netif_running(netdev))
return -EBUSY;
- hbg_change_mtu(priv, new_mtu);
- WRITE_ONCE(netdev->mtu, new_mtu);
-
dev_dbg(&priv->pdev->dev,
"change mtu from %u to %u\n", netdev->mtu, new_mtu);
+ hbg_hw_set_mtu(priv, new_mtu);
+ WRITE_ONCE(netdev->mtu, new_mtu);
+
return 0;
}
@@ -122,6 +219,10 @@ static void hbg_net_tx_timeout(struct net_device *netdev, unsigned int txqueue)
char *buf = ring->tout_log_buf;
u32 pos = 0;
+ priv->stats.tx_timeout_cnt++;
+
+ pos += scnprintf(buf + pos, HBG_TX_TIMEOUT_BUF_LEN - pos,
+ "tx_timeout cnt: %llu\n", priv->stats.tx_timeout_cnt);
pos += scnprintf(buf + pos, HBG_TX_TIMEOUT_BUF_LEN - pos,
"ring used num: %u, fifo used num: %u\n",
hbg_get_queue_used_num(ring),
@@ -134,6 +235,39 @@ static void hbg_net_tx_timeout(struct net_device *netdev, unsigned int txqueue)
netdev_info(netdev, "%s", buf);
}
+static void hbg_net_get_stats(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+ struct hbg_stats *h_stats = &priv->stats;
+
+ hbg_update_stats(priv);
+ dev_get_tstats64(netdev, stats);
+
+ /* fifo empty */
+ stats->tx_fifo_errors += h_stats->tx_drop_cnt;
+
+ stats->tx_dropped += h_stats->tx_excessive_length_drop_cnt +
+ h_stats->tx_drop_cnt;
+ stats->tx_errors += h_stats->tx_add_cs_fail_cnt +
+ h_stats->tx_bufrl_err_cnt +
+ h_stats->tx_underrun_err_cnt +
+ h_stats->tx_crc_err_cnt;
+ stats->rx_errors += h_stats->rx_data_error_cnt;
+ stats->multicast += h_stats->rx_mc_pkt_cnt;
+ stats->rx_dropped += h_stats->rx_desc_drop;
+ stats->rx_length_errors += h_stats->rx_frame_very_long_err_cnt +
+ h_stats->rx_frame_long_err_cnt +
+ h_stats->rx_frame_runt_err_cnt +
+ h_stats->rx_frame_short_err_cnt +
+ h_stats->rx_lengthfield_err_cnt;
+ stats->rx_frame_errors += h_stats->rx_desc_l2_err_cnt +
+ h_stats->rx_desc_l3l4_err_cnt;
+ stats->rx_fifo_errors += h_stats->rx_overflow_cnt +
+ h_stats->rx_overrun_cnt;
+ stats->rx_crc_errors += h_stats->rx_fcs_error_cnt;
+}
+
static const struct net_device_ops hbg_netdev_ops = {
.ndo_open = hbg_net_open,
.ndo_stop = hbg_net_stop,
@@ -142,8 +276,93 @@ static const struct net_device_ops hbg_netdev_ops = {
.ndo_set_mac_address = hbg_net_set_mac_address,
.ndo_change_mtu = hbg_net_change_mtu,
.ndo_tx_timeout = hbg_net_tx_timeout,
+ .ndo_set_rx_mode = hbg_net_set_rx_mode,
+ .ndo_get_stats64 = hbg_net_get_stats,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
};
+static void hbg_service_task(struct work_struct *work)
+{
+ struct hbg_priv *priv = container_of(work, struct hbg_priv,
+ service_task.work);
+
+ if (test_and_clear_bit(HBG_NIC_STATE_NEED_RESET, &priv->state))
+ hbg_err_reset(priv);
+
+ if (test_and_clear_bit(HBG_NIC_STATE_NP_LINK_FAIL, &priv->state))
+ hbg_fix_np_link_fail(priv);
+
+ hbg_diagnose_message_push(priv);
+
+ /* The hardware statistics registers are only u32 wide.
+ * To prevent them from overflowing,
+ * the driver dumps the statistics every 30 seconds.
+ */
+ if (time_after(jiffies, priv->last_update_stats_time + 30 * HZ)) {
+ hbg_update_stats(priv);
+ priv->last_update_stats_time = jiffies;
+ }
+
+ schedule_delayed_work(&priv->service_task,
+ msecs_to_jiffies(MSEC_PER_SEC));
+}
+
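+/* the *_task_schedule() helpers set a state bit and kick the service task immediately */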
+void hbg_err_reset_task_schedule(struct hbg_priv *priv)
+{
+ set_bit(HBG_NIC_STATE_NEED_RESET, &priv->state);
+ schedule_delayed_work(&priv->service_task, 0);
+}
+
+void hbg_np_link_fail_task_schedule(struct hbg_priv *priv)
+{
+ set_bit(HBG_NIC_STATE_NP_LINK_FAIL, &priv->state);
+ schedule_delayed_work(&priv->service_task, 0);
+}
+
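+/* devm action: ensure the service task is cancelled when the device is released */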
+static void hbg_cancel_delayed_work_sync(void *data)
+{
+ cancel_delayed_work_sync(data);
+}
+
+static int hbg_delaywork_init(struct hbg_priv *priv)
+{
+ INIT_DELAYED_WORK(&priv->service_task, hbg_service_task);
+ schedule_delayed_work(&priv->service_task, 0);
+ return devm_add_action_or_reset(&priv->pdev->dev,
+ hbg_cancel_delayed_work_sync,
+ &priv->service_task);
+}
+
+static int hbg_mac_filter_init(struct hbg_priv *priv)
+{
+ struct hbg_dev_specs *dev_specs = &priv->dev_specs;
+ struct hbg_mac_filter *filter = &priv->filter;
+ struct hbg_mac_table_entry *tmp_table;
+
+ tmp_table = devm_kcalloc(&priv->pdev->dev, dev_specs->uc_mac_num,
+ sizeof(*tmp_table), GFP_KERNEL);
+ if (!tmp_table)
+ return -ENOMEM;
+
+ filter->mac_table = tmp_table;
+ filter->table_max_len = dev_specs->uc_mac_num;
+ filter->enabled = true;
+
+ hbg_hw_set_mac_filter_enable(priv, filter->enabled);
+ return 0;
+}
+
+static void hbg_init_user_def(struct hbg_priv *priv)
+{
+ struct ethtool_pauseparam *pause_param = &priv->user_def.pause_param;
+
+ priv->mac.pause_autoneg = HBG_STATUS_ENABLE;
+
+ pause_param->autoneg = priv->mac.pause_autoneg;
+ hbg_hw_get_pause_enable(priv, &pause_param->tx_pause,
+ &pause_param->rx_pause);
+}
+
static int hbg_init(struct hbg_priv *priv)
{
int ret;
@@ -160,7 +379,21 @@ static int hbg_init(struct hbg_priv *priv)
if (ret)
return ret;
- return hbg_mdio_init(priv);
+ ret = hbg_mdio_init(priv);
+ if (ret)
+ return ret;
+
+ ret = hbg_mac_filter_init(priv);
+ if (ret)
+ return ret;
+
+ ret = hbg_delaywork_init(priv);
+ if (ret)
+ return ret;
+
+ hbg_debugfs_init(priv);
+ hbg_init_user_def(priv);
+ return 0;
}
static int hbg_pci_init(struct pci_dev *pdev)
@@ -216,13 +449,18 @@ static int hbg_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
return ret;
+ /* set default features */
+ netdev->features |= HBG_SUPPORT_FEATURES;
+ netdev->hw_features |= HBG_SUPPORT_FEATURES;
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
netdev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
netdev->max_mtu = priv->dev_specs.max_mtu;
netdev->min_mtu = priv->dev_specs.min_mtu;
netdev->netdev_ops = &hbg_netdev_ops;
netdev->watchdog_timeo = 5 * HZ;
- hbg_change_mtu(priv, ETH_DATA_LEN);
+ hbg_hw_set_mtu(priv, ETH_DATA_LEN);
hbg_net_set_mac_address(priv->netdev, &priv->dev_specs.mac_addr);
hbg_ethtool_set_ops(netdev);
@@ -245,7 +483,27 @@ static struct pci_driver hbg_driver = {
.id_table = hbg_pci_tbl,
.probe = hbg_probe,
};
-module_pci_driver(hbg_driver);
+
+static int __init hbg_module_init(void)
+{
+ int ret;
+
+ hbg_debugfs_register();
+ hbg_set_pci_err_handler(&hbg_driver);
+ ret = pci_register_driver(&hbg_driver);
+ if (ret)
+ hbg_debugfs_unregister();
+
+ return ret;
+}
+module_init(hbg_module_init);
+
+static void __exit hbg_module_exit(void)
+{
+ pci_unregister_driver(&hbg_driver);
+ hbg_debugfs_unregister();
+}
+module_exit(hbg_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c
index a3479fba8501..42b0083c9193 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c
@@ -2,6 +2,7 @@
// Copyright (c) 2024 Hisilicon Limited.
#include <linux/phy.h>
+#include <linux/rtnetlink.h>
#include "hbg_common.h"
#include "hbg_hw.h"
#include "hbg_mdio.h"
@@ -17,6 +18,8 @@
#define HBG_MDIO_OP_TIMEOUT_US (1 * 1000 * 1000)
#define HBG_MDIO_OP_INTERVAL_US (5 * 1000)
+#define HBG_NP_LINK_FAIL_RETRY_TIMES 5
+
static void hbg_mdio_set_command(struct hbg_mac *mac, u32 cmd)
{
hbg_reg_write(HBG_MAC_GET_PRIV(mac), HBG_REG_MDIO_COMMAND_ADDR, cmd);
@@ -114,6 +117,47 @@ static void hbg_mdio_init_hw(struct hbg_priv *priv)
hbg_mdio_set_command(mac, cmd);
}
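+/* when pause autoneg is enabled, apply the PHY-negotiated pause settings */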
+static void hbg_flowctrl_cfg(struct hbg_priv *priv)
+{
+ struct phy_device *phydev = priv->mac.phydev;
+ bool rx_pause;
+ bool tx_pause;
+
+ if (!priv->mac.pause_autoneg)
+ return;
+
+ phy_get_pause(phydev, &tx_pause, &rx_pause);
+ hbg_hw_set_pause_enable(priv, tx_pause, rx_pause);
+}
+
+void hbg_fix_np_link_fail(struct hbg_priv *priv)
+{
+ struct device *dev = &priv->pdev->dev;
+
+ rtnl_lock();
+
+ if (priv->stats.np_link_fail_cnt >= HBG_NP_LINK_FAIL_RETRY_TIMES) {
+ dev_err(dev, "failed to fix the MAC link status\n");
+ priv->stats.np_link_fail_cnt = 0;
+ goto unlock;
+ }
+
+ if (!priv->mac.phydev->link)
+ goto unlock;
+
+ priv->stats.np_link_fail_cnt++;
+ dev_err(dev, "failed to link MAC and PHY, trying to fix...\n");
+
+ /* Replace phy_reset() with phy_stop() and phy_start(),
+ * as suggested by Andrew.
+ */
+ hbg_phy_stop(priv);
+ hbg_phy_start(priv);
+
+unlock:
+ rtnl_unlock();
+}
+
static void hbg_phy_adjust_link(struct net_device *netdev)
{
struct hbg_priv *priv = netdev_priv(netdev);
@@ -140,6 +184,7 @@ static void hbg_phy_adjust_link(struct net_device *netdev)
priv->mac.duplex = phydev->duplex;
priv->mac.autoneg = phydev->autoneg;
hbg_hw_adjust_link(priv, speed, phydev->duplex);
+ hbg_flowctrl_cfg(priv);
}
priv->mac.link_status = phydev->link;
@@ -168,6 +213,7 @@ static int hbg_phy_connect(struct hbg_priv *priv)
return ret;
phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
+ phy_support_asym_pause(phydev);
phy_attached_info(phydev);
return 0;
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.h
index febd02a309c7..f3771c1bbd34 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.h
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.h
@@ -9,4 +9,6 @@
int hbg_mdio_init(struct hbg_priv *priv);
void hbg_phy_start(struct hbg_priv *priv);
void hbg_phy_stop(struct hbg_priv *priv);
+void hbg_fix_np_link_fail(struct hbg_priv *priv);
+
#endif
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h
index 57d81c6d7633..a6e7f5e62b48 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h
@@ -10,12 +10,21 @@
#define HBG_REG_MAC_ID_ADDR 0x0008
#define HBG_REG_PHY_ID_ADDR 0x000C
#define HBG_REG_MAC_ADDR_ADDR 0x0010
+#define HBG_REG_MAC_ADDR_HIGH_ADDR 0x0014
+#define HBG_REG_UC_MAC_NUM_ADDR 0x0018
#define HBG_REG_MDIO_FREQ_ADDR 0x0024
#define HBG_REG_MAX_MTU_ADDR 0x0028
#define HBG_REG_MIN_MTU_ADDR 0x002C
#define HBG_REG_TX_FIFO_NUM_ADDR 0x0030
#define HBG_REG_RX_FIFO_NUM_ADDR 0x0034
#define HBG_REG_VLAN_LAYERS_ADDR 0x0038
+#define HBG_REG_PUSH_REQ_ADDR 0x00F0
+#define HBG_REG_MSG_HEADER_ADDR 0x00F4
+#define HBG_REG_MSG_HEADER_OPCODE_M GENMASK(7, 0)
+#define HBG_REG_MSG_HEADER_STATUS_M GENMASK(11, 8)
+#define HBG_REG_MSG_HEADER_DATA_NUM_M GENMASK(19, 12)
+#define HBG_REG_MSG_HEADER_RESP_CODE_M GENMASK(27, 20)
+#define HBG_REG_MSG_DATA_BASE_ADDR 0x0100
/* MDIO */
#define HBG_REG_MDIO_BASE 0x8000
@@ -28,6 +37,7 @@
#define HBG_REG_MDIO_COMMAND_OP_M GENMASK(11, 10)
#define HBG_REG_MDIO_COMMAND_PRTAD_M GENMASK(9, 5)
#define HBG_REG_MDIO_COMMAND_DEVAD_M GENMASK(4, 0)
+#define HBG_REG_MDIO_ADDR_ADDR (HBG_REG_MDIO_BASE + 0x0004)
#define HBG_REG_MDIO_WDATA_ADDR (HBG_REG_MDIO_BASE + 0x0008)
#define HBG_REG_MDIO_WDATA_M GENMASK(15, 0)
#define HBG_REG_MDIO_RDATA_ADDR (HBG_REG_MDIO_BASE + 0x000C)
@@ -36,6 +46,10 @@
/* GMAC */
#define HBG_REG_SGMII_BASE 0x10000
#define HBG_REG_DUPLEX_TYPE_ADDR (HBG_REG_SGMII_BASE + 0x0008)
+#define HBG_REG_FD_FC_TYPE_ADDR (HBG_REG_SGMII_BASE + 0x000C)
+#define HBG_REG_FC_TX_TIMER_ADDR (HBG_REG_SGMII_BASE + 0x001C)
+#define HBG_REG_FD_FC_ADDR_LOW_ADDR (HBG_REG_SGMII_BASE + 0x0020)
+#define HBG_REG_FD_FC_ADDR_HIGH_ADDR (HBG_REG_SGMII_BASE + 0x0024)
#define HBG_REG_DUPLEX_B BIT(0)
#define HBG_REG_MAX_FRAME_SIZE_ADDR (HBG_REG_SGMII_BASE + 0x003C)
#define HBG_REG_PORT_MODE_ADDR (HBG_REG_SGMII_BASE + 0x0040)
@@ -43,20 +57,91 @@
#define HBG_REG_PORT_ENABLE_ADDR (HBG_REG_SGMII_BASE + 0x0044)
#define HBG_REG_PORT_ENABLE_RX_B BIT(1)
#define HBG_REG_PORT_ENABLE_TX_B BIT(2)
+#define HBG_REG_PAUSE_ENABLE_ADDR (HBG_REG_SGMII_BASE + 0x0048)
+#define HBG_REG_PAUSE_ENABLE_RX_B BIT(0)
+#define HBG_REG_PAUSE_ENABLE_TX_B BIT(1)
+#define HBG_REG_AN_NEG_STATE_ADDR (HBG_REG_SGMII_BASE + 0x0058)
+#define HBG_REG_AN_NEG_STATE_NP_LINK_OK_B BIT(15)
#define HBG_REG_TRANSMIT_CTRL_ADDR (HBG_REG_SGMII_BASE + 0x0060)
#define HBG_REG_TRANSMIT_CTRL_PAD_EN_B BIT(7)
#define HBG_REG_TRANSMIT_CTRL_CRC_ADD_B BIT(6)
#define HBG_REG_TRANSMIT_CTRL_AN_EN_B BIT(5)
+#define HBG_REG_REC_FILT_CTRL_ADDR (HBG_REG_SGMII_BASE + 0x0064)
+#define HBG_REG_REC_FILT_CTRL_UC_MATCH_EN_B BIT(0)
+#define HBG_REG_REC_FILT_CTRL_PAUSE_FRM_PASS_B BIT(4)
+#define HBG_REG_RX_OCTETS_TOTAL_OK_ADDR (HBG_REG_SGMII_BASE + 0x0080)
+#define HBG_REG_RX_OCTETS_BAD_ADDR (HBG_REG_SGMII_BASE + 0x0084)
+#define HBG_REG_RX_UC_PKTS_ADDR (HBG_REG_SGMII_BASE + 0x0088)
+#define HBG_REG_RX_MC_PKTS_ADDR (HBG_REG_SGMII_BASE + 0x008C)
+#define HBG_REG_RX_BC_PKTS_ADDR (HBG_REG_SGMII_BASE + 0x0090)
+#define HBG_REG_RX_PKTS_64OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x0094)
+#define HBG_REG_RX_PKTS_65TO127OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x0098)
+#define HBG_REG_RX_PKTS_128TO255OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x009C)
+#define HBG_REG_RX_PKTS_256TO511OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x00A0)
+#define HBG_REG_RX_PKTS_512TO1023OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x00A4)
+#define HBG_REG_RX_PKTS_1024TO1518OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x00A8)
+#define HBG_REG_RX_PKTS_1519TOMAXOCTETS_ADDR (HBG_REG_SGMII_BASE + 0x00AC)
+#define HBG_REG_RX_FCS_ERRORS_ADDR (HBG_REG_SGMII_BASE + 0x00B0)
+#define HBG_REG_RX_TAGGED_ADDR (HBG_REG_SGMII_BASE + 0x00B4)
+#define HBG_REG_RX_DATA_ERR_ADDR (HBG_REG_SGMII_BASE + 0x00B8)
+#define HBG_REG_RX_ALIGN_ERRORS_ADDR (HBG_REG_SGMII_BASE + 0x00BC)
+#define HBG_REG_RX_LONG_ERRORS_ADDR (HBG_REG_SGMII_BASE + 0x00C0)
+#define HBG_REG_RX_JABBER_ERRORS_ADDR (HBG_REG_SGMII_BASE + 0x00C4)
+#define HBG_REG_RX_PAUSE_MACCTL_FRAMCOUNTER_ADDR (HBG_REG_SGMII_BASE + 0x00C8)
+#define HBG_REG_RX_UNKNOWN_MACCTL_FRAMCOUNTER_ADDR (HBG_REG_SGMII_BASE + 0x00CC)
+#define HBG_REG_RX_VERY_LONG_ERR_CNT_ADDR (HBG_REG_SGMII_BASE + 0x00D0)
+#define HBG_REG_RX_RUNT_ERR_CNT_ADDR (HBG_REG_SGMII_BASE + 0x00D4)
+#define HBG_REG_RX_SHORT_ERR_CNT_ADDR (HBG_REG_SGMII_BASE + 0x00D8)
+#define HBG_REG_RX_FILT_PKT_CNT_ADDR (HBG_REG_SGMII_BASE + 0x00E8)
+#define HBG_REG_RX_OCTETS_TOTAL_FILT_ADDR (HBG_REG_SGMII_BASE + 0x00EC)
+#define HBG_REG_OCTETS_TRANSMITTED_OK_ADDR (HBG_REG_SGMII_BASE + 0x0100)
+#define HBG_REG_OCTETS_TRANSMITTED_BAD_ADDR (HBG_REG_SGMII_BASE + 0x0104)
+#define HBG_REG_TX_UC_PKTS_ADDR (HBG_REG_SGMII_BASE + 0x0108)
+#define HBG_REG_TX_MC_PKTS_ADDR (HBG_REG_SGMII_BASE + 0x010C)
+#define HBG_REG_TX_BC_PKTS_ADDR (HBG_REG_SGMII_BASE + 0x0110)
+#define HBG_REG_TX_PKTS_64OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x0114)
+#define HBG_REG_TX_PKTS_65TO127OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x0118)
+#define HBG_REG_TX_PKTS_128TO255OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x011C)
+#define HBG_REG_TX_PKTS_256TO511OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x0120)
+#define HBG_REG_TX_PKTS_512TO1023OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x0124)
+#define HBG_REG_TX_PKTS_1024TO1518OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x0128)
+#define HBG_REG_TX_PKTS_1519TOMAXOCTETS_ADDR (HBG_REG_SGMII_BASE + 0x012C)
+#define HBG_REG_TX_EXCESSIVE_LENGTH_DROP_ADDR (HBG_REG_SGMII_BASE + 0x014C)
+#define HBG_REG_TX_UNDERRUN_ADDR (HBG_REG_SGMII_BASE + 0x0150)
+#define HBG_REG_TX_TAGGED_ADDR (HBG_REG_SGMII_BASE + 0x0154)
+#define HBG_REG_TX_CRC_ERROR_ADDR (HBG_REG_SGMII_BASE + 0x0158)
+#define HBG_REG_TX_PAUSE_FRAMES_ADDR (HBG_REG_SGMII_BASE + 0x015C)
+#define HBG_REG_LINE_LOOP_BACK_ADDR (HBG_REG_SGMII_BASE + 0x01A8)
#define HBG_REG_CF_CRC_STRIP_ADDR (HBG_REG_SGMII_BASE + 0x01B0)
#define HBG_REG_CF_CRC_STRIP_B BIT(0)
#define HBG_REG_MODE_CHANGE_EN_ADDR (HBG_REG_SGMII_BASE + 0x01B4)
#define HBG_REG_MODE_CHANGE_EN_B BIT(0)
+#define HBG_REG_LOOP_REG_ADDR (HBG_REG_SGMII_BASE + 0x01DC)
#define HBG_REG_RECV_CTRL_ADDR (HBG_REG_SGMII_BASE + 0x01E0)
#define HBG_REG_RECV_CTRL_STRIP_PAD_EN_B BIT(3)
+#define HBG_REG_VLAN_CODE_ADDR (HBG_REG_SGMII_BASE + 0x01E8)
+#define HBG_REG_RX_OVERRUN_CNT_ADDR (HBG_REG_SGMII_BASE + 0x01EC)
+#define HBG_REG_RX_LENGTHFIELD_ERR_CNT_ADDR (HBG_REG_SGMII_BASE + 0x01F4)
+#define HBG_REG_RX_FAIL_COMMA_CNT_ADDR (HBG_REG_SGMII_BASE + 0x01F8)
+#define HBG_REG_STATION_ADDR_LOW_0_ADDR (HBG_REG_SGMII_BASE + 0x0200)
+#define HBG_REG_STATION_ADDR_HIGH_0_ADDR (HBG_REG_SGMII_BASE + 0x0204)
+#define HBG_REG_STATION_ADDR_LOW_1_ADDR (HBG_REG_SGMII_BASE + 0x0208)
+#define HBG_REG_STATION_ADDR_HIGH_1_ADDR (HBG_REG_SGMII_BASE + 0x020C)
#define HBG_REG_STATION_ADDR_LOW_2_ADDR (HBG_REG_SGMII_BASE + 0x0210)
#define HBG_REG_STATION_ADDR_HIGH_2_ADDR (HBG_REG_SGMII_BASE + 0x0214)
+#define HBG_REG_STATION_ADDR_LOW_3_ADDR (HBG_REG_SGMII_BASE + 0x0218)
+#define HBG_REG_STATION_ADDR_HIGH_3_ADDR (HBG_REG_SGMII_BASE + 0x021C)
+#define HBG_REG_STATION_ADDR_LOW_4_ADDR (HBG_REG_SGMII_BASE + 0x0220)
+#define HBG_REG_STATION_ADDR_HIGH_4_ADDR (HBG_REG_SGMII_BASE + 0x0224)
+#define HBG_REG_STATION_ADDR_LOW_5_ADDR (HBG_REG_SGMII_BASE + 0x0228)
+#define HBG_REG_STATION_ADDR_HIGH_5_ADDR (HBG_REG_SGMII_BASE + 0x022C)
+#define HBG_REG_STATION_ADDR_LOW_MSK_0 (HBG_REG_SGMII_BASE + 0x0230)
+#define HBG_REG_STATION_ADDR_LOW_MSK_1 (HBG_REG_SGMII_BASE + 0x0238)
/* PCU */
+#define HBG_REG_TX_FIFO_THRSLD_ADDR (HBG_REG_SGMII_BASE + 0x0420)
+#define HBG_REG_RX_FIFO_THRSLD_ADDR (HBG_REG_SGMII_BASE + 0x0424)
+#define HBG_REG_CFG_FIFO_THRSLD_ADDR (HBG_REG_SGMII_BASE + 0x0428)
#define HBG_REG_CF_INTRPT_MSK_ADDR (HBG_REG_SGMII_BASE + 0x042C)
#define HBG_INT_MSK_WE_ERR_B BIT(31)
#define HBG_INT_MSK_RBREQ_ERR_B BIT(30)
@@ -74,15 +159,27 @@
#define HBG_INT_MSK_MAC_PCS_TX_FIFO_ERR_B BIT(17)
#define HBG_INT_MSK_MAC_PCS_RX_FIFO_ERR_B BIT(16)
#define HBG_INT_MSK_MAC_MII_FIFO_ERR_B BIT(15)
+#define HBG_INT_MSK_TX_PKT_CPL_B BIT(14)
#define HBG_INT_MSK_TX_B BIT(1) /* just used in driver */
#define HBG_INT_MSK_RX_B BIT(0) /* just used in driver */
#define HBG_REG_CF_INTRPT_STAT_ADDR (HBG_REG_SGMII_BASE + 0x0434)
#define HBG_REG_CF_INTRPT_CLR_ADDR (HBG_REG_SGMII_BASE + 0x0438)
+#define HBG_REG_TX_BUS_ERR_ADDR_ADDR (HBG_REG_SGMII_BASE + 0x043C)
+#define HBG_REG_RX_BUS_ERR_ADDR_ADDR (HBG_REG_SGMII_BASE + 0x0440)
#define HBG_REG_MAX_FRAME_LEN_ADDR (HBG_REG_SGMII_BASE + 0x0444)
#define HBG_REG_MAX_FRAME_LEN_M GENMASK(15, 0)
+#define HBG_REG_TX_DROP_CNT_ADDR (HBG_REG_SGMII_BASE + 0x0448)
+#define HBG_REG_RX_OVER_FLOW_CNT_ADDR (HBG_REG_SGMII_BASE + 0x044C)
+#define HBG_REG_DEBUG_ST_MCH_ADDR (HBG_REG_SGMII_BASE + 0x0450)
+#define HBG_REG_FIFO_CURR_STATUS_ADDR (HBG_REG_SGMII_BASE + 0x0454)
+#define HBG_REG_FIFO_HIST_STATUS_ADDR (HBG_REG_SGMII_BASE + 0x0458)
#define HBG_REG_CF_CFF_DATA_NUM_ADDR (HBG_REG_SGMII_BASE + 0x045C)
#define HBG_REG_CF_CFF_DATA_NUM_ADDR_TX_M GENMASK(8, 0)
#define HBG_REG_CF_CFF_DATA_NUM_ADDR_RX_M GENMASK(24, 16)
+#define HBG_REG_TX_CS_FAIL_CNT_ADDR (HBG_REG_SGMII_BASE + 0x0460)
+#define HBG_REG_RX_TRANS_PKG_CNT_ADDR (HBG_REG_SGMII_BASE + 0x0464)
+#define HBG_REG_TX_TRANS_PKG_CNT_ADDR (HBG_REG_SGMII_BASE + 0x0468)
+#define HBG_REG_CF_TX_PAUSE_ADDR (HBG_REG_SGMII_BASE + 0x0470)
#define HBG_REG_TX_CFF_ADDR_0_ADDR (HBG_REG_SGMII_BASE + 0x0488)
#define HBG_REG_TX_CFF_ADDR_1_ADDR (HBG_REG_SGMII_BASE + 0x048C)
#define HBG_REG_TX_CFF_ADDR_2_ADDR (HBG_REG_SGMII_BASE + 0x0490)
@@ -101,6 +198,13 @@
#define HBG_REG_RX_CTRL_RXBUF_1ST_SKIP_SIZE2_M GENMASK(3, 0)
#define HBG_REG_RX_PKT_MODE_ADDR (HBG_REG_SGMII_BASE + 0x04F4)
#define HBG_REG_RX_PKT_MODE_PARSE_MODE_M GENMASK(22, 21)
+#define HBG_REG_RX_BUFRQ_ERR_CNT_ADDR (HBG_REG_SGMII_BASE + 0x058C)
+#define HBG_REG_TX_BUFRL_ERR_CNT_ADDR (HBG_REG_SGMII_BASE + 0x0590)
+#define HBG_REG_RX_WE_ERR_CNT_ADDR (HBG_REG_SGMII_BASE + 0x0594)
+#define HBG_REG_DBG_ST0_ADDR (HBG_REG_SGMII_BASE + 0x05E4)
+#define HBG_REG_DBG_ST1_ADDR (HBG_REG_SGMII_BASE + 0x05E8)
+#define HBG_REG_DBG_ST2_ADDR (HBG_REG_SGMII_BASE + 0x05EC)
+#define HBG_REG_BUS_RST_EN_ADDR (HBG_REG_SGMII_BASE + 0x0688)
#define HBG_REG_CF_IND_TXINT_MSK_ADDR (HBG_REG_SGMII_BASE + 0x0694)
#define HBG_REG_IND_INTR_MASK_B BIT(0)
#define HBG_REG_CF_IND_TXINT_STAT_ADDR (HBG_REG_SGMII_BASE + 0x0698)
@@ -139,5 +243,48 @@ struct hbg_rx_desc {
};
#define HBG_RX_DESC_W2_PKT_LEN_M GENMASK(31, 16)
+#define HBG_RX_DESC_W2_PORT_NUM_M GENMASK(15, 12)
+#define HBG_RX_DESC_W4_IP_TCP_UDP_M GENMASK(31, 30)
+#define HBG_RX_DESC_W4_IPSEC_B BIT(29)
+#define HBG_RX_DESC_W4_IP_VERSION_B BIT(28)
+#define HBG_RX_DESC_W4_L4_ERR_CODE_M GENMASK(26, 23)
+#define HBG_RX_DESC_W4_FRAG_B BIT(22)
+#define HBG_RX_DESC_W4_OPT_B BIT(21)
+#define HBG_RX_DESC_W4_IP_VERSION_ERR_B BIT(20)
+#define HBG_RX_DESC_W4_BRD_CST_B BIT(19)
+#define HBG_RX_DESC_W4_MUL_CST_B BIT(18)
+#define HBG_RX_DESC_W4_ARP_B BIT(17)
+#define HBG_RX_DESC_W4_RARP_B BIT(16)
+#define HBG_RX_DESC_W4_ICMP_B BIT(15)
+#define HBG_RX_DESC_W4_VLAN_FLAG_B BIT(14)
+#define HBG_RX_DESC_W4_DROP_B BIT(13)
+#define HBG_RX_DESC_W4_L3_ERR_CODE_M GENMASK(12, 9)
+#define HBG_RX_DESC_W4_L2_ERR_B BIT(8)
+#define HBG_RX_DESC_W4_IDX_MATCH_B BIT(7)
+
+enum hbg_l3_err_code {
+ HBG_L3_OK = 0,
+ HBG_L3_WRONG_HEAD,
+ HBG_L3_CSUM_ERR,
+ HBG_L3_LEN_ERR,
+ HBG_L3_ZERO_TTL,
+ HBG_L3_RSVD,
+};
+
+enum hbg_l4_err_code {
+ HBG_L4_OK = 0,
+ HBG_L4_WRONG_HEAD,
+ HBG_L4_LEN_ERR,
+ HBG_L4_CSUM_ERR,
+ HBG_L4_ZERO_PORT_NUM,
+ HBG_L4_RSVD,
+};
+
+enum hbg_pkt_type_code {
+ HBG_NO_IP_PKT = 0,
+ HBG_IP_PKT,
+ HBG_TCP_PKT,
+ HBG_UDP_PKT,
+};
#endif
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c
index f4f256a0dfea..8d814c8f19ea 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c
@@ -38,8 +38,14 @@ static int hbg_dma_map(struct hbg_buffer *buffer)
buffer->skb_dma = dma_map_single(&priv->pdev->dev,
buffer->skb->data, buffer->skb_len,
buffer_to_dma_dir(buffer));
- if (unlikely(dma_mapping_error(&priv->pdev->dev, buffer->skb_dma)))
+ if (unlikely(dma_mapping_error(&priv->pdev->dev, buffer->skb_dma))) {
+ if (buffer->dir == HBG_DIR_RX)
+ priv->stats.rx_dma_err_cnt++;
+ else
+ priv->stats.tx_dma_err_cnt++;
+
return -ENOMEM;
+ }
return 0;
}
@@ -195,6 +201,173 @@ static int hbg_napi_tx_recycle(struct napi_struct *napi, int budget)
return packet_done;
}
+static bool hbg_rx_check_l3l4_error(struct hbg_priv *priv,
+ struct hbg_rx_desc *desc,
+ struct sk_buff *skb)
+{
+ bool rx_checksum_offload = !!(priv->netdev->features & NETIF_F_RXCSUM);
+
+ skb->ip_summed = rx_checksum_offload ?
+ CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
+
+ if (likely(!FIELD_GET(HBG_RX_DESC_W4_L3_ERR_CODE_M, desc->word4) &&
+ !FIELD_GET(HBG_RX_DESC_W4_L4_ERR_CODE_M, desc->word4)))
+ return true;
+
+ switch (FIELD_GET(HBG_RX_DESC_W4_L3_ERR_CODE_M, desc->word4)) {
+ case HBG_L3_OK:
+ break;
+ case HBG_L3_WRONG_HEAD:
+ priv->stats.rx_desc_l3_wrong_head_cnt++;
+ return false;
+ case HBG_L3_CSUM_ERR:
+ skb->ip_summed = CHECKSUM_NONE;
+ priv->stats.rx_desc_l3_csum_err_cnt++;
+
+ /* Don't drop packets on checksum validation failure,
+ * as suggested by Jakub.
+ */
+ break;
+ case HBG_L3_LEN_ERR:
+ priv->stats.rx_desc_l3_len_err_cnt++;
+ return false;
+ case HBG_L3_ZERO_TTL:
+ priv->stats.rx_desc_l3_zero_ttl_cnt++;
+ return false;
+ default:
+ priv->stats.rx_desc_l3_other_cnt++;
+ return false;
+ }
+
+ switch (FIELD_GET(HBG_RX_DESC_W4_L4_ERR_CODE_M, desc->word4)) {
+ case HBG_L4_OK:
+ break;
+ case HBG_L4_WRONG_HEAD:
+ priv->stats.rx_desc_l4_wrong_head_cnt++;
+ return false;
+ case HBG_L4_LEN_ERR:
+ priv->stats.rx_desc_l4_len_err_cnt++;
+ return false;
+ case HBG_L4_CSUM_ERR:
+ skb->ip_summed = CHECKSUM_NONE;
+ priv->stats.rx_desc_l4_csum_err_cnt++;
+
+ /* Don't drop packets on checksum validation failure,
+ * as suggested by Jakub.
+ */
+ break;
+ case HBG_L4_ZERO_PORT_NUM:
+ priv->stats.rx_desc_l4_zero_port_num_cnt++;
+ return false;
+ default:
+ priv->stats.rx_desc_l4_other_cnt++;
+ return false;
+ }
+
+ return true;
+}
+
+static void hbg_update_rx_ip_protocol_stats(struct hbg_priv *priv,
+ struct hbg_rx_desc *desc)
+{
+ if (unlikely(!FIELD_GET(HBG_RX_DESC_W4_IP_TCP_UDP_M, desc->word4))) {
+ priv->stats.rx_desc_no_ip_pkt_cnt++;
+ return;
+ }
+
+ if (unlikely(FIELD_GET(HBG_RX_DESC_W4_IP_VERSION_ERR_B, desc->word4))) {
+ priv->stats.rx_desc_ip_ver_err_cnt++;
+ return;
+ }
+
+ /* 0:ipv4, 1:ipv6 */
+ if (FIELD_GET(HBG_RX_DESC_W4_IP_VERSION_B, desc->word4))
+ priv->stats.rx_desc_ipv6_pkt_cnt++;
+ else
+ priv->stats.rx_desc_ipv4_pkt_cnt++;
+
+ switch (FIELD_GET(HBG_RX_DESC_W4_IP_TCP_UDP_M, desc->word4)) {
+ case HBG_IP_PKT:
+ priv->stats.rx_desc_ip_pkt_cnt++;
+ if (FIELD_GET(HBG_RX_DESC_W4_OPT_B, desc->word4))
+ priv->stats.rx_desc_ip_opt_pkt_cnt++;
+ if (FIELD_GET(HBG_RX_DESC_W4_FRAG_B, desc->word4))
+ priv->stats.rx_desc_frag_cnt++;
+
+ if (FIELD_GET(HBG_RX_DESC_W4_ICMP_B, desc->word4))
+ priv->stats.rx_desc_icmp_pkt_cnt++;
+ else if (FIELD_GET(HBG_RX_DESC_W4_IPSEC_B, desc->word4))
+ priv->stats.rx_desc_ipsec_pkt_cnt++;
+ break;
+ case HBG_TCP_PKT:
+ priv->stats.rx_desc_tcp_pkt_cnt++;
+ break;
+ case HBG_UDP_PKT:
+ priv->stats.rx_desc_udp_pkt_cnt++;
+ break;
+ default:
+ priv->stats.rx_desc_no_ip_pkt_cnt++;
+ break;
+ }
+}
+
+static void hbg_update_rx_protocol_stats(struct hbg_priv *priv,
+ struct hbg_rx_desc *desc)
+{
+ if (unlikely(!FIELD_GET(HBG_RX_DESC_W4_IDX_MATCH_B, desc->word4))) {
+ priv->stats.rx_desc_key_not_match_cnt++;
+ return;
+ }
+
+ if (FIELD_GET(HBG_RX_DESC_W4_BRD_CST_B, desc->word4))
+ priv->stats.rx_desc_broadcast_pkt_cnt++;
+ else if (FIELD_GET(HBG_RX_DESC_W4_MUL_CST_B, desc->word4))
+ priv->stats.rx_desc_multicast_pkt_cnt++;
+
+ if (FIELD_GET(HBG_RX_DESC_W4_VLAN_FLAG_B, desc->word4))
+ priv->stats.rx_desc_vlan_pkt_cnt++;
+
+ if (FIELD_GET(HBG_RX_DESC_W4_ARP_B, desc->word4)) {
+ priv->stats.rx_desc_arp_pkt_cnt++;
+ return;
+ } else if (FIELD_GET(HBG_RX_DESC_W4_RARP_B, desc->word4)) {
+ priv->stats.rx_desc_rarp_pkt_cnt++;
+ return;
+ }
+
+ hbg_update_rx_ip_protocol_stats(priv, desc);
+}
+
+static bool hbg_rx_pkt_check(struct hbg_priv *priv, struct hbg_rx_desc *desc,
+ struct sk_buff *skb)
+{
+ if (unlikely(FIELD_GET(HBG_RX_DESC_W2_PKT_LEN_M, desc->word2) >
+ priv->dev_specs.max_frame_len)) {
+ priv->stats.rx_desc_pkt_len_err_cnt++;
+ return false;
+ }
+
+ if (unlikely(FIELD_GET(HBG_RX_DESC_W2_PORT_NUM_M, desc->word2) !=
+ priv->dev_specs.mac_id ||
+ FIELD_GET(HBG_RX_DESC_W4_DROP_B, desc->word4))) {
+ priv->stats.rx_desc_drop++;
+ return false;
+ }
+
+ if (unlikely(FIELD_GET(HBG_RX_DESC_W4_L2_ERR_B, desc->word4))) {
+ priv->stats.rx_desc_l2_err_cnt++;
+ return false;
+ }
+
+ if (unlikely(!hbg_rx_check_l3l4_error(priv, desc, skb))) {
+ priv->stats.rx_desc_l3l4_err_cnt++;
+ return false;
+ }
+
+ hbg_update_rx_protocol_stats(priv, desc);
+ return true;
+}
+
static int hbg_rx_fill_one_buffer(struct hbg_priv *priv)
{
struct hbg_ring *ring = &priv->rx_ring;
@@ -257,8 +430,12 @@ static int hbg_napi_rx_poll(struct napi_struct *napi, int budget)
rx_desc = (struct hbg_rx_desc *)buffer->skb->data;
pkt_len = FIELD_GET(HBG_RX_DESC_W2_PKT_LEN_M, rx_desc->word2);
- hbg_dma_unmap(buffer);
+ if (unlikely(!hbg_rx_pkt_check(priv, rx_desc, buffer->skb))) {
+ hbg_buffer_free(buffer);
+ goto next_buffer;
+ }
+ hbg_dma_unmap(buffer);
skb_reserve(buffer->skb, HBG_PACKET_HEAD_SIZE + NET_IP_ALIGN);
skb_put(buffer->skb, pkt_len);
buffer->skb->protocol = eth_type_trans(buffer->skb,
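In the HBG_L4_CSUM_ERR case above the frame is kept and only its hardware checksum verdict is discarded. As a generic illustration of that convention (a sketch, not code from this driver; hw_csum_ok is hypothetical), an RX path reports the hardware result to the stack like this:

#include <linux/skbuff.h>

static void rx_report_csum(struct sk_buff *skb, bool hw_csum_ok)
{
	if (hw_csum_ok)
		skb->ip_summed = CHECKSUM_UNNECESSARY;	/* stack trusts the hardware check */
	else
		skb->ip_summed = CHECKSUM_NONE;		/* stack re-verifies in software */
}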
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index a376d4bdf281..18376bcc718a 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -934,8 +934,6 @@ static int hip04_mac_probe(struct platform_device *pdev)
priv->chan = arg.args[1] * RX_DESC_NUM;
priv->group = arg.args[2];
- hrtimer_init(&priv->tx_coalesce_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-
/* BQL will try to keep the TX queue as short as possible, but it can't
* be faster than tx_coalesce_usecs, so we need a fast timeout here,
* but also long enough to gather up enough frames to ensure we don't
@@ -944,7 +942,7 @@ static int hip04_mac_probe(struct platform_device *pdev)
*/
priv->tx_coalesce_frames = TX_DESC_NUM * 3 / 4;
priv->tx_coalesce_usecs = 200;
- priv->tx_coalesce_timer.function = tx_done;
+ hrtimer_setup(&priv->tx_coalesce_timer, tx_done, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
priv->map = syscon_node_to_regmap(arg.np);
of_node_put(arg.np);
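The hip04 hunk folds the old two-step hrtimer initialisation into a single hrtimer_setup() call. A minimal before/after sketch using the timer and callback from the diff:

/* old style: initialise, then assign the callback separately */
hrtimer_init(&priv->tx_coalesce_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
priv->tx_coalesce_timer.function = tx_done;

/* new style: one call wires up timer, callback, clock and mode */
hrtimer_setup(&priv->tx_coalesce_timer, tx_done, CLOCK_MONOTONIC, HRTIMER_MODE_REL);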
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index 851490346261..6b6ced37e490 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -3019,115 +3019,6 @@ static struct platform_driver g_dsaf_driver = {
module_platform_driver(g_dsaf_driver);
-/**
- * hns_dsaf_roce_reset - reset dsaf and roce
- * @dsaf_fwnode: Pointer to framework node for the dasf
- * @dereset: false - request reset , true - drop reset
- * return 0 - success , negative -fail
- */
-int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset)
-{
- struct dsaf_device *dsaf_dev;
- struct platform_device *pdev;
- u32 mp;
- u32 sl;
- u32 credit;
- int i;
- static const u32 port_map[DSAF_ROCE_CREDIT_CHN][DSAF_ROCE_CHAN_MODE_NUM] = {
- {DSAF_ROCE_PORT_0, DSAF_ROCE_PORT_0, DSAF_ROCE_PORT_0},
- {DSAF_ROCE_PORT_1, DSAF_ROCE_PORT_0, DSAF_ROCE_PORT_0},
- {DSAF_ROCE_PORT_2, DSAF_ROCE_PORT_1, DSAF_ROCE_PORT_0},
- {DSAF_ROCE_PORT_3, DSAF_ROCE_PORT_1, DSAF_ROCE_PORT_0},
- {DSAF_ROCE_PORT_4, DSAF_ROCE_PORT_2, DSAF_ROCE_PORT_1},
- {DSAF_ROCE_PORT_4, DSAF_ROCE_PORT_2, DSAF_ROCE_PORT_1},
- {DSAF_ROCE_PORT_5, DSAF_ROCE_PORT_3, DSAF_ROCE_PORT_1},
- {DSAF_ROCE_PORT_5, DSAF_ROCE_PORT_3, DSAF_ROCE_PORT_1},
- };
- static const u32 sl_map[DSAF_ROCE_CREDIT_CHN][DSAF_ROCE_CHAN_MODE_NUM] = {
- {DSAF_ROCE_SL_0, DSAF_ROCE_SL_0, DSAF_ROCE_SL_0},
- {DSAF_ROCE_SL_0, DSAF_ROCE_SL_1, DSAF_ROCE_SL_1},
- {DSAF_ROCE_SL_0, DSAF_ROCE_SL_0, DSAF_ROCE_SL_2},
- {DSAF_ROCE_SL_0, DSAF_ROCE_SL_1, DSAF_ROCE_SL_3},
- {DSAF_ROCE_SL_0, DSAF_ROCE_SL_0, DSAF_ROCE_SL_0},
- {DSAF_ROCE_SL_1, DSAF_ROCE_SL_1, DSAF_ROCE_SL_1},
- {DSAF_ROCE_SL_0, DSAF_ROCE_SL_0, DSAF_ROCE_SL_2},
- {DSAF_ROCE_SL_1, DSAF_ROCE_SL_1, DSAF_ROCE_SL_3},
- };
-
- /* find the platform device corresponding to fwnode */
- if (is_of_node(dsaf_fwnode)) {
- pdev = of_find_device_by_node(to_of_node(dsaf_fwnode));
- } else if (is_acpi_device_node(dsaf_fwnode)) {
- pdev = hns_dsaf_find_platform_device(dsaf_fwnode);
- } else {
- pr_err("fwnode is neither OF or ACPI type\n");
- return -EINVAL;
- }
-
- /* check if we were a success in fetching pdev */
- if (!pdev) {
- pr_err("couldn't find platform device for node\n");
- return -ENODEV;
- }
-
- /* retrieve the dsaf_device from the driver data */
- dsaf_dev = dev_get_drvdata(&pdev->dev);
- if (!dsaf_dev) {
- dev_err(&pdev->dev, "dsaf_dev is NULL\n");
- put_device(&pdev->dev);
- return -ENODEV;
- }
-
- /* now, make sure we are running on compatible SoC */
- if (AE_IS_VER1(dsaf_dev->dsaf_ver)) {
- dev_err(dsaf_dev->dev, "%s v1 chip doesn't support RoCE!\n",
- dsaf_dev->ae_dev.name);
- put_device(&pdev->dev);
- return -ENODEV;
- }
-
- /* do reset or de-reset according to the flag */
- if (!dereset) {
- /* reset rocee-channels in dsaf and rocee */
- dsaf_dev->misc_op->hns_dsaf_srst_chns(dsaf_dev, DSAF_CHNS_MASK,
- false);
- dsaf_dev->misc_op->hns_dsaf_roce_srst(dsaf_dev, false);
- } else {
- /* configure dsaf tx roce correspond to port map and sl map */
- mp = dsaf_read_dev(dsaf_dev, DSAF_ROCE_PORT_MAP_REG);
- for (i = 0; i < DSAF_ROCE_CREDIT_CHN; i++)
- dsaf_set_field(mp, 7 << i * 3, i * 3,
- port_map[i][DSAF_ROCE_6PORT_MODE]);
- dsaf_set_field(mp, 3 << i * 3, i * 3, 0);
- dsaf_write_dev(dsaf_dev, DSAF_ROCE_PORT_MAP_REG, mp);
-
- sl = dsaf_read_dev(dsaf_dev, DSAF_ROCE_SL_MAP_REG);
- for (i = 0; i < DSAF_ROCE_CREDIT_CHN; i++)
- dsaf_set_field(sl, 3 << i * 2, i * 2,
- sl_map[i][DSAF_ROCE_6PORT_MODE]);
- dsaf_write_dev(dsaf_dev, DSAF_ROCE_SL_MAP_REG, sl);
-
- /* de-reset rocee-channels in dsaf and rocee */
- dsaf_dev->misc_op->hns_dsaf_srst_chns(dsaf_dev, DSAF_CHNS_MASK,
- true);
- msleep(SRST_TIME_INTERVAL);
- dsaf_dev->misc_op->hns_dsaf_roce_srst(dsaf_dev, true);
-
- /* enable dsaf channel rocee credit */
- credit = dsaf_read_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG);
- dsaf_set_bit(credit, DSAF_SBM_ROCEE_CFG_CRD_EN_B, 0);
- dsaf_write_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG, credit);
-
- dsaf_set_bit(credit, DSAF_SBM_ROCEE_CFG_CRD_EN_B, 1);
- dsaf_write_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG, credit);
- }
-
- put_device(&pdev->dev);
-
- return 0;
-}
-EXPORT_SYMBOL(hns_dsaf_roce_reset);
-
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HNS DSAF driver");
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
index 0eb03dff1a8b..653dfbb25d1b 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
@@ -42,29 +42,6 @@ struct hns_mac_cb;
#define HNS_MAX_WAIT_CNT 10000
-enum dsaf_roce_port_mode {
- DSAF_ROCE_6PORT_MODE,
- DSAF_ROCE_4PORT_MODE,
- DSAF_ROCE_2PORT_MODE,
- DSAF_ROCE_CHAN_MODE_NUM,
-};
-
-enum dsaf_roce_port_num {
- DSAF_ROCE_PORT_0,
- DSAF_ROCE_PORT_1,
- DSAF_ROCE_PORT_2,
- DSAF_ROCE_PORT_3,
- DSAF_ROCE_PORT_4,
- DSAF_ROCE_PORT_5,
-};
-
-enum dsaf_roce_qos_sl {
- DSAF_ROCE_SL_0,
- DSAF_ROCE_SL_1,
- DSAF_ROCE_SL_2,
- DSAF_ROCE_SL_3,
-};
-
#define DSAF_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HNS_DSAF_IS_DEBUG(dev) ((dev)->dsaf_mode == DSAF_MODE_DISABLE_SP)
@@ -307,9 +284,6 @@ struct dsaf_misc_op {
void (*ge_srst)(struct dsaf_device *dsaf_dev, u32 port, bool dereset);
void (*ppe_srst)(struct dsaf_device *dsaf_dev, u32 port, bool dereset);
void (*ppe_comm_srst)(struct dsaf_device *dsaf_dev, bool dereset);
- void (*hns_dsaf_srst_chns)(struct dsaf_device *dsaf_dev, u32 msk,
- bool dereset);
- void (*hns_dsaf_roce_srst)(struct dsaf_device *dsaf_dev, bool dereset);
phy_interface_t (*get_phy_if)(struct hns_mac_cb *mac_cb);
int (*get_sfp_prsnt)(struct hns_mac_cb *mac_cb, int *sfp_prsnt);
@@ -463,6 +437,4 @@ int hns_dsaf_clr_mac_mc_port(struct dsaf_device *dsaf_dev,
u8 mac_id, u8 port_num);
int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port);
-int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset);
-
#endif /* __HNS_DSAF_MAIN_H__ */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
index 5df19c604d09..91391a49fcea 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -326,69 +326,6 @@ static void hns_dsaf_xge_srst_by_port_acpi(struct dsaf_device *dsaf_dev,
HNS_XGE_RESET_FUNC, port, dereset);
}
-/**
- * hns_dsaf_srst_chns - reset dsaf channels
- * @dsaf_dev: dsaf device struct pointer
- * @msk: xbar channels mask value:
- * @dereset: false - request reset , true - drop reset
- *
- * bit0-5 for xge0-5
- * bit6-11 for ppe0-5
- * bit12-17 for roce0-5
- * bit18-19 for com/dfx
- */
-static void
-hns_dsaf_srst_chns(struct dsaf_device *dsaf_dev, u32 msk, bool dereset)
-{
- u32 reg_addr;
-
- if (!dereset)
- reg_addr = DSAF_SUB_SC_DSAF_RESET_REQ_REG;
- else
- reg_addr = DSAF_SUB_SC_DSAF_RESET_DREQ_REG;
-
- dsaf_write_sub(dsaf_dev, reg_addr, msk);
-}
-
-/**
- * hns_dsaf_srst_chns_acpi - reset dsaf channels
- * @dsaf_dev: dsaf device struct pointer
- * @msk: xbar channels mask value:
- * @dereset: false - request reset , true - drop reset
- *
- * bit0-5 for xge0-5
- * bit6-11 for ppe0-5
- * bit12-17 for roce0-5
- * bit18-19 for com/dfx
- */
-static void
-hns_dsaf_srst_chns_acpi(struct dsaf_device *dsaf_dev, u32 msk, bool dereset)
-{
- hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC,
- HNS_DSAF_CHN_RESET_FUNC,
- msk, dereset);
-}
-
-static void hns_dsaf_roce_srst(struct dsaf_device *dsaf_dev, bool dereset)
-{
- if (!dereset) {
- dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_ROCEE_RESET_REQ_REG, 1);
- } else {
- dsaf_write_sub(dsaf_dev,
- DSAF_SUB_SC_ROCEE_CLK_DIS_REG, 1);
- dsaf_write_sub(dsaf_dev,
- DSAF_SUB_SC_ROCEE_RESET_DREQ_REG, 1);
- msleep(20);
- dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_ROCEE_CLK_EN_REG, 1);
- }
-}
-
-static void hns_dsaf_roce_srst_acpi(struct dsaf_device *dsaf_dev, bool dereset)
-{
- hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC,
- HNS_ROCE_RESET_FUNC, 0, dereset);
-}
-
static void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port,
bool dereset)
{
@@ -729,8 +666,6 @@ struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev)
misc_op->ge_srst = hns_dsaf_ge_srst_by_port;
misc_op->ppe_srst = hns_ppe_srst_by_port;
misc_op->ppe_comm_srst = hns_ppe_com_srst;
- misc_op->hns_dsaf_srst_chns = hns_dsaf_srst_chns;
- misc_op->hns_dsaf_roce_srst = hns_dsaf_roce_srst;
misc_op->get_phy_if = hns_mac_get_phy_if;
misc_op->get_sfp_prsnt = hns_mac_get_sfp_prsnt;
@@ -746,8 +681,6 @@ struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev)
misc_op->ge_srst = hns_dsaf_ge_srst_by_port_acpi;
misc_op->ppe_srst = hns_ppe_srst_by_port_acpi;
misc_op->ppe_comm_srst = hns_ppe_com_srst;
- misc_op->hns_dsaf_srst_chns = hns_dsaf_srst_chns_acpi;
- misc_op->hns_dsaf_roce_srst = hns_dsaf_roce_srst_acpi;
misc_op->get_phy_if = hns_mac_get_phy_if_acpi;
misc_op->get_sfp_prsnt = hns_mac_get_sfp_prsnt_acpi;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
index 46af467aa596..635b3a95dd82 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
@@ -195,11 +195,6 @@ void hns_rcb_ring_enable_hw(struct hnae_queue *q, u32 val)
dsaf_write_dev(q, RCB_RING_PREFETCH_EN_REG, !!val);
}
-void hns_rcb_start(struct hnae_queue *q, u32 val)
-{
- hns_rcb_ring_enable_hw(q, val);
-}
-
/**
*hns_rcb_common_init_commit_hw - make rcb common init completed
*@rcb_common: rcb common device
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
index 0f4cc184ef39..68f81547dfb4 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
@@ -116,7 +116,6 @@ int hns_rcb_buf_size2type(u32 buf_size);
int hns_rcb_common_get_cfg(struct dsaf_device *dsaf_dev, int comm_index);
void hns_rcb_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index);
int hns_rcb_common_init_hw(struct rcb_common_cb *rcb_common);
-void hns_rcb_start(struct hnae_queue *q, u32 val);
int hns_rcb_get_cfg(struct rcb_common_cb *rcb_common);
void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode,
u16 *max_vfn, u16 *max_q_per_vf);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 42bb341fd80b..e905f10b894e 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -1402,7 +1402,7 @@ static void hns_nic_net_down(struct net_device *ndev)
if (test_and_set_bit(NIC_STATE_DOWN, &priv->state))
return;
- (void)del_timer_sync(&priv->service_timer);
+	(void)timer_delete_sync(&priv->service_timer);
netif_tx_stop_all_queues(ndev);
netif_carrier_off(ndev);
netif_tx_disable(ndev);
@@ -2075,7 +2075,7 @@ static void hns_nic_task_schedule(struct hns_nic_priv *priv)
static void hns_nic_service_timer(struct timer_list *t)
{
- struct hns_nic_priv *priv = from_timer(priv, t, service_timer);
+ struct hns_nic_priv *priv = timer_container_of(priv, t, service_timer);
(void)mod_timer(&priv->service_timer, jiffies + SERVICE_TIMER_HZ);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 6c458f037262..60a586a951a0 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -266,9 +266,9 @@ static int hns_nic_config_phy_loopback(struct phy_device *phy_dev, u8 en)
if (err)
goto out;
- err = phy_loopback(phy_dev, true);
+ err = phy_loopback(phy_dev, true, 0);
} else {
- err = phy_loopback(phy_dev, false);
+ err = phy_loopback(phy_dev, false, 0);
if (err)
goto out;
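phy_loopback() now takes a third argument besides the enable flag; every caller in this series passes 0, which is read here as "do not force a specific loopback speed" (that interpretation is an assumption, the calls themselves come from the diff):

int err;

err = phy_loopback(phy_dev, true, 0);	/* enter loopback, no forced speed (assumed meaning of 0) */
if (err)
	return err;

err = phy_loopback(phy_dev, false, 0);	/* leave loopback again */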
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
index 9a63fbc69408..b25fb400f476 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
@@ -40,6 +40,21 @@ EXPORT_SYMBOL(hnae3_unregister_ae_algo_prepare);
*/
static DEFINE_MUTEX(hnae3_common_lock);
+/* ensure the drivers are unloaded one by one */
+static DEFINE_MUTEX(hnae3_unload_lock);
+
+void hnae3_acquire_unload_lock(void)
+{
+ mutex_lock(&hnae3_unload_lock);
+}
+EXPORT_SYMBOL(hnae3_acquire_unload_lock);
+
+void hnae3_release_unload_lock(void)
+{
+ mutex_unlock(&hnae3_unload_lock);
+}
+EXPORT_SYMBOL(hnae3_release_unload_lock);
+
static bool hnae3_client_match(enum hnae3_client_type client_type)
{
if (client_type == HNAE3_CLIENT_KNIC ||
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 710a8f9f2248..4e44f28288f9 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -916,9 +916,6 @@ struct hnae3_handle {
u8 netdev_flags;
struct dentry *hnae3_dbgfs;
- /* protects concurrent contention between debugfs commands */
- struct mutex dbgfs_lock;
- char **dbgfs_buf;
/* Network interface message level enabled bits */
u32 msg_enable;
@@ -966,4 +963,6 @@ int hnae3_register_client(struct hnae3_client *client);
void hnae3_set_client_init_flag(struct hnae3_client *client,
struct hnae3_ae_dev *ae_dev,
unsigned int inited);
+void hnae3_acquire_unload_lock(void);
+void hnae3_release_unload_lock(void);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index 807eb3bbb11c..4e5d8bc39a1b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -3,6 +3,7 @@
#include <linux/debugfs.h>
#include <linux/device.h>
+#include <linux/string_choices.h>
#include "hnae3.h"
#include "hns3_debugfs.h"
@@ -60,7 +61,7 @@ static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = {
.name = "tm_qset",
.cmd = HNAE3_DBG_CMD_TM_QSET,
.dentry = HNS3_DBG_DENTRY_TM,
- .buf_len = HNS3_DBG_READ_LEN,
+ .buf_len = HNS3_DBG_READ_LEN_1MB,
.init = hns3_dbg_common_file_init,
},
{
@@ -661,12 +662,14 @@ static void hns3_dump_rx_queue_info(struct hns3_enet_ring *ring,
HNS3_RING_RX_RING_PKTNUM_RECORD_REG));
sprintf(result[j++], "%u", ring->rx_copybreak);
- sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_EN_REG) ? "on" : "off");
+ sprintf(result[j++], "%s",
+ str_on_off(readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_EN_REG)));
if (hnae3_ae_dev_tqp_txrx_indep_supported(ae_dev))
- sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_RX_EN_REG) ? "on" : "off");
+ sprintf(result[j++], "%s",
+ str_on_off(readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_RX_EN_REG)));
else
sprintf(result[j++], "%s", "NA");
@@ -764,12 +767,14 @@ static void hns3_dump_tx_queue_info(struct hns3_enet_ring *ring,
sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_PKTNUM_RECORD_REG));
- sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_EN_REG) ? "on" : "off");
+ sprintf(result[j++], "%s",
+ str_on_off(readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_EN_REG)));
if (hnae3_ae_dev_tqp_txrx_indep_supported(ae_dev))
- sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_TX_EN_REG) ? "on" : "off");
+ sprintf(result[j++], "%s",
+ str_on_off(readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_TX_EN_REG)));
else
sprintf(result[j++], "%s", "NA");
@@ -1030,7 +1035,6 @@ static void
hns3_dbg_dev_caps(struct hnae3_handle *h, char *buf, int len, int *pos)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
- const char * const str[] = {"no", "yes"};
unsigned long *caps = ae_dev->caps;
u32 i, state;
@@ -1039,7 +1043,7 @@ hns3_dbg_dev_caps(struct hnae3_handle *h, char *buf, int len, int *pos)
for (i = 0; i < ARRAY_SIZE(hns3_dbg_cap); i++) {
state = test_bit(hns3_dbg_cap[i].cap_bit, caps);
*pos += scnprintf(buf + *pos, len - *pos, "%s: %s\n",
- hns3_dbg_cap[i].name, str[state]);
+ hns3_dbg_cap[i].name, str_yes_no(state));
}
*pos += scnprintf(buf + *pos, len - *pos, "\n");
@@ -1260,69 +1264,55 @@ static int hns3_dbg_read_cmd(struct hns3_dbg_data *dbg_data,
static ssize_t hns3_dbg_read(struct file *filp, char __user *buffer,
size_t count, loff_t *ppos)
{
- struct hns3_dbg_data *dbg_data = filp->private_data;
+ char *buf = filp->private_data;
+
+ return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
+}
+
+static int hns3_dbg_open(struct inode *inode, struct file *filp)
+{
+ struct hns3_dbg_data *dbg_data = inode->i_private;
struct hnae3_handle *handle = dbg_data->handle;
struct hns3_nic_priv *priv = handle->priv;
- ssize_t size = 0;
- char **save_buf;
- char *read_buf;
u32 index;
+ char *buf;
int ret;
+ if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
+ test_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
+ return -EBUSY;
+
ret = hns3_dbg_get_cmd_index(dbg_data, &index);
if (ret)
return ret;
- mutex_lock(&handle->dbgfs_lock);
- save_buf = &handle->dbgfs_buf[index];
-
- if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
- test_bit(HNS3_NIC_STATE_RESETTING, &priv->state)) {
- ret = -EBUSY;
- goto out;
- }
-
- if (*save_buf) {
- read_buf = *save_buf;
- } else {
- read_buf = kvzalloc(hns3_dbg_cmd[index].buf_len, GFP_KERNEL);
- if (!read_buf) {
- ret = -ENOMEM;
- goto out;
- }
-
- /* save the buffer addr until the last read operation */
- *save_buf = read_buf;
-
- /* get data ready for the first time to read */
- ret = hns3_dbg_read_cmd(dbg_data, hns3_dbg_cmd[index].cmd,
- read_buf, hns3_dbg_cmd[index].buf_len);
- if (ret)
- goto out;
- }
+ buf = kvzalloc(hns3_dbg_cmd[index].buf_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
- size = simple_read_from_buffer(buffer, count, ppos, read_buf,
- strlen(read_buf));
- if (size > 0) {
- mutex_unlock(&handle->dbgfs_lock);
- return size;
+ ret = hns3_dbg_read_cmd(dbg_data, hns3_dbg_cmd[index].cmd,
+ buf, hns3_dbg_cmd[index].buf_len);
+ if (ret) {
+ kvfree(buf);
+ return ret;
}
-out:
- /* free the buffer for the last read operation */
- if (*save_buf) {
- kvfree(*save_buf);
- *save_buf = NULL;
- }
+ filp->private_data = buf;
+ return 0;
+}
- mutex_unlock(&handle->dbgfs_lock);
- return ret;
+static int hns3_dbg_release(struct inode *inode, struct file *filp)
+{
+ kvfree(filp->private_data);
+ filp->private_data = NULL;
+ return 0;
}
static const struct file_operations hns3_dbg_fops = {
.owner = THIS_MODULE,
- .open = simple_open,
+ .open = hns3_dbg_open,
.read = hns3_dbg_read,
+ .release = hns3_dbg_release,
};
static int hns3_dbg_bd_file_init(struct hnae3_handle *handle, u32 cmd)
@@ -1379,13 +1369,6 @@ int hns3_dbg_init(struct hnae3_handle *handle)
int ret;
u32 i;
- handle->dbgfs_buf = devm_kcalloc(&handle->pdev->dev,
- ARRAY_SIZE(hns3_dbg_cmd),
- sizeof(*handle->dbgfs_buf),
- GFP_KERNEL);
- if (!handle->dbgfs_buf)
- return -ENOMEM;
-
hns3_dbg_dentry[HNS3_DBG_DENTRY_COMMON].dentry =
debugfs_create_dir(name, hns3_dbgfs_root);
handle->hnae3_dbgfs = hns3_dbg_dentry[HNS3_DBG_DENTRY_COMMON].dentry;
@@ -1395,8 +1378,6 @@ int hns3_dbg_init(struct hnae3_handle *handle)
debugfs_create_dir(hns3_dbg_dentry[i].name,
handle->hnae3_dbgfs);
- mutex_init(&handle->dbgfs_lock);
-
for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd); i++) {
if ((hns3_dbg_cmd[i].cmd == HNAE3_DBG_CMD_TM_NODES &&
ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) ||
@@ -1425,24 +1406,13 @@ int hns3_dbg_init(struct hnae3_handle *handle)
out:
debugfs_remove_recursive(handle->hnae3_dbgfs);
handle->hnae3_dbgfs = NULL;
- mutex_destroy(&handle->dbgfs_lock);
return ret;
}
void hns3_dbg_uninit(struct hnae3_handle *handle)
{
- u32 i;
-
debugfs_remove_recursive(handle->hnae3_dbgfs);
handle->hnae3_dbgfs = NULL;
-
- for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd); i++)
- if (handle->dbgfs_buf[i]) {
- kvfree(handle->dbgfs_buf[i]);
- handle->dbgfs_buf[i] = NULL;
- }
-
- mutex_destroy(&handle->dbgfs_lock);
}
void hns3_dbg_register_debugfs(const char *debugfs_dir_name)
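The debugfs rework above drops the cached per-command buffer and its mutex in favour of a buffer that lives only between open() and release(). A hedged sketch of that read-only debugfs pattern (BUF_LEN and all names are hypothetical):

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/string.h>

#define BUF_LEN 4096	/* hypothetical dump size */

static int example_open(struct inode *inode, struct file *filp)
{
	char *buf = kvzalloc(BUF_LEN, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	scnprintf(buf, BUF_LEN, "example state dump\n");	/* fill with device state */
	filp->private_data = buf;
	return 0;
}

static ssize_t example_read(struct file *filp, char __user *ubuf,
			    size_t count, loff_t *ppos)
{
	char *buf = filp->private_data;

	return simple_read_from_buffer(ubuf, count, ppos, buf, strlen(buf));
}

static int example_release(struct inode *inode, struct file *filp)
{
	kvfree(filp->private_data);
	filp->private_data = NULL;
	return 0;
}

static const struct file_operations example_fops = {
	.owner   = THIS_MODULE,
	.open    = example_open,
	.read    = example_read,
	.release = example_release,
};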
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 43377a7b2426..b03b8758c777 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -473,20 +473,14 @@ static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
writel(mask_en, tqp_vector->mask_addr);
}
-static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
+static void hns3_irq_enable(struct hns3_enet_tqp_vector *tqp_vector)
{
napi_enable(&tqp_vector->napi);
enable_irq(tqp_vector->vector_irq);
-
- /* enable vector */
- hns3_mask_vector_irq(tqp_vector, 1);
}
-static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
+static void hns3_irq_disable(struct hns3_enet_tqp_vector *tqp_vector)
{
- /* disable vector */
- hns3_mask_vector_irq(tqp_vector, 0);
-
disable_irq(tqp_vector->vector_irq);
napi_disable(&tqp_vector->napi);
cancel_work_sync(&tqp_vector->rx_group.dim.work);
@@ -707,11 +701,42 @@ static int hns3_set_rx_cpu_rmap(struct net_device *netdev)
return 0;
}
+static void hns3_enable_irqs_and_tqps(struct net_device *netdev)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = priv->ae_handle;
+ u16 i;
+
+ for (i = 0; i < priv->vector_num; i++)
+ hns3_irq_enable(&priv->tqp_vector[i]);
+
+ for (i = 0; i < priv->vector_num; i++)
+ hns3_mask_vector_irq(&priv->tqp_vector[i], 1);
+
+ for (i = 0; i < h->kinfo.num_tqps; i++)
+ hns3_tqp_enable(h->kinfo.tqp[i]);
+}
+
+static void hns3_disable_irqs_and_tqps(struct net_device *netdev)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = priv->ae_handle;
+ u16 i;
+
+ for (i = 0; i < h->kinfo.num_tqps; i++)
+ hns3_tqp_disable(h->kinfo.tqp[i]);
+
+ for (i = 0; i < priv->vector_num; i++)
+ hns3_mask_vector_irq(&priv->tqp_vector[i], 0);
+
+ for (i = 0; i < priv->vector_num; i++)
+ hns3_irq_disable(&priv->tqp_vector[i]);
+}
+
static int hns3_nic_net_up(struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hnae3_handle *h = priv->ae_handle;
- int i, j;
int ret;
ret = hns3_nic_reset_all_ring(h);
@@ -720,23 +745,13 @@ static int hns3_nic_net_up(struct net_device *netdev)
clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
- /* enable the vectors */
- for (i = 0; i < priv->vector_num; i++)
- hns3_vector_enable(&priv->tqp_vector[i]);
-
- /* enable rcb */
- for (j = 0; j < h->kinfo.num_tqps; j++)
- hns3_tqp_enable(h->kinfo.tqp[j]);
+ hns3_enable_irqs_and_tqps(netdev);
/* start the ae_dev */
ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
if (ret) {
set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
- while (j--)
- hns3_tqp_disable(h->kinfo.tqp[j]);
-
- for (j = i - 1; j >= 0; j--)
- hns3_vector_disable(&priv->tqp_vector[j]);
+ hns3_disable_irqs_and_tqps(netdev);
}
return ret;
@@ -823,17 +838,9 @@ static void hns3_reset_tx_queue(struct hnae3_handle *h)
static void hns3_nic_net_down(struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
- struct hnae3_handle *h = hns3_get_handle(netdev);
const struct hnae3_ae_ops *ops;
- int i;
- /* disable vectors */
- for (i = 0; i < priv->vector_num; i++)
- hns3_vector_disable(&priv->tqp_vector[i]);
-
- /* disable rcb */
- for (i = 0; i < h->kinfo.num_tqps; i++)
- hns3_tqp_disable(h->kinfo.tqp[i]);
+ hns3_disable_irqs_and_tqps(netdev);
/* stop ae_dev */
ops = priv->ae_handle->ae_algo->ops;
@@ -2452,7 +2459,6 @@ static int hns3_nic_set_features(struct net_device *netdev,
return ret;
}
- netdev->features = features;
return 0;
}
@@ -5865,8 +5871,6 @@ int hns3_set_channels(struct net_device *netdev,
void hns3_external_lb_prepare(struct net_device *ndev, bool if_running)
{
struct hns3_nic_priv *priv = netdev_priv(ndev);
- struct hnae3_handle *h = priv->ae_handle;
- int i;
if (!if_running)
return;
@@ -5877,11 +5881,7 @@ void hns3_external_lb_prepare(struct net_device *ndev, bool if_running)
netif_carrier_off(ndev);
netif_tx_disable(ndev);
- for (i = 0; i < priv->vector_num; i++)
- hns3_vector_disable(&priv->tqp_vector[i]);
-
- for (i = 0; i < h->kinfo.num_tqps; i++)
- hns3_tqp_disable(h->kinfo.tqp[i]);
+ hns3_disable_irqs_and_tqps(ndev);
/* delay ring buffer clearing to hns3_reset_notify_uninit_enet
* during reset process, because driver may not be able
@@ -5897,7 +5897,6 @@ void hns3_external_lb_restore(struct net_device *ndev, bool if_running)
{
struct hns3_nic_priv *priv = netdev_priv(ndev);
struct hnae3_handle *h = priv->ae_handle;
- int i;
if (!if_running)
return;
@@ -5913,11 +5912,7 @@ void hns3_external_lb_restore(struct net_device *ndev, bool if_running)
clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
- for (i = 0; i < priv->vector_num; i++)
- hns3_vector_enable(&priv->tqp_vector[i]);
-
- for (i = 0; i < h->kinfo.num_tqps; i++)
- hns3_tqp_enable(h->kinfo.tqp[i]);
+ hns3_enable_irqs_and_tqps(ndev);
netif_tx_wake_all_queues(ndev);
@@ -6003,9 +5998,11 @@ module_init(hns3_init_module);
*/
static void __exit hns3_exit_module(void)
{
+ hnae3_acquire_unload_lock();
pci_unregister_driver(&hns3_driver);
hnae3_unregister_client(&client);
hns3_dbg_unregister_debugfs();
+ hnae3_release_unload_lock();
}
module_exit(hns3_exit_module);
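hns3_exit_module() above (and hclge_exit()/hclgevf_exit() later in this series) wraps unregistration in the shared hnae3 unload lock, which is nothing more than a mutex exported by the core module. A sketch of the pattern each dependent module follows on exit:

static void __exit example_exit(void)
{
	hnae3_acquire_unload_lock();	/* serialise with the other hnae3-based modules */
	/* unregister the PCI driver / hnae3 client / debugfs here */
	hnae3_release_unload_lock();
}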
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index b771a2daba43..6715222aeb66 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -3,6 +3,7 @@
#include <linux/etherdevice.h>
#include <linux/string.h>
+#include <linux/string_choices.h>
#include <linux/phy.h>
#include <linux/sfp.h>
@@ -1198,7 +1199,7 @@ static int hns3_set_tx_push(struct net_device *netdev, u32 tx_push)
return 0;
netdev_dbg(netdev, "Changing tx push from %s to %s\n",
- old_state ? "on" : "off", tx_push ? "on" : "off");
+ str_on_off(old_state), str_on_off(tx_push));
if (tx_push)
set_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state);
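The "on"/"off" and "yes"/"no" ternaries removed throughout this series are replaced by the helpers from <linux/string_choices.h>. A short sketch of the equivalents (the variables are hypothetical):

#include <linux/string_choices.h>

/* str_on_off(v)         -> "on"  / "off"
 * str_yes_no(v)         -> "yes" / "no"
 * str_enable_disable(v) -> "enable" / "disable"
 */
netdev_dbg(netdev, "tx push %s, ptp %s, dcb %s\n",
	   str_on_off(tx_push), str_yes_no(ptp_en), str_enable_disable(dcb_active));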
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index debf143e9940..c46490693594 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -3,6 +3,7 @@
#include <linux/device.h>
#include <linux/sched/clock.h>
+#include <linux/string_choices.h>
#include "hclge_debugfs.h"
#include "hclge_err.h"
@@ -11,7 +12,6 @@
#include "hclge_tm.h"
#include "hnae3.h"
-static const char * const state_str[] = { "off", "on" };
static const char * const hclge_mac_state_str[] = {
"TO_ADD", "TO_DEL", "ACTIVE"
};
@@ -2573,7 +2573,7 @@ static int hclge_dbg_dump_loopback(struct hclge_dev *hdev, char *buf, int len)
loopback_en = hnae3_get_bit(le32_to_cpu(req_app->txrx_pad_fcs_loop_en),
HCLGE_MAC_APP_LP_B);
pos += scnprintf(buf + pos, len - pos, "app loopback: %s\n",
- state_str[loopback_en]);
+ str_on_off(loopback_en));
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, true);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -2586,22 +2586,22 @@ static int hclge_dbg_dump_loopback(struct hclge_dev *hdev, char *buf, int len)
loopback_en = req_common->enable & HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
pos += scnprintf(buf + pos, len - pos, "serdes serial loopback: %s\n",
- state_str[loopback_en]);
+ str_on_off(loopback_en));
loopback_en = req_common->enable &
HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B ? 1 : 0;
pos += scnprintf(buf + pos, len - pos, "serdes parallel loopback: %s\n",
- state_str[loopback_en]);
+ str_on_off(loopback_en));
if (phydev) {
loopback_en = phydev->loopback_enabled;
pos += scnprintf(buf + pos, len - pos, "phy loopback: %s\n",
- state_str[loopback_en]);
+ str_on_off(loopback_en));
} else if (hnae3_dev_phy_imp_supported(hdev)) {
loopback_en = req_common->enable &
HCLGE_CMD_GE_PHY_INNER_LOOP_B;
pos += scnprintf(buf + pos, len - pos, "phy loopback: %s\n",
- state_str[loopback_en]);
+ str_on_off(loopback_en));
}
return 0;
@@ -2894,9 +2894,9 @@ static int hclge_dbg_dump_vlan_filter_config(struct hclge_dev *hdev, char *buf,
egress = vlan_fe & HCLGE_FILTER_FE_NIC_EGRESS_B ? 1 : 0;
*pos += scnprintf(buf, len, "I_PORT_VLAN_FILTER: %s\n",
- state_str[ingress]);
+ str_on_off(ingress));
*pos += scnprintf(buf + *pos, len - *pos, "E_PORT_VLAN_FILTER: %s\n",
- state_str[egress]);
+ str_on_off(egress));
hclge_dbg_fill_content(content, sizeof(content), vlan_filter_items,
NULL, ARRAY_SIZE(vlan_filter_items));
@@ -2915,11 +2915,11 @@ static int hclge_dbg_dump_vlan_filter_config(struct hclge_dev *hdev, char *buf,
return ret;
j = 0;
result[j++] = hclge_dbg_get_func_id_str(str_id, i);
- result[j++] = state_str[ingress];
- result[j++] = state_str[egress];
- result[j++] =
- test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B,
- hdev->ae_dev->caps) ? state_str[bypass] : "NA";
+ result[j++] = str_on_off(ingress);
+ result[j++] = str_on_off(egress);
+ result[j++] = test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B,
+ hdev->ae_dev->caps) ?
+ str_on_off(bypass) : "NA";
hclge_dbg_fill_content(content, sizeof(content),
vlan_filter_items, result,
ARRAY_SIZE(vlan_filter_items));
@@ -2958,19 +2958,19 @@ static int hclge_dbg_dump_vlan_offload_config(struct hclge_dev *hdev, char *buf,
j = 0;
result[j++] = hclge_dbg_get_func_id_str(str_id, i);
result[j++] = str_pvid;
- result[j++] = state_str[vlan_cfg.accept_tag1];
- result[j++] = state_str[vlan_cfg.accept_tag2];
- result[j++] = state_str[vlan_cfg.accept_untag1];
- result[j++] = state_str[vlan_cfg.accept_untag2];
- result[j++] = state_str[vlan_cfg.insert_tag1];
- result[j++] = state_str[vlan_cfg.insert_tag2];
- result[j++] = state_str[vlan_cfg.shift_tag];
- result[j++] = state_str[vlan_cfg.strip_tag1];
- result[j++] = state_str[vlan_cfg.strip_tag2];
- result[j++] = state_str[vlan_cfg.drop_tag1];
- result[j++] = state_str[vlan_cfg.drop_tag2];
- result[j++] = state_str[vlan_cfg.pri_only1];
- result[j++] = state_str[vlan_cfg.pri_only2];
+ result[j++] = str_on_off(vlan_cfg.accept_tag1);
+ result[j++] = str_on_off(vlan_cfg.accept_tag2);
+ result[j++] = str_on_off(vlan_cfg.accept_untag1);
+ result[j++] = str_on_off(vlan_cfg.accept_untag2);
+ result[j++] = str_on_off(vlan_cfg.insert_tag1);
+ result[j++] = str_on_off(vlan_cfg.insert_tag2);
+ result[j++] = str_on_off(vlan_cfg.shift_tag);
+ result[j++] = str_on_off(vlan_cfg.strip_tag1);
+ result[j++] = str_on_off(vlan_cfg.strip_tag2);
+ result[j++] = str_on_off(vlan_cfg.drop_tag1);
+ result[j++] = str_on_off(vlan_cfg.drop_tag2);
+ result[j++] = str_on_off(vlan_cfg.pri_only1);
+ result[j++] = str_on_off(vlan_cfg.pri_only2);
hclge_dbg_fill_content(content, sizeof(content),
vlan_offload_items, result,
@@ -3007,14 +3007,13 @@ static int hclge_dbg_dump_ptp_info(struct hclge_dev *hdev, char *buf, int len)
pos += scnprintf(buf + pos, len - pos, "phc %s's debug info:\n",
ptp->info.name);
pos += scnprintf(buf + pos, len - pos, "ptp enable: %s\n",
- test_bit(HCLGE_PTP_FLAG_EN, &ptp->flags) ?
- "yes" : "no");
+ str_yes_no(test_bit(HCLGE_PTP_FLAG_EN, &ptp->flags)));
pos += scnprintf(buf + pos, len - pos, "ptp tx enable: %s\n",
- test_bit(HCLGE_PTP_FLAG_TX_EN, &ptp->flags) ?
- "yes" : "no");
+ str_yes_no(test_bit(HCLGE_PTP_FLAG_TX_EN,
+ &ptp->flags)));
pos += scnprintf(buf + pos, len - pos, "ptp rx enable: %s\n",
- test_bit(HCLGE_PTP_FLAG_RX_EN, &ptp->flags) ?
- "yes" : "no");
+ str_yes_no(test_bit(HCLGE_PTP_FLAG_RX_EN,
+ &ptp->flags)));
last_rx = jiffies_to_msecs(ptp->last_rx);
pos += scnprintf(buf + pos, len - pos, "last rx time: %lu.%lu\n",
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 05942fa78b11..a7de67699a01 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -6,6 +6,7 @@
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
@@ -3574,6 +3575,17 @@ static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
return ret;
}
+static void hclge_set_reset_pending(struct hclge_dev *hdev,
+ enum hnae3_reset_type reset_type)
+{
+ /* When an incorrect reset type is executed, the get_reset_level
+ * function generates the HNAE3_NONE_RESET flag. As a result, this
+	 * type does not need to be marked as pending.
+ */
+ if (reset_type != HNAE3_NONE_RESET)
+ set_bit(reset_type, &hdev->reset_pending);
+}
+
static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
{
u32 cmdq_src_reg, msix_src_reg, hw_err_src_reg;
@@ -3594,7 +3606,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
*/
if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
- set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
+ hclge_set_reset_pending(hdev, HNAE3_IMP_RESET);
set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
hdev->rst_stats.imp_rst_cnt++;
@@ -3604,7 +3616,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
dev_info(&hdev->pdev->dev, "global reset interrupt\n");
set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
- set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
+ hclge_set_reset_pending(hdev, HNAE3_GLOBAL_RESET);
*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
hdev->rst_stats.global_rst_cnt++;
return HCLGE_VECTOR0_EVENT_RST;
@@ -3759,7 +3771,7 @@ static int hclge_misc_irq_init(struct hclge_dev *hdev)
snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
HCLGE_NAME, pci_name(hdev->pdev));
ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
- 0, hdev->misc_vector.name, hdev);
+ IRQF_NO_AUTOEN, hdev->misc_vector.name, hdev);
if (ret) {
hclge_free_vector(hdev, 0);
dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
@@ -4052,7 +4064,7 @@ static void hclge_do_reset(struct hclge_dev *hdev)
case HNAE3_FUNC_RESET:
dev_info(&pdev->dev, "PF reset requested\n");
/* schedule again to check later */
- set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
+ hclge_set_reset_pending(hdev, HNAE3_FUNC_RESET);
hclge_reset_task_schedule(hdev);
break;
default:
@@ -4086,6 +4098,8 @@ static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
clear_bit(HNAE3_FLR_RESET, addr);
}
+ clear_bit(HNAE3_NONE_RESET, addr);
+
if (hdev->reset_type != HNAE3_NONE_RESET &&
rst_level < hdev->reset_type)
return HNAE3_NONE_RESET;
@@ -4227,7 +4241,7 @@ static bool hclge_reset_err_handle(struct hclge_dev *hdev)
return false;
} else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
hdev->rst_stats.reset_fail_cnt++;
- set_bit(hdev->reset_type, &hdev->reset_pending);
+ hclge_set_reset_pending(hdev, hdev->reset_type);
dev_info(&hdev->pdev->dev,
"re-schedule reset task(%u)\n",
hdev->rst_stats.reset_fail_cnt);
@@ -4470,14 +4484,26 @@ static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
enum hnae3_reset_type rst_type)
{
+#define HCLGE_SUPPORT_RESET_TYPE \
+ (BIT(HNAE3_FLR_RESET) | BIT(HNAE3_FUNC_RESET) | \
+ BIT(HNAE3_GLOBAL_RESET) | BIT(HNAE3_IMP_RESET))
+
struct hclge_dev *hdev = ae_dev->priv;
+ if (!(BIT(rst_type) & HCLGE_SUPPORT_RESET_TYPE)) {
+		/* To prevent a reset from being triggered by hclge_reset_event() */
+ set_bit(HNAE3_NONE_RESET, &hdev->default_reset_request);
+ dev_warn(&hdev->pdev->dev, "unsupported reset type %d\n",
+ rst_type);
+ return;
+ }
+
set_bit(rst_type, &hdev->default_reset_request);
}
static void hclge_reset_timer(struct timer_list *t)
{
- struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
+ struct hclge_dev *hdev = timer_container_of(hdev, t, reset_timer);
/* if default_reset_request has no value, it means that this reset
* request has already be handled, so just return here
@@ -7849,7 +7875,7 @@ static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
if (ret)
return ret;
- return phy_loopback(phydev, true);
+ return phy_loopback(phydev, true, 0);
}
static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
@@ -7857,7 +7883,7 @@ static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
{
int ret;
- ret = phy_loopback(phydev, false);
+ ret = phy_loopback(phydev, false, 0);
if (ret)
return ret;
@@ -7974,7 +8000,7 @@ static int hclge_set_loopback(struct hnae3_handle *handle,
ret = hclge_tqp_enable(handle, en);
if (ret)
dev_err(&hdev->pdev->dev, "failed to %s tqp in loopback, ret = %d\n",
- en ? "enable" : "disable", ret);
+ str_enable_disable(en), ret);
return ret;
}
@@ -11174,9 +11200,9 @@ static void hclge_info_show(struct hclge_dev *hdev)
dev_info(dev, "This is %s PF\n",
hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
dev_info(dev, "DCB %s\n",
- handle->kinfo.tc_info.dcb_ets_active ? "enable" : "disable");
+ str_enable_disable(handle->kinfo.tc_info.dcb_ets_active));
dev_info(dev, "MQPRIO %s\n",
- handle->kinfo.tc_info.mqprio_active ? "enable" : "disable");
+ str_enable_disable(handle->kinfo.tc_info.mqprio_active));
dev_info(dev, "Default tx spare buffer size: %u\n",
hdev->tx_spare_buf_size);
@@ -11466,7 +11492,7 @@ static void hclge_state_uninit(struct hclge_dev *hdev)
set_bit(HCLGE_STATE_REMOVING, &hdev->state);
if (hdev->reset_timer.function)
- del_timer_sync(&hdev->reset_timer);
+ timer_delete_sync(&hdev->reset_timer);
if (hdev->service_task.work.func)
cancel_delayed_work_sync(&hdev->service_task);
}
@@ -11881,9 +11907,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_init_rxd_adv_layout(hdev);
- /* Enable MISC vector(vector0) */
- hclge_enable_vector(&hdev->misc_vector, true);
-
ret = hclge_init_wol(hdev);
if (ret)
dev_warn(&pdev->dev,
@@ -11896,6 +11919,10 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_state_init(hdev);
hdev->last_reset_time = jiffies;
+ /* Enable MISC vector(vector0) */
+ enable_irq(hdev->misc_vector.vector_irq);
+ hclge_enable_vector(&hdev->misc_vector, true);
+
dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
HCLGE_DRIVER_NAME);
@@ -11949,7 +11976,7 @@ static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
if (ret) {
dev_err(&hdev->pdev->dev,
"Set vf %d mac spoof check %s failed, ret=%d\n",
- vf, enable ? "on" : "off", ret);
+ vf, str_on_off(enable), ret);
return ret;
}
@@ -11957,7 +11984,7 @@ static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
if (ret)
dev_err(&hdev->pdev->dev,
"Set vf %d vlan spoof check %s failed, ret=%d\n",
- vf, enable ? "on" : "off", ret);
+ vf, str_on_off(enable), ret);
return ret;
}
@@ -12301,7 +12328,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
/* Disable MISC vector(vector0) */
hclge_enable_vector(&hdev->misc_vector, false);
- synchronize_irq(hdev->misc_vector.vector_irq);
+ disable_irq(hdev->misc_vector.vector_irq);
/* Disable all hw interrupts */
hclge_config_mac_tnl_int(hdev, false);
@@ -12892,9 +12919,11 @@ static int __init hclge_init(void)
static void __exit hclge_exit(void)
{
+ hnae3_acquire_unload_lock();
hnae3_unregister_ae_algo_prepare(&ae_algo);
hnae3_unregister_ae_algo(&ae_algo);
destroy_workqueue(hclge_wq);
+ hnae3_release_unload_lock();
}
module_init(hclge_init);
module_exit(hclge_exit);
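The misc-vector hunks above request the interrupt with IRQF_NO_AUTOEN and pair enable_irq() with disable_irq() instead of relying on synchronize_irq() at teardown. A hedged sketch of the pattern (handler and names hypothetical):

#include <linux/interrupt.h>

/* probe: request the line but leave it masked until setup completes */
ret = request_irq(irq, misc_irq_handle, IRQF_NO_AUTOEN, "drv-misc", hdev);
if (ret)
	return ret;

/* end of initialisation: the first enable pairs with IRQF_NO_AUTOEN */
enable_irq(irq);

/* teardown: disable_irq() waits for in-flight handlers and keeps the line masked */
disable_irq(irq);
free_irq(irq, hdev);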
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
index 80079657afeb..9a456ebf9b7c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
@@ -258,7 +258,7 @@ void hclge_mac_start_phy(struct hclge_dev *hdev)
if (!phydev)
return;
- phy_loopback(phydev, false);
+ phy_loopback(phydev, false, 0);
phy_start(phydev);
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
index 5505caea88e9..ec581d4b696f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
@@ -2,6 +2,7 @@
// Copyright (c) 2021 Hisilicon Limited.
#include <linux/skbuff.h>
+#include <linux/string_choices.h>
#include "hclge_main.h"
#include "hnae3.h"
@@ -58,6 +59,9 @@ bool hclge_ptp_set_tx_info(struct hnae3_handle *handle, struct sk_buff *skb)
struct hclge_dev *hdev = vport->back;
struct hclge_ptp *ptp = hdev->ptp;
+ if (!ptp)
+ return false;
+
if (!test_bit(HCLGE_PTP_FLAG_TX_EN, &ptp->flags) ||
test_and_set_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state)) {
ptp->tx_skipped++;
@@ -223,7 +227,7 @@ static int hclge_ptp_int_en(struct hclge_dev *hdev, bool en)
if (ret)
dev_err(&hdev->pdev->dev,
"failed to %s ptp interrupt, ret = %d\n",
- en ? "enable" : "disable", ret);
+ str_enable_disable(en), ret);
return ret;
}
@@ -436,6 +440,13 @@ static int hclge_ptp_create_clock(struct hclge_dev *hdev)
ptp->info.settime64 = hclge_ptp_settime;
ptp->info.n_alarm = 0;
+
+ spin_lock_init(&ptp->lock);
+ ptp->io_base = hdev->hw.hw.io_base + HCLGE_PTP_REG_OFFSET;
+ ptp->ts_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
+ ptp->ts_cfg.tx_type = HWTSTAMP_TX_OFF;
+ hdev->ptp = ptp;
+
ptp->clock = ptp_clock_register(&ptp->info, &hdev->pdev->dev);
if (IS_ERR(ptp->clock)) {
dev_err(&hdev->pdev->dev,
@@ -447,12 +458,6 @@ static int hclge_ptp_create_clock(struct hclge_dev *hdev)
return -ENODEV;
}
- spin_lock_init(&ptp->lock);
- ptp->io_base = hdev->hw.hw.io_base + HCLGE_PTP_REG_OFFSET;
- ptp->ts_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
- ptp->ts_cfg.tx_type = HWTSTAMP_TX_OFF;
- hdev->ptp = ptp;
-
return 0;
}
@@ -480,7 +485,7 @@ int hclge_ptp_init(struct hclge_dev *hdev)
ret = hclge_ptp_get_cycle(hdev);
if (ret)
- return ret;
+ goto out;
}
ret = hclge_ptp_int_en(hdev, true);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c
index 43c1c18fa81f..8c057192aae6 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c
@@ -510,9 +510,9 @@ out:
static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
struct hnae3_knic_private_info *kinfo)
{
-#define HCLGE_RING_REG_OFFSET 0x200
#define HCLGE_RING_INT_REG_OFFSET 0x4
+ struct hnae3_queue *tqp;
int i, j, reg_num;
int data_num_sum;
u32 *reg = data;
@@ -533,10 +533,11 @@ static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
reg_num = ARRAY_SIZE(ring_reg_addr_list);
for (j = 0; j < kinfo->num_tqps; j++) {
reg += hclge_reg_get_tlv(HCLGE_REG_TAG_RING, reg_num, reg);
+ tqp = kinfo->tqp[j];
for (i = 0; i < reg_num; i++)
- *reg++ = hclge_read_dev(&hdev->hw,
- ring_reg_addr_list[i] +
- HCLGE_RING_REG_OFFSET * j);
+ *reg++ = readl_relaxed(tqp->io_base -
+ HCLGE_TQP_REG_OFFSET +
+ ring_reg_addr_list[i]);
}
data_num_sum += (reg_num + HCLGE_REG_TLV_SPACE) * kinfo->num_tqps;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 2f6ffb88e700..c4f35e8e2177 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -1292,9 +1292,8 @@ static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev)
rtnl_unlock();
}
-static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
+static int hclgevf_en_hw_strip_rxvtag_cmd(struct hclgevf_dev *hdev, bool enable)
{
- struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
struct hclge_vf_to_pf_msg send_msg;
hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
@@ -1303,6 +1302,19 @@ static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}
+static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ int ret;
+
+ ret = hclgevf_en_hw_strip_rxvtag_cmd(hdev, enable);
+ if (ret)
+ return ret;
+
+ hdev->rxvtag_strip_en = enable;
+ return 0;
+}
+
static int hclgevf_reset_tqp(struct hnae3_handle *handle)
{
#define HCLGEVF_RESET_ALL_QUEUE_DONE 1U
@@ -1393,6 +1405,17 @@ static int hclgevf_notify_roce_client(struct hclgevf_dev *hdev,
return ret;
}
+static void hclgevf_set_reset_pending(struct hclgevf_dev *hdev,
+ enum hnae3_reset_type reset_type)
+{
+ /* When an incorrect reset type is executed, the get_reset_level
+ * function generates the HNAE3_NONE_RESET flag. As a result, this
+	 * type does not need to be marked as pending.
+ */
+ if (reset_type != HNAE3_NONE_RESET)
+ set_bit(reset_type, &hdev->reset_pending);
+}
+
static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_WAIT_US 20000
@@ -1542,7 +1565,7 @@ static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev)
hdev->rst_stats.rst_fail_cnt);
if (hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT)
- set_bit(hdev->reset_type, &hdev->reset_pending);
+ hclgevf_set_reset_pending(hdev, hdev->reset_type);
if (hclgevf_is_reset_pending(hdev)) {
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
@@ -1662,6 +1685,8 @@ static enum hnae3_reset_type hclgevf_get_reset_level(unsigned long *addr)
clear_bit(HNAE3_FLR_RESET, addr);
}
+ clear_bit(HNAE3_NONE_RESET, addr);
+
return rst_level;
}
@@ -1671,14 +1696,15 @@ static void hclgevf_reset_event(struct pci_dev *pdev,
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
struct hclgevf_dev *hdev = ae_dev->priv;
- dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");
-
if (hdev->default_reset_request)
hdev->reset_level =
hclgevf_get_reset_level(&hdev->default_reset_request);
else
hdev->reset_level = HNAE3_VF_FUNC_RESET;
+ dev_info(&hdev->pdev->dev, "received reset request from VF enet, reset level is %d\n",
+ hdev->reset_level);
+
/* reset of this VF requested */
set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
hclgevf_reset_task_schedule(hdev);
@@ -1689,8 +1715,20 @@ static void hclgevf_reset_event(struct pci_dev *pdev,
static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
enum hnae3_reset_type rst_type)
{
+#define HCLGEVF_SUPPORT_RESET_TYPE \
+ (BIT(HNAE3_VF_RESET) | BIT(HNAE3_VF_FUNC_RESET) | \
+ BIT(HNAE3_VF_PF_FUNC_RESET) | BIT(HNAE3_VF_FULL_RESET) | \
+ BIT(HNAE3_FLR_RESET) | BIT(HNAE3_VF_EXP_RESET))
+
struct hclgevf_dev *hdev = ae_dev->priv;
+ if (!(BIT(rst_type) & HCLGEVF_SUPPORT_RESET_TYPE)) {
+		/* To prevent a reset from being triggered by hclgevf_reset_event() */
+ set_bit(HNAE3_NONE_RESET, &hdev->default_reset_request);
+ dev_info(&hdev->pdev->dev, "unsupported reset type %d\n",
+ rst_type);
+ return;
+ }
set_bit(rst_type, &hdev->default_reset_request);
}
@@ -1847,14 +1885,14 @@ static void hclgevf_reset_service_task(struct hclgevf_dev *hdev)
*/
if (hdev->reset_attempts > HCLGEVF_MAX_RESET_ATTEMPTS_CNT) {
/* prepare for full reset of stack + pcie interface */
- set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending);
+ hclgevf_set_reset_pending(hdev, HNAE3_VF_FULL_RESET);
/* "defer" schedule the reset task again */
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
} else {
hdev->reset_attempts++;
- set_bit(hdev->reset_level, &hdev->reset_pending);
+ hclgevf_set_reset_pending(hdev, hdev->reset_level);
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
}
hclgevf_reset_task_schedule(hdev);
@@ -1977,7 +2015,7 @@ static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
dev_info(&hdev->pdev->dev,
"receive reset interrupt 0x%x!\n", rst_ing_reg);
- set_bit(HNAE3_VF_RESET, &hdev->reset_pending);
+ hclgevf_set_reset_pending(hdev, HNAE3_VF_RESET);
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
*clearval = ~(1U << HCLGEVF_VECTOR0_RST_INT_B);
@@ -2019,7 +2057,7 @@ static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
static void hclgevf_reset_timer(struct timer_list *t)
{
- struct hclgevf_dev *hdev = from_timer(hdev, t, reset_timer);
+ struct hclgevf_dev *hdev = timer_container_of(hdev, t, reset_timer);
hclgevf_clear_event_cause(hdev, HCLGEVF_VECTOR0_EVENT_RST);
hclgevf_reset_task_schedule(hdev);
@@ -2178,12 +2216,13 @@ static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
tc_valid, tc_size);
}
-static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
+static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev,
+ bool rxvtag_strip_en)
{
struct hnae3_handle *nic = &hdev->nic;
int ret;
- ret = hclgevf_en_hw_strip_rxvtag(nic, true);
+ ret = hclgevf_en_hw_strip_rxvtag(nic, rxvtag_strip_en);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to enable rx vlan offload, ret = %d\n", ret);
@@ -2287,6 +2326,8 @@ static void hclgevf_state_init(struct hclgevf_dev *hdev)
clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
INIT_DELAYED_WORK(&hdev->service_task, hclgevf_service_task);
+ /* timer needs to be initialized before misc irq */
+ timer_setup(&hdev->reset_timer, hclgevf_reset_timer, 0);
mutex_init(&hdev->mbx_resp.mbx_mutex);
sema_init(&hdev->reset_sem, 1);
@@ -2851,7 +2892,7 @@ static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
if (ret)
return ret;
- ret = hclgevf_init_vlan_config(hdev);
+ ret = hclgevf_init_vlan_config(hdev, hdev->rxvtag_strip_en);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed(%d) to initialize VLAN config\n", ret);
@@ -2966,7 +3007,7 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
goto err_config;
}
- ret = hclgevf_init_vlan_config(hdev);
+ ret = hclgevf_init_vlan_config(hdev, true);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed(%d) to initialize VLAN config\n", ret);
@@ -2986,7 +3027,6 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
HCLGEVF_DRIVER_NAME);
hclgevf_task_schedule(hdev, round_jiffies_relative(HZ));
- timer_setup(&hdev->reset_timer, hclgevf_reset_timer, 0);
return 0;
@@ -3383,8 +3423,10 @@ static int __init hclgevf_init(void)
static void __exit hclgevf_exit(void)
{
+ hnae3_acquire_unload_lock();
hnae3_unregister_ae_algo(&ae_algovf);
destroy_workqueue(hclgevf_wq);
+ hnae3_release_unload_lock();
}
module_init(hclgevf_init);
module_exit(hclgevf_exit);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index cccef3228461..0208425ab594 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -253,6 +253,7 @@ struct hclgevf_dev {
int *vector_irq;
bool gro_en;
+ bool rxvtag_strip_en;
unsigned long vlan_del_fail_bmap[BITS_TO_LONGS(VLAN_N_VID)];
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c
index 6db415d8b917..7d9d9dbc7560 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c
@@ -123,10 +123,10 @@ int hclgevf_get_regs_len(struct hnae3_handle *handle)
void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
void *data)
{
-#define HCLGEVF_RING_REG_OFFSET 0x200
#define HCLGEVF_RING_INT_REG_OFFSET 0x4
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hnae3_queue *tqp;
int i, j, reg_um;
u32 *reg = data;
@@ -147,10 +147,11 @@ void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
reg_um = ARRAY_SIZE(ring_reg_addr_list);
for (j = 0; j < hdev->num_tqps; j++) {
reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_RING, reg_um, reg);
+ tqp = &hdev->htqp[j].q;
for (i = 0; i < reg_um; i++)
- *reg++ = hclgevf_read_dev(&hdev->hw,
- ring_reg_addr_list[i] +
- HCLGEVF_RING_REG_OFFSET * j);
+ *reg++ = readl_relaxed(tqp->io_base -
+ HCLGEVF_TQP_REG_OFFSET +
+ ring_reg_addr_list[i]);
}
reg_um = ARRAY_SIZE(tqp_intr_reg_addr_list);
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
index a1aa6c1f966e..6812be8dc64f 100644
--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
@@ -640,7 +640,7 @@ static struct platform_driver hns_mdio_driver = {
.driver = {
.name = MDIO_DRV_NAME,
.of_match_table = hns_mdio_match,
- .acpi_match_table = ACPI_PTR(hns_mdio_acpi_match),
+ .acpi_match_table = hns_mdio_acpi_match,
},
};
diff --git a/drivers/net/ethernet/huawei/Kconfig b/drivers/net/ethernet/huawei/Kconfig
index c05fce15eb51..7d0feb1da158 100644
--- a/drivers/net/ethernet/huawei/Kconfig
+++ b/drivers/net/ethernet/huawei/Kconfig
@@ -16,5 +16,6 @@ config NET_VENDOR_HUAWEI
if NET_VENDOR_HUAWEI
source "drivers/net/ethernet/huawei/hinic/Kconfig"
+source "drivers/net/ethernet/huawei/hinic3/Kconfig"
endif # NET_VENDOR_HUAWEI
diff --git a/drivers/net/ethernet/huawei/Makefile b/drivers/net/ethernet/huawei/Makefile
index 2549ad5afe6d..59865b882879 100644
--- a/drivers/net/ethernet/huawei/Makefile
+++ b/drivers/net/ethernet/huawei/Makefile
@@ -4,3 +4,4 @@
#
obj-$(CONFIG_HINIC) += hinic/
+obj-$(CONFIG_HINIC3) += hinic3/
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
index 890f213da8d1..ae1f523d6841 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
@@ -172,6 +172,7 @@ err_init_txq:
hinic_sq_dbgfs_uninit(nic_dev);
devm_kfree(&netdev->dev, nic_dev->txqs);
+ nic_dev->txqs = NULL;
return err;
}
@@ -268,6 +269,7 @@ err_init_rxq:
hinic_rq_dbgfs_uninit(nic_dev);
devm_kfree(&netdev->dev, nic_dev->rxqs);
+ nic_dev->rxqs = NULL;
return err;
}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.c b/drivers/net/ethernet/huawei/hinic/hinic_port.c
index f81a43d2cdfc..486fb0e20bef 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_port.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_port.c
@@ -469,7 +469,7 @@ int hinic_set_vlan_fliter(struct hinic_dev *nic_dev, u32 en)
err = HINIC_MGMT_CMD_UNSUPPORTED;
} else if (err || !out_size || vlan_filter.status) {
dev_err(&pdev->dev,
- "Failed to set vlan fliter, err: %d, status: 0x%x, out size: 0x%x\n",
+ "Failed to set vlan filter, err: %d, status: 0x%x, out size: 0x%x\n",
err, vlan_filter.status, out_size);
err = -EINVAL;
}
diff --git a/drivers/net/ethernet/huawei/hinic3/Kconfig b/drivers/net/ethernet/huawei/hinic3/Kconfig
new file mode 100644
index 000000000000..ce4331d1387b
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/Kconfig
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Huawei driver configuration
+#
+
+config HINIC3
+ tristate "Huawei 3rd generation network adapters (HINIC3) support"
+ # Fields of HW and management structures are little endian and are
+ # currently not converted
+ depends on !CPU_BIG_ENDIAN
+ depends on X86 || ARM64 || COMPILE_TEST
+ depends on PCI_MSI && 64BIT
+ select AUXILIARY_BUS
+ select PAGE_POOL
+ help
+ This driver supports HiNIC 3rd gen Network Adapter (HINIC3).
+	  The driver is supported on X86_64 and ARM64 little-endian platforms.
+
+ To compile this driver as a module, choose M here.
+ The module will be called hinic3.
diff --git a/drivers/net/ethernet/huawei/hinic3/Makefile b/drivers/net/ethernet/huawei/hinic3/Makefile
new file mode 100644
index 000000000000..509dfbfb0e96
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/Makefile
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+obj-$(CONFIG_HINIC3) += hinic3.o
+
+hinic3-objs := hinic3_common.o \
+ hinic3_hw_cfg.o \
+ hinic3_hw_comm.o \
+ hinic3_hwdev.o \
+ hinic3_hwif.o \
+ hinic3_irq.o \
+ hinic3_lld.o \
+ hinic3_main.o \
+ hinic3_mbox.o \
+ hinic3_netdev_ops.o \
+ hinic3_nic_cfg.o \
+ hinic3_nic_io.o \
+ hinic3_queue_common.o \
+ hinic3_rx.o \
+ hinic3_tx.o \
+ hinic3_wq.o
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_common.c b/drivers/net/ethernet/huawei/hinic3/hinic3_common.c
new file mode 100644
index 000000000000..0aa42068728c
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_common.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+
+#include "hinic3_common.h"
+
+int hinic3_dma_zalloc_coherent_align(struct device *dev, u32 size, u32 align,
+ gfp_t flag,
+ struct hinic3_dma_addr_align *mem_align)
+{
+ dma_addr_t paddr, align_paddr;
+ void *vaddr, *align_vaddr;
+ u32 real_size = size;
+
+ vaddr = dma_alloc_coherent(dev, real_size, &paddr, flag);
+ if (!vaddr)
+ return -ENOMEM;
+
+ align_paddr = ALIGN(paddr, align);
+ if (align_paddr == paddr) {
+ align_vaddr = vaddr;
+ goto out;
+ }
+
+ dma_free_coherent(dev, real_size, vaddr, paddr);
+
+ /* re-allocate with extra headroom so the buffer can be aligned inside it */
+ real_size = size + align;
+ vaddr = dma_alloc_coherent(dev, real_size, &paddr, flag);
+ if (!vaddr)
+ return -ENOMEM;
+
+ align_paddr = ALIGN(paddr, align);
+ align_vaddr = vaddr + (align_paddr - paddr);
+
+out:
+ mem_align->real_size = real_size;
+ mem_align->ori_vaddr = vaddr;
+ mem_align->ori_paddr = paddr;
+ mem_align->align_vaddr = align_vaddr;
+ mem_align->align_paddr = align_paddr;
+
+ return 0;
+}
+
+void hinic3_dma_free_coherent_align(struct device *dev,
+ struct hinic3_dma_addr_align *mem_align)
+{
+ dma_free_coherent(dev, mem_align->real_size,
+ mem_align->ori_vaddr, mem_align->ori_paddr);
+}
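
A minimal usage sketch for the two helpers above (illustrative only; the
hwdev->dev pointer and the HINIC3_MIN_PAGE_SIZE size/alignment choices are
assumptions, not taken from this patch):

	struct hinic3_dma_addr_align qpage;
	int err;

	err = hinic3_dma_zalloc_coherent_align(hwdev->dev, HINIC3_MIN_PAGE_SIZE,
					       HINIC3_MIN_PAGE_SIZE, GFP_KERNEL,
					       &qpage);
	if (!err) {
		/* hardware is given qpage.align_paddr, the driver writes
		 * through qpage.align_vaddr; the ori_* fields keep the
		 * original allocation for the matching free below.
		 */
		hinic3_dma_free_coherent_align(hwdev->dev, &qpage);
	}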
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_common.h b/drivers/net/ethernet/huawei/hinic3/hinic3_common.h
new file mode 100644
index 000000000000..bb795dace04c
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_common.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_COMMON_H_
+#define _HINIC3_COMMON_H_
+
+#include <linux/device.h>
+
+#define HINIC3_MIN_PAGE_SIZE 0x1000
+
+struct hinic3_dma_addr_align {
+ u32 real_size;
+
+ void *ori_vaddr;
+ dma_addr_t ori_paddr;
+
+ void *align_vaddr;
+ dma_addr_t align_paddr;
+};
+
+int hinic3_dma_zalloc_coherent_align(struct device *dev, u32 size, u32 align,
+ gfp_t flag,
+ struct hinic3_dma_addr_align *mem_align);
+void hinic3_dma_free_coherent_align(struct device *dev,
+ struct hinic3_dma_addr_align *mem_align);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.c b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.c
new file mode 100644
index 000000000000..87d9450c30ca
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/device.h>
+
+#include "hinic3_hw_cfg.h"
+#include "hinic3_hwdev.h"
+#include "hinic3_hwif.h"
+#include "hinic3_mbox.h"
+
+bool hinic3_support_nic(struct hinic3_hwdev *hwdev)
+{
+ return hwdev->cfg_mgmt->cap.supp_svcs_bitmap &
+ BIT(HINIC3_SERVICE_T_NIC);
+}
+
+u16 hinic3_func_max_qnum(struct hinic3_hwdev *hwdev)
+{
+ return hwdev->cfg_mgmt->cap.nic_svc_cap.max_sqs;
+}
+
+u8 hinic3_physical_port_id(struct hinic3_hwdev *hwdev)
+{
+ return hwdev->cfg_mgmt->cap.port_id;
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.h b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.h
new file mode 100644
index 000000000000..e017b1ae9f05
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_HW_CFG_H_
+#define _HINIC3_HW_CFG_H_
+
+#include <linux/mutex.h>
+#include <linux/pci.h>
+
+struct hinic3_hwdev;
+
+struct hinic3_irq {
+ u32 irq_id;
+ u16 msix_entry_idx;
+ bool allocated;
+};
+
+struct hinic3_irq_info {
+ struct hinic3_irq *irq;
+ u16 num_irq;
+ /* device max irq number */
+ u16 num_irq_hw;
+ /* protect irq alloc and free */
+ struct mutex irq_mutex;
+};
+
+struct hinic3_nic_service_cap {
+ u16 max_sqs;
+};
+
+/* Device capabilities */
+struct hinic3_dev_cap {
+ /* Bitmasks of services supported by device */
+ u16 supp_svcs_bitmap;
+ /* Physical port */
+ u8 port_id;
+ struct hinic3_nic_service_cap nic_svc_cap;
+};
+
+struct hinic3_cfg_mgmt_info {
+ struct hinic3_irq_info irq_info;
+ struct hinic3_dev_cap cap;
+};
+
+int hinic3_alloc_irqs(struct hinic3_hwdev *hwdev, u16 num,
+ struct msix_entry *alloc_arr, u16 *act_num);
+void hinic3_free_irq(struct hinic3_hwdev *hwdev, u32 irq_id);
+
+bool hinic3_support_nic(struct hinic3_hwdev *hwdev);
+u16 hinic3_func_max_qnum(struct hinic3_hwdev *hwdev);
+u8 hinic3_physical_port_id(struct hinic3_hwdev *hwdev);
+
+#endif
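
A hedged sketch of how the IRQ helpers declared above might be used; the
meaning of act_num (actual number allocated) and the use of the msix_entry
vector field are assumptions, since the implementation is not part of this
file:

	struct msix_entry irqs[2];
	u16 act_num;
	int err, i;

	err = hinic3_alloc_irqs(hwdev, ARRAY_SIZE(irqs), irqs, &act_num);
	if (!err) {
		/* act_num may be smaller than the requested count */
		for (i = 0; i < act_num; i++)
			hinic3_free_irq(hwdev, irqs[i].vector);
	}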
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.c b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.c
new file mode 100644
index 000000000000..434696ce7dc2
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.c
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/delay.h>
+
+#include "hinic3_hw_comm.h"
+#include "hinic3_hwdev.h"
+#include "hinic3_hwif.h"
+#include "hinic3_mbox.h"
+
+int hinic3_func_reset(struct hinic3_hwdev *hwdev, u16 func_id, u64 reset_flag)
+{
+ struct comm_cmd_func_reset func_reset = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ func_reset.func_id = func_id;
+ func_reset.reset_flag = reset_flag;
+
+ mgmt_msg_params_init_default(&msg_params, &func_reset,
+ sizeof(func_reset));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM,
+ COMM_CMD_FUNC_RESET, &msg_params);
+ if (err || func_reset.head.status) {
+ dev_err(hwdev->dev, "Failed to reset func resources, reset_flag 0x%llx, err: %d, status: 0x%x\n",
+ reset_flag, err, func_reset.head.status);
+ return -EIO;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.h b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.h
new file mode 100644
index 000000000000..c33a1c77da9c
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_HW_COMM_H_
+#define _HINIC3_HW_COMM_H_
+
+#include "hinic3_hw_intf.h"
+
+struct hinic3_hwdev;
+
+int hinic3_func_reset(struct hinic3_hwdev *hwdev, u16 func_id, u64 reset_flag);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_intf.h b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_intf.h
new file mode 100644
index 000000000000..22c84093efa2
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_intf.h
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_HW_INTF_H_
+#define _HINIC3_HW_INTF_H_
+
+#include <linux/bits.h>
+#include <linux/types.h>
+
+#define MGMT_MSG_CMD_OP_SET 1
+#define MGMT_MSG_CMD_OP_GET 0
+
+#define MGMT_STATUS_PF_SET_VF_ALREADY 0x4
+#define MGMT_STATUS_EXIST 0x6
+#define MGMT_STATUS_CMD_UNSUPPORTED 0xFF
+
+#define MGMT_MSG_POLLING_TIMEOUT 0
+
+struct mgmt_msg_head {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+};
+
+struct mgmt_msg_params {
+ const void *buf_in;
+ u32 in_size;
+ void *buf_out;
+ u32 expected_out_size;
+ u32 timeout_ms;
+};
+
+/* CMDQ MODULE_TYPE */
+enum mgmt_mod_type {
+ /* HW communication module */
+ MGMT_MOD_COMM = 0,
+ /* L2NIC module */
+ MGMT_MOD_L2NIC = 1,
+ /* Configuration module */
+ MGMT_MOD_CFGM = 7,
+ MGMT_MOD_HILINK = 14,
+};
+
+static inline void mgmt_msg_params_init_default(struct mgmt_msg_params *msg_params,
+ void *inout_buf, u32 buf_size)
+{
+ msg_params->buf_in = inout_buf;
+ msg_params->buf_out = inout_buf;
+ msg_params->in_size = buf_size;
+ msg_params->expected_out_size = buf_size;
+ msg_params->timeout_ms = 0;
+}
+
+/* COMM Commands between Driver to fw */
+enum comm_cmd {
+ /* Commands for clearing FLR and resources */
+ COMM_CMD_FUNC_RESET = 0,
+ COMM_CMD_FEATURE_NEGO = 1,
+ COMM_CMD_FLUSH_DOORBELL = 2,
+ COMM_CMD_START_FLUSH = 3,
+ COMM_CMD_GET_GLOBAL_ATTR = 5,
+ COMM_CMD_SET_FUNC_SVC_USED_STATE = 7,
+
+ /* Driver Configuration Commands */
+ COMM_CMD_SET_CMDQ_CTXT = 20,
+ COMM_CMD_SET_VAT = 21,
+ COMM_CMD_CFG_PAGESIZE = 22,
+ COMM_CMD_CFG_MSIX_CTRL_REG = 23,
+ COMM_CMD_SET_CEQ_CTRL_REG = 24,
+ COMM_CMD_SET_DMA_ATTR = 25,
+};
+
+enum comm_func_reset_bits {
+ COMM_FUNC_RESET_BIT_FLUSH = BIT(0),
+ COMM_FUNC_RESET_BIT_MQM = BIT(1),
+ COMM_FUNC_RESET_BIT_SMF = BIT(2),
+ COMM_FUNC_RESET_BIT_PF_BW_CFG = BIT(3),
+
+ COMM_FUNC_RESET_BIT_COMM = BIT(10),
+ /* clears mbox and aeq; the COMM_FUNC_RESET_BIT_COMM bit must also be set */
+ COMM_FUNC_RESET_BIT_COMM_MGMT_CH = BIT(11),
+ /* clears cmdq and ceq; the COMM_FUNC_RESET_BIT_COMM bit must also be set */
+ COMM_FUNC_RESET_BIT_COMM_CMD_CH = BIT(12),
+ COMM_FUNC_RESET_BIT_NIC = BIT(13),
+};
+
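As an illustrative example (not part of this patch), a cleanup of both
communication channels has to combine the bits as the comments above
require:

	u64 reset_flag = COMM_FUNC_RESET_BIT_COMM |
			 COMM_FUNC_RESET_BIT_COMM_MGMT_CH |
			 COMM_FUNC_RESET_BIT_COMM_CMD_CH;
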
+struct comm_cmd_func_reset {
+ struct mgmt_msg_head head;
+ u16 func_id;
+ u16 rsvd1[3];
+ u64 reset_flag;
+};
+
+#define COMM_MAX_FEATURE_QWORD 4
+struct comm_cmd_feature_nego {
+ struct mgmt_msg_head head;
+ u16 func_id;
+ u8 opcode;
+ u8 rsvd;
+ u64 s_feature[COMM_MAX_FEATURE_QWORD];
+};
+
+/* Services supported by HW. HW uses these values when delivering events.
+ * HW supports multiple services that are not yet supported by the driver
+ * (e.g. RoCE).
+ */
+enum hinic3_service_type {
+ HINIC3_SERVICE_T_NIC = 0,
+ /* MAX is only used by SW for array sizes. */
+ HINIC3_SERVICE_T_MAX = 1,
+};
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.c b/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.c
new file mode 100644
index 000000000000..6e8788a64925
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include "hinic3_hw_comm.h"
+#include "hinic3_hwdev.h"
+#include "hinic3_hwif.h"
+#include "hinic3_mbox.h"
+#include "hinic3_mgmt.h"
+
+int hinic3_init_hwdev(struct pci_dev *pdev)
+{
+ /* Completed by later submission due to LoC limit. */
+ return -EFAULT;
+}
+
+void hinic3_free_hwdev(struct hinic3_hwdev *hwdev)
+{
+ /* Completed by later submission due to LoC limit. */
+}
+
+void hinic3_set_api_stop(struct hinic3_hwdev *hwdev)
+{
+ /* Completed by later submission due to LoC limit. */
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.h b/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.h
new file mode 100644
index 000000000000..62e2745e9316
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_HWDEV_H_
+#define _HINIC3_HWDEV_H_
+
+#include <linux/auxiliary_bus.h>
+#include <linux/pci.h>
+
+#include "hinic3_hw_intf.h"
+
+struct hinic3_cmdqs;
+struct hinic3_hwif;
+
+enum hinic3_event_service_type {
+ HINIC3_EVENT_SRV_COMM = 0,
+ HINIC3_EVENT_SRV_NIC = 1
+};
+
+#define HINIC3_SRV_EVENT_TYPE(svc, type) (((svc) << 16) | (type))
+
+/* driver-specific data of pci_dev */
+struct hinic3_pcidev {
+ struct pci_dev *pdev;
+ struct hinic3_hwdev *hwdev;
+ /* Auxiliary devices */
+ struct hinic3_adev *hadev[HINIC3_SERVICE_T_MAX];
+
+ void __iomem *cfg_reg_base;
+ void __iomem *intr_reg_base;
+ void __iomem *db_base;
+ u64 db_dwqe_len;
+ u64 db_base_phy;
+
+ /* lock for attach/detach uld */
+ struct mutex pdev_mutex;
+ unsigned long state;
+};
+
+struct hinic3_hwdev {
+ struct hinic3_pcidev *adapter;
+ struct pci_dev *pdev;
+ struct device *dev;
+ int dev_id;
+ struct hinic3_hwif *hwif;
+ struct hinic3_cfg_mgmt_info *cfg_mgmt;
+ struct hinic3_aeqs *aeqs;
+ struct hinic3_ceqs *ceqs;
+ struct hinic3_mbox *mbox;
+ struct hinic3_cmdqs *cmdqs;
+ struct workqueue_struct *workq;
+ /* protect channel init and uninit */
+ spinlock_t channel_lock;
+ u64 features[COMM_MAX_FEATURE_QWORD];
+ u32 wq_page_size;
+ u8 max_cmdq;
+ ulong func_state;
+};
+
+struct hinic3_event_info {
+ /* enum hinic3_event_service_type */
+ u16 service;
+ u16 type;
+ u8 event_data[104];
+};
+
+struct hinic3_adev {
+ struct auxiliary_device adev;
+ struct hinic3_hwdev *hwdev;
+ enum hinic3_service_type svc_type;
+
+ void (*event)(struct auxiliary_device *adev,
+ struct hinic3_event_info *event);
+};
+
+int hinic3_init_hwdev(struct pci_dev *pdev);
+void hinic3_free_hwdev(struct hinic3_hwdev *hwdev);
+
+void hinic3_set_api_stop(struct hinic3_hwdev *hwdev);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.c b/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.c
new file mode 100644
index 000000000000..0865453bf0e7
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.c
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/io.h>
+
+#include "hinic3_common.h"
+#include "hinic3_hwdev.h"
+#include "hinic3_hwif.h"
+
+void hinic3_set_msix_state(struct hinic3_hwdev *hwdev, u16 msix_idx,
+ enum hinic3_msix_state flag)
+{
+ /* Completed by later submission due to LoC limit. */
+}
+
+u16 hinic3_global_func_id(struct hinic3_hwdev *hwdev)
+{
+ return hwdev->hwif->attr.func_global_idx;
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.h b/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.h
new file mode 100644
index 000000000000..513c9680e6b6
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_HWIF_H_
+#define _HINIC3_HWIF_H_
+
+#include <linux/build_bug.h>
+#include <linux/spinlock_types.h>
+
+struct hinic3_hwdev;
+
+enum hinic3_func_type {
+ HINIC3_FUNC_TYPE_VF = 1,
+};
+
+struct hinic3_db_area {
+ unsigned long *db_bitmap_array;
+ u32 db_max_areas;
+ /* protect doorbell area alloc and free */
+ spinlock_t idx_lock;
+};
+
+struct hinic3_func_attr {
+ enum hinic3_func_type func_type;
+ u16 func_global_idx;
+ u16 global_vf_id_of_pf;
+ u16 num_irqs;
+ u16 num_sq;
+ u8 port_to_port_idx;
+ u8 pci_intf_idx;
+ u8 ppf_idx;
+ u8 num_aeqs;
+ u8 num_ceqs;
+ u8 msix_flex_en;
+};
+
+static_assert(sizeof(struct hinic3_func_attr) == 20);
+
+struct hinic3_hwif {
+ u8 __iomem *cfg_regs_base;
+ u64 db_base_phy;
+ u64 db_dwqe_len;
+ u8 __iomem *db_base;
+ struct hinic3_db_area db_area;
+ struct hinic3_func_attr attr;
+};
+
+enum hinic3_msix_state {
+ HINIC3_MSIX_ENABLE,
+ HINIC3_MSIX_DISABLE,
+};
+
+void hinic3_set_msix_state(struct hinic3_hwdev *hwdev, u16 msix_idx,
+ enum hinic3_msix_state flag);
+
+u16 hinic3_global_func_id(struct hinic3_hwdev *hwdev);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_irq.c b/drivers/net/ethernet/huawei/hinic3/hinic3_irq.c
new file mode 100644
index 000000000000..8b92eed25edf
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_irq.c
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/netdevice.h>
+
+#include "hinic3_hw_comm.h"
+#include "hinic3_hwdev.h"
+#include "hinic3_hwif.h"
+#include "hinic3_nic_dev.h"
+#include "hinic3_rx.h"
+#include "hinic3_tx.h"
+
+static int hinic3_poll(struct napi_struct *napi, int budget)
+{
+ struct hinic3_irq_cfg *irq_cfg =
+ container_of(napi, struct hinic3_irq_cfg, napi);
+ struct hinic3_nic_dev *nic_dev;
+ bool busy = false;
+ int work_done;
+
+ nic_dev = netdev_priv(irq_cfg->netdev);
+
+ busy |= hinic3_tx_poll(irq_cfg->txq, budget);
+
+ if (unlikely(!budget))
+ return 0;
+
+ work_done = hinic3_rx_poll(irq_cfg->rxq, budget);
+ busy |= work_done >= budget;
+
+ if (busy)
+ return budget;
+
+ if (likely(napi_complete_done(napi, work_done)))
+ hinic3_set_msix_state(nic_dev->hwdev, irq_cfg->msix_entry_idx,
+ HINIC3_MSIX_ENABLE);
+
+ return work_done;
+}
+
+void qp_add_napi(struct hinic3_irq_cfg *irq_cfg)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(irq_cfg->netdev);
+
+ netif_queue_set_napi(irq_cfg->netdev, irq_cfg->irq_id,
+ NETDEV_QUEUE_TYPE_RX, &irq_cfg->napi);
+ netif_queue_set_napi(irq_cfg->netdev, irq_cfg->irq_id,
+ NETDEV_QUEUE_TYPE_TX, &irq_cfg->napi);
+ netif_napi_add(nic_dev->netdev, &irq_cfg->napi, hinic3_poll);
+ napi_enable(&irq_cfg->napi);
+}
+
+void qp_del_napi(struct hinic3_irq_cfg *irq_cfg)
+{
+ napi_disable(&irq_cfg->napi);
+ netif_queue_set_napi(irq_cfg->netdev, irq_cfg->irq_id,
+ NETDEV_QUEUE_TYPE_RX, NULL);
+ netif_queue_set_napi(irq_cfg->netdev, irq_cfg->irq_id,
+ NETDEV_QUEUE_TYPE_TX, NULL);
+ netif_stop_subqueue(irq_cfg->netdev, irq_cfg->irq_id);
+ netif_napi_del(&irq_cfg->napi);
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_lld.c b/drivers/net/ethernet/huawei/hinic3/hinic3_lld.c
new file mode 100644
index 000000000000..4827326e6a59
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_lld.c
@@ -0,0 +1,414 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/delay.h>
+#include <linux/iopoll.h>
+
+#include "hinic3_hw_cfg.h"
+#include "hinic3_hwdev.h"
+#include "hinic3_lld.h"
+#include "hinic3_mgmt.h"
+
+#define HINIC3_VF_PCI_CFG_REG_BAR 0
+#define HINIC3_PCI_INTR_REG_BAR 2
+#define HINIC3_PCI_DB_BAR 4
+
+#define HINIC3_EVENT_POLL_SLEEP_US 1000
+#define HINIC3_EVENT_POLL_TIMEOUT_US 10000000
+
+static struct hinic3_adev_device {
+ const char *name;
+} hinic3_adev_devices[HINIC3_SERVICE_T_MAX] = {
+ [HINIC3_SERVICE_T_NIC] = {
+ .name = "nic",
+ },
+};
+
+static bool hinic3_adev_svc_supported(struct hinic3_hwdev *hwdev,
+ enum hinic3_service_type svc_type)
+{
+ switch (svc_type) {
+ case HINIC3_SERVICE_T_NIC:
+ return hinic3_support_nic(hwdev);
+ default:
+ break;
+ }
+
+ return false;
+}
+
+static void hinic3_comm_adev_release(struct device *dev)
+{
+ struct hinic3_adev *hadev = container_of(dev, struct hinic3_adev,
+ adev.dev);
+
+ kfree(hadev);
+}
+
+static struct hinic3_adev *hinic3_add_one_adev(struct hinic3_hwdev *hwdev,
+ enum hinic3_service_type svc_type)
+{
+ struct hinic3_adev *hadev;
+ const char *svc_name;
+ int ret;
+
+ hadev = kzalloc(sizeof(*hadev), GFP_KERNEL);
+ if (!hadev)
+ return NULL;
+
+ svc_name = hinic3_adev_devices[svc_type].name;
+ hadev->adev.name = svc_name;
+ hadev->adev.id = hwdev->dev_id;
+ hadev->adev.dev.parent = hwdev->dev;
+ hadev->adev.dev.release = hinic3_comm_adev_release;
+ hadev->svc_type = svc_type;
+ hadev->hwdev = hwdev;
+
+ ret = auxiliary_device_init(&hadev->adev);
+ if (ret) {
+ dev_err(hwdev->dev, "failed init adev %s %u\n",
+ svc_name, hwdev->dev_id);
+ kfree(hadev);
+ return NULL;
+ }
+
+ ret = auxiliary_device_add(&hadev->adev);
+ if (ret) {
+ dev_err(hwdev->dev, "failed to add adev %s %u\n",
+ svc_name, hwdev->dev_id);
+ auxiliary_device_uninit(&hadev->adev);
+ return NULL;
+ }
+
+ return hadev;
+}
+
+static void hinic3_del_one_adev(struct hinic3_hwdev *hwdev,
+ enum hinic3_service_type svc_type)
+{
+ struct hinic3_pcidev *pci_adapter = hwdev->adapter;
+ struct hinic3_adev *hadev;
+ int timeout;
+ bool state;
+
+ timeout = read_poll_timeout(test_and_set_bit, state, !state,
+ HINIC3_EVENT_POLL_SLEEP_US,
+ HINIC3_EVENT_POLL_TIMEOUT_US,
+ false, svc_type, &pci_adapter->state);
+
+ hadev = pci_adapter->hadev[svc_type];
+ auxiliary_device_delete(&hadev->adev);
+ auxiliary_device_uninit(&hadev->adev);
+ pci_adapter->hadev[svc_type] = NULL;
+ if (!timeout)
+ clear_bit(svc_type, &pci_adapter->state);
+}
+
+static int hinic3_attach_aux_devices(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_pcidev *pci_adapter = hwdev->adapter;
+ enum hinic3_service_type svc_type;
+
+ mutex_lock(&pci_adapter->pdev_mutex);
+
+ for (svc_type = 0; svc_type < HINIC3_SERVICE_T_MAX; svc_type++) {
+ if (!hinic3_adev_svc_supported(hwdev, svc_type))
+ continue;
+
+ pci_adapter->hadev[svc_type] = hinic3_add_one_adev(hwdev,
+ svc_type);
+ if (!pci_adapter->hadev[svc_type])
+ goto err_del_adevs;
+ }
+ mutex_unlock(&pci_adapter->pdev_mutex);
+ return 0;
+
+err_del_adevs:
+ while (svc_type > 0) {
+ svc_type--;
+ if (pci_adapter->hadev[svc_type]) {
+ hinic3_del_one_adev(hwdev, svc_type);
+ pci_adapter->hadev[svc_type] = NULL;
+ }
+ }
+ mutex_unlock(&pci_adapter->pdev_mutex);
+ return -ENOMEM;
+}
+
+static void hinic3_detach_aux_devices(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_pcidev *pci_adapter = hwdev->adapter;
+ int i;
+
+ mutex_lock(&pci_adapter->pdev_mutex);
+ for (i = 0; i < ARRAY_SIZE(hinic3_adev_devices); i++) {
+ if (pci_adapter->hadev[i])
+ hinic3_del_one_adev(hwdev, i);
+ }
+ mutex_unlock(&pci_adapter->pdev_mutex);
+}
+
+struct hinic3_hwdev *hinic3_adev_get_hwdev(struct auxiliary_device *adev)
+{
+ struct hinic3_adev *hadev;
+
+ hadev = container_of(adev, struct hinic3_adev, adev);
+ return hadev->hwdev;
+}
+
+void hinic3_adev_event_register(struct auxiliary_device *adev,
+ void (*event_handler)(struct auxiliary_device *adev,
+ struct hinic3_event_info *event))
+{
+ struct hinic3_adev *hadev;
+
+ hadev = container_of(adev, struct hinic3_adev, adev);
+ hadev->event = event_handler;
+}
+
+void hinic3_adev_event_unregister(struct auxiliary_device *adev)
+{
+ struct hinic3_adev *hadev;
+
+ hadev = container_of(adev, struct hinic3_adev, adev);
+ hadev->event = NULL;
+}
+
+static int hinic3_mapping_bar(struct pci_dev *pdev,
+ struct hinic3_pcidev *pci_adapter)
+{
+ pci_adapter->cfg_reg_base = pci_ioremap_bar(pdev,
+ HINIC3_VF_PCI_CFG_REG_BAR);
+ if (!pci_adapter->cfg_reg_base) {
+ dev_err(&pdev->dev, "Failed to map configuration regs\n");
+ return -ENOMEM;
+ }
+
+ pci_adapter->intr_reg_base = pci_ioremap_bar(pdev,
+ HINIC3_PCI_INTR_REG_BAR);
+ if (!pci_adapter->intr_reg_base) {
+ dev_err(&pdev->dev, "Failed to map interrupt regs\n");
+ goto err_unmap_cfg_reg_base;
+ }
+
+ pci_adapter->db_base_phy = pci_resource_start(pdev, HINIC3_PCI_DB_BAR);
+ pci_adapter->db_dwqe_len = pci_resource_len(pdev, HINIC3_PCI_DB_BAR);
+ pci_adapter->db_base = pci_ioremap_bar(pdev, HINIC3_PCI_DB_BAR);
+ if (!pci_adapter->db_base) {
+ dev_err(&pdev->dev, "Failed to map doorbell regs\n");
+ goto err_unmap_intr_reg_base;
+ }
+
+ return 0;
+
+err_unmap_intr_reg_base:
+ iounmap(pci_adapter->intr_reg_base);
+
+err_unmap_cfg_reg_base:
+ iounmap(pci_adapter->cfg_reg_base);
+
+ return -ENOMEM;
+}
+
+static void hinic3_unmapping_bar(struct hinic3_pcidev *pci_adapter)
+{
+ iounmap(pci_adapter->db_base);
+ iounmap(pci_adapter->intr_reg_base);
+ iounmap(pci_adapter->cfg_reg_base);
+}
+
+static int hinic3_pci_init(struct pci_dev *pdev)
+{
+ struct hinic3_pcidev *pci_adapter;
+ int err;
+
+ pci_adapter = kzalloc(sizeof(*pci_adapter), GFP_KERNEL);
+ if (!pci_adapter)
+ return -ENOMEM;
+
+ pci_adapter->pdev = pdev;
+ mutex_init(&pci_adapter->pdev_mutex);
+
+ pci_set_drvdata(pdev, pci_adapter);
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to enable PCI device\n");
+ goto err_free_pci_adapter;
+ }
+
+ err = pci_request_regions(pdev, HINIC3_NIC_DRV_NAME);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to request regions\n");
+ goto err_disable_device;
+ }
+
+ pci_set_master(pdev);
+
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pdev->dev, "Failed to set DMA mask\n");
+ goto err_release_regions;
+ }
+
+ return 0;
+
+err_release_regions:
+ pci_clear_master(pdev);
+ pci_release_regions(pdev);
+
+err_disable_device:
+ pci_disable_device(pdev);
+
+err_free_pci_adapter:
+ pci_set_drvdata(pdev, NULL);
+ mutex_destroy(&pci_adapter->pdev_mutex);
+ kfree(pci_adapter);
+
+ return err;
+}
+
+static void hinic3_pci_uninit(struct pci_dev *pdev)
+{
+ struct hinic3_pcidev *pci_adapter = pci_get_drvdata(pdev);
+
+ pci_clear_master(pdev);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ mutex_destroy(&pci_adapter->pdev_mutex);
+ kfree(pci_adapter);
+}
+
+static int hinic3_func_init(struct pci_dev *pdev,
+ struct hinic3_pcidev *pci_adapter)
+{
+ int err;
+
+ err = hinic3_init_hwdev(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to initialize hardware device\n");
+ return err;
+ }
+
+ err = hinic3_attach_aux_devices(pci_adapter->hwdev);
+ if (err)
+ goto err_free_hwdev;
+
+ return 0;
+
+err_free_hwdev:
+ hinic3_free_hwdev(pci_adapter->hwdev);
+
+ return err;
+}
+
+static void hinic3_func_uninit(struct pci_dev *pdev)
+{
+ struct hinic3_pcidev *pci_adapter = pci_get_drvdata(pdev);
+
+ hinic3_detach_aux_devices(pci_adapter->hwdev);
+ hinic3_free_hwdev(pci_adapter->hwdev);
+}
+
+static int hinic3_probe_func(struct hinic3_pcidev *pci_adapter)
+{
+ struct pci_dev *pdev = pci_adapter->pdev;
+ int err;
+
+ err = hinic3_mapping_bar(pdev, pci_adapter);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to map bar\n");
+ goto err_out;
+ }
+
+ err = hinic3_func_init(pdev, pci_adapter);
+ if (err)
+ goto err_unmap_bar;
+
+ return 0;
+
+err_unmap_bar:
+ hinic3_unmapping_bar(pci_adapter);
+
+err_out:
+ dev_err(&pdev->dev, "PCIe device probe function failed\n");
+ return err;
+}
+
+static void hinic3_remove_func(struct hinic3_pcidev *pci_adapter)
+{
+ struct pci_dev *pdev = pci_adapter->pdev;
+
+ hinic3_func_uninit(pdev);
+ hinic3_unmapping_bar(pci_adapter);
+}
+
+static int hinic3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct hinic3_pcidev *pci_adapter;
+ int err;
+
+ err = hinic3_pci_init(pdev);
+ if (err)
+ goto err_out;
+
+ pci_adapter = pci_get_drvdata(pdev);
+ err = hinic3_probe_func(pci_adapter);
+ if (err)
+ goto err_uninit_pci;
+
+ return 0;
+
+err_uninit_pci:
+ hinic3_pci_uninit(pdev);
+
+err_out:
+ dev_err(&pdev->dev, "PCIe device probe failed\n");
+ return err;
+}
+
+static void hinic3_remove(struct pci_dev *pdev)
+{
+ struct hinic3_pcidev *pci_adapter = pci_get_drvdata(pdev);
+
+ hinic3_remove_func(pci_adapter);
+ hinic3_pci_uninit(pdev);
+}
+
+static const struct pci_device_id hinic3_pci_table[] = {
+ /* Completed by later submission due to LoC limit. */
+ {0, 0}
+
+};
+
+MODULE_DEVICE_TABLE(pci, hinic3_pci_table);
+
+static void hinic3_shutdown(struct pci_dev *pdev)
+{
+ struct hinic3_pcidev *pci_adapter = pci_get_drvdata(pdev);
+
+ pci_disable_device(pdev);
+
+ if (pci_adapter)
+ hinic3_set_api_stop(pci_adapter->hwdev);
+}
+
+static struct pci_driver hinic3_driver = {
+ .name = HINIC3_NIC_DRV_NAME,
+ .id_table = hinic3_pci_table,
+ .probe = hinic3_probe,
+ .remove = hinic3_remove,
+ .shutdown = hinic3_shutdown,
+ .sriov_configure = pci_sriov_configure_simple
+};
+
+int hinic3_lld_init(void)
+{
+ return pci_register_driver(&hinic3_driver);
+}
+
+void hinic3_lld_exit(void)
+{
+ pci_unregister_driver(&hinic3_driver);
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_lld.h b/drivers/net/ethernet/huawei/hinic3/hinic3_lld.h
new file mode 100644
index 000000000000..322b44803476
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_lld.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_LLD_H_
+#define _HINIC3_LLD_H_
+
+#include <linux/auxiliary_bus.h>
+
+struct hinic3_event_info;
+
+#define HINIC3_NIC_DRV_NAME "hinic3"
+
+int hinic3_lld_init(void);
+void hinic3_lld_exit(void);
+void hinic3_adev_event_register(struct auxiliary_device *adev,
+ void (*event_handler)(struct auxiliary_device *adev,
+ struct hinic3_event_info *event));
+void hinic3_adev_event_unregister(struct auxiliary_device *adev);
+struct hinic3_hwdev *hinic3_adev_get_hwdev(struct auxiliary_device *adev);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_main.c b/drivers/net/ethernet/huawei/hinic3/hinic3_main.c
new file mode 100644
index 000000000000..497f2a36f35d
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_main.c
@@ -0,0 +1,352 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/etherdevice.h>
+#include <linux/netdevice.h>
+
+#include "hinic3_common.h"
+#include "hinic3_hw_comm.h"
+#include "hinic3_hwdev.h"
+#include "hinic3_hwif.h"
+#include "hinic3_lld.h"
+#include "hinic3_nic_cfg.h"
+#include "hinic3_nic_dev.h"
+#include "hinic3_nic_io.h"
+#include "hinic3_rx.h"
+#include "hinic3_tx.h"
+
+#define HINIC3_NIC_DRV_DESC "Intelligent Network Interface Card Driver"
+
+#define HINIC3_RX_BUF_LEN 2048
+#define HINIC3_LRO_REPLENISH_THLD 256
+#define HINIC3_NIC_DEV_WQ_NAME "hinic3_nic_dev_wq"
+
+#define HINIC3_SQ_DEPTH 1024
+#define HINIC3_RQ_DEPTH 1024
+
+static int hinic3_alloc_txrxqs(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+ int err;
+
+ err = hinic3_alloc_txqs(netdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to alloc txqs\n");
+ return err;
+ }
+
+ err = hinic3_alloc_rxqs(netdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to alloc rxqs\n");
+ goto err_free_txqs;
+ }
+
+ return 0;
+
+err_free_txqs:
+ hinic3_free_txqs(netdev);
+
+ return err;
+}
+
+static void hinic3_free_txrxqs(struct net_device *netdev)
+{
+ hinic3_free_rxqs(netdev);
+ hinic3_free_txqs(netdev);
+}
+
+static int hinic3_init_nic_dev(struct net_device *netdev,
+ struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct pci_dev *pdev = hwdev->pdev;
+
+ nic_dev->netdev = netdev;
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+ nic_dev->hwdev = hwdev;
+ nic_dev->pdev = pdev;
+
+ nic_dev->rx_buf_len = HINIC3_RX_BUF_LEN;
+ nic_dev->lro_replenish_thld = HINIC3_LRO_REPLENISH_THLD;
+ nic_dev->nic_svc_cap = hwdev->cfg_mgmt->cap.nic_svc_cap;
+
+ return 0;
+}
+
+static int hinic3_sw_init(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+ int err;
+
+ nic_dev->q_params.sq_depth = HINIC3_SQ_DEPTH;
+ nic_dev->q_params.rq_depth = HINIC3_RQ_DEPTH;
+
+ /* The VF driver always uses a random MAC address. During VM migration to a
+ * new device, the new device should learn the VM's old MAC rather than
+ * provide its own MAC. The product design assumes that every VF is
+ * susceptible to migration, so the device avoids offering a MAC address
+ * to VFs.
+ */
+ eth_hw_addr_random(netdev);
+ err = hinic3_set_mac(hwdev, netdev->dev_addr, 0,
+ hinic3_global_func_id(hwdev));
+ if (err) {
+ dev_err(hwdev->dev, "Failed to set default MAC\n");
+ return err;
+ }
+
+ err = hinic3_alloc_txrxqs(netdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to alloc qps\n");
+ goto err_del_mac;
+ }
+
+ return 0;
+
+err_del_mac:
+ hinic3_del_mac(hwdev, netdev->dev_addr, 0,
+ hinic3_global_func_id(hwdev));
+
+ return err;
+}
+
+static void hinic3_sw_uninit(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+
+ hinic3_free_txrxqs(netdev);
+ hinic3_del_mac(nic_dev->hwdev, netdev->dev_addr, 0,
+ hinic3_global_func_id(nic_dev->hwdev));
+}
+
+static void hinic3_assign_netdev_ops(struct net_device *netdev)
+{
+ hinic3_set_netdev_ops(netdev);
+}
+
+static void netdev_feature_init(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ netdev_features_t cso_fts = 0;
+ netdev_features_t tso_fts = 0;
+ netdev_features_t dft_fts;
+
+ dft_fts = NETIF_F_SG | NETIF_F_HIGHDMA;
+ if (hinic3_test_support(nic_dev, HINIC3_NIC_F_CSUM))
+ cso_fts |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
+ if (hinic3_test_support(nic_dev, HINIC3_NIC_F_SCTP_CRC))
+ cso_fts |= NETIF_F_SCTP_CRC;
+ if (hinic3_test_support(nic_dev, HINIC3_NIC_F_TSO))
+ tso_fts |= NETIF_F_TSO | NETIF_F_TSO6;
+
+ netdev->features |= dft_fts | cso_fts | tso_fts;
+}
+
+static int hinic3_set_default_hw_feature(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+ int err;
+
+ err = hinic3_set_nic_feature_to_hw(nic_dev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to set nic features\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static void hinic3_link_status_change(struct net_device *netdev,
+ bool link_status_up)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+
+ if (link_status_up) {
+ if (netif_carrier_ok(netdev))
+ return;
+
+ nic_dev->link_status_up = true;
+ netif_carrier_on(netdev);
+ netdev_dbg(netdev, "Link is up\n");
+ } else {
+ if (!netif_carrier_ok(netdev))
+ return;
+
+ nic_dev->link_status_up = false;
+ netif_carrier_off(netdev);
+ netdev_dbg(netdev, "Link is down\n");
+ }
+}
+
+static void hinic3_nic_event(struct auxiliary_device *adev,
+ struct hinic3_event_info *event)
+{
+ struct hinic3_nic_dev *nic_dev = dev_get_drvdata(&adev->dev);
+ struct net_device *netdev;
+
+ netdev = nic_dev->netdev;
+
+ switch (HINIC3_SRV_EVENT_TYPE(event->service, event->type)) {
+ case HINIC3_SRV_EVENT_TYPE(HINIC3_EVENT_SRV_NIC,
+ HINIC3_NIC_EVENT_LINK_UP):
+ hinic3_link_status_change(netdev, true);
+ break;
+ case HINIC3_SRV_EVENT_TYPE(HINIC3_EVENT_SRV_NIC,
+ HINIC3_NIC_EVENT_LINK_DOWN):
+ hinic3_link_status_change(netdev, false);
+ break;
+ default:
+ break;
+ }
+}
+
+static int hinic3_nic_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ struct hinic3_hwdev *hwdev = hinic3_adev_get_hwdev(adev);
+ struct pci_dev *pdev = hwdev->pdev;
+ struct hinic3_nic_dev *nic_dev;
+ struct net_device *netdev;
+ u16 max_qps, glb_func_id;
+ int err;
+
+ if (!hinic3_support_nic(hwdev)) {
+ dev_dbg(&adev->dev, "HW doesn't support nic\n");
+ return 0;
+ }
+
+ hinic3_adev_event_register(adev, hinic3_nic_event);
+
+ glb_func_id = hinic3_global_func_id(hwdev);
+ err = hinic3_func_reset(hwdev, glb_func_id, COMM_FUNC_RESET_BIT_NIC);
+ if (err) {
+ dev_err(&adev->dev, "Failed to reset function\n");
+ goto err_unregister_adev_event;
+ }
+
+ max_qps = hinic3_func_max_qnum(hwdev);
+ netdev = alloc_etherdev_mq(sizeof(*nic_dev), max_qps);
+ if (!netdev) {
+ dev_err(&adev->dev, "Failed to allocate netdev\n");
+ err = -ENOMEM;
+ goto err_unregister_adev_event;
+ }
+
+ nic_dev = netdev_priv(netdev);
+ dev_set_drvdata(&adev->dev, nic_dev);
+ err = hinic3_init_nic_dev(netdev, hwdev);
+ if (err)
+ goto err_free_netdev;
+
+ err = hinic3_init_nic_io(nic_dev);
+ if (err)
+ goto err_free_netdev;
+
+ err = hinic3_sw_init(netdev);
+ if (err)
+ goto err_free_nic_io;
+
+ hinic3_assign_netdev_ops(netdev);
+
+ netdev_feature_init(netdev);
+ err = hinic3_set_default_hw_feature(netdev);
+ if (err)
+ goto err_uninit_sw;
+
+ netif_carrier_off(netdev);
+
+ err = register_netdev(netdev);
+ if (err)
+ goto err_uninit_nic_feature;
+
+ return 0;
+
+err_uninit_nic_feature:
+ hinic3_update_nic_feature(nic_dev, 0);
+ hinic3_set_nic_feature_to_hw(nic_dev);
+
+err_uninit_sw:
+ hinic3_sw_uninit(netdev);
+
+err_free_nic_io:
+ hinic3_free_nic_io(nic_dev);
+
+err_free_netdev:
+ free_netdev(netdev);
+
+err_unregister_adev_event:
+ hinic3_adev_event_unregister(adev);
+ dev_err(&pdev->dev, "NIC service probe failed\n");
+
+ return err;
+}
+
+static void hinic3_nic_remove(struct auxiliary_device *adev)
+{
+ struct hinic3_nic_dev *nic_dev = dev_get_drvdata(&adev->dev);
+ struct net_device *netdev;
+
+ if (!hinic3_support_nic(nic_dev->hwdev))
+ return;
+
+ netdev = nic_dev->netdev;
+ unregister_netdev(netdev);
+
+ hinic3_update_nic_feature(nic_dev, 0);
+ hinic3_set_nic_feature_to_hw(nic_dev);
+ hinic3_sw_uninit(netdev);
+
+ hinic3_free_nic_io(nic_dev);
+
+ free_netdev(netdev);
+}
+
+static const struct auxiliary_device_id hinic3_nic_id_table[] = {
+ {
+ .name = HINIC3_NIC_DRV_NAME ".nic",
+ },
+ {}
+};
+
+static struct auxiliary_driver hinic3_nic_driver = {
+ .probe = hinic3_nic_probe,
+ .remove = hinic3_nic_remove,
+ .suspend = NULL,
+ .resume = NULL,
+ .name = "nic",
+ .id_table = hinic3_nic_id_table,
+};
+
+static __init int hinic3_nic_lld_init(void)
+{
+ int err;
+
+ err = hinic3_lld_init();
+ if (err)
+ return err;
+
+ err = auxiliary_driver_register(&hinic3_nic_driver);
+ if (err) {
+ hinic3_lld_exit();
+ return err;
+ }
+
+ return 0;
+}
+
+static __exit void hinic3_nic_lld_exit(void)
+{
+ auxiliary_driver_unregister(&hinic3_nic_driver);
+
+ hinic3_lld_exit();
+}
+
+module_init(hinic3_nic_lld_init);
+module_exit(hinic3_nic_lld_exit);
+
+MODULE_AUTHOR("Huawei Technologies CO., Ltd");
+MODULE_DESCRIPTION(HINIC3_NIC_DRV_DESC);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.c b/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.c
new file mode 100644
index 000000000000..e74d1eb09730
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.c
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/dma-mapping.h>
+
+#include "hinic3_common.h"
+#include "hinic3_hwdev.h"
+#include "hinic3_hwif.h"
+#include "hinic3_mbox.h"
+
+int hinic3_send_mbox_to_mgmt(struct hinic3_hwdev *hwdev, u8 mod, u16 cmd,
+ const struct mgmt_msg_params *msg_params)
+{
+ /* Completed by later submission due to LoC limit. */
+ return -EFAULT;
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.h b/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.h
new file mode 100644
index 000000000000..d7a6c37b7eff
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_MBOX_H_
+#define _HINIC3_MBOX_H_
+
+#include <linux/bitfield.h>
+#include <linux/mutex.h>
+
+struct hinic3_hwdev;
+
+int hinic3_send_mbox_to_mgmt(struct hinic3_hwdev *hwdev, u8 mod, u16 cmd,
+ const struct mgmt_msg_params *msg_params);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.h b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.h
new file mode 100644
index 000000000000..4edabeb32112
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_MGMT_H_
+#define _HINIC3_MGMT_H_
+
+#include <linux/types.h>
+
+struct hinic3_hwdev;
+
+void hinic3_flush_mgmt_workq(struct hinic3_hwdev *hwdev);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h
new file mode 100644
index 000000000000..c4434efdc7f7
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_MGMT_INTERFACE_H_
+#define _HINIC3_MGMT_INTERFACE_H_
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/if_ether.h>
+
+#include "hinic3_hw_intf.h"
+
+struct l2nic_cmd_feature_nego {
+ struct mgmt_msg_head msg_head;
+ u16 func_id;
+ u8 opcode;
+ u8 rsvd;
+ u64 s_feature[4];
+};
+
+enum l2nic_func_tbl_cfg_bitmap {
+ L2NIC_FUNC_TBL_CFG_INIT = 0,
+ L2NIC_FUNC_TBL_CFG_RX_BUF_SIZE = 1,
+ L2NIC_FUNC_TBL_CFG_MTU = 2,
+};
+
+struct l2nic_func_tbl_cfg {
+ u16 rx_wqe_buf_size;
+ u16 mtu;
+ u32 rsvd[9];
+};
+
+struct l2nic_cmd_set_func_tbl {
+ struct mgmt_msg_head msg_head;
+ u16 func_id;
+ u16 rsvd;
+ u32 cfg_bitmap;
+ struct l2nic_func_tbl_cfg tbl_cfg;
+};
+
+struct l2nic_cmd_set_mac {
+ struct mgmt_msg_head msg_head;
+ u16 func_id;
+ u16 vlan_id;
+ u16 rsvd1;
+ u8 mac[ETH_ALEN];
+};
+
+struct l2nic_cmd_update_mac {
+ struct mgmt_msg_head msg_head;
+ u16 func_id;
+ u16 vlan_id;
+ u16 rsvd1;
+ u8 old_mac[ETH_ALEN];
+ u16 rsvd2;
+ u8 new_mac[ETH_ALEN];
+};
+
+struct l2nic_cmd_force_pkt_drop {
+ struct mgmt_msg_head msg_head;
+ u8 port;
+ u8 rsvd1[3];
+};
+
+/* Commands between NIC to fw */
+enum l2nic_cmd {
+ /* FUNC CFG */
+ L2NIC_CMD_SET_FUNC_TBL = 5,
+ L2NIC_CMD_SET_VPORT_ENABLE = 6,
+ L2NIC_CMD_SET_SQ_CI_ATTR = 8,
+ L2NIC_CMD_CLEAR_QP_RESOURCE = 11,
+ L2NIC_CMD_FEATURE_NEGO = 15,
+ L2NIC_CMD_SET_MAC = 21,
+ L2NIC_CMD_DEL_MAC = 22,
+ L2NIC_CMD_UPDATE_MAC = 23,
+ L2NIC_CMD_CFG_RSS = 60,
+ L2NIC_CMD_CFG_RSS_HASH_KEY = 63,
+ L2NIC_CMD_CFG_RSS_HASH_ENGINE = 64,
+ L2NIC_CMD_SET_RSS_CTX_TBL = 65,
+ L2NIC_CMD_QOS_DCB_STATE = 110,
+ L2NIC_CMD_FORCE_PKT_DROP = 113,
+ L2NIC_CMD_MAX = 256,
+};
+
+enum hinic3_nic_feature_cap {
+ HINIC3_NIC_F_CSUM = BIT(0),
+ HINIC3_NIC_F_SCTP_CRC = BIT(1),
+ HINIC3_NIC_F_TSO = BIT(2),
+ HINIC3_NIC_F_LRO = BIT(3),
+ HINIC3_NIC_F_UFO = BIT(4),
+ HINIC3_NIC_F_RSS = BIT(5),
+ HINIC3_NIC_F_RX_VLAN_FILTER = BIT(6),
+ HINIC3_NIC_F_RX_VLAN_STRIP = BIT(7),
+ HINIC3_NIC_F_TX_VLAN_INSERT = BIT(8),
+ HINIC3_NIC_F_VXLAN_OFFLOAD = BIT(9),
+ HINIC3_NIC_F_FDIR = BIT(11),
+ HINIC3_NIC_F_PROMISC = BIT(12),
+ HINIC3_NIC_F_ALLMULTI = BIT(13),
+ HINIC3_NIC_F_RATE_LIMIT = BIT(16),
+};
+
+#define HINIC3_NIC_F_ALL_MASK 0x33bff
+#define HINIC3_NIC_DRV_DEFAULT_FEATURE 0x3f03f
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c b/drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c
new file mode 100644
index 000000000000..71104a6b8bef
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/etherdevice.h>
+#include <linux/netdevice.h>
+
+#include "hinic3_hwif.h"
+#include "hinic3_nic_cfg.h"
+#include "hinic3_nic_dev.h"
+#include "hinic3_nic_io.h"
+#include "hinic3_rx.h"
+#include "hinic3_tx.h"
+
+static int hinic3_open(struct net_device *netdev)
+{
+ /* Completed by later submission due to LoC limit. */
+ return -EFAULT;
+}
+
+static int hinic3_close(struct net_device *netdev)
+{
+ /* Completed by later submission due to LoC limit. */
+ return -EFAULT;
+}
+
+static int hinic3_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ int err;
+
+ err = hinic3_set_port_mtu(netdev, new_mtu);
+ if (err) {
+ netdev_err(netdev, "Failed to change port mtu to %d\n",
+ new_mtu);
+ return err;
+ }
+
+ netdev_dbg(netdev, "Change mtu from %u to %d\n", netdev->mtu, new_mtu);
+ WRITE_ONCE(netdev->mtu, new_mtu);
+
+ return 0;
+}
+
+static int hinic3_set_mac_addr(struct net_device *netdev, void *addr)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct sockaddr *saddr = addr;
+ int err;
+
+ if (!is_valid_ether_addr(saddr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ if (ether_addr_equal(netdev->dev_addr, saddr->sa_data))
+ return 0;
+
+ err = hinic3_update_mac(nic_dev->hwdev, netdev->dev_addr,
+ saddr->sa_data, 0,
+ hinic3_global_func_id(nic_dev->hwdev));
+
+ if (err)
+ return err;
+
+ eth_hw_addr_set(netdev, saddr->sa_data);
+
+ return 0;
+}
+
+static const struct net_device_ops hinic3_netdev_ops = {
+ .ndo_open = hinic3_open,
+ .ndo_stop = hinic3_close,
+ .ndo_change_mtu = hinic3_change_mtu,
+ .ndo_set_mac_address = hinic3_set_mac_addr,
+ .ndo_start_xmit = hinic3_xmit_frame,
+};
+
+void hinic3_set_netdev_ops(struct net_device *netdev)
+{
+ netdev->netdev_ops = &hinic3_netdev_ops;
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c
new file mode 100644
index 000000000000..5b1a91a18c67
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c
@@ -0,0 +1,233 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/if_vlan.h>
+
+#include "hinic3_hwdev.h"
+#include "hinic3_hwif.h"
+#include "hinic3_mbox.h"
+#include "hinic3_nic_cfg.h"
+#include "hinic3_nic_dev.h"
+#include "hinic3_nic_io.h"
+
+static int hinic3_feature_nego(struct hinic3_hwdev *hwdev, u8 opcode,
+ u64 *s_feature, u16 size)
+{
+ struct l2nic_cmd_feature_nego feature_nego = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ feature_nego.func_id = hinic3_global_func_id(hwdev);
+ feature_nego.opcode = opcode;
+ if (opcode == MGMT_MSG_CMD_OP_SET)
+ memcpy(feature_nego.s_feature, s_feature, size * sizeof(u64));
+
+ mgmt_msg_params_init_default(&msg_params, &feature_nego,
+ sizeof(feature_nego));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_FEATURE_NEGO, &msg_params);
+ if (err || feature_nego.msg_head.status) {
+ dev_err(hwdev->dev, "Failed to negotiate nic feature, err:%d, status: 0x%x\n",
+ err, feature_nego.msg_head.status);
+ return -EIO;
+ }
+
+ if (opcode == MGMT_MSG_CMD_OP_GET)
+ memcpy(s_feature, feature_nego.s_feature, size * sizeof(u64));
+
+ return 0;
+}
+
+int hinic3_set_nic_feature_to_hw(struct hinic3_nic_dev *nic_dev)
+{
+ return hinic3_feature_nego(nic_dev->hwdev, MGMT_MSG_CMD_OP_SET,
+ &nic_dev->nic_io->feature_cap, 1);
+}
+
+bool hinic3_test_support(struct hinic3_nic_dev *nic_dev,
+ enum hinic3_nic_feature_cap feature_bits)
+{
+ return (nic_dev->nic_io->feature_cap & feature_bits) == feature_bits;
+}
+
+void hinic3_update_nic_feature(struct hinic3_nic_dev *nic_dev, u64 feature_cap)
+{
+ nic_dev->nic_io->feature_cap = feature_cap;
+}
+
+static int hinic3_set_function_table(struct hinic3_hwdev *hwdev, u32 cfg_bitmap,
+ const struct l2nic_func_tbl_cfg *cfg)
+{
+ struct l2nic_cmd_set_func_tbl cmd_func_tbl = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ cmd_func_tbl.func_id = hinic3_global_func_id(hwdev);
+ cmd_func_tbl.cfg_bitmap = cfg_bitmap;
+ cmd_func_tbl.tbl_cfg = *cfg;
+
+ mgmt_msg_params_init_default(&msg_params, &cmd_func_tbl,
+ sizeof(cmd_func_tbl));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_SET_FUNC_TBL, &msg_params);
+ if (err || cmd_func_tbl.msg_head.status) {
+ dev_err(hwdev->dev,
+ "Failed to set func table, bitmap: 0x%x, err: %d, status: 0x%x\n",
+ cfg_bitmap, err, cmd_func_tbl.msg_head.status);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int hinic3_set_port_mtu(struct net_device *netdev, u16 new_mtu)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct l2nic_func_tbl_cfg func_tbl_cfg = {};
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+
+ func_tbl_cfg.mtu = new_mtu;
+ return hinic3_set_function_table(hwdev, BIT(L2NIC_FUNC_TBL_CFG_MTU),
+ &func_tbl_cfg);
+}
+
+static int hinic3_check_mac_info(struct hinic3_hwdev *hwdev, u8 status,
+ u16 vlan_id)
+{
+ if ((status && status != MGMT_STATUS_EXIST) ||
+ ((vlan_id & BIT(15)) && status == MGMT_STATUS_EXIST)) {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int hinic3_set_mac(struct hinic3_hwdev *hwdev, const u8 *mac_addr, u16 vlan_id,
+ u16 func_id)
+{
+ struct l2nic_cmd_set_mac mac_info = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ if ((vlan_id & HINIC3_VLAN_ID_MASK) >= VLAN_N_VID) {
+ dev_err(hwdev->dev, "Invalid VLAN number: %d\n",
+ (vlan_id & HINIC3_VLAN_ID_MASK));
+ return -EINVAL;
+ }
+
+ mac_info.func_id = func_id;
+ mac_info.vlan_id = vlan_id;
+ ether_addr_copy(mac_info.mac, mac_addr);
+
+ mgmt_msg_params_init_default(&msg_params, &mac_info, sizeof(mac_info));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_SET_MAC, &msg_params);
+ if (err || hinic3_check_mac_info(hwdev, mac_info.msg_head.status,
+ mac_info.vlan_id)) {
+ dev_err(hwdev->dev,
+ "Failed to update MAC, err: %d, status: 0x%x\n",
+ err, mac_info.msg_head.status);
+ return -EIO;
+ }
+
+ if (mac_info.msg_head.status == MGMT_STATUS_PF_SET_VF_ALREADY) {
+ dev_warn(hwdev->dev, "PF has already set VF mac, Ignore set operation\n");
+ return 0;
+ }
+
+ if (mac_info.msg_head.status == MGMT_STATUS_EXIST) {
+ dev_warn(hwdev->dev, "MAC is repeated. Ignore update operation\n");
+ return 0;
+ }
+
+ return 0;
+}
+
+int hinic3_del_mac(struct hinic3_hwdev *hwdev, const u8 *mac_addr, u16 vlan_id,
+ u16 func_id)
+{
+ struct l2nic_cmd_set_mac mac_info = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ if ((vlan_id & HINIC3_VLAN_ID_MASK) >= VLAN_N_VID) {
+ dev_err(hwdev->dev, "Invalid VLAN number: %d\n",
+ (vlan_id & HINIC3_VLAN_ID_MASK));
+ return -EINVAL;
+ }
+
+ mac_info.func_id = func_id;
+ mac_info.vlan_id = vlan_id;
+ ether_addr_copy(mac_info.mac, mac_addr);
+
+ mgmt_msg_params_init_default(&msg_params, &mac_info, sizeof(mac_info));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_DEL_MAC, &msg_params);
+ if (err) {
+ dev_err(hwdev->dev,
+ "Failed to delete MAC, err: %d, status: 0x%x\n",
+ err, mac_info.msg_head.status);
+ return err;
+ }
+
+ return 0;
+}
+
+int hinic3_update_mac(struct hinic3_hwdev *hwdev, const u8 *old_mac,
+ u8 *new_mac, u16 vlan_id, u16 func_id)
+{
+ struct l2nic_cmd_update_mac mac_info = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ if ((vlan_id & HINIC3_VLAN_ID_MASK) >= VLAN_N_VID) {
+ dev_err(hwdev->dev, "Invalid VLAN number: %d\n",
+ (vlan_id & HINIC3_VLAN_ID_MASK));
+ return -EINVAL;
+ }
+
+ mac_info.func_id = func_id;
+ mac_info.vlan_id = vlan_id;
+ ether_addr_copy(mac_info.old_mac, old_mac);
+ ether_addr_copy(mac_info.new_mac, new_mac);
+
+ mgmt_msg_params_init_default(&msg_params, &mac_info, sizeof(mac_info));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_UPDATE_MAC, &msg_params);
+ if (err || hinic3_check_mac_info(hwdev, mac_info.msg_head.status,
+ mac_info.vlan_id)) {
+ dev_err(hwdev->dev,
+ "Failed to update MAC, err: %d, status: 0x%x\n",
+ err, mac_info.msg_head.status);
+ return -EIO;
+ }
+ return 0;
+}
+
+int hinic3_force_drop_tx_pkt(struct hinic3_hwdev *hwdev)
+{
+ struct l2nic_cmd_force_pkt_drop pkt_drop = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ pkt_drop.port = hinic3_physical_port_id(hwdev);
+
+ mgmt_msg_params_init_default(&msg_params, &pkt_drop, sizeof(pkt_drop));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_FORCE_PKT_DROP, &msg_params);
+ if ((pkt_drop.msg_head.status != MGMT_STATUS_CMD_UNSUPPORTED &&
+ pkt_drop.msg_head.status) || err) {
+ dev_err(hwdev->dev,
+ "Failed to set force tx packets drop, err: %d, status: 0x%x\n",
+ err, pkt_drop.msg_head.status);
+ return -EFAULT;
+ }
+
+ return pkt_drop.msg_head.status;
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h
new file mode 100644
index 000000000000..bf9ce51dc401
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_NIC_CFG_H_
+#define _HINIC3_NIC_CFG_H_
+
+#include <linux/types.h>
+
+#include "hinic3_hw_intf.h"
+#include "hinic3_mgmt_interface.h"
+
+struct hinic3_hwdev;
+struct hinic3_nic_dev;
+
+#define HINIC3_MIN_MTU_SIZE 256
+#define HINIC3_MAX_JUMBO_FRAME_SIZE 9600
+
+#define HINIC3_VLAN_ID_MASK 0x7FFF
+
+enum hinic3_nic_event_type {
+ HINIC3_NIC_EVENT_LINK_DOWN = 0,
+ HINIC3_NIC_EVENT_LINK_UP = 1,
+};
+
+int hinic3_set_nic_feature_to_hw(struct hinic3_nic_dev *nic_dev);
+bool hinic3_test_support(struct hinic3_nic_dev *nic_dev,
+ enum hinic3_nic_feature_cap feature_bits);
+void hinic3_update_nic_feature(struct hinic3_nic_dev *nic_dev, u64 feature_cap);
+
+int hinic3_set_port_mtu(struct net_device *netdev, u16 new_mtu);
+
+int hinic3_set_mac(struct hinic3_hwdev *hwdev, const u8 *mac_addr, u16 vlan_id,
+ u16 func_id);
+int hinic3_del_mac(struct hinic3_hwdev *hwdev, const u8 *mac_addr, u16 vlan_id,
+ u16 func_id);
+int hinic3_update_mac(struct hinic3_hwdev *hwdev, const u8 *old_mac,
+ u8 *new_mac, u16 vlan_id, u16 func_id);
+
+int hinic3_force_drop_tx_pkt(struct hinic3_hwdev *hwdev);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h
new file mode 100644
index 000000000000..c994fc9b6ee0
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_NIC_DEV_H_
+#define _HINIC3_NIC_DEV_H_
+
+#include <linux/netdevice.h>
+
+#include "hinic3_hw_cfg.h"
+#include "hinic3_mgmt_interface.h"
+
+enum hinic3_flags {
+ HINIC3_RSS_ENABLE,
+};
+
+enum hinic3_rss_hash_type {
+ HINIC3_RSS_HASH_ENGINE_TYPE_XOR = 0,
+ HINIC3_RSS_HASH_ENGINE_TYPE_TOEP = 1,
+};
+
+struct hinic3_rss_type {
+ u8 tcp_ipv6_ext;
+ u8 ipv6_ext;
+ u8 tcp_ipv6;
+ u8 ipv6;
+ u8 tcp_ipv4;
+ u8 ipv4;
+ u8 udp_ipv6;
+ u8 udp_ipv4;
+};
+
+struct hinic3_irq_cfg {
+ struct net_device *netdev;
+ u16 msix_entry_idx;
+ /* provided by OS */
+ u32 irq_id;
+ char irq_name[IFNAMSIZ + 16];
+ struct napi_struct napi;
+ cpumask_t affinity_mask;
+ struct hinic3_txq *txq;
+ struct hinic3_rxq *rxq;
+};
+
+struct hinic3_dyna_txrxq_params {
+ u16 num_qps;
+ u32 sq_depth;
+ u32 rq_depth;
+
+ struct hinic3_dyna_txq_res *txqs_res;
+ struct hinic3_dyna_rxq_res *rxqs_res;
+ struct hinic3_irq_cfg *irq_cfg;
+};
+
+struct hinic3_nic_dev {
+ struct pci_dev *pdev;
+ struct net_device *netdev;
+ struct hinic3_hwdev *hwdev;
+ struct hinic3_nic_io *nic_io;
+
+ u16 max_qps;
+ u16 rx_buf_len;
+ u32 lro_replenish_thld;
+ unsigned long flags;
+ struct hinic3_nic_service_cap nic_svc_cap;
+
+ struct hinic3_dyna_txrxq_params q_params;
+ struct hinic3_txq *txqs;
+ struct hinic3_rxq *rxqs;
+
+ u16 num_qp_irq;
+ struct msix_entry *qps_msix_entries;
+
+ bool link_status_up;
+};
+
+void hinic3_set_netdev_ops(struct net_device *netdev);
+
+/* Temporary prototypes. Functions become static in a later submission. */
+void qp_add_napi(struct hinic3_irq_cfg *irq_cfg);
+void qp_del_napi(struct hinic3_irq_cfg *irq_cfg);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c
new file mode 100644
index 000000000000..34a1f5bd5ac1
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include "hinic3_hw_comm.h"
+#include "hinic3_hw_intf.h"
+#include "hinic3_hwdev.h"
+#include "hinic3_hwif.h"
+#include "hinic3_nic_cfg.h"
+#include "hinic3_nic_dev.h"
+#include "hinic3_nic_io.h"
+
+int hinic3_init_nic_io(struct hinic3_nic_dev *nic_dev)
+{
+ /* Completed by later submission due to LoC limit. */
+ return -EFAULT;
+}
+
+void hinic3_free_nic_io(struct hinic3_nic_dev *nic_dev)
+{
+ /* Completed by later submission due to LoC limit. */
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h
new file mode 100644
index 000000000000..865ba6878c48
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h
@@ -0,0 +1,120 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_NIC_IO_H_
+#define _HINIC3_NIC_IO_H_
+
+#include <linux/bitfield.h>
+
+#include "hinic3_wq.h"
+
+struct hinic3_nic_dev;
+
+#define HINIC3_SQ_WQEBB_SHIFT 4
+#define HINIC3_RQ_WQEBB_SHIFT 3
+#define HINIC3_SQ_WQEBB_SIZE BIT(HINIC3_SQ_WQEBB_SHIFT)
+
+/* ******************** RQ_CTRL ******************** */
+enum hinic3_rq_wqe_type {
+ HINIC3_NORMAL_RQ_WQE = 1,
+};
+
+/* ******************** SQ_CTRL ******************** */
+#define HINIC3_TX_MSS_DEFAULT 0x3E00
+#define HINIC3_TX_MSS_MIN 0x50
+#define HINIC3_MAX_SQ_SGE 18
+
+struct hinic3_io_queue {
+ struct hinic3_wq wq;
+ u8 owner;
+ u16 q_id;
+ u16 msix_entry_idx;
+ u8 __iomem *db_addr;
+ u16 *cons_idx_addr;
+} ____cacheline_aligned;
+
+static inline u16 hinic3_get_sq_local_ci(const struct hinic3_io_queue *sq)
+{
+ const struct hinic3_wq *wq = &sq->wq;
+
+ return wq->cons_idx & wq->idx_mask;
+}
+
+static inline u16 hinic3_get_sq_local_pi(const struct hinic3_io_queue *sq)
+{
+ const struct hinic3_wq *wq = &sq->wq;
+
+ return wq->prod_idx & wq->idx_mask;
+}
+
+static inline u16 hinic3_get_sq_hw_ci(const struct hinic3_io_queue *sq)
+{
+ const struct hinic3_wq *wq = &sq->wq;
+
+ return READ_ONCE(*sq->cons_idx_addr) & wq->idx_mask;
+}
+
+/* ******************** DB INFO ******************** */
+#define DB_INFO_QID_MASK GENMASK(12, 0)
+#define DB_INFO_CFLAG_MASK BIT(23)
+#define DB_INFO_COS_MASK GENMASK(26, 24)
+#define DB_INFO_TYPE_MASK GENMASK(31, 27)
+#define DB_INFO_SET(val, member) \
+ FIELD_PREP(DB_INFO_##member##_MASK, val)
+
+#define DB_PI_LOW_MASK 0xFFU
+#define DB_PI_HIGH_MASK 0xFFU
+#define DB_PI_HI_SHIFT 8
+#define DB_PI_LOW(pi) ((pi) & DB_PI_LOW_MASK)
+#define DB_PI_HIGH(pi) (((pi) >> DB_PI_HI_SHIFT) & DB_PI_HIGH_MASK)
+#define DB_ADDR(q, pi) ((u64 __iomem *)((q)->db_addr) + DB_PI_LOW(pi))
+#define DB_SRC_TYPE 1
+
+/* CFLAG_DATA_PATH */
+#define DB_CFLAG_DP_SQ 0
+#define DB_CFLAG_DP_RQ 1
+
+struct hinic3_nic_db {
+ u32 db_info;
+ u32 pi_hi;
+};
+
+static inline void hinic3_write_db(struct hinic3_io_queue *queue, int cos,
+ u8 cflag, u16 pi)
+{
+ struct hinic3_nic_db db;
+
+ db.db_info = DB_INFO_SET(DB_SRC_TYPE, TYPE) |
+ DB_INFO_SET(cflag, CFLAG) |
+ DB_INFO_SET(cos, COS) |
+ DB_INFO_SET(queue->q_id, QID);
+ db.pi_hi = DB_PI_HIGH(pi);
+
+ writeq(*((u64 *)&db), DB_ADDR(queue, pi));
+}
+
+struct hinic3_nic_io {
+ struct hinic3_io_queue *sq;
+ struct hinic3_io_queue *rq;
+
+ u16 num_qps;
+ u16 max_qps;
+
+ /* Base address for consumer index of all tx queues. Each queue is
+ * given a full cache line to hold its consumer index. HW updates
+ * current consumer index as it consumes tx WQEs.
+ */
+ void *ci_vaddr_base;
+ dma_addr_t ci_dma_base;
+
+ u8 __iomem *sqs_db_addr;
+ u8 __iomem *rqs_db_addr;
+
+ u16 rx_buf_len;
+ u64 feature_cap;
+};
+
+int hinic3_init_nic_io(struct hinic3_nic_dev *nic_dev);
+void hinic3_free_nic_io(struct hinic3_nic_dev *nic_dev);
+
+#endif
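The doorbell record above is packed from the queue id, data-path flag, CoS and source type with FIELD_PREP, while the 16-bit producer index is split so that its low 8 bits select the doorbell address and its high 8 bits travel inside the record. A minimal userspace sketch of that packing, using plain shifts instead of the kernel helpers; the field positions are copied from the DB_INFO_*_MASK definitions and the helper name is illustrative only:

#include <stdint.h>
#include <stdio.h>

/* Field positions mirror the DB_INFO_*_MASK definitions: QID bits 12:0,
 * CFLAG bit 23, COS bits 26:24, TYPE bits 31:27.
 */
static uint32_t db_info_pack(uint32_t qid, uint32_t cflag, uint32_t cos,
			     uint32_t type)
{
	return (qid & 0x1fffu) | ((cflag & 0x1u) << 23) |
	       ((cos & 0x7u) << 24) | ((type & 0x1fu) << 27);
}

int main(void)
{
	unsigned int pi = 0x1234;	/* producer index written by the driver */
	uint32_t info = db_info_pack(5, 0 /* SQ */, 0, 1 /* DB_SRC_TYPE */);
	unsigned int pi_hi = (pi >> 8) & 0xff;	/* stored in the record */
	unsigned int pi_low = pi & 0xff;	/* selects the doorbell address */

	printf("db_info=%#x pi_hi=%#x pi_low=%#x\n", info, pi_hi, pi_low);
	return 0;
}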
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_queue_common.c b/drivers/net/ethernet/huawei/hinic3/hinic3_queue_common.c
new file mode 100644
index 000000000000..fab9011de9ad
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_queue_common.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/device.h>
+
+#include "hinic3_hwdev.h"
+#include "hinic3_queue_common.h"
+
+void hinic3_queue_pages_init(struct hinic3_queue_pages *qpages, u32 q_depth,
+ u32 page_size, u32 elem_size)
+{
+ u32 elem_per_page;
+
+ elem_per_page = min(page_size / elem_size, q_depth);
+
+ qpages->pages = NULL;
+ qpages->page_size = page_size;
+ qpages->num_pages = max(q_depth / elem_per_page, 1);
+ qpages->elem_size_shift = ilog2(elem_size);
+ qpages->elem_per_pg_shift = ilog2(elem_per_page);
+}
+
+static void __queue_pages_free(struct hinic3_hwdev *hwdev,
+ struct hinic3_queue_pages *qpages, u32 pg_cnt)
+{
+ while (pg_cnt > 0) {
+ pg_cnt--;
+ hinic3_dma_free_coherent_align(hwdev->dev,
+ qpages->pages + pg_cnt);
+ }
+ kfree(qpages->pages);
+ qpages->pages = NULL;
+}
+
+void hinic3_queue_pages_free(struct hinic3_hwdev *hwdev,
+ struct hinic3_queue_pages *qpages)
+{
+ __queue_pages_free(hwdev, qpages, qpages->num_pages);
+}
+
+int hinic3_queue_pages_alloc(struct hinic3_hwdev *hwdev,
+ struct hinic3_queue_pages *qpages, u32 align)
+{
+ u32 pg_idx;
+ int err;
+
+ qpages->pages = kcalloc(qpages->num_pages, sizeof(qpages->pages[0]),
+ GFP_KERNEL);
+ if (!qpages->pages)
+ return -ENOMEM;
+
+ if (align == 0)
+ align = qpages->page_size;
+
+ for (pg_idx = 0; pg_idx < qpages->num_pages; pg_idx++) {
+ err = hinic3_dma_zalloc_coherent_align(hwdev->dev,
+ qpages->page_size,
+ align,
+ GFP_KERNEL,
+ qpages->pages + pg_idx);
+ if (err) {
+ __queue_pages_free(hwdev, qpages, pg_idx);
+ return err;
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_queue_common.h b/drivers/net/ethernet/huawei/hinic3/hinic3_queue_common.h
new file mode 100644
index 000000000000..ec4cae0a0929
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_queue_common.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_QUEUE_COMMON_H_
+#define _HINIC3_QUEUE_COMMON_H_
+
+#include <linux/types.h>
+
+#include "hinic3_common.h"
+
+struct hinic3_hwdev;
+
+struct hinic3_queue_pages {
+ /* Array of DMA-able pages that actually holds the queue entries. */
+ struct hinic3_dma_addr_align *pages;
+ /* Page size in bytes. */
+ u32 page_size;
+ /* Number of pages; must be a power of 2. */
+ u16 num_pages;
+ u8 elem_size_shift;
+ u8 elem_per_pg_shift;
+};
+
+void hinic3_queue_pages_init(struct hinic3_queue_pages *qpages, u32 q_depth,
+ u32 page_size, u32 elem_size);
+int hinic3_queue_pages_alloc(struct hinic3_hwdev *hwdev,
+ struct hinic3_queue_pages *qpages, u32 align);
+void hinic3_queue_pages_free(struct hinic3_hwdev *hwdev,
+ struct hinic3_queue_pages *qpages);
+
+/* Get pointer to the queue entry at the specified index. The index does not
+ * have to be masked to the queue depth; only its least significant bits are
+ * used. Also reports how many elements remain in the same page (including the
+ * requested one) in case the caller needs multiple entries.
+ */
+static inline void *get_q_element(const struct hinic3_queue_pages *qpages,
+ u32 idx, u32 *remaining_in_page)
+{
+ const struct hinic3_dma_addr_align *page;
+ u32 page_idx, elem_idx, elem_per_pg, ofs;
+ u8 shift;
+
+ shift = qpages->elem_per_pg_shift;
+ page_idx = (idx >> shift) & (qpages->num_pages - 1);
+ elem_per_pg = 1 << shift;
+ elem_idx = idx & (elem_per_pg - 1);
+ if (remaining_in_page)
+ *remaining_in_page = elem_per_pg - elem_idx;
+ ofs = elem_idx << qpages->elem_size_shift;
+ page = qpages->pages + page_idx;
+ return (char *)page->align_vaddr + ofs;
+}
+
+#endif
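get_q_element() resolves an unmasked index into a page number and a byte offset using only the two shifts stored by hinic3_queue_pages_init(). A small userspace sketch of the same arithmetic, under assumed values (4 KiB pages, 16-byte elements, queue depth 1024); all names and numbers here are illustrative:

#include <stdio.h>

int main(void)
{
	/* Assumed layout: 4096-byte pages, 16-byte elements, depth 1024. */
	unsigned int elem_per_pg = 4096 / 16;		/* 256 */
	unsigned int num_pages = 1024 / elem_per_pg;	/* 4, a power of 2 */
	unsigned int elem_per_pg_shift = 8;		/* ilog2(256) */
	unsigned int elem_size_shift = 4;		/* ilog2(16) */
	unsigned int idx = 1000;	/* unmasked index; only low bits matter */

	unsigned int page_idx = (idx >> elem_per_pg_shift) & (num_pages - 1);
	unsigned int elem_idx = idx & (elem_per_pg - 1);
	unsigned int remaining = elem_per_pg - elem_idx;
	unsigned int offset = elem_idx << elem_size_shift;

	printf("page=%u elem=%u byte_offset=%u remaining_in_page=%u\n",
	       page_idx, elem_idx, offset, remaining);
	return 0;
}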
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_rx.c b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
new file mode 100644
index 000000000000..860163e9d66c
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
@@ -0,0 +1,341 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/netdevice.h>
+#include <net/gro.h>
+#include <net/page_pool/helpers.h>
+
+#include "hinic3_hwdev.h"
+#include "hinic3_nic_dev.h"
+#include "hinic3_nic_io.h"
+#include "hinic3_rx.h"
+
+#define HINIC3_RX_HDR_SIZE 256
+#define HINIC3_RX_BUFFER_WRITE 16
+
+#define HINIC3_RX_TCP_PKT 0x3
+#define HINIC3_RX_UDP_PKT 0x4
+#define HINIC3_RX_SCTP_PKT 0x7
+
+#define HINIC3_RX_IPV4_PKT 0
+#define HINIC3_RX_IPV6_PKT 1
+#define HINIC3_RX_INVALID_IP_TYPE 2
+
+#define HINIC3_RX_PKT_FORMAT_NON_TUNNEL 0
+#define HINIC3_RX_PKT_FORMAT_VXLAN 1
+
+#define HINIC3_LRO_PKT_HDR_LEN_IPV4 66
+#define HINIC3_LRO_PKT_HDR_LEN_IPV6 86
+#define HINIC3_LRO_PKT_HDR_LEN(cqe) \
+ (RQ_CQE_OFFOLAD_TYPE_GET((cqe)->offload_type, IP_TYPE) == \
+ HINIC3_RX_IPV6_PKT ? HINIC3_LRO_PKT_HDR_LEN_IPV6 : \
+ HINIC3_LRO_PKT_HDR_LEN_IPV4)
+
+int hinic3_alloc_rxqs(struct net_device *netdev)
+{
+ /* Completed in a later submission due to the LoC limit. */
+ return -EFAULT;
+}
+
+void hinic3_free_rxqs(struct net_device *netdev)
+{
+ /* Completed in a later submission due to the LoC limit. */
+}
+
+static int rx_alloc_mapped_page(struct page_pool *page_pool,
+ struct hinic3_rx_info *rx_info, u16 buf_len)
+{
+ struct page *page;
+ u32 page_offset;
+
+ page = page_pool_dev_alloc_frag(page_pool, &page_offset, buf_len);
+ if (unlikely(!page))
+ return -ENOMEM;
+
+ rx_info->page = page;
+ rx_info->page_offset = page_offset;
+
+ return 0;
+}
+
+static void rq_wqe_buf_set(struct hinic3_io_queue *rq, uint32_t wqe_idx,
+ dma_addr_t dma_addr, u16 len)
+{
+ struct hinic3_rq_wqe *rq_wqe;
+
+ rq_wqe = get_q_element(&rq->wq.qpages, wqe_idx, NULL);
+ rq_wqe->buf_hi_addr = upper_32_bits(dma_addr);
+ rq_wqe->buf_lo_addr = lower_32_bits(dma_addr);
+}
+
+static u32 hinic3_rx_fill_buffers(struct hinic3_rxq *rxq)
+{
+ u32 i, free_wqebbs = rxq->delta - 1;
+ struct hinic3_rx_info *rx_info;
+ dma_addr_t dma_addr;
+ int err;
+
+ for (i = 0; i < free_wqebbs; i++) {
+ rx_info = &rxq->rx_info[rxq->next_to_update];
+
+ err = rx_alloc_mapped_page(rxq->page_pool, rx_info,
+ rxq->buf_len);
+ if (unlikely(err))
+ break;
+
+ dma_addr = page_pool_get_dma_addr(rx_info->page) +
+ rx_info->page_offset;
+ rq_wqe_buf_set(rxq->rq, rxq->next_to_update, dma_addr,
+ rxq->buf_len);
+ rxq->next_to_update = (rxq->next_to_update + 1) & rxq->q_mask;
+ }
+
+ if (likely(i)) {
+ hinic3_write_db(rxq->rq, rxq->q_id & 3, DB_CFLAG_DP_RQ,
+ rxq->next_to_update << HINIC3_NORMAL_RQ_WQE);
+ rxq->delta -= i;
+ rxq->next_to_alloc = rxq->next_to_update;
+ }
+
+ return i;
+}
+
+static void hinic3_add_rx_frag(struct hinic3_rxq *rxq,
+ struct hinic3_rx_info *rx_info,
+ struct sk_buff *skb, u32 size)
+{
+ struct page *page;
+ u8 *va;
+
+ page = rx_info->page;
+ va = (u8 *)page_address(page) + rx_info->page_offset;
+ net_prefetch(va);
+
+ page_pool_dma_sync_for_cpu(rxq->page_pool, page, rx_info->page_offset,
+ rxq->buf_len);
+
+ if (size <= HINIC3_RX_HDR_SIZE && !skb_is_nonlinear(skb)) {
+ memcpy(__skb_put(skb, size), va,
+ ALIGN(size, sizeof(long)));
+ page_pool_put_full_page(rxq->page_pool, page, false);
+
+ return;
+ }
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+ rx_info->page_offset, size, rxq->buf_len);
+ skb_mark_for_recycle(skb);
+}
+
+static void packaging_skb(struct hinic3_rxq *rxq, struct sk_buff *skb,
+ u32 sge_num, u32 pkt_len)
+{
+ struct hinic3_rx_info *rx_info;
+ u32 temp_pkt_len = pkt_len;
+ u32 temp_sge_num = sge_num;
+ u32 sw_ci;
+ u32 size;
+
+ sw_ci = rxq->cons_idx & rxq->q_mask;
+ while (temp_sge_num) {
+ rx_info = &rxq->rx_info[sw_ci];
+ sw_ci = (sw_ci + 1) & rxq->q_mask;
+ if (unlikely(temp_pkt_len > rxq->buf_len)) {
+ size = rxq->buf_len;
+ temp_pkt_len -= rxq->buf_len;
+ } else {
+ size = temp_pkt_len;
+ }
+
+ hinic3_add_rx_frag(rxq, rx_info, skb, size);
+
+ /* clear the page pointer in rx_info */
+ rx_info->page = NULL;
+ temp_sge_num--;
+ }
+}
+
+static u32 hinic3_get_sge_num(struct hinic3_rxq *rxq, u32 pkt_len)
+{
+ u32 sge_num;
+
+ sge_num = pkt_len >> rxq->buf_len_shift;
+ sge_num += (pkt_len & (rxq->buf_len - 1)) ? 1 : 0;
+
+ return sge_num;
+}
+
+static struct sk_buff *hinic3_fetch_rx_buffer(struct hinic3_rxq *rxq,
+ u32 pkt_len)
+{
+ struct sk_buff *skb;
+ u32 sge_num;
+
+ skb = napi_alloc_skb(&rxq->irq_cfg->napi, HINIC3_RX_HDR_SIZE);
+ if (unlikely(!skb))
+ return NULL;
+
+ sge_num = hinic3_get_sge_num(rxq, pkt_len);
+
+ net_prefetchw(skb->data);
+ packaging_skb(rxq, skb, sge_num, pkt_len);
+
+ rxq->cons_idx += sge_num;
+ rxq->delta += sge_num;
+
+ return skb;
+}
+
+static void hinic3_pull_tail(struct sk_buff *skb)
+{
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
+ unsigned int pull_len;
+ unsigned char *va;
+
+ va = skb_frag_address(frag);
+
+ /* we need the header to contain at least ETH_HLEN bytes, or 60 bytes
+ * when skb->len is less than 60, so that skb_pad has data to work with.
+ */
+ pull_len = eth_get_headlen(skb->dev, va, HINIC3_RX_HDR_SIZE);
+
+ /* align pull length to size of long to optimize memcpy performance */
+ skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
+
+ /* update all of the pointers */
+ skb_frag_size_sub(frag, pull_len);
+ skb_frag_off_add(frag, pull_len);
+
+ skb->data_len -= pull_len;
+ skb->tail += pull_len;
+}
+
+static void hinic3_rx_csum(struct hinic3_rxq *rxq, u32 offload_type,
+ u32 status, struct sk_buff *skb)
+{
+ u32 pkt_fmt = RQ_CQE_OFFOLAD_TYPE_GET(offload_type, TUNNEL_PKT_FORMAT);
+ u32 pkt_type = RQ_CQE_OFFOLAD_TYPE_GET(offload_type, PKT_TYPE);
+ u32 ip_type = RQ_CQE_OFFOLAD_TYPE_GET(offload_type, IP_TYPE);
+ u32 csum_err = RQ_CQE_STATUS_GET(status, CSUM_ERR);
+ struct net_device *netdev = rxq->netdev;
+
+ if (!(netdev->features & NETIF_F_RXCSUM))
+ return;
+
+ if (unlikely(csum_err)) {
+ /* pkt type is recognized by HW, and csum is wrong */
+ skb->ip_summed = CHECKSUM_NONE;
+ return;
+ }
+
+ if (ip_type == HINIC3_RX_INVALID_IP_TYPE ||
+ !(pkt_fmt == HINIC3_RX_PKT_FORMAT_NON_TUNNEL ||
+ pkt_fmt == HINIC3_RX_PKT_FORMAT_VXLAN)) {
+ skb->ip_summed = CHECKSUM_NONE;
+ return;
+ }
+
+ switch (pkt_type) {
+ case HINIC3_RX_TCP_PKT:
+ case HINIC3_RX_UDP_PKT:
+ case HINIC3_RX_SCTP_PKT:
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ break;
+ default:
+ skb->ip_summed = CHECKSUM_NONE;
+ break;
+ }
+}
+
+static void hinic3_lro_set_gso_params(struct sk_buff *skb, u16 num_lro)
+{
+ struct ethhdr *eth = (struct ethhdr *)(skb->data);
+ __be16 proto;
+
+ proto = __vlan_get_protocol(skb, eth->h_proto, NULL);
+
+ skb_shinfo(skb)->gso_size = DIV_ROUND_UP(skb->len - skb_headlen(skb),
+ num_lro);
+ skb_shinfo(skb)->gso_type = proto == htons(ETH_P_IP) ?
+ SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
+ skb_shinfo(skb)->gso_segs = num_lro;
+}
+
+static int recv_one_pkt(struct hinic3_rxq *rxq, struct hinic3_rq_cqe *rx_cqe,
+ u32 pkt_len, u32 vlan_len, u32 status)
+{
+ struct net_device *netdev = rxq->netdev;
+ struct sk_buff *skb;
+ u32 offload_type;
+ u16 num_lro;
+
+ skb = hinic3_fetch_rx_buffer(rxq, pkt_len);
+ if (unlikely(!skb))
+ return -ENOMEM;
+
+ /* place header in linear portion of buffer */
+ if (skb_is_nonlinear(skb))
+ hinic3_pull_tail(skb);
+
+ offload_type = rx_cqe->offload_type;
+ hinic3_rx_csum(rxq, offload_type, status, skb);
+
+ num_lro = RQ_CQE_STATUS_GET(status, NUM_LRO);
+ if (num_lro)
+ hinic3_lro_set_gso_params(skb, num_lro);
+
+ skb_record_rx_queue(skb, rxq->q_id);
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ if (skb_has_frag_list(skb)) {
+ napi_gro_flush(&rxq->irq_cfg->napi, false);
+ netif_receive_skb(skb);
+ } else {
+ napi_gro_receive(&rxq->irq_cfg->napi, skb);
+ }
+
+ return 0;
+}
+
+int hinic3_rx_poll(struct hinic3_rxq *rxq, int budget)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(rxq->netdev);
+ u32 sw_ci, status, pkt_len, vlan_len;
+ struct hinic3_rq_cqe *rx_cqe;
+ u32 num_wqe = 0;
+ int nr_pkts = 0;
+ u16 num_lro;
+
+ while (likely(nr_pkts < budget)) {
+ sw_ci = rxq->cons_idx & rxq->q_mask;
+ rx_cqe = rxq->cqe_arr + sw_ci;
+ status = rx_cqe->status;
+ if (!RQ_CQE_STATUS_GET(status, RXDONE))
+ break;
+
+ /* make sure we read rx_done before packet length */
+ rmb();
+
+ vlan_len = rx_cqe->vlan_len;
+ pkt_len = RQ_CQE_SGE_GET(vlan_len, LEN);
+ if (recv_one_pkt(rxq, rx_cqe, pkt_len, vlan_len, status))
+ break;
+
+ nr_pkts++;
+ num_lro = RQ_CQE_STATUS_GET(status, NUM_LRO);
+ if (num_lro)
+ num_wqe += hinic3_get_sge_num(rxq, pkt_len);
+
+ rx_cqe->status = 0;
+
+ if (num_wqe >= nic_dev->lro_replenish_thld)
+ break;
+ }
+
+ if (rxq->delta >= HINIC3_RX_BUFFER_WRITE)
+ hinic3_rx_fill_buffers(rxq);
+
+ return nr_pkts;
+}
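recv_one_pkt() relies on hinic3_get_sge_num() to turn the CQE packet length into the number of fixed-size receive buffers the packet spans: a shift plus a remainder test, i.e. a ceiling division. A worked userspace sketch assuming a hypothetical 2 KiB receive buffer:

#include <stdio.h>

int main(void)
{
	/* Assumed receive buffer of 2048 bytes (buf_len_shift = 11). */
	unsigned int buf_len = 2048, buf_len_shift = 11;
	unsigned int pkt_lens[] = { 60, 2048, 2049, 9000 };
	unsigned int i;

	for (i = 0; i < sizeof(pkt_lens) / sizeof(pkt_lens[0]); i++) {
		unsigned int len = pkt_lens[i];
		unsigned int sge_num = len >> buf_len_shift;

		sge_num += (len & (buf_len - 1)) ? 1 : 0;
		printf("pkt_len=%u -> sge_num=%u\n", len, sge_num);
	}
	return 0;
}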
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_rx.h b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.h
new file mode 100644
index 000000000000..1cca21858d40
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_RX_H_
+#define _HINIC3_RX_H_
+
+#include <linux/bitfield.h>
+#include <linux/netdevice.h>
+
+#define RQ_CQE_OFFOLAD_TYPE_PKT_TYPE_MASK GENMASK(4, 0)
+#define RQ_CQE_OFFOLAD_TYPE_IP_TYPE_MASK GENMASK(6, 5)
+#define RQ_CQE_OFFOLAD_TYPE_TUNNEL_PKT_FORMAT_MASK GENMASK(11, 8)
+#define RQ_CQE_OFFOLAD_TYPE_VLAN_EN_MASK BIT(21)
+#define RQ_CQE_OFFOLAD_TYPE_GET(val, member) \
+ FIELD_GET(RQ_CQE_OFFOLAD_TYPE_##member##_MASK, val)
+
+#define RQ_CQE_SGE_VLAN_MASK GENMASK(15, 0)
+#define RQ_CQE_SGE_LEN_MASK GENMASK(31, 16)
+#define RQ_CQE_SGE_GET(val, member) \
+ FIELD_GET(RQ_CQE_SGE_##member##_MASK, val)
+
+#define RQ_CQE_STATUS_CSUM_ERR_MASK GENMASK(15, 0)
+#define RQ_CQE_STATUS_NUM_LRO_MASK GENMASK(23, 16)
+#define RQ_CQE_STATUS_RXDONE_MASK BIT(31)
+#define RQ_CQE_STATUS_GET(val, member) \
+ FIELD_GET(RQ_CQE_STATUS_##member##_MASK, val)
+
+/* RX Completion information that is provided by HW for a specific RX WQE */
+struct hinic3_rq_cqe {
+ u32 status;
+ u32 vlan_len;
+ u32 offload_type;
+ u32 rsvd3;
+ u32 rsvd4;
+ u32 rsvd5;
+ u32 rsvd6;
+ u32 pkt_info;
+};
+
+struct hinic3_rq_wqe {
+ u32 buf_hi_addr;
+ u32 buf_lo_addr;
+ u32 cqe_hi_addr;
+ u32 cqe_lo_addr;
+};
+
+struct hinic3_rx_info {
+ struct page *page;
+ u32 page_offset;
+};
+
+struct hinic3_rxq {
+ struct net_device *netdev;
+
+ u16 q_id;
+ u32 q_depth;
+ u32 q_mask;
+
+ u16 buf_len;
+ u32 buf_len_shift;
+
+ u32 cons_idx;
+ u32 delta;
+
+ u32 irq_id;
+ u16 msix_entry_idx;
+
+ /* cqe_arr and rx_info are arrays of rq_depth elements. Each element is
+ * statically associated (by index) with a specific rq_wqe.
+ */
+ struct hinic3_rq_cqe *cqe_arr;
+ struct hinic3_rx_info *rx_info;
+ struct page_pool *page_pool;
+
+ struct hinic3_io_queue *rq;
+
+ struct hinic3_irq_cfg *irq_cfg;
+ u16 next_to_alloc;
+ u16 next_to_update;
+ struct device *dev; /* device for DMA mapping */
+
+ dma_addr_t cqe_start_paddr;
+} ____cacheline_aligned;
+
+int hinic3_alloc_rxqs(struct net_device *netdev);
+void hinic3_free_rxqs(struct net_device *netdev);
+
+int hinic3_rx_poll(struct hinic3_rxq *rxq, int budget);
+
+#endif
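The poll loop decodes the CQE status word with FIELD_GET against the masks above: RXDONE in bit 31, NUM_LRO in bits 23:16 and CSUM_ERR in bits 15:0. A minimal userspace sketch of the same extraction with plain shifts, using a made-up status value:

#include <stdio.h>

int main(void)
{
	/* Made-up CQE status: RXDONE set, 4 coalesced LRO segments,
	 * no checksum error.
	 */
	unsigned int status = (1u << 31) | (4u << 16);

	unsigned int rxdone = (status >> 31) & 0x1;	/* bit 31     */
	unsigned int num_lro = (status >> 16) & 0xff;	/* bits 23:16 */
	unsigned int csum_err = status & 0xffff;	/* bits 15:0  */

	printf("rxdone=%u num_lro=%u csum_err=%#x\n", rxdone, num_lro, csum_err);
	return 0;
}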
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c b/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c
new file mode 100644
index 000000000000..ae08257dd1d2
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c
@@ -0,0 +1,670 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/if_vlan.h>
+#include <linux/iopoll.h>
+#include <net/ip6_checksum.h>
+#include <net/ipv6.h>
+#include <net/netdev_queues.h>
+
+#include "hinic3_hwdev.h"
+#include "hinic3_nic_cfg.h"
+#include "hinic3_nic_dev.h"
+#include "hinic3_nic_io.h"
+#include "hinic3_tx.h"
+#include "hinic3_wq.h"
+
+#define MIN_SKB_LEN 32
+
+int hinic3_alloc_txqs(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+ u16 q_id, num_txqs = nic_dev->max_qps;
+ struct pci_dev *pdev = nic_dev->pdev;
+ struct hinic3_txq *txq;
+
+ if (!num_txqs) {
+ dev_err(hwdev->dev, "Cannot allocate zero size txqs\n");
+ return -EINVAL;
+ }
+
+ nic_dev->txqs = kcalloc(num_txqs, sizeof(*nic_dev->txqs), GFP_KERNEL);
+ if (!nic_dev->txqs)
+ return -ENOMEM;
+
+ for (q_id = 0; q_id < num_txqs; q_id++) {
+ txq = &nic_dev->txqs[q_id];
+ txq->netdev = netdev;
+ txq->q_id = q_id;
+ txq->q_depth = nic_dev->q_params.sq_depth;
+ txq->q_mask = nic_dev->q_params.sq_depth - 1;
+ txq->dev = &pdev->dev;
+ }
+
+ return 0;
+}
+
+void hinic3_free_txqs(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+
+ kfree(nic_dev->txqs);
+}
+
+static void hinic3_set_buf_desc(struct hinic3_sq_bufdesc *buf_descs,
+ dma_addr_t addr, u32 len)
+{
+ buf_descs->hi_addr = upper_32_bits(addr);
+ buf_descs->lo_addr = lower_32_bits(addr);
+ buf_descs->len = len;
+}
+
+static int hinic3_tx_map_skb(struct net_device *netdev, struct sk_buff *skb,
+ struct hinic3_txq *txq,
+ struct hinic3_tx_info *tx_info,
+ struct hinic3_sq_wqe_combo *wqe_combo)
+{
+ struct hinic3_sq_wqe_desc *wqe_desc = wqe_combo->ctrl_bd0;
+ struct hinic3_sq_bufdesc *buf_desc = wqe_combo->bds_head;
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_dma_info *dma_info = tx_info->dma_info;
+ struct pci_dev *pdev = nic_dev->pdev;
+ skb_frag_t *frag;
+ u32 i, idx;
+ int err;
+
+ dma_info[0].dma = dma_map_single(&pdev->dev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, dma_info[0].dma))
+ return -EFAULT;
+
+ dma_info[0].len = skb_headlen(skb);
+
+ wqe_desc->hi_addr = upper_32_bits(dma_info[0].dma);
+ wqe_desc->lo_addr = lower_32_bits(dma_info[0].dma);
+
+ wqe_desc->ctrl_len = dma_info[0].len;
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ frag = &(skb_shinfo(skb)->frags[i]);
+ if (unlikely(i == wqe_combo->first_bds_num))
+ buf_desc = wqe_combo->bds_sec2;
+
+ idx = i + 1;
+ dma_info[idx].dma = skb_frag_dma_map(&pdev->dev, frag, 0,
+ skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, dma_info[idx].dma)) {
+ err = -EFAULT;
+ goto err_unmap_page;
+ }
+ dma_info[idx].len = skb_frag_size(frag);
+
+ hinic3_set_buf_desc(buf_desc, dma_info[idx].dma,
+ dma_info[idx].len);
+ buf_desc++;
+ }
+
+ return 0;
+
+err_unmap_page:
+ while (idx > 1) {
+ idx--;
+ dma_unmap_page(&pdev->dev, dma_info[idx].dma,
+ dma_info[idx].len, DMA_TO_DEVICE);
+ }
+ dma_unmap_single(&pdev->dev, dma_info[0].dma, dma_info[0].len,
+ DMA_TO_DEVICE);
+ return err;
+}
+
+static void hinic3_tx_unmap_skb(struct net_device *netdev,
+ struct sk_buff *skb,
+ struct hinic3_dma_info *dma_info)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct pci_dev *pdev = nic_dev->pdev;
+ int i;
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags;) {
+ i++;
+ dma_unmap_page(&pdev->dev,
+ dma_info[i].dma,
+ dma_info[i].len, DMA_TO_DEVICE);
+ }
+
+ dma_unmap_single(&pdev->dev, dma_info[0].dma,
+ dma_info[0].len, DMA_TO_DEVICE);
+}
+
+union hinic3_ip {
+ struct iphdr *v4;
+ struct ipv6hdr *v6;
+ unsigned char *hdr;
+};
+
+union hinic3_l4 {
+ struct tcphdr *tcp;
+ struct udphdr *udp;
+ unsigned char *hdr;
+};
+
+enum hinic3_l3_type {
+ HINIC3_L3_UNKNOWN = 0,
+ HINIC3_L3_IP6_PKT = 1,
+ HINIC3_L3_IP4_PKT_NO_CSUM = 2,
+ HINIC3_L3_IP4_PKT_CSUM = 3,
+};
+
+enum hinic3_l4_offload_type {
+ HINIC3_L4_OFFLOAD_DISABLE = 0,
+ HINIC3_L4_OFFLOAD_TCP = 1,
+ HINIC3_L4_OFFLOAD_STCP = 2,
+ HINIC3_L4_OFFLOAD_UDP = 3,
+};
+
+/* initialize l4 offset and offload */
+static void get_inner_l4_info(struct sk_buff *skb, union hinic3_l4 *l4,
+ u8 l4_proto, u32 *offset,
+ enum hinic3_l4_offload_type *l4_offload)
+{
+ switch (l4_proto) {
+ case IPPROTO_TCP:
+ *l4_offload = HINIC3_L4_OFFLOAD_TCP;
+ /* To stay consistent with TSO, the payload offset starts at the payload */
+ *offset = (l4->tcp->doff << TCP_HDR_DATA_OFF_UNIT_SHIFT) +
+ TRANSPORT_OFFSET(l4->hdr, skb);
+ break;
+
+ case IPPROTO_UDP:
+ *l4_offload = HINIC3_L4_OFFLOAD_UDP;
+ *offset = TRANSPORT_OFFSET(l4->hdr, skb);
+ break;
+ default:
+ *l4_offload = HINIC3_L4_OFFLOAD_DISABLE;
+ *offset = 0;
+ }
+}
+
+static int hinic3_tx_csum(struct hinic3_txq *txq, struct hinic3_sq_task *task,
+ struct sk_buff *skb)
+{
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
+ if (skb->encapsulation) {
+ union hinic3_ip ip;
+ u8 l4_proto;
+
+ task->pkt_info0 |= SQ_TASK_INFO0_SET(1, TUNNEL_FLAG);
+
+ ip.hdr = skb_network_header(skb);
+ if (ip.v4->version == 4) {
+ l4_proto = ip.v4->protocol;
+ } else if (ip.v4->version == 6) {
+ union hinic3_l4 l4;
+ unsigned char *exthdr;
+ __be16 frag_off;
+
+ exthdr = ip.hdr + sizeof(*ip.v6);
+ l4_proto = ip.v6->nexthdr;
+ l4.hdr = skb_transport_header(skb);
+ if (l4.hdr != exthdr)
+ ipv6_skip_exthdr(skb, exthdr - skb->data,
+ &l4_proto, &frag_off);
+ } else {
+ l4_proto = IPPROTO_RAW;
+ }
+
+ if (l4_proto != IPPROTO_UDP ||
+ ((struct udphdr *)skb_transport_header(skb))->dest !=
+ VXLAN_OFFLOAD_PORT_LE) {
+ /* Unsupported tunnel packet, disable csum offload */
+ skb_checksum_help(skb);
+ return 0;
+ }
+ }
+
+ task->pkt_info0 |= SQ_TASK_INFO0_SET(1, INNER_L4_EN);
+
+ return 1;
+}
+
+static void get_inner_l3_l4_type(struct sk_buff *skb, union hinic3_ip *ip,
+ union hinic3_l4 *l4,
+ enum hinic3_l3_type *l3_type, u8 *l4_proto)
+{
+ unsigned char *exthdr;
+ __be16 frag_off;
+
+ if (ip->v4->version == 4) {
+ *l3_type = HINIC3_L3_IP4_PKT_CSUM;
+ *l4_proto = ip->v4->protocol;
+ } else if (ip->v4->version == 6) {
+ *l3_type = HINIC3_L3_IP6_PKT;
+ exthdr = ip->hdr + sizeof(*ip->v6);
+ *l4_proto = ip->v6->nexthdr;
+ if (exthdr != l4->hdr) {
+ ipv6_skip_exthdr(skb, exthdr - skb->data,
+ l4_proto, &frag_off);
+ }
+ } else {
+ *l3_type = HINIC3_L3_UNKNOWN;
+ *l4_proto = 0;
+ }
+}
+
+static void hinic3_set_tso_info(struct hinic3_sq_task *task, u32 *queue_info,
+ enum hinic3_l4_offload_type l4_offload,
+ u32 offset, u32 mss)
+{
+ if (l4_offload == HINIC3_L4_OFFLOAD_TCP) {
+ *queue_info |= SQ_CTRL_QUEUE_INFO_SET(1, TSO);
+ task->pkt_info0 |= SQ_TASK_INFO0_SET(1, INNER_L4_EN);
+ } else if (l4_offload == HINIC3_L4_OFFLOAD_UDP) {
+ *queue_info |= SQ_CTRL_QUEUE_INFO_SET(1, UFO);
+ task->pkt_info0 |= SQ_TASK_INFO0_SET(1, INNER_L4_EN);
+ }
+
+ /* enable L3 calculation */
+ task->pkt_info0 |= SQ_TASK_INFO0_SET(1, INNER_L3_EN);
+
+ *queue_info |= SQ_CTRL_QUEUE_INFO_SET(offset >> 1, PLDOFF);
+
+ /* set MSS value */
+ *queue_info &= ~SQ_CTRL_QUEUE_INFO_MSS_MASK;
+ *queue_info |= SQ_CTRL_QUEUE_INFO_SET(mss, MSS);
+}
+
+static __sum16 csum_magic(union hinic3_ip *ip, unsigned short proto)
+{
+ return (ip->v4->version == 4) ?
+ csum_tcpudp_magic(ip->v4->saddr, ip->v4->daddr, 0, proto, 0) :
+ csum_ipv6_magic(&ip->v6->saddr, &ip->v6->daddr, 0, proto, 0);
+}
+
+static int hinic3_tso(struct hinic3_sq_task *task, u32 *queue_info,
+ struct sk_buff *skb)
+{
+ enum hinic3_l4_offload_type l4_offload;
+ enum hinic3_l3_type l3_type;
+ union hinic3_ip ip;
+ union hinic3_l4 l4;
+ u8 l4_proto;
+ u32 offset;
+ int err;
+
+ if (!skb_is_gso(skb))
+ return 0;
+
+ err = skb_cow_head(skb, 0);
+ if (err < 0)
+ return err;
+
+ if (skb->encapsulation) {
+ u32 gso_type = skb_shinfo(skb)->gso_type;
+ /* L3 checksum is always enabled */
+ task->pkt_info0 |= SQ_TASK_INFO0_SET(1, OUT_L3_EN);
+ task->pkt_info0 |= SQ_TASK_INFO0_SET(1, TUNNEL_FLAG);
+
+ l4.hdr = skb_transport_header(skb);
+ ip.hdr = skb_network_header(skb);
+
+ if (gso_type & SKB_GSO_UDP_TUNNEL_CSUM) {
+ l4.udp->check = ~csum_magic(&ip, IPPROTO_UDP);
+ task->pkt_info0 |= SQ_TASK_INFO0_SET(1, OUT_L4_EN);
+ }
+
+ ip.hdr = skb_inner_network_header(skb);
+ l4.hdr = skb_inner_transport_header(skb);
+ } else {
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_transport_header(skb);
+ }
+
+ get_inner_l3_l4_type(skb, &ip, &l4, &l3_type, &l4_proto);
+
+ if (l4_proto == IPPROTO_TCP)
+ l4.tcp->check = ~csum_magic(&ip, IPPROTO_TCP);
+
+ get_inner_l4_info(skb, &l4, l4_proto, &offset, &l4_offload);
+
+ hinic3_set_tso_info(task, queue_info, l4_offload, offset,
+ skb_shinfo(skb)->gso_size);
+
+ return 1;
+}
+
+static void hinic3_set_vlan_tx_offload(struct hinic3_sq_task *task,
+ u16 vlan_tag, u8 vlan_tpid)
+{
+ /* vlan_tpid: 0=select TPID0 in IPSU, 1=select TPID1 in IPSU
+ * 2=select TPID2 in IPSU, 3=select TPID3 in IPSU,
+ * 4=select TPID4 in IPSU
+ */
+ task->vlan_offload = SQ_TASK_INFO3_SET(vlan_tag, VLAN_TAG) |
+ SQ_TASK_INFO3_SET(vlan_tpid, VLAN_TPID) |
+ SQ_TASK_INFO3_SET(1, VLAN_TAG_VALID);
+}
+
+static u32 hinic3_tx_offload(struct sk_buff *skb, struct hinic3_sq_task *task,
+ u32 *queue_info, struct hinic3_txq *txq)
+{
+ u32 offload = 0;
+ int tso_cs_en;
+
+ task->pkt_info0 = 0;
+ task->ip_identify = 0;
+ task->rsvd = 0;
+ task->vlan_offload = 0;
+
+ tso_cs_en = hinic3_tso(task, queue_info, skb);
+ if (tso_cs_en < 0) {
+ offload = HINIC3_TX_OFFLOAD_INVALID;
+ return offload;
+ } else if (tso_cs_en) {
+ offload |= HINIC3_TX_OFFLOAD_TSO;
+ } else {
+ tso_cs_en = hinic3_tx_csum(txq, task, skb);
+ if (tso_cs_en)
+ offload |= HINIC3_TX_OFFLOAD_CSUM;
+ }
+
+#define VLAN_INSERT_MODE_MAX 5
+ if (unlikely(skb_vlan_tag_present(skb))) {
+ /* select vlan insert mode by qid, default 802.1Q Tag type */
+ hinic3_set_vlan_tx_offload(task, skb_vlan_tag_get(skb),
+ txq->q_id % VLAN_INSERT_MODE_MAX);
+ offload |= HINIC3_TX_OFFLOAD_VLAN;
+ }
+
+ if (unlikely(SQ_CTRL_QUEUE_INFO_GET(*queue_info, PLDOFF) >
+ SQ_CTRL_MAX_PLDOFF)) {
+ offload = HINIC3_TX_OFFLOAD_INVALID;
+ return offload;
+ }
+
+ return offload;
+}
+
+static u16 hinic3_get_and_update_sq_owner(struct hinic3_io_queue *sq,
+ u16 curr_pi, u16 wqebb_cnt)
+{
+ u16 owner = sq->owner;
+
+ if (unlikely(curr_pi + wqebb_cnt >= sq->wq.q_depth))
+ sq->owner = !sq->owner;
+
+ return owner;
+}
+
+static u16 hinic3_set_wqe_combo(struct hinic3_txq *txq,
+ struct hinic3_sq_wqe_combo *wqe_combo,
+ u32 offload, u16 num_sge, u16 *curr_pi)
+{
+ struct hinic3_sq_bufdesc *first_part_wqebbs, *second_part_wqebbs;
+ u16 first_part_wqebbs_num, tmp_pi;
+
+ wqe_combo->ctrl_bd0 = hinic3_wq_get_one_wqebb(&txq->sq->wq, curr_pi);
+ if (!offload && num_sge == 1) {
+ wqe_combo->wqe_type = SQ_WQE_COMPACT_TYPE;
+ return hinic3_get_and_update_sq_owner(txq->sq, *curr_pi, 1);
+ }
+
+ wqe_combo->wqe_type = SQ_WQE_EXTENDED_TYPE;
+
+ if (offload) {
+ wqe_combo->task = hinic3_wq_get_one_wqebb(&txq->sq->wq,
+ &tmp_pi);
+ wqe_combo->task_type = SQ_WQE_TASKSECT_16BYTES;
+ } else {
+ wqe_combo->task_type = SQ_WQE_TASKSECT_46BITS;
+ }
+
+ if (num_sge > 1) {
+ /* The first WQEBB contains bd0, and the bd size equals the SQ WQEBB
+ * size, so we request (num_sge - 1) as the wanted wqebb_cnt.
+ */
+ hinic3_wq_get_multi_wqebbs(&txq->sq->wq, num_sge - 1, &tmp_pi,
+ &first_part_wqebbs,
+ &second_part_wqebbs,
+ &first_part_wqebbs_num);
+ wqe_combo->bds_head = first_part_wqebbs;
+ wqe_combo->bds_sec2 = second_part_wqebbs;
+ wqe_combo->first_bds_num = first_part_wqebbs_num;
+ }
+
+ return hinic3_get_and_update_sq_owner(txq->sq, *curr_pi,
+ num_sge + !!offload);
+}
+
+static void hinic3_prepare_sq_ctrl(struct hinic3_sq_wqe_combo *wqe_combo,
+ u32 queue_info, int nr_descs, u16 owner)
+{
+ struct hinic3_sq_wqe_desc *wqe_desc = wqe_combo->ctrl_bd0;
+
+ if (wqe_combo->wqe_type == SQ_WQE_COMPACT_TYPE) {
+ wqe_desc->ctrl_len |=
+ SQ_CTRL_SET(SQ_NORMAL_WQE, DATA_FORMAT) |
+ SQ_CTRL_SET(wqe_combo->wqe_type, EXTENDED) |
+ SQ_CTRL_SET(owner, OWNER);
+
+ /* compact wqe queue_info will transfer to chip */
+ wqe_desc->queue_info = 0;
+ return;
+ }
+
+ wqe_desc->ctrl_len |= SQ_CTRL_SET(nr_descs, BUFDESC_NUM) |
+ SQ_CTRL_SET(wqe_combo->task_type, TASKSECT_LEN) |
+ SQ_CTRL_SET(SQ_NORMAL_WQE, DATA_FORMAT) |
+ SQ_CTRL_SET(wqe_combo->wqe_type, EXTENDED) |
+ SQ_CTRL_SET(owner, OWNER);
+
+ wqe_desc->queue_info = queue_info;
+ wqe_desc->queue_info |= SQ_CTRL_QUEUE_INFO_SET(1, UC);
+
+ if (!SQ_CTRL_QUEUE_INFO_GET(wqe_desc->queue_info, MSS)) {
+ wqe_desc->queue_info |=
+ SQ_CTRL_QUEUE_INFO_SET(HINIC3_TX_MSS_DEFAULT, MSS);
+ } else if (SQ_CTRL_QUEUE_INFO_GET(wqe_desc->queue_info, MSS) <
+ HINIC3_TX_MSS_MIN) {
+ /* mss should not be less than 80 */
+ wqe_desc->queue_info &= ~SQ_CTRL_QUEUE_INFO_MSS_MASK;
+ wqe_desc->queue_info |=
+ SQ_CTRL_QUEUE_INFO_SET(HINIC3_TX_MSS_MIN, MSS);
+ }
+}
+
+static netdev_tx_t hinic3_send_one_skb(struct sk_buff *skb,
+ struct net_device *netdev,
+ struct hinic3_txq *txq)
+{
+ struct hinic3_sq_wqe_combo wqe_combo = {};
+ struct hinic3_tx_info *tx_info;
+ struct hinic3_txq *tx_q = txq;
+ u32 offload, queue_info = 0;
+ struct hinic3_sq_task task;
+ u16 wqebb_cnt, num_sge;
+ u16 saved_wq_prod_idx;
+ u16 owner, pi = 0;
+ u8 saved_sq_owner;
+ int err;
+
+ if (unlikely(skb->len < MIN_SKB_LEN)) {
+ if (skb_pad(skb, MIN_SKB_LEN - skb->len))
+ goto err_out;
+
+ skb->len = MIN_SKB_LEN;
+ }
+
+ num_sge = skb_shinfo(skb)->nr_frags + 1;
+ /* assume normal wqe format + 1 wqebb for task info */
+ wqebb_cnt = num_sge + 1;
+
+ if (unlikely(hinic3_wq_free_wqebbs(&txq->sq->wq) < wqebb_cnt)) {
+ if (likely(wqebb_cnt > txq->tx_stop_thrs))
+ txq->tx_stop_thrs = min(wqebb_cnt, txq->tx_start_thrs);
+
+ netif_subqueue_try_stop(netdev, tx_q->sq->q_id,
+ hinic3_wq_free_wqebbs(&tx_q->sq->wq),
+ tx_q->tx_start_thrs);
+
+ return NETDEV_TX_BUSY;
+ }
+
+ offload = hinic3_tx_offload(skb, &task, &queue_info, txq);
+ if (unlikely(offload == HINIC3_TX_OFFLOAD_INVALID)) {
+ goto err_drop_pkt;
+ } else if (!offload) {
+ wqebb_cnt -= 1;
+ if (unlikely(num_sge == 1 &&
+ skb->len > HINIC3_COMPACT_WQEE_SKB_MAX_LEN))
+ goto err_drop_pkt;
+ }
+
+ saved_wq_prod_idx = txq->sq->wq.prod_idx;
+ saved_sq_owner = txq->sq->owner;
+
+ owner = hinic3_set_wqe_combo(txq, &wqe_combo, offload, num_sge, &pi);
+ if (offload)
+ *wqe_combo.task = task;
+
+ tx_info = &txq->tx_info[pi];
+ tx_info->skb = skb;
+ tx_info->wqebb_cnt = wqebb_cnt;
+
+ err = hinic3_tx_map_skb(netdev, skb, txq, tx_info, &wqe_combo);
+ if (err) {
+ /* Roll back the work queue to reclaim the WQEBBs we did not use */
+ txq->sq->wq.prod_idx = saved_wq_prod_idx;
+ txq->sq->owner = saved_sq_owner;
+ goto err_drop_pkt;
+ }
+
+ netdev_tx_sent_queue(netdev_get_tx_queue(netdev, txq->sq->q_id),
+ skb->len);
+ netif_subqueue_maybe_stop(netdev, tx_q->sq->q_id,
+ hinic3_wq_free_wqebbs(&tx_q->sq->wq),
+ tx_q->tx_stop_thrs,
+ tx_q->tx_start_thrs);
+
+ hinic3_prepare_sq_ctrl(&wqe_combo, queue_info, num_sge, owner);
+ hinic3_write_db(txq->sq, 0, DB_CFLAG_DP_SQ,
+ hinic3_get_sq_local_pi(txq->sq));
+
+ return NETDEV_TX_OK;
+
+err_drop_pkt:
+ dev_kfree_skb_any(skb);
+
+err_out:
+ return NETDEV_TX_OK;
+}
+
+netdev_tx_t hinic3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ u16 q_id = skb_get_queue_mapping(skb);
+
+ if (unlikely(!netif_carrier_ok(netdev)))
+ goto err_drop_pkt;
+
+ if (unlikely(q_id >= nic_dev->q_params.num_qps))
+ goto err_drop_pkt;
+
+ return hinic3_send_one_skb(skb, netdev, &nic_dev->txqs[q_id]);
+
+err_drop_pkt:
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
+static bool is_hw_complete_sq_process(struct hinic3_io_queue *sq)
+{
+ u16 sw_pi, hw_ci;
+
+ sw_pi = hinic3_get_sq_local_pi(sq);
+ hw_ci = hinic3_get_sq_hw_ci(sq);
+
+ return sw_pi == hw_ci;
+}
+
+#define HINIC3_FLUSH_QUEUE_POLL_SLEEP_US 10000
+#define HINIC3_FLUSH_QUEUE_POLL_TIMEOUT_US 10000000
+static int hinic3_stop_sq(struct hinic3_txq *txq)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(txq->netdev);
+ int err, rc;
+
+ err = read_poll_timeout(hinic3_force_drop_tx_pkt, rc,
+ is_hw_complete_sq_process(txq->sq) || rc,
+ HINIC3_FLUSH_QUEUE_POLL_SLEEP_US,
+ HINIC3_FLUSH_QUEUE_POLL_TIMEOUT_US,
+ true, nic_dev->hwdev);
+ if (rc)
+ return rc;
+ else
+ return err;
+}
+
+/* packet transmission should be stopped before calling this function */
+void hinic3_flush_txqs(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ u16 qid;
+ int err;
+
+ for (qid = 0; qid < nic_dev->q_params.num_qps; qid++) {
+ err = hinic3_stop_sq(&nic_dev->txqs[qid]);
+ netdev_tx_reset_subqueue(netdev, qid);
+ if (err)
+ netdev_err(netdev, "Failed to stop sq%u\n", qid);
+ }
+}
+
+#define HINIC3_BDS_PER_SQ_WQEBB \
+ (HINIC3_SQ_WQEBB_SIZE / sizeof(struct hinic3_sq_bufdesc))
+
+bool hinic3_tx_poll(struct hinic3_txq *txq, int budget)
+{
+ struct net_device *netdev = txq->netdev;
+ u16 hw_ci, sw_ci, q_id = txq->sq->q_id;
+ struct hinic3_tx_info *tx_info;
+ struct hinic3_txq *tx_q = txq;
+ unsigned int bytes_compl = 0;
+ unsigned int pkts = 0;
+ u16 wqebb_cnt = 0;
+
+ hw_ci = hinic3_get_sq_hw_ci(txq->sq);
+ dma_rmb();
+ sw_ci = hinic3_get_sq_local_ci(txq->sq);
+
+ do {
+ tx_info = &txq->tx_info[sw_ci];
+
+ /* Did all wqebb of this wqe complete? */
+ if (hw_ci == sw_ci ||
+ ((hw_ci - sw_ci) & txq->q_mask) < tx_info->wqebb_cnt)
+ break;
+
+ sw_ci = (sw_ci + tx_info->wqebb_cnt) & txq->q_mask;
+ net_prefetch(&txq->tx_info[sw_ci]);
+
+ wqebb_cnt += tx_info->wqebb_cnt;
+ bytes_compl += tx_info->skb->len;
+ pkts++;
+
+ hinic3_tx_unmap_skb(netdev, tx_info->skb, tx_info->dma_info);
+ napi_consume_skb(tx_info->skb, budget);
+ tx_info->skb = NULL;
+ } while (likely(pkts < HINIC3_TX_POLL_WEIGHT));
+
+ hinic3_wq_put_wqebbs(&txq->sq->wq, wqebb_cnt);
+
+ netif_subqueue_completed_wake(netdev, q_id, pkts, bytes_compl,
+ hinic3_wq_free_wqebbs(&tx_q->sq->wq),
+ tx_q->tx_start_thrs);
+
+ return pkts == HINIC3_TX_POLL_WEIGHT;
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_tx.h b/drivers/net/ethernet/huawei/hinic3/hinic3_tx.h
new file mode 100644
index 000000000000..9e505cc19dd5
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_tx.h
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_TX_H_
+#define _HINIC3_TX_H_
+
+#include <linux/bitops.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/netdevice.h>
+#include <net/checksum.h>
+
+#define VXLAN_OFFLOAD_PORT_LE cpu_to_be16(4789)
+#define TCP_HDR_DATA_OFF_UNIT_SHIFT 2
+#define TRANSPORT_OFFSET(l4_hdr, skb) ((l4_hdr) - (skb)->data)
+
+#define HINIC3_COMPACT_WQEE_SKB_MAX_LEN 16383
+#define HINIC3_TX_POLL_WEIGHT 64
+#define HINIC3_DEFAULT_STOP_THRS 6
+#define HINIC3_DEFAULT_START_THRS 24
+
+enum sq_wqe_data_format {
+ SQ_NORMAL_WQE = 0,
+};
+
+enum sq_wqe_ec_type {
+ SQ_WQE_COMPACT_TYPE = 0,
+ SQ_WQE_EXTENDED_TYPE = 1,
+};
+
+enum sq_wqe_tasksect_len_type {
+ SQ_WQE_TASKSECT_46BITS = 0,
+ SQ_WQE_TASKSECT_16BYTES = 1,
+};
+
+enum hinic3_tx_offload_type {
+ HINIC3_TX_OFFLOAD_TSO = BIT(0),
+ HINIC3_TX_OFFLOAD_CSUM = BIT(1),
+ HINIC3_TX_OFFLOAD_VLAN = BIT(2),
+ HINIC3_TX_OFFLOAD_INVALID = BIT(3),
+ HINIC3_TX_OFFLOAD_ESP = BIT(4),
+};
+
+#define SQ_CTRL_BUFDESC_NUM_MASK GENMASK(26, 19)
+#define SQ_CTRL_TASKSECT_LEN_MASK BIT(27)
+#define SQ_CTRL_DATA_FORMAT_MASK BIT(28)
+#define SQ_CTRL_EXTENDED_MASK BIT(30)
+#define SQ_CTRL_OWNER_MASK BIT(31)
+#define SQ_CTRL_SET(val, member) \
+ FIELD_PREP(SQ_CTRL_##member##_MASK, val)
+
+#define SQ_CTRL_QUEUE_INFO_PLDOFF_MASK GENMASK(9, 2)
+#define SQ_CTRL_QUEUE_INFO_UFO_MASK BIT(10)
+#define SQ_CTRL_QUEUE_INFO_TSO_MASK BIT(11)
+#define SQ_CTRL_QUEUE_INFO_MSS_MASK GENMASK(26, 13)
+#define SQ_CTRL_QUEUE_INFO_UC_MASK BIT(28)
+
+#define SQ_CTRL_QUEUE_INFO_SET(val, member) \
+ FIELD_PREP(SQ_CTRL_QUEUE_INFO_##member##_MASK, val)
+#define SQ_CTRL_QUEUE_INFO_GET(val, member) \
+ FIELD_GET(SQ_CTRL_QUEUE_INFO_##member##_MASK, val)
+
+#define SQ_CTRL_MAX_PLDOFF 221
+
+#define SQ_TASK_INFO0_TUNNEL_FLAG_MASK BIT(19)
+#define SQ_TASK_INFO0_INNER_L4_EN_MASK BIT(24)
+#define SQ_TASK_INFO0_INNER_L3_EN_MASK BIT(25)
+#define SQ_TASK_INFO0_OUT_L4_EN_MASK BIT(27)
+#define SQ_TASK_INFO0_OUT_L3_EN_MASK BIT(28)
+#define SQ_TASK_INFO0_SET(val, member) \
+ FIELD_PREP(SQ_TASK_INFO0_##member##_MASK, val)
+
+#define SQ_TASK_INFO3_VLAN_TAG_MASK GENMASK(15, 0)
+#define SQ_TASK_INFO3_VLAN_TPID_MASK GENMASK(18, 16)
+#define SQ_TASK_INFO3_VLAN_TAG_VALID_MASK BIT(19)
+#define SQ_TASK_INFO3_SET(val, member) \
+ FIELD_PREP(SQ_TASK_INFO3_##member##_MASK, val)
+
+struct hinic3_sq_wqe_desc {
+ u32 ctrl_len;
+ u32 queue_info;
+ u32 hi_addr;
+ u32 lo_addr;
+};
+
+struct hinic3_sq_task {
+ u32 pkt_info0;
+ u32 ip_identify;
+ u32 rsvd;
+ u32 vlan_offload;
+};
+
+struct hinic3_sq_wqe_combo {
+ struct hinic3_sq_wqe_desc *ctrl_bd0;
+ struct hinic3_sq_task *task;
+ struct hinic3_sq_bufdesc *bds_head;
+ struct hinic3_sq_bufdesc *bds_sec2;
+ u16 first_bds_num;
+ u32 wqe_type;
+ u32 task_type;
+};
+
+struct hinic3_dma_info {
+ dma_addr_t dma;
+ u32 len;
+};
+
+struct hinic3_tx_info {
+ struct sk_buff *skb;
+ u16 wqebb_cnt;
+ struct hinic3_dma_info *dma_info;
+};
+
+struct hinic3_txq {
+ struct net_device *netdev;
+ struct device *dev;
+
+ u16 q_id;
+ u16 tx_stop_thrs;
+ u16 tx_start_thrs;
+ u32 q_mask;
+ u32 q_depth;
+
+ struct hinic3_tx_info *tx_info;
+ struct hinic3_io_queue *sq;
+} ____cacheline_aligned;
+
+int hinic3_alloc_txqs(struct net_device *netdev);
+void hinic3_free_txqs(struct net_device *netdev);
+
+netdev_tx_t hinic3_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
+bool hinic3_tx_poll(struct hinic3_txq *txq, int budget);
+void hinic3_flush_txqs(struct net_device *netdev);
+
+#endif
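hinic3_prepare_sq_ctrl() applies the MSS rules encoded above: an MSS of zero in bits 26:13 of queue_info falls back to HINIC3_TX_MSS_DEFAULT, and anything below HINIC3_TX_MSS_MIN is raised to the minimum. A userspace sketch of that clamp under the same field position; the constants are copied from the headers and the helper name is illustrative:

#include <stdio.h>

/* MSS lives in bits 26:13 of queue_info (SQ_CTRL_QUEUE_INFO_MSS_MASK). */
#define MSS_SHIFT	13
#define MSS_MASK	(0x3fffu << MSS_SHIFT)

static unsigned int clamp_mss(unsigned int queue_info)
{
	unsigned int mss = (queue_info & MSS_MASK) >> MSS_SHIFT;

	if (!mss)
		mss = 0x3E00;	/* HINIC3_TX_MSS_DEFAULT */
	else if (mss < 0x50)
		mss = 0x50;	/* HINIC3_TX_MSS_MIN */

	return (queue_info & ~MSS_MASK) | (mss << MSS_SHIFT);
}

int main(void)
{
	unsigned int no_mss = 0;			/* MSS not set */
	unsigned int tiny_mss = 0x20u << MSS_SHIFT;	/* 32, below minimum */

	printf("mss0=%#x mss1=%#x\n",
	       (clamp_mss(no_mss) & MSS_MASK) >> MSS_SHIFT,
	       (clamp_mss(tiny_mss) & MSS_MASK) >> MSS_SHIFT);
	return 0;
}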
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_wq.c b/drivers/net/ethernet/huawei/hinic3/hinic3_wq.c
new file mode 100644
index 000000000000..2ac7efcd1365
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_wq.c
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/dma-mapping.h>
+
+#include "hinic3_hwdev.h"
+#include "hinic3_wq.h"
+
+void hinic3_wq_get_multi_wqebbs(struct hinic3_wq *wq,
+ u16 num_wqebbs, u16 *prod_idx,
+ struct hinic3_sq_bufdesc **first_part_wqebbs,
+ struct hinic3_sq_bufdesc **second_part_wqebbs,
+ u16 *first_part_wqebbs_num)
+{
+ u32 idx, remaining;
+
+ idx = wq->prod_idx & wq->idx_mask;
+ wq->prod_idx += num_wqebbs;
+ *prod_idx = idx;
+ *first_part_wqebbs = get_q_element(&wq->qpages, idx, &remaining);
+ if (likely(remaining >= num_wqebbs)) {
+ *first_part_wqebbs_num = num_wqebbs;
+ *second_part_wqebbs = NULL;
+ } else {
+ *first_part_wqebbs_num = remaining;
+ idx += remaining;
+ *second_part_wqebbs = get_q_element(&wq->qpages, idx, NULL);
+ }
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_wq.h b/drivers/net/ethernet/huawei/hinic3/hinic3_wq.h
new file mode 100644
index 000000000000..ab37893efd7e
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_wq.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_WQ_H_
+#define _HINIC3_WQ_H_
+
+#include <linux/io.h>
+
+#include "hinic3_queue_common.h"
+
+struct hinic3_sq_bufdesc {
+ /* 31-bit length; L2NIC only uses length[17:0] */
+ u32 len;
+ u32 rsvd;
+ u32 hi_addr;
+ u32 lo_addr;
+};
+
+/* Work queue is used to submit elements (tx, rx, cmd) to hw.
+ * Driver is the producer that advances prod_idx. cons_idx is advanced when
+ * HW reports completions of previously submitted elements.
+ */
+struct hinic3_wq {
+ struct hinic3_queue_pages qpages;
+ /* Unmasked producer/consumer indices that advance freely and wrap only on
+ * natural integer overflow, regardless of queue depth.
+ */
+ u16 cons_idx;
+ u16 prod_idx;
+
+ u32 q_depth;
+ u16 idx_mask;
+
+ /* Work Queue (logical WQEBB array) is mapped to hw via Chip Logical
+ * Address (CLA) using 1 of 2 levels:
+ * level 0 - direct mapping of single wq page
+ * level 1 - indirect mapping of multiple pages via additional page
+ * table.
+ * When wq uses level 1, wq_block will hold the allocated indirection
+ * table.
+ */
+ dma_addr_t wq_block_paddr;
+ __be64 *wq_block_vaddr;
+} ____cacheline_aligned;
+
+/* Get the number of work queue elements that are currently in use. */
+static inline u16 hinic3_wq_get_used(const struct hinic3_wq *wq)
+{
+ return READ_ONCE(wq->prod_idx) - READ_ONCE(wq->cons_idx);
+}
+
+static inline u16 hinic3_wq_free_wqebbs(struct hinic3_wq *wq)
+{
+ /* Don't allow queue to become completely full, report (free - 1). */
+ return wq->q_depth - hinic3_wq_get_used(wq) - 1;
+}
+
+static inline void *hinic3_wq_get_one_wqebb(struct hinic3_wq *wq, u16 *pi)
+{
+ *pi = wq->prod_idx & wq->idx_mask;
+ wq->prod_idx++;
+ return get_q_element(&wq->qpages, *pi, NULL);
+}
+
+static inline void hinic3_wq_put_wqebbs(struct hinic3_wq *wq, u16 num_wqebbs)
+{
+ wq->cons_idx += num_wqebbs;
+}
+
+void hinic3_wq_get_multi_wqebbs(struct hinic3_wq *wq,
+ u16 num_wqebbs, u16 *prod_idx,
+ struct hinic3_sq_bufdesc **first_part_wqebbs,
+ struct hinic3_sq_bufdesc **second_part_wqebbs,
+ u16 *first_part_wqebbs_num);
+
+#endif
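Because prod_idx and cons_idx are u16 counters that are never masked, hinic3_wq_get_used() depends on unsigned subtraction staying correct across the 16-bit wrap, and hinic3_wq_free_wqebbs() always holds one slot back so the queue never looks completely full. A small sketch of that arithmetic with a hypothetical depth of 256:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int q_depth = 256;	/* assumed queue depth */
	uint16_t prod_idx = 5;		/* wrapped past 65535 back to 5 */
	uint16_t cons_idx = 65530;	/* not yet wrapped */

	uint16_t used = prod_idx - cons_idx;		/* 11 via u16 wrap-around */
	unsigned int free_wqebbs = q_depth - used - 1;	/* one slot held back */

	printf("used=%u free=%u\n", used, free_wqebbs);
	return 0;
}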
diff --git a/drivers/net/ethernet/ibm/Kconfig b/drivers/net/ethernet/ibm/Kconfig
index c0c112d95b89..4f4b23465c47 100644
--- a/drivers/net/ethernet/ibm/Kconfig
+++ b/drivers/net/ethernet/ibm/Kconfig
@@ -27,6 +27,19 @@ config IBMVETH
To compile this driver as a module, choose M here. The module will
be called ibmveth.
+config IBMVETH_KUNIT_TEST
+ bool "KUnit test for IBM LAN Virtual Ethernet support" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ depends on KUNIT=y && IBMVETH=y
+ default KUNIT_ALL_TESTS
+ help
+ This builds unit tests for the IBM LAN Virtual Ethernet driver.
+
+ For more information on KUnit and unit tests in general, please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
+
source "drivers/net/ethernet/ibm/emac/Kconfig"
config EHEA
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 25b8a3556004..417dfa18daae 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -2554,17 +2554,12 @@ static int emac_dt_mdio_probe(struct emac_instance *dev)
struct mii_bus *bus;
int res;
- mii_np = of_get_child_by_name(dev->ofdev->dev.of_node, "mdio");
+ mii_np = of_get_available_child_by_name(dev->ofdev->dev.of_node, "mdio");
if (!mii_np) {
dev_err(&dev->ofdev->dev, "no mdio definition found.");
return -ENODEV;
}
- if (!of_device_is_available(mii_np)) {
- res = -ENODEV;
- goto put_node;
- }
-
bus = devm_mdiobus_alloc(&dev->ofdev->dev);
if (!bus) {
res = -ENOMEM;
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index b619a3ec245b..24046fe16634 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -39,8 +39,6 @@
#include "ibmveth.h"
static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance);
-static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter,
- bool reuse);
static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev);
static struct kobj_type ktype_veth_pool;
@@ -231,7 +229,10 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter,
index = pool->free_map[free_index];
skb = NULL;
- BUG_ON(index == IBM_VETH_INVALID_MAP);
+ if (WARN_ON(index == IBM_VETH_INVALID_MAP)) {
+ schedule_work(&adapter->work);
+ goto bad_index_failure;
+ }
/* are we allocating a new buffer or recycling an old one */
if (pool->skbuff[index])
@@ -300,6 +301,7 @@ failure:
DMA_FROM_DEVICE);
dev_kfree_skb_any(pool->skbuff[index]);
pool->skbuff[index] = NULL;
+bad_index_failure:
adapter->replenish_add_buff_failure++;
mb();
@@ -370,20 +372,36 @@ static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter,
}
}
-/* remove a buffer from a pool */
-static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
- u64 correlator, bool reuse)
+/**
+ * ibmveth_remove_buffer_from_pool - remove a buffer from a pool
+ * @adapter: adapter instance
+ * @correlator: identifies pool and index
+ * @reuse: whether to reuse buffer
+ *
+ * Return:
+ * * %0 - success
+ * * %-EINVAL - correlator maps to a pool or index that is out of range
+ * * %-EFAULT - pool and index map to null skb
+ */
+static int ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
+ u64 correlator, bool reuse)
{
unsigned int pool = correlator >> 32;
unsigned int index = correlator & 0xffffffffUL;
unsigned int free_index;
struct sk_buff *skb;
- BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
- BUG_ON(index >= adapter->rx_buff_pool[pool].size);
+ if (WARN_ON(pool >= IBMVETH_NUM_BUFF_POOLS) ||
+ WARN_ON(index >= adapter->rx_buff_pool[pool].size)) {
+ schedule_work(&adapter->work);
+ return -EINVAL;
+ }
skb = adapter->rx_buff_pool[pool].skbuff[index];
- BUG_ON(skb == NULL);
+ if (WARN_ON(!skb)) {
+ schedule_work(&adapter->work);
+ return -EFAULT;
+ }
/* if we are going to reuse the buffer then keep the pointers around
* but mark index as available. replenish will see the skb pointer and
@@ -411,6 +429,8 @@ static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
mb();
atomic_dec(&(adapter->rx_buff_pool[pool].available));
+
+ return 0;
}
/* get the current buffer on the rx queue */
@@ -420,24 +440,44 @@ static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *ada
unsigned int pool = correlator >> 32;
unsigned int index = correlator & 0xffffffffUL;
- BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
- BUG_ON(index >= adapter->rx_buff_pool[pool].size);
+ if (WARN_ON(pool >= IBMVETH_NUM_BUFF_POOLS) ||
+ WARN_ON(index >= adapter->rx_buff_pool[pool].size)) {
+ schedule_work(&adapter->work);
+ return NULL;
+ }
return adapter->rx_buff_pool[pool].skbuff[index];
}
-static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter,
- bool reuse)
+/**
+ * ibmveth_rxq_harvest_buffer - Harvest buffer from pool
+ *
+ * @adapter: pointer to adapter
+ * @reuse: whether to reuse buffer
+ *
+ * Context: called from ibmveth_poll
+ *
+ * Return:
+ * * %0 - success
+ * * other - non-zero return from ibmveth_remove_buffer_from_pool
+ */
+static int ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter,
+ bool reuse)
{
u64 cor;
+ int rc;
cor = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator;
- ibmveth_remove_buffer_from_pool(adapter, cor, reuse);
+ rc = ibmveth_remove_buffer_from_pool(adapter, cor, reuse);
+ if (unlikely(rc))
+ return rc;
if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
adapter->rx_queue.index = 0;
adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
}
+
+ return 0;
}
static void ibmveth_free_tx_ltb(struct ibmveth_adapter *adapter, int idx)
@@ -709,6 +749,35 @@ static int ibmveth_close(struct net_device *netdev)
return 0;
}
+/**
+ * ibmveth_reset - Handle scheduled reset work
+ *
+ * @w: pointer to work_struct embedded in adapter structure
+ *
+ * Context: This routine acquires rtnl_mutex and disables its NAPI through
+ * ibmveth_close. It can't be called directly in a context that has
+ * already acquired rtnl_mutex or disabled its NAPI, or directly from
+ * a poll routine.
+ *
+ * Return: void
+ */
+static void ibmveth_reset(struct work_struct *w)
+{
+ struct ibmveth_adapter *adapter = container_of(w, struct ibmveth_adapter, work);
+ struct net_device *netdev = adapter->netdev;
+
+ netdev_dbg(netdev, "reset starting\n");
+
+ rtnl_lock();
+
+ dev_close(adapter->netdev);
+ dev_open(adapter->netdev, NULL);
+
+ rtnl_unlock();
+
+ netdev_dbg(netdev, "reset complete\n");
+}
+
static int ibmveth_set_link_ksettings(struct net_device *dev,
const struct ethtool_link_ksettings *cmd)
{
@@ -1324,7 +1393,8 @@ restart_poll:
wmb(); /* suggested by larson1 */
adapter->rx_invalid_buffer++;
netdev_dbg(netdev, "recycling invalid buffer\n");
- ibmveth_rxq_harvest_buffer(adapter, true);
+ if (unlikely(ibmveth_rxq_harvest_buffer(adapter, true)))
+ break;
} else {
struct sk_buff *skb, *new_skb;
int length = ibmveth_rxq_frame_length(adapter);
@@ -1334,6 +1404,8 @@ restart_poll:
__sum16 iph_check = 0;
skb = ibmveth_rxq_get_buffer(adapter);
+ if (unlikely(!skb))
+ break;
/* if the large packet bit is set in the rx queue
* descriptor, the mss will be written by PHYP eight
@@ -1357,10 +1429,12 @@ restart_poll:
if (rx_flush)
ibmveth_flush_buffer(skb->data,
length + offset);
- ibmveth_rxq_harvest_buffer(adapter, true);
+ if (unlikely(ibmveth_rxq_harvest_buffer(adapter, true)))
+ break;
skb = new_skb;
} else {
- ibmveth_rxq_harvest_buffer(adapter, false);
+ if (unlikely(ibmveth_rxq_harvest_buffer(adapter, false)))
+ break;
skb_reserve(skb, offset);
}
@@ -1407,7 +1481,10 @@ restart_poll:
* then check once more to make sure we are done.
*/
lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_ENABLE);
- BUG_ON(lpar_rc != H_SUCCESS);
+ if (WARN_ON(lpar_rc != H_SUCCESS)) {
+ schedule_work(&adapter->work);
+ goto out;
+ }
if (ibmveth_rxq_pending_buffer(adapter) && napi_schedule(napi)) {
lpar_rc = h_vio_signal(adapter->vdev->unit_address,
@@ -1428,7 +1505,7 @@ static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance)
if (napi_schedule_prep(&adapter->napi)) {
lpar_rc = h_vio_signal(adapter->vdev->unit_address,
VIO_IRQ_DISABLE);
- BUG_ON(lpar_rc != H_SUCCESS);
+ WARN_ON(lpar_rc != H_SUCCESS);
__napi_schedule(&adapter->napi);
}
return IRQ_HANDLED;
@@ -1670,6 +1747,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
adapter->vdev = dev;
adapter->netdev = netdev;
+ INIT_WORK(&adapter->work, ibmveth_reset);
adapter->mcastFilterSize = be32_to_cpu(*mcastFilterSize_p);
ibmveth_init_link_settings(netdev);
@@ -1762,6 +1840,8 @@ static void ibmveth_remove(struct vio_dev *dev)
struct ibmveth_adapter *adapter = netdev_priv(netdev);
int i;
+ cancel_work_sync(&adapter->work);
+
for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
kobject_put(&adapter->rx_buff_pool[i].kobj);
@@ -1791,6 +1871,26 @@ static ssize_t veth_pool_show(struct kobject *kobj,
return 0;
}
+/**
+ * veth_pool_store - sysfs store handler for pool attributes
+ * @kobj: kobject embedded in pool
+ * @attr: attribute being changed
+ * @buf: value being stored
+ * @count: length of @buf in bytes
+ *
+ * Stores new value in pool attribute. Verifies the range of the new value for
+ * size and buff_size. Verifies that at least one pool remains available to
+ * receive MTU-sized packets.
+ *
+ * Context: Process context.
+ * Takes and releases rtnl_mutex to ensure correct ordering of close
+ * and open calls.
+ * Return:
+ * * %-EPERM - Not allowed to disable all MTU-sized buffer pools
+ * * %-EINVAL - New pool size or buffer size is out of range
+ * * count - Return count for success
+ * * other - Return value from a failed ibmveth_open call
+ */
static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
const char *buf, size_t count)
{
@@ -1800,24 +1900,30 @@ static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
struct net_device *netdev = dev_get_drvdata(kobj_to_dev(kobj->parent));
struct ibmveth_adapter *adapter = netdev_priv(netdev);
long value = simple_strtol(buf, NULL, 10);
+ bool change = false;
+ u32 newbuff_size;
+ u32 oldbuff_size;
+ int newactive;
+ int oldactive;
+ u32 newsize;
+ u32 oldsize;
long rc;
+ rtnl_lock();
+
+ oldbuff_size = pool->buff_size;
+ oldactive = pool->active;
+ oldsize = pool->size;
+
+ newbuff_size = oldbuff_size;
+ newactive = oldactive;
+ newsize = oldsize;
+
if (attr == &veth_active_attr) {
- if (value && !pool->active) {
- if (netif_running(netdev)) {
- if (ibmveth_alloc_buffer_pool(pool)) {
- netdev_err(netdev,
- "unable to alloc pool\n");
- return -ENOMEM;
- }
- pool->active = 1;
- ibmveth_close(netdev);
- if ((rc = ibmveth_open(netdev)))
- return rc;
- } else {
- pool->active = 1;
- }
- } else if (!value && pool->active) {
+ if (value && !oldactive) {
+ newactive = 1;
+ change = true;
+ } else if (!value && oldactive) {
int mtu = netdev->mtu + IBMVETH_BUFF_OH;
int i;
/* Make sure there is a buffer pool with buffers that
@@ -1833,48 +1939,60 @@ static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
if (i == IBMVETH_NUM_BUFF_POOLS) {
netdev_err(netdev, "no active pool >= MTU\n");
- return -EPERM;
+ rc = -EPERM;
+ goto unlock_err;
}
- if (netif_running(netdev)) {
- ibmveth_close(netdev);
- pool->active = 0;
- if ((rc = ibmveth_open(netdev)))
- return rc;
- }
- pool->active = 0;
+ newactive = 0;
+ change = true;
}
} else if (attr == &veth_num_attr) {
if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT) {
- return -EINVAL;
- } else {
- if (netif_running(netdev)) {
- ibmveth_close(netdev);
- pool->size = value;
- if ((rc = ibmveth_open(netdev)))
- return rc;
- } else {
- pool->size = value;
- }
+ rc = -EINVAL;
+ goto unlock_err;
+ }
+ if (value != oldsize) {
+ newsize = value;
+ change = true;
}
} else if (attr == &veth_size_attr) {
if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE) {
- return -EINVAL;
- } else {
- if (netif_running(netdev)) {
- ibmveth_close(netdev);
- pool->buff_size = value;
- if ((rc = ibmveth_open(netdev)))
- return rc;
- } else {
- pool->buff_size = value;
+ rc = -EINVAL;
+ goto unlock_err;
+ }
+ if (value != oldbuff_size) {
+ newbuff_size = value;
+ change = true;
+ }
+ }
+
+ if (change) {
+ if (netif_running(netdev))
+ ibmveth_close(netdev);
+
+ pool->active = newactive;
+ pool->buff_size = newbuff_size;
+ pool->size = newsize;
+
+ if (netif_running(netdev)) {
+ rc = ibmveth_open(netdev);
+ if (rc) {
+ pool->active = oldactive;
+ pool->buff_size = oldbuff_size;
+ pool->size = oldsize;
+ goto unlock_err;
}
}
}
+ rtnl_unlock();
/* kick the interrupt handler to allocate/deallocate pools */
ibmveth_interrupt(netdev->irq, netdev);
return count;
+
+unlock_err:
+ rtnl_unlock();
+ return rc;
}
@@ -1947,3 +2065,132 @@ static void __exit ibmveth_module_exit(void)
module_init(ibmveth_module_init);
module_exit(ibmveth_module_exit);
+
+#ifdef CONFIG_IBMVETH_KUNIT_TEST
+#include <kunit/test.h>
+
+/**
+ * ibmveth_reset_kunit - reset routine for running in KUnit environment
+ *
+ * @w: pointer to work_struct embedded in adapter structure
+ *
+ * Context: Called in the KUnit environment. Does nothing.
+ *
+ * Return: void
+ */
+static void ibmveth_reset_kunit(struct work_struct *w)
+{
+ netdev_dbg(NULL, "reset_kunit starting\n");
+ netdev_dbg(NULL, "reset_kunit complete\n");
+}
+
+/**
+ * ibmveth_remove_buffer_from_pool_test - unit test for the error paths of
+ * ibmveth_remove_buffer_from_pool
+ * @test: pointer to kunit structure
+ *
+ * Tests the error returns from ibmveth_remove_buffer_from_pool.
+ * ibmveth_remove_buffer_from_pool also calls WARN_ON, so dmesg should be
+ * checked to see that these warnings happened.
+ *
+ * Return: void
+ */
+static void ibmveth_remove_buffer_from_pool_test(struct kunit *test)
+{
+ struct ibmveth_adapter *adapter = kunit_kzalloc(test, sizeof(*adapter), GFP_KERNEL);
+ struct ibmveth_buff_pool *pool;
+ u64 correlator;
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, adapter);
+
+ INIT_WORK(&adapter->work, ibmveth_reset_kunit);
+
+ /* Set sane values for buffer pools */
+ for (int i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
+ ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
+ pool_count[i], pool_size[i],
+ pool_active[i]);
+
+ pool = &adapter->rx_buff_pool[0];
+ pool->skbuff = kunit_kcalloc(test, pool->size, sizeof(void *), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pool->skbuff);
+
+ correlator = ((u64)IBMVETH_NUM_BUFF_POOLS << 32) | 0;
+ KUNIT_EXPECT_EQ(test, -EINVAL, ibmveth_remove_buffer_from_pool(adapter, correlator, false));
+ KUNIT_EXPECT_EQ(test, -EINVAL, ibmveth_remove_buffer_from_pool(adapter, correlator, true));
+
+ correlator = ((u64)0 << 32) | adapter->rx_buff_pool[0].size;
+ KUNIT_EXPECT_EQ(test, -EINVAL, ibmveth_remove_buffer_from_pool(adapter, correlator, false));
+ KUNIT_EXPECT_EQ(test, -EINVAL, ibmveth_remove_buffer_from_pool(adapter, correlator, true));
+
+ correlator = (u64)0 | 0;
+ pool->skbuff[0] = NULL;
+ KUNIT_EXPECT_EQ(test, -EFAULT, ibmveth_remove_buffer_from_pool(adapter, correlator, false));
+ KUNIT_EXPECT_EQ(test, -EFAULT, ibmveth_remove_buffer_from_pool(adapter, correlator, true));
+
+ flush_work(&adapter->work);
+}
+
+/**
+ * ibmveth_rxq_get_buffer_test - unit test for ibmveth_rxq_get_buffer
+ * @test: pointer to kunit structure
+ *
+ * Tests ibmveth_rxq_get_buffer. ibmveth_rxq_get_buffer also calls WARN_ON for
+ * the NULL returns, so dmesg should be checked to see that these warnings
+ * happened.
+ *
+ * Return: void
+ */
+static void ibmveth_rxq_get_buffer_test(struct kunit *test)
+{
+ struct ibmveth_adapter *adapter = kunit_kzalloc(test, sizeof(*adapter), GFP_KERNEL);
+ struct sk_buff *skb = kunit_kzalloc(test, sizeof(*skb), GFP_KERNEL);
+ struct ibmveth_buff_pool *pool;
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, adapter);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skb);
+
+ INIT_WORK(&adapter->work, ibmveth_reset_kunit);
+
+ adapter->rx_queue.queue_len = 1;
+ adapter->rx_queue.index = 0;
+ adapter->rx_queue.queue_addr = kunit_kzalloc(test, sizeof(struct ibmveth_rx_q_entry),
+ GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, adapter->rx_queue.queue_addr);
+
+ /* Set sane values for buffer pools */
+ for (int i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
+ ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
+ pool_count[i], pool_size[i],
+ pool_active[i]);
+
+ pool = &adapter->rx_buff_pool[0];
+ pool->skbuff = kunit_kcalloc(test, pool->size, sizeof(void *), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pool->skbuff);
+
+ adapter->rx_queue.queue_addr[0].correlator = (u64)IBMVETH_NUM_BUFF_POOLS << 32 | 0;
+ KUNIT_EXPECT_PTR_EQ(test, NULL, ibmveth_rxq_get_buffer(adapter));
+
+ adapter->rx_queue.queue_addr[0].correlator = (u64)0 << 32 | adapter->rx_buff_pool[0].size;
+ KUNIT_EXPECT_PTR_EQ(test, NULL, ibmveth_rxq_get_buffer(adapter));
+
+ pool->skbuff[0] = skb;
+ adapter->rx_queue.queue_addr[0].correlator = (u64)0 << 32 | 0;
+ KUNIT_EXPECT_PTR_EQ(test, skb, ibmveth_rxq_get_buffer(adapter));
+
+ flush_work(&adapter->work);
+}
+
+static struct kunit_case ibmveth_test_cases[] = {
+ KUNIT_CASE(ibmveth_remove_buffer_from_pool_test),
+ KUNIT_CASE(ibmveth_rxq_get_buffer_test),
+ {}
+};
+
+static struct kunit_suite ibmveth_test_suite = {
+ .name = "ibmveth-kunit-test",
+ .test_cases = ibmveth_test_cases,
+};
+
+kunit_test_suite(ibmveth_test_suite);
+#endif
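
The KUnit cases above build correlators by hand; as the expectations show, the 64-bit correlator packs the pool index into the upper 32 bits and the buffer index into the lower 32 bits. A minimal sketch of that layout, using hypothetical helper names that are not part of the driver:

/* Sketch only: correlator layout exercised by the tests above.
 * The helpers are hypothetical; the driver open-codes the shifts.
 */
static inline u64 ibmveth_sketch_make_correlator(u32 pool, u32 index)
{
	return ((u64)pool << 32) | index;
}

static inline u32 ibmveth_sketch_correlator_pool(u64 correlator)
{
	return correlator >> 32;
}

static inline u32 ibmveth_sketch_correlator_index(u64 correlator)
{
	return correlator & 0xffffffff;
}
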
diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h
index 8468e2c59d7a..b0a2460ec9f9 100644
--- a/drivers/net/ethernet/ibm/ibmveth.h
+++ b/drivers/net/ethernet/ibm/ibmveth.h
@@ -134,38 +134,39 @@ struct ibmveth_rx_q {
};
struct ibmveth_adapter {
- struct vio_dev *vdev;
- struct net_device *netdev;
- struct napi_struct napi;
- unsigned int mcastFilterSize;
- void * buffer_list_addr;
- void * filter_list_addr;
- void *tx_ltb_ptr[IBMVETH_MAX_QUEUES];
- unsigned int tx_ltb_size;
- dma_addr_t tx_ltb_dma[IBMVETH_MAX_QUEUES];
- dma_addr_t buffer_list_dma;
- dma_addr_t filter_list_dma;
- struct ibmveth_buff_pool rx_buff_pool[IBMVETH_NUM_BUFF_POOLS];
- struct ibmveth_rx_q rx_queue;
- int rx_csum;
- int large_send;
- bool is_active_trunk;
-
- u64 fw_ipv6_csum_support;
- u64 fw_ipv4_csum_support;
- u64 fw_large_send_support;
- /* adapter specific stats */
- u64 replenish_task_cycles;
- u64 replenish_no_mem;
- u64 replenish_add_buff_failure;
- u64 replenish_add_buff_success;
- u64 rx_invalid_buffer;
- u64 rx_no_buffer;
- u64 tx_map_failed;
- u64 tx_send_failed;
- u64 tx_large_packets;
- u64 rx_large_packets;
- /* Ethtool settings */
+ struct vio_dev *vdev;
+ struct net_device *netdev;
+ struct napi_struct napi;
+ struct work_struct work;
+ unsigned int mcastFilterSize;
+ void *buffer_list_addr;
+ void *filter_list_addr;
+ void *tx_ltb_ptr[IBMVETH_MAX_QUEUES];
+ unsigned int tx_ltb_size;
+ dma_addr_t tx_ltb_dma[IBMVETH_MAX_QUEUES];
+ dma_addr_t buffer_list_dma;
+ dma_addr_t filter_list_dma;
+ struct ibmveth_buff_pool rx_buff_pool[IBMVETH_NUM_BUFF_POOLS];
+ struct ibmveth_rx_q rx_queue;
+ int rx_csum;
+ int large_send;
+ bool is_active_trunk;
+
+ u64 fw_ipv6_csum_support;
+ u64 fw_ipv4_csum_support;
+ u64 fw_large_send_support;
+ /* adapter specific stats */
+ u64 replenish_task_cycles;
+ u64 replenish_no_mem;
+ u64 replenish_add_buff_failure;
+ u64 replenish_add_buff_success;
+ u64 rx_invalid_buffer;
+ u64 rx_no_buffer;
+ u64 tx_map_failed;
+ u64 tx_send_failed;
+ u64 tx_large_packets;
+ u64 rx_large_packets;
+ /* Ethtool settings */
u8 duplex;
u32 speed;
};
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index e95ae0d39948..92647e137cf8 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -234,11 +234,17 @@ static int ibmvnic_set_queue_affinity(struct ibmvnic_sub_crq_queue *queue,
(*stragglers)--;
}
/* atomic write is safer than writing bit by bit directly */
- for (i = 0; i < stride; i++) {
- cpumask_set_cpu(*cpu, mask);
- *cpu = cpumask_next_wrap(*cpu, cpu_online_mask,
- nr_cpu_ids, false);
+ for_each_online_cpu_wrap(i, *cpu) {
+ if (!stride--) {
+ /* For the next queue we start from the first
+ * unused CPU in this queue
+ */
+ *cpu = i;
+ break;
+ }
+ cpumask_set_cpu(i, mask);
}
+
/* set queue affinity mask */
cpumask_copy(queue->affinity_mask, mask);
rc = irq_set_affinity_and_hint(queue->irq, queue->affinity_mask);
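
The rewritten loop hands each queue `stride` CPUs (plus one extra while stragglers remain) and leaves *cpu at the first CPU it did not consume, so the next queue picks up where this one stopped. A minimal arithmetic sketch of that distribution, with hypothetical names and no cpumask plumbing:

/* Sketch only: how total_queues queues share num_cpu CPUs.
 * Each queue takes `stride` CPUs; the first `stragglers` queues take
 * one extra so the remainder is spread out. `cpu` always points at the
 * first CPU not yet handed out, wrapping around at num_cpu.
 */
static void ibmvnic_sketch_spread_queues(int num_cpu, int total_queues)
{
	int stride = max(num_cpu / total_queues, 1);
	int stragglers = num_cpu >= total_queues ? num_cpu % total_queues : 0;
	int cpu = 0;

	for (int q = 0; q < total_queues; q++) {
		int take = stride + (q < stragglers ? 1 : 0);

		/* queue q owns CPUs [cpu, cpu + take), modulo num_cpu */
		cpu = (cpu + take) % num_cpu;
	}
}
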
@@ -256,7 +262,7 @@ static void ibmvnic_set_affinity(struct ibmvnic_adapter *adapter)
int num_rxqs = adapter->num_active_rx_scrqs, i_rxqs = 0;
int num_txqs = adapter->num_active_tx_scrqs, i_txqs = 0;
int total_queues, stride, stragglers, i;
- unsigned int num_cpu, cpu;
+ unsigned int num_cpu, cpu = 0;
bool is_rx_queue;
int rc = 0;
@@ -274,8 +280,6 @@ static void ibmvnic_set_affinity(struct ibmvnic_adapter *adapter)
stride = max_t(int, num_cpu / total_queues, 1);
/* number of leftover cpu's */
stragglers = num_cpu >= total_queues ? num_cpu % total_queues : 0;
- /* next available cpu to assign irq to */
- cpu = cpumask_next(-1, cpu_online_mask);
for (i = 0; i < total_queues; i++) {
is_rx_queue = false;
@@ -2408,6 +2412,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
dma_addr_t data_dma_addr;
struct netdev_queue *txq;
unsigned long lpar_rc;
+ unsigned int skblen;
union sub_crq tx_crq;
unsigned int offset;
bool use_scrq_send_direct = false;
@@ -2522,6 +2527,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_buff->skb = skb;
tx_buff->index = bufidx;
tx_buff->pool_index = queue_num;
+ skblen = skb->len;
memset(&tx_crq, 0, sizeof(tx_crq));
tx_crq.v1.first = IBMVNIC_CRQ_CMD;
@@ -2614,7 +2620,7 @@ early_exit:
netif_stop_subqueue(netdev, queue_num);
}
- tx_bytes += skb->len;
+ tx_bytes += skblen;
txq_trans_cond_update(txq);
ret = NETDEV_TX_OK;
goto out;
@@ -4827,6 +4833,18 @@ static void vnic_add_client_data(struct ibmvnic_adapter *adapter,
strscpy(vlcd->name, adapter->netdev->name, len);
}
+static void ibmvnic_print_hex_dump(struct net_device *dev, void *buf,
+ size_t len)
+{
+ unsigned char hex_str[16 * 3];
+
+ for (size_t i = 0; i < len; i += 16) {
+ hex_dump_to_buffer((unsigned char *)buf + i, len - i, 16, 8,
+ hex_str, sizeof(hex_str), false);
+ netdev_dbg(dev, "%s\n", hex_str);
+ }
+}
+
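
ibmvnic_print_hex_dump() replaces the old per-u64 dump loops: hex_dump_to_buffer() formats at most 16 bytes per call (rowsize 16, groupsize 8), so the 16 * 3 character line buffer comfortably holds two 8-byte groups plus separator. A small usage sketch with a made-up payload:

/* Sketch only: dumping a made-up 40-byte buffer through the helper
 * produces three debug lines of 16, 16 and 8 bytes respectively.
 */
static void ibmvnic_sketch_dump(struct net_device *netdev)
{
	u8 payload[40] = { 0x01, 0x02, 0x03 };	/* arbitrary test data */

	ibmvnic_print_hex_dump(netdev, payload, sizeof(payload));
}
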
static int send_login(struct ibmvnic_adapter *adapter)
{
struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
@@ -4937,10 +4955,8 @@ static int send_login(struct ibmvnic_adapter *adapter)
vnic_add_client_data(adapter, vlcd);
netdev_dbg(adapter->netdev, "Login Buffer:\n");
- for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
- netdev_dbg(adapter->netdev, "%016lx\n",
- ((unsigned long *)(adapter->login_buf))[i]);
- }
+ ibmvnic_print_hex_dump(adapter->netdev, adapter->login_buf,
+ adapter->login_buf_sz);
memset(&crq, 0, sizeof(crq));
crq.login.first = IBMVNIC_CRQ_CMD;
@@ -5317,15 +5333,13 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
{
struct device *dev = &adapter->vdev->dev;
struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
- int i;
dma_unmap_single(dev, adapter->ip_offload_tok,
sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);
netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
- for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
- netdev_dbg(adapter->netdev, "%016lx\n",
- ((unsigned long *)(buf))[i]);
+ ibmvnic_print_hex_dump(adapter->netdev, buf,
+ sizeof(adapter->ip_offload_buf));
netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
@@ -5556,10 +5570,8 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
netdev->mtu = adapter->req_mtu - ETH_HLEN;
netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
- for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
- netdev_dbg(adapter->netdev, "%016lx\n",
- ((unsigned long *)(adapter->login_rsp_buf))[i]);
- }
+ ibmvnic_print_hex_dump(netdev, adapter->login_rsp_buf,
+ adapter->login_rsp_buf_sz);
/* Sanity checks */
if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 20bc40eec487..5a331c1c76cb 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -147,6 +147,8 @@ config IXGBE
depends on PCI
depends on PTP_1588_CLOCK_OPTIONAL
select MDIO
+ select NET_DEVLINK
+ select PLDMFW
select PHYLIB
help
This driver supports Intel(R) 10GbE PCI Express family of
@@ -264,6 +266,7 @@ config I40EVF
tristate "Intel(R) Ethernet Adaptive Virtual Function support"
select IAVF
depends on PCI_MSI
+ depends on PTP_1588_CLOCK_OPTIONAL
help
This driver supports virtual functions for Intel XL710,
X710, X722, XXV710, and all devices advertising support for
@@ -292,6 +295,7 @@ config ICE
select DIMLIB
select LIBIE
select NET_DEVLINK
+ select PACKING
select PLDMFW
select DPLL
help
@@ -335,7 +339,7 @@ config ICE_SWITCHDEV
config ICE_HWTS
bool "Support HW cross-timestamp on platforms with PTM support"
default y
- depends on ICE && X86
+ depends on ICE && X86 && PCIE_PTM
help
Say Y to enable hardware supported cross-timestamping on platforms
with PCIe PTM support. The cross-timestamp is available through
@@ -365,6 +369,7 @@ config IGC
default n
depends on PCI
depends on PTP_1588_CLOCK_OPTIONAL
+ depends on ETHTOOL_NETLINK
help
This driver supports Intel(R) Ethernet Controller I225-LM/I225-V
family of adapters.
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 3a5bbda235cb..5c56c1edd492 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -1682,7 +1682,7 @@ static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)
static void e100_watchdog(struct timer_list *t)
{
- struct nic *nic = from_timer(nic, t, watchdog);
+ struct nic *nic = timer_container_of(nic, t, watchdog);
struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
u32 speed;
@@ -2293,7 +2293,7 @@ static int e100_up(struct nic *nic)
return 0;
err_no_irq:
- del_timer_sync(&nic->watchdog);
+ timer_delete_sync(&nic->watchdog);
err_clean_cbs:
e100_clean_cbs(nic);
err_rx_clean_list:
@@ -2308,7 +2308,7 @@ static void e100_down(struct nic *nic)
netif_stop_queue(nic->netdev);
e100_hw_reset(nic);
free_irq(nic->pdev->irq, nic->netdev);
- del_timer_sync(&nic->watchdog);
+ timer_delete_sync(&nic->watchdog);
netif_carrier_off(nic->netdev);
e100_clean_cbs(nic);
e100_rx_clean_list(nic);
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 3f089c3d47b2..d8595e84326d 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -477,10 +477,6 @@ static void e1000_down_and_stop(struct e1000_adapter *adapter)
cancel_delayed_work_sync(&adapter->phy_info_task);
cancel_delayed_work_sync(&adapter->fifo_stall_task);
-
- /* Only kill reset task if adapter is not resetting */
- if (!test_bit(__E1000_RESETTING, &adapter->flags))
- cancel_work_sync(&adapter->reset_task);
}
void e1000_down(struct e1000_adapter *adapter)
@@ -1266,6 +1262,10 @@ static void e1000_remove(struct pci_dev *pdev)
unregister_netdev(netdev);
+ /* Only kill reset task if adapter is not resetting */
+ if (!test_bit(__E1000_RESETTING, &adapter->flags))
+ cancel_work_sync(&adapter->reset_task);
+
e1000_phy_hw_reset(hw);
kfree(adapter->tx_ring);
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
index 5e2cfa73f889..8294a7c4f122 100644
--- a/drivers/net/ethernet/intel/e1000e/defines.h
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
@@ -803,4 +803,7 @@
/* SerDes Control */
#define E1000_GEN_POLL_TIMEOUT 640
+#define E1000_FEXTNVM12_PHYPD_CTRL_MASK 0x00C00000
+#define E1000_FEXTNVM12_PHYPD_CTRL_P1 0x00800000
+
#endif /* _E1000_DEFINES_H_ */
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index ba9c19e6994c..952898151565 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -319,7 +319,7 @@ struct e1000_adapter {
u16 tx_ring_count;
u16 rx_ring_count;
- struct hwtstamp_config hwtstamp_config;
+ struct kernel_hwtstamp_config hwtstamp_config;
struct delayed_work systim_overflow_work;
struct sk_buff *tx_hwtstamp_skb;
unsigned long tx_hwtstamp_start;
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 2f9655cf5dd9..364378133526 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -286,6 +286,45 @@ static void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw)
}
/**
+ * e1000_reconfigure_k1_exit_timeout - reconfigure K1 exit timeout to
+ * align to MTP and later platform requirements.
+ * @hw: pointer to the HW structure
+ *
+ * Context: PHY semaphore must be held by caller.
+ * Return: 0 on success, negative on failure
+ */
+static s32 e1000_reconfigure_k1_exit_timeout(struct e1000_hw *hw)
+{
+ u16 phy_timeout;
+ u32 fextnvm12;
+ s32 ret_val;
+
+ if (hw->mac.type < e1000_pch_mtp)
+ return 0;
+
+ /* Change Kumeran K1 power down state from P0s to P1 */
+ fextnvm12 = er32(FEXTNVM12);
+ fextnvm12 &= ~E1000_FEXTNVM12_PHYPD_CTRL_MASK;
+ fextnvm12 |= E1000_FEXTNVM12_PHYPD_CTRL_P1;
+ ew32(FEXTNVM12, fextnvm12);
+
+ /* Wait for the interface to settle */
+ usleep_range(1000, 1100);
+
+ /* Change K1 exit timeout */
+ ret_val = e1e_rphy_locked(hw, I217_PHY_TIMEOUTS_REG,
+ &phy_timeout);
+ if (ret_val)
+ return ret_val;
+
+ phy_timeout &= ~I217_PHY_TIMEOUTS_K1_EXIT_TO_MASK;
+ phy_timeout |= 0xF00;
+
+ return e1e_wphy_locked(hw, I217_PHY_TIMEOUTS_REG,
+ phy_timeout);
+}
+
+/**
* e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
* @hw: pointer to the HW structure
*
@@ -327,15 +366,22 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
* LANPHYPC Value bit to force the interconnect to PCIe mode.
*/
switch (hw->mac.type) {
+ case e1000_pch_mtp:
+ case e1000_pch_lnp:
+ case e1000_pch_ptp:
+ case e1000_pch_nvp:
+ /* At this point the PHY might be inaccessible so don't
+ * propagate the failure
+ */
+ if (e1000_reconfigure_k1_exit_timeout(hw))
+ e_dbg("Failed to reconfigure K1 exit timeout\n");
+
+ fallthrough;
case e1000_pch_lpt:
case e1000_pch_spt:
case e1000_pch_cnp:
case e1000_pch_tgp:
case e1000_pch_adp:
- case e1000_pch_mtp:
- case e1000_pch_lnp:
- case e1000_pch_ptp:
- case e1000_pch_nvp:
if (e1000_phy_is_accessible_pchlan(hw))
break;
@@ -419,8 +465,20 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
* the PHY is in.
*/
ret_val = hw->phy.ops.check_reset_block(hw);
- if (ret_val)
+ if (ret_val) {
e_err("ME blocked access to PHY after reset\n");
+ goto out;
+ }
+
+ if (hw->mac.type >= e1000_pch_mtp) {
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val) {
+ e_err("Failed to reconfigure K1 exit timeout\n");
+ goto out;
+ }
+ ret_val = e1000_reconfigure_k1_exit_timeout(hw);
+ hw->phy.ops.release(hw);
+ }
}
out:
@@ -4888,6 +4946,18 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
u16 i;
e1000_initialize_hw_bits_ich8lan(hw);
+ if (hw->mac.type >= e1000_pch_mtp) {
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_reconfigure_k1_exit_timeout(hw);
+ hw->phy.ops.release(hw);
+ if (ret_val) {
+ e_dbg("Error failed to reconfigure K1 exit timeout\n");
+ return ret_val;
+ }
+ }
/* Initialize identification LED */
ret_val = mac->ops.id_led_init(hw);
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index 2504b11c3169..5feb589a9b5f 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -219,6 +219,10 @@
#define I217_PLL_CLOCK_GATE_REG PHY_REG(772, 28)
#define I217_PLL_CLOCK_GATE_MASK 0x07FF
+/* PHY Timeouts */
+#define I217_PHY_TIMEOUTS_REG PHY_REG(770, 21)
+#define I217_PHY_TIMEOUTS_K1_EXIT_TO_MASK 0x0FC0
+
#define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in ms */
/* Inband Control */
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index d7df2a0ed629..44249dd91bd6 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -331,8 +331,21 @@ void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
}
/* replace the entire MTA table */
- for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
+ for (i = hw->mac.mta_reg_count - 1; i >= 0; i--) {
E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, hw->mac.mta_shadow[i]);
+
+ if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
+ /*
+ * Do not queue up too many posted writes to prevent
+ * increased latency for other devices on the
+ * interconnect. Flush after each 8th posted write,
+ * to keep additional execution time low while still
+ * preventing increased latency.
+ */
+ if (!(i % 8) && i)
+ e1e_flush();
+ }
+ }
e1e_flush();
}
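
The PREEMPT_RT branch above bounds the number of outstanding posted writes by flushing after every eighth MTA write. The same batching pattern, reduced to a minimal sketch (the batch size macro and helper name are illustrative, not part of the patch):

/* Sketch only: flush posted MMIO writes every BATCH iterations so a
 * long register rewrite cannot back up the interconnect.
 */
#define SKETCH_FLUSH_BATCH	8

static void e1000e_sketch_rewrite_mta(struct e1000_hw *hw, const u32 *tbl,
				      int count)
{
	int i;

	for (i = count - 1; i >= 0; i--) {
		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, tbl[i]);
		if (IS_ENABLED(CONFIG_PREEMPT_RT) &&
		    i && !(i % SKETCH_FLUSH_BATCH))
			e1e_flush();	/* read-back drains posted writes */
	}
	e1e_flush();	/* final flush after the last entry */
}
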
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 286155efcedf..7719e15813ee 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -3534,9 +3534,6 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
case e1000_pch_cnp:
case e1000_pch_tgp:
case e1000_pch_adp:
- case e1000_pch_mtp:
- case e1000_pch_lnp:
- case e1000_pch_ptp:
case e1000_pch_nvp:
if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) {
/* Stable 24MHz frequency */
@@ -3552,6 +3549,17 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
adapter->cc.shift = shift;
}
break;
+ case e1000_pch_mtp:
+ case e1000_pch_lnp:
+ case e1000_pch_ptp:
+ /* System firmware can misreport this value, so set it to a
+ * stable 38400KHz frequency.
+ */
+ incperiod = INCPERIOD_38400KHZ;
+ incvalue = INCVALUE_38400KHZ;
+ shift = INCVALUE_SHIFT_38400KHZ;
+ adapter->cc.shift = shift;
+ break;
case e1000_82574:
case e1000_82583:
/* Stable 25MHz frequency */
@@ -3574,6 +3582,7 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
* e1000e_config_hwtstamp - configure the hwtstamp registers and enable/disable
* @adapter: board private structure
* @config: timestamp configuration
+ * @extack: netlink extended ACK for error report
*
* Outgoing time stamping can be enabled and disabled. Play nice and
* disable it when requested, although it shouldn't cause any overhead
@@ -3587,7 +3596,8 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
* exception of "all V2 events regardless of level 2 or 4".
**/
static int e1000e_config_hwtstamp(struct e1000_adapter *adapter,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct e1000_hw *hw = &adapter->hw;
u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
@@ -3598,8 +3608,10 @@ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter,
bool is_l2 = false;
u32 regval;
- if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP))
+ if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP)) {
+ NL_SET_ERR_MSG(extack, "No HW timestamp support");
return -EINVAL;
+ }
switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
@@ -3608,6 +3620,7 @@ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter,
case HWTSTAMP_TX_ON:
break;
default:
+ NL_SET_ERR_MSG(extack, "Unsupported TX HW timestamp type");
return -ERANGE;
}
@@ -3681,6 +3694,7 @@ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter,
config->rx_filter = HWTSTAMP_FILTER_ALL;
break;
default:
+ NL_SET_ERR_MSG(extack, "Unsupported RX HW timestamp filter");
return -ERANGE;
}
@@ -3693,7 +3707,8 @@ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter,
ew32(TSYNCTXCTL, regval);
if ((er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_ENABLED) !=
(regval & E1000_TSYNCTXCTL_ENABLED)) {
- e_err("Timesync Tx Control register not set as expected\n");
+ NL_SET_ERR_MSG(extack,
+ "Timesync Tx Control register not set as expected");
return -EAGAIN;
}
@@ -3706,7 +3721,8 @@ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter,
E1000_TSYNCRXCTL_TYPE_MASK)) !=
(regval & (E1000_TSYNCRXCTL_ENABLED |
E1000_TSYNCRXCTL_TYPE_MASK))) {
- e_err("Timesync Rx Control register not set as expected\n");
+ NL_SET_ERR_MSG(extack,
+ "Timesync Rx Control register not set as expected");
return -EAGAIN;
}
@@ -3901,6 +3917,7 @@ static void e1000e_systim_reset(struct e1000_adapter *adapter)
{
struct ptp_clock_info *info = &adapter->ptp_clock_info;
struct e1000_hw *hw = &adapter->hw;
+ struct netlink_ext_ack extack = {};
unsigned long flags;
u32 timinca;
s32 ret_val;
@@ -3932,7 +3949,12 @@ static void e1000e_systim_reset(struct e1000_adapter *adapter)
spin_unlock_irqrestore(&adapter->systim_lock, flags);
/* restore the previous hwtstamp configuration settings */
- e1000e_config_hwtstamp(adapter, &adapter->hwtstamp_config);
+ ret_val = e1000e_config_hwtstamp(adapter, &adapter->hwtstamp_config,
+ &extack);
+ if (ret_val) {
+ if (extack._msg)
+ e_err("%s\n", extack._msg);
+ }
}
/**
@@ -4287,8 +4309,8 @@ void e1000e_down(struct e1000_adapter *adapter, bool reset)
napi_synchronize(&adapter->napi);
- del_timer_sync(&adapter->watchdog_timer);
- del_timer_sync(&adapter->phy_info_timer);
+ timer_delete_sync(&adapter->watchdog_timer);
+ timer_delete_sync(&adapter->phy_info_timer);
spin_lock(&adapter->stats64_lock);
e1000e_update_stats(adapter);
@@ -4839,7 +4861,8 @@ static void e1000e_update_phy_task(struct work_struct *work)
**/
static void e1000_update_phy_info(struct timer_list *t)
{
- struct e1000_adapter *adapter = from_timer(adapter, t, phy_info_timer);
+ struct e1000_adapter *adapter = timer_container_of(adapter, t,
+ phy_info_timer);
if (test_bit(__E1000_DOWN, &adapter->state))
return;
@@ -5175,7 +5198,8 @@ static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter)
**/
static void e1000_watchdog(struct timer_list *t)
{
- struct e1000_adapter *adapter = from_timer(adapter, t, watchdog_timer);
+ struct e1000_adapter *adapter = timer_container_of(adapter, t,
+ watchdog_timer);
/* Do the rest outside of interrupt context */
schedule_work(&adapter->watchdog_task);
@@ -6079,8 +6103,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
return 0;
}
-static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
- int cmd)
+static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct mii_ioctl_data *data = if_mii(ifr);
@@ -6140,7 +6163,8 @@ static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
/**
* e1000e_hwtstamp_set - control hardware time stamping
* @netdev: network interface device structure
- * @ifr: interface request
+ * @config: timestamp configuration
+ * @extack: netlink extended ACK report
*
* Outgoing time stamping can be enabled and disabled. Play nice and
* disable it when requested, although it shouldn't cause any overhead
@@ -6153,20 +6177,18 @@ static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
* specified. Matching the kind of event packet is not supported, with the
* exception of "all V2 events regardless of level 2 or 4".
**/
-static int e1000e_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
+static int e1000e_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- struct hwtstamp_config config;
int ret_val;
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
- ret_val = e1000e_config_hwtstamp(adapter, &config);
+ ret_val = e1000e_config_hwtstamp(adapter, config, extack);
if (ret_val)
return ret_val;
- switch (config.rx_filter) {
+ switch (config->rx_filter) {
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
@@ -6178,38 +6200,23 @@ static int e1000e_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
* by hardware so notify the caller the requested packets plus
* some others are time stamped.
*/
- config.rx_filter = HWTSTAMP_FILTER_SOME;
+ config->rx_filter = HWTSTAMP_FILTER_SOME;
break;
default:
break;
}
- return copy_to_user(ifr->ifr_data, &config,
- sizeof(config)) ? -EFAULT : 0;
+ return 0;
}
-static int e1000e_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
+static int e1000e_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *kernel_config)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- return copy_to_user(ifr->ifr_data, &adapter->hwtstamp_config,
- sizeof(adapter->hwtstamp_config)) ? -EFAULT : 0;
-}
+ *kernel_config = adapter->hwtstamp_config;
-static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
-{
- switch (cmd) {
- case SIOCGMIIPHY:
- case SIOCGMIIREG:
- case SIOCSMIIREG:
- return e1000_mii_ioctl(netdev, ifr, cmd);
- case SIOCSHWTSTAMP:
- return e1000e_hwtstamp_set(netdev, ifr);
- case SIOCGHWTSTAMP:
- return e1000e_hwtstamp_get(netdev, ifr);
- default:
- return -EOPNOTSUPP;
- }
+ return 0;
}
static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
@@ -7346,9 +7353,11 @@ static const struct net_device_ops e1000e_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = e1000_netpoll,
#endif
- .ndo_set_features = e1000_set_features,
- .ndo_fix_features = e1000_fix_features,
+ .ndo_set_features = e1000_set_features,
+ .ndo_fix_features = e1000_fix_features,
.ndo_features_check = passthru_features_check,
+ .ndo_hwtstamp_get = e1000e_hwtstamp_get,
+ .ndo_hwtstamp_set = e1000e_hwtstamp_set,
};
/**
@@ -7741,8 +7750,8 @@ static void e1000_remove(struct pci_dev *pdev)
* from being rescheduled.
*/
set_bit(__E1000_DOWN, &adapter->state);
- del_timer_sync(&adapter->watchdog_timer);
- del_timer_sync(&adapter->phy_info_timer);
+ timer_delete_sync(&adapter->watchdog_timer);
+ timer_delete_sync(&adapter->phy_info_timer);
cancel_work_sync(&adapter->reset_task);
cancel_work_sync(&adapter->watchdog_task);
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index 89d57dd911dc..ea3c3eb2ef20 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -295,15 +295,17 @@ void e1000e_ptp_init(struct e1000_adapter *adapter)
case e1000_pch_cnp:
case e1000_pch_tgp:
case e1000_pch_adp:
- case e1000_pch_mtp:
- case e1000_pch_lnp:
- case e1000_pch_ptp:
case e1000_pch_nvp:
if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)
adapter->ptp_clock_info.max_adj = MAX_PPB_24MHZ;
else
adapter->ptp_clock_info.max_adj = MAX_PPB_38400KHZ;
break;
+ case e1000_pch_mtp:
+ case e1000_pch_lnp:
+ case e1000_pch_ptp:
+ adapter->ptp_clock_info.max_adj = MAX_PPB_38400KHZ;
+ break;
case e1000_82574:
case e1000_82583:
adapter->ptp_clock_info.max_adj = MAX_PPB_25MHZ;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index 92de609b7218..ae5fe34659cf 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -199,8 +199,8 @@ static void fm10k_start_service_event(struct fm10k_intfc *interface)
**/
static void fm10k_service_timer(struct timer_list *t)
{
- struct fm10k_intfc *interface = from_timer(interface, t,
- service_timer);
+ struct fm10k_intfc *interface = timer_container_of(interface, t,
+ service_timer);
/* Reset the timer */
mod_timer(&interface->service_timer, (HZ * 2) + jiffies);
@@ -2245,7 +2245,7 @@ static void fm10k_remove(struct pci_dev *pdev)
struct fm10k_intfc *interface = pci_get_drvdata(pdev);
struct net_device *netdev = interface->netdev;
- del_timer_sync(&interface->service_timer);
+ timer_delete_sync(&interface->service_timer);
fm10k_stop_service_event(interface);
fm10k_stop_macvlan_task(interface);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
index 98861cc6df7c..b9dd7b719832 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
@@ -1180,126 +1180,6 @@ s32 fm10k_iov_select_vid(struct fm10k_vf_info *vf_info, u16 vid)
}
/**
- * fm10k_iov_msg_mac_vlan_pf - Message handler for MAC/VLAN request from VF
- * @hw: Pointer to hardware structure
- * @results: Pointer array to message, results[0] is pointer to message
- * @mbx: Pointer to mailbox information structure
- *
- * This function is a default handler for MAC/VLAN requests from the VF.
- * The assumption is that in this case it is acceptable to just directly
- * hand off the message from the VF to the underlying shared code.
- **/
-s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results,
- struct fm10k_mbx_info *mbx)
-{
- struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
- u8 mac[ETH_ALEN];
- u32 *result;
- int err = 0;
- bool set;
- u16 vlan;
- u32 vid;
-
- /* we shouldn't be updating rules on a disabled interface */
- if (!FM10K_VF_FLAG_ENABLED(vf_info))
- err = FM10K_ERR_PARAM;
-
- if (!err && !!results[FM10K_MAC_VLAN_MSG_VLAN]) {
- result = results[FM10K_MAC_VLAN_MSG_VLAN];
-
- /* record VLAN id requested */
- err = fm10k_tlv_attr_get_u32(result, &vid);
- if (err)
- return err;
-
- set = !(vid & FM10K_VLAN_CLEAR);
- vid &= ~FM10K_VLAN_CLEAR;
-
- /* if the length field has been set, this is a multi-bit
- * update request. For multi-bit requests, simply disallow
- * them when the pf_vid has been set. In this case, the PF
- * should have already cleared the VLAN_TABLE, and if we
- * allowed them, it could allow a rogue VF to receive traffic
- * on a VLAN it was not assigned. In the single-bit case, we
- * need to modify requests for VLAN 0 to use the default PF or
- * SW vid when assigned.
- */
-
- if (vid >> 16) {
- /* prevent multi-bit requests when PF has
- * administratively set the VLAN for this VF
- */
- if (vf_info->pf_vid)
- return FM10K_ERR_PARAM;
- } else {
- err = fm10k_iov_select_vid(vf_info, (u16)vid);
- if (err < 0)
- return err;
-
- vid = err;
- }
-
- /* update VSI info for VF in regards to VLAN table */
- err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi, set);
- }
-
- if (!err && !!results[FM10K_MAC_VLAN_MSG_MAC]) {
- result = results[FM10K_MAC_VLAN_MSG_MAC];
-
- /* record unicast MAC address requested */
- err = fm10k_tlv_attr_get_mac_vlan(result, mac, &vlan);
- if (err)
- return err;
-
- /* block attempts to set MAC for a locked device */
- if (is_valid_ether_addr(vf_info->mac) &&
- !ether_addr_equal(mac, vf_info->mac))
- return FM10K_ERR_PARAM;
-
- set = !(vlan & FM10K_VLAN_CLEAR);
- vlan &= ~FM10K_VLAN_CLEAR;
-
- err = fm10k_iov_select_vid(vf_info, vlan);
- if (err < 0)
- return err;
-
- vlan = (u16)err;
-
- /* notify switch of request for new unicast address */
- err = hw->mac.ops.update_uc_addr(hw, vf_info->glort,
- mac, vlan, set, 0);
- }
-
- if (!err && !!results[FM10K_MAC_VLAN_MSG_MULTICAST]) {
- result = results[FM10K_MAC_VLAN_MSG_MULTICAST];
-
- /* record multicast MAC address requested */
- err = fm10k_tlv_attr_get_mac_vlan(result, mac, &vlan);
- if (err)
- return err;
-
- /* verify that the VF is allowed to request multicast */
- if (!(vf_info->vf_flags & FM10K_VF_FLAG_MULTI_ENABLED))
- return FM10K_ERR_PARAM;
-
- set = !(vlan & FM10K_VLAN_CLEAR);
- vlan &= ~FM10K_VLAN_CLEAR;
-
- err = fm10k_iov_select_vid(vf_info, vlan);
- if (err < 0)
- return err;
-
- vlan = (u16)err;
-
- /* notify switch of request for new multicast address */
- err = hw->mac.ops.update_mc_addr(hw, vf_info->glort,
- mac, vlan, set);
- }
-
- return err;
-}
-
-/**
* fm10k_iov_supported_xcast_mode_pf - Determine best match for xcast mode
* @vf_info: VF info structure containing capability flags
* @mode: Requested xcast mode
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.h b/drivers/net/ethernet/intel/fm10k/fm10k_pf.h
index 8e814df709d2..ad3696893cb1 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.h
@@ -99,8 +99,6 @@ extern const struct fm10k_tlv_attr fm10k_err_msg_attr[];
s32 fm10k_iov_select_vid(struct fm10k_vf_info *vf_info, u16 vid);
s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *);
-s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *, u32 **,
- struct fm10k_mbx_info *);
s32 fm10k_iov_msg_lport_state_pf(struct fm10k_hw *, u32 **,
struct fm10k_mbx_info *);
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index d4255c2706fa..c67963bfe14e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -88,6 +88,7 @@ enum i40e_state {
__I40E_SERVICE_SCHED,
__I40E_ADMINQ_EVENT_PENDING,
__I40E_MDD_EVENT_PENDING,
+ __I40E_MDD_VF_PRINT_PENDING,
__I40E_VFLR_EVENT_PENDING,
__I40E_RESET_RECOVERY_PENDING,
__I40E_TIMEOUT_RECOVERY_PENDING,
@@ -191,6 +192,7 @@ enum i40e_pf_flags {
*/
I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA,
I40E_FLAG_VF_VLAN_PRUNING_ENA,
+ I40E_FLAG_MDD_AUTO_RESET_VF,
I40E_PF_FLAGS_NBITS, /* must be last */
};
@@ -572,7 +574,7 @@ struct i40e_pf {
int num_alloc_vfs; /* actual number of VFs allocated */
u32 vf_aq_requests;
u32 arq_overflows; /* Not fatal, possibly indicative of problems */
-
+ struct ratelimit_state mdd_message_rate_limit;
/* DCBx/DCBNL capability for PF that indicates
* whether DCBx is managed by firmware or host
* based agent (LLDPAD). Also, indicates what
@@ -1189,7 +1191,6 @@ int i40e_add_del_fdir(struct i40e_vsi *vsi,
struct i40e_fdir_filter *input, bool add);
void i40e_fdir_check_and_reenable(struct i40e_pf *pf);
u32 i40e_get_current_fd_count(struct i40e_pf *pf);
-u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf);
u32 i40e_get_current_atr_cnt(struct i40e_pf *pf);
u32 i40e_get_global_fd_count(struct i40e_pf *pf);
bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features);
@@ -1197,7 +1198,6 @@ void i40e_set_ethtool_ops(struct net_device *netdev);
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
const u8 *macaddr, s16 vlan);
void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f);
-void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan);
int i40e_sync_vsi_filters(struct i40e_vsi *vsi);
struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
u16 uplink, u32 param1);
@@ -1313,7 +1313,6 @@ int i40e_update_adq_vsi_queues(struct i40e_vsi *vsi, int vsi_offset);
int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi);
int i40e_get_partition_bw_setting(struct i40e_pf *pf);
int i40e_set_partition_bw_setting(struct i40e_pf *pf);
-int i40e_commit_partition_bw_setting(struct i40e_pf *pf);
void i40e_print_link_message(struct i40e_vsi *vsi, bool isup);
void i40e_set_fec_in_flags(u8 fec_cfg, unsigned long *flags);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index f73f5930fc58..175c1320c143 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -1016,16 +1016,6 @@ i40e_asq_send_command_atomic_v2(struct i40e_hw *hw,
return status;
}
-int
-i40e_asq_send_command_v2(struct i40e_hw *hw, struct i40e_aq_desc *desc,
- void *buff, /* can be NULL */ u16 buff_size,
- struct i40e_asq_cmd_details *cmd_details,
- enum i40e_admin_queue_err *aq_status)
-{
- return i40e_asq_send_command_atomic_v2(hw, desc, buff, buff_size,
- cmd_details, true, aq_status);
-}
-
/**
* i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
* @desc: pointer to the temp descriptor (non DMA mem)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index e8031f1a9b4f..b11c35e307ca 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -817,10 +817,11 @@ int i40e_pf_reset(struct i40e_hw *hw)
void i40e_clear_hw(struct i40e_hw *hw)
{
u32 num_queues, base_queue;
- u32 num_pf_int;
- u32 num_vf_int;
+ s32 num_pf_int;
+ s32 num_vf_int;
u32 num_vfs;
- u32 i, j;
+ s32 i;
+ u32 j;
u32 val;
u32 eol = 0x7ff;
@@ -1805,37 +1806,6 @@ int i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
}
/**
- * i40e_aq_set_vsi_vlan_promisc - control the VLAN promiscuous setting
- * @hw: pointer to the hw struct
- * @seid: vsi number
- * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN
- * @cmd_details: pointer to command details structure or NULL
- **/
-int i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
- u16 seid, bool enable,
- struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
- (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
- u16 flags = 0;
- int status;
-
- i40e_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_set_vsi_promiscuous_modes);
- if (enable)
- flags |= I40E_AQC_SET_VSI_PROMISC_VLAN;
-
- cmd->promiscuous_flags = cpu_to_le16(flags);
- cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_VLAN);
- cmd->seid = cpu_to_le16(seid);
-
- status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
- return status;
-}
-
-/**
* i40e_aq_get_vsi_params - get VSI configuration info
* @hw: pointer to the hw struct
* @vsi_ctx: pointer to a vsi context struct
@@ -2436,136 +2406,6 @@ i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid,
}
/**
- * i40e_mirrorrule_op - Internal helper function to add/delete mirror rule
- * @hw: pointer to the hw struct
- * @opcode: AQ opcode for add or delete mirror rule
- * @sw_seid: Switch SEID (to which rule refers)
- * @rule_type: Rule Type (ingress/egress/VLAN)
- * @id: Destination VSI SEID or Rule ID
- * @count: length of the list
- * @mr_list: list of mirrored VSI SEIDs or VLAN IDs
- * @cmd_details: pointer to command details structure or NULL
- * @rule_id: Rule ID returned from FW
- * @rules_used: Number of rules used in internal switch
- * @rules_free: Number of rules free in internal switch
- *
- * Add/Delete a mirror rule to a specific switch. Mirror rules are supported for
- * VEBs/VEPA elements only
- **/
-static int i40e_mirrorrule_op(struct i40e_hw *hw,
- u16 opcode, u16 sw_seid, u16 rule_type, u16 id,
- u16 count, __le16 *mr_list,
- struct i40e_asq_cmd_details *cmd_details,
- u16 *rule_id, u16 *rules_used, u16 *rules_free)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_add_delete_mirror_rule *cmd =
- (struct i40e_aqc_add_delete_mirror_rule *)&desc.params.raw;
- struct i40e_aqc_add_delete_mirror_rule_completion *resp =
- (struct i40e_aqc_add_delete_mirror_rule_completion *)&desc.params.raw;
- u16 buf_size;
- int status;
-
- buf_size = count * sizeof(*mr_list);
-
- /* prep the rest of the request */
- i40e_fill_default_direct_cmd_desc(&desc, opcode);
- cmd->seid = cpu_to_le16(sw_seid);
- cmd->rule_type = cpu_to_le16(rule_type &
- I40E_AQC_MIRROR_RULE_TYPE_MASK);
- cmd->num_entries = cpu_to_le16(count);
- /* Dest VSI for add, rule_id for delete */
- cmd->destination = cpu_to_le16(id);
- if (mr_list) {
- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
- I40E_AQ_FLAG_RD));
- if (buf_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
- }
-
- status = i40e_asq_send_command(hw, &desc, mr_list, buf_size,
- cmd_details);
- if (!status ||
- hw->aq.asq_last_status == I40E_AQ_RC_ENOSPC) {
- if (rule_id)
- *rule_id = le16_to_cpu(resp->rule_id);
- if (rules_used)
- *rules_used = le16_to_cpu(resp->mirror_rules_used);
- if (rules_free)
- *rules_free = le16_to_cpu(resp->mirror_rules_free);
- }
- return status;
-}
-
-/**
- * i40e_aq_add_mirrorrule - add a mirror rule
- * @hw: pointer to the hw struct
- * @sw_seid: Switch SEID (to which rule refers)
- * @rule_type: Rule Type (ingress/egress/VLAN)
- * @dest_vsi: SEID of VSI to which packets will be mirrored
- * @count: length of the list
- * @mr_list: list of mirrored VSI SEIDs or VLAN IDs
- * @cmd_details: pointer to command details structure or NULL
- * @rule_id: Rule ID returned from FW
- * @rules_used: Number of rules used in internal switch
- * @rules_free: Number of rules free in internal switch
- *
- * Add mirror rule. Mirror rules are supported for VEBs or VEPA elements only
- **/
-int i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
- u16 rule_type, u16 dest_vsi, u16 count,
- __le16 *mr_list,
- struct i40e_asq_cmd_details *cmd_details,
- u16 *rule_id, u16 *rules_used, u16 *rules_free)
-{
- if (!(rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS ||
- rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS)) {
- if (count == 0 || !mr_list)
- return -EINVAL;
- }
-
- return i40e_mirrorrule_op(hw, i40e_aqc_opc_add_mirror_rule, sw_seid,
- rule_type, dest_vsi, count, mr_list,
- cmd_details, rule_id, rules_used, rules_free);
-}
-
-/**
- * i40e_aq_delete_mirrorrule - delete a mirror rule
- * @hw: pointer to the hw struct
- * @sw_seid: Switch SEID (to which rule refers)
- * @rule_type: Rule Type (ingress/egress/VLAN)
- * @count: length of the list
- * @rule_id: Rule ID that is returned in the receive desc as part of
- * add_mirrorrule.
- * @mr_list: list of mirrored VLAN IDs to be removed
- * @cmd_details: pointer to command details structure or NULL
- * @rules_used: Number of rules used in internal switch
- * @rules_free: Number of rules free in internal switch
- *
- * Delete a mirror rule. Mirror rules are supported for VEBs/VEPA elements only
- **/
-int i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
- u16 rule_type, u16 rule_id, u16 count,
- __le16 *mr_list,
- struct i40e_asq_cmd_details *cmd_details,
- u16 *rules_used, u16 *rules_free)
-{
- /* Rule ID has to be valid except rule_type: INGRESS VLAN mirroring */
- if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
- /* count and mr_list shall be valid for rule_type INGRESS VLAN
- * mirroring. For other rule_type, count and rule_type should
- * not matter.
- */
- if (count == 0 || !mr_list)
- return -EINVAL;
- }
-
- return i40e_mirrorrule_op(hw, i40e_aqc_opc_delete_mirror_rule, sw_seid,
- rule_type, rule_id, count, mr_list,
- cmd_details, NULL, rules_used, rules_free);
-}
-
-/**
* i40e_aq_send_msg_to_vf
* @hw: pointer to the hardware structure
* @vfid: VF id to send msg
@@ -3180,41 +3020,6 @@ i40e_aq_update_nvm_exit:
}
/**
- * i40e_aq_rearrange_nvm
- * @hw: pointer to the hw struct
- * @rearrange_nvm: defines direction of rearrangement
- * @cmd_details: pointer to command details structure or NULL
- *
- * Rearrange NVM structure, available only for transition FW
- **/
-int i40e_aq_rearrange_nvm(struct i40e_hw *hw,
- u8 rearrange_nvm,
- struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aqc_nvm_update *cmd;
- struct i40e_aq_desc desc;
- int status;
-
- cmd = (struct i40e_aqc_nvm_update *)&desc.params.raw;
-
- i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update);
-
- rearrange_nvm &= (I40E_AQ_NVM_REARRANGE_TO_FLAT |
- I40E_AQ_NVM_REARRANGE_TO_STRUCT);
-
- if (!rearrange_nvm) {
- status = -EINVAL;
- goto i40e_aq_rearrange_nvm_exit;
- }
-
- cmd->command_flags |= rearrange_nvm;
- status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
-i40e_aq_rearrange_nvm_exit:
- return status;
-}
-
-/**
* i40e_aq_get_lldp_mib
* @hw: pointer to the hw struct
* @bridge_type: type of bridge requested
@@ -3335,44 +3140,6 @@ int i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
}
/**
- * i40e_aq_restore_lldp
- * @hw: pointer to the hw struct
- * @setting: pointer to factory setting variable or NULL
- * @restore: True if factory settings should be restored
- * @cmd_details: pointer to command details structure or NULL
- *
- * Restore LLDP Agent factory settings if @restore set to True. In other case
- * only returns factory setting in AQ response.
- **/
-int
-i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
- struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_lldp_restore *cmd =
- (struct i40e_aqc_lldp_restore *)&desc.params.raw;
- int status;
-
- if (!test_bit(I40E_HW_CAP_FW_LLDP_PERSISTENT, hw->caps)) {
- i40e_debug(hw, I40E_DEBUG_ALL,
- "Restore LLDP not supported by current FW version.\n");
- return -ENODEV;
- }
-
- i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_restore);
-
- if (restore)
- cmd->command |= I40E_AQ_LLDP_AGENT_RESTORE;
-
- status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
- if (setting)
- *setting = cmd->command & 1;
-
- return status;
-}
-
-/**
* i40e_aq_stop_lldp
* @hw: pointer to the hw struct
* @shutdown_agent: True if LLDP Agent needs to be Shutdown
@@ -4570,84 +4337,6 @@ phy_write_end:
}
/**
- * i40e_write_phy_register
- * @hw: pointer to the HW structure
- * @page: registers page number
- * @reg: register address in the page
- * @phy_addr: PHY address on MDIO interface
- * @value: PHY register value
- *
- * Writes value to specified PHY register
- **/
-int i40e_write_phy_register(struct i40e_hw *hw,
- u8 page, u16 reg, u8 phy_addr, u16 value)
-{
- int status;
-
- switch (hw->device_id) {
- case I40E_DEV_ID_1G_BASE_T_X722:
- status = i40e_write_phy_register_clause22(hw, reg, phy_addr,
- value);
- break;
- case I40E_DEV_ID_1G_BASE_T_BC:
- case I40E_DEV_ID_5G_BASE_T_BC:
- case I40E_DEV_ID_10G_BASE_T:
- case I40E_DEV_ID_10G_BASE_T4:
- case I40E_DEV_ID_10G_BASE_T_BC:
- case I40E_DEV_ID_10G_BASE_T_X722:
- case I40E_DEV_ID_25G_B:
- case I40E_DEV_ID_25G_SFP28:
- status = i40e_write_phy_register_clause45(hw, page, reg,
- phy_addr, value);
- break;
- default:
- status = -EIO;
- break;
- }
-
- return status;
-}
-
-/**
- * i40e_read_phy_register
- * @hw: pointer to the HW structure
- * @page: registers page number
- * @reg: register address in the page
- * @phy_addr: PHY address on MDIO interface
- * @value: PHY register value
- *
- * Reads specified PHY register value
- **/
-int i40e_read_phy_register(struct i40e_hw *hw,
- u8 page, u16 reg, u8 phy_addr, u16 *value)
-{
- int status;
-
- switch (hw->device_id) {
- case I40E_DEV_ID_1G_BASE_T_X722:
- status = i40e_read_phy_register_clause22(hw, reg, phy_addr,
- value);
- break;
- case I40E_DEV_ID_1G_BASE_T_BC:
- case I40E_DEV_ID_5G_BASE_T_BC:
- case I40E_DEV_ID_10G_BASE_T:
- case I40E_DEV_ID_10G_BASE_T4:
- case I40E_DEV_ID_10G_BASE_T_BC:
- case I40E_DEV_ID_10G_BASE_T_X722:
- case I40E_DEV_ID_25G_B:
- case I40E_DEV_ID_25G_SFP28:
- status = i40e_read_phy_register_clause45(hw, page, reg,
- phy_addr, value);
- break;
- default:
- status = -EIO;
- break;
- }
-
- return status;
-}
-
-/**
* i40e_get_phy_address
* @hw: pointer to the HW structure
* @dev_num: PHY port num that address we want
@@ -4663,80 +4352,6 @@ u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num)
}
/**
- * i40e_blink_phy_link_led
- * @hw: pointer to the HW structure
- * @time: time how long led will blinks in secs
- * @interval: gap between LED on and off in msecs
- *
- * Blinks PHY link LED
- **/
-int i40e_blink_phy_link_led(struct i40e_hw *hw,
- u32 time, u32 interval)
-{
- u16 led_addr = I40E_PHY_LED_PROV_REG_1;
- u16 gpio_led_port;
- u8 phy_addr = 0;
- int status = 0;
- u16 led_ctl;
- u8 port_num;
- u16 led_reg;
- u32 i;
-
- i = rd32(hw, I40E_PFGEN_PORTNUM);
- port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
- phy_addr = i40e_get_phy_address(hw, port_num);
-
- for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
- led_addr++) {
- status = i40e_read_phy_register_clause45(hw,
- I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr,
- &led_reg);
- if (status)
- goto phy_blinking_end;
- led_ctl = led_reg;
- if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
- led_reg = 0;
- status = i40e_write_phy_register_clause45(hw,
- I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr,
- led_reg);
- if (status)
- goto phy_blinking_end;
- break;
- }
- }
-
- if (time > 0 && interval > 0) {
- for (i = 0; i < time * 1000; i += interval) {
- status = i40e_read_phy_register_clause45(hw,
- I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, &led_reg);
- if (status)
- goto restore_config;
- if (led_reg & I40E_PHY_LED_MANUAL_ON)
- led_reg = 0;
- else
- led_reg = I40E_PHY_LED_MANUAL_ON;
- status = i40e_write_phy_register_clause45(hw,
- I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, led_reg);
- if (status)
- goto restore_config;
- msleep(interval);
- }
- }
-
-restore_config:
- status = i40e_write_phy_register_clause45(hw,
- I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, led_ctl);
-
-phy_blinking_end:
- return status;
-}
-
-/**
* i40e_led_get_reg - read LED register
* @hw: pointer to the HW structure
* @led_addr: LED register address
@@ -5269,39 +4884,6 @@ i40e_find_segment_in_package(u32 segment_type,
(struct i40e_profile_section_header *)((u8 *)(profile) + (offset))
/**
- * i40e_find_section_in_profile
- * @section_type: the section type to search for (i.e., SECTION_TYPE_NOTE)
- * @profile: pointer to the i40e segment header to be searched
- *
- * This function searches i40e segment for a particular section type. On
- * success it returns a pointer to the section header, otherwise it will
- * return NULL.
- **/
-struct i40e_profile_section_header *
-i40e_find_section_in_profile(u32 section_type,
- struct i40e_profile_segment *profile)
-{
- struct i40e_profile_section_header *sec;
- struct i40e_section_table *sec_tbl;
- u32 sec_off;
- u32 i;
-
- if (profile->header.type != SEGMENT_TYPE_I40E)
- return NULL;
-
- I40E_SECTION_TABLE(profile, sec_tbl);
-
- for (i = 0; i < sec_tbl->section_count; i++) {
- sec_off = sec_tbl->section_offset[i];
- sec = I40E_SECTION_HEADER(profile, sec_off);
- if (sec->section.type == section_type)
- return sec;
- }
-
- return NULL;
-}
-
-/**
* i40e_ddp_exec_aq_section - Execute generic AQ for DDP
* @hw: pointer to the hw struct
* @aq: command buffer containing all data to execute AQ
@@ -5524,45 +5106,6 @@ i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
}
/**
- * i40e_add_pinfo_to_list
- * @hw: pointer to the hardware structure
- * @profile: pointer to the profile segment of the package
- * @profile_info_sec: buffer for information section
- * @track_id: package tracking id
- *
- * Register a profile to the list of loaded profiles.
- */
-int
-i40e_add_pinfo_to_list(struct i40e_hw *hw,
- struct i40e_profile_segment *profile,
- u8 *profile_info_sec, u32 track_id)
-{
- struct i40e_profile_section_header *sec = NULL;
- struct i40e_profile_info *pinfo;
- u32 offset = 0, info = 0;
- int status = 0;
-
- sec = (struct i40e_profile_section_header *)profile_info_sec;
- sec->tbl_size = 1;
- sec->data_end = sizeof(struct i40e_profile_section_header) +
- sizeof(struct i40e_profile_info);
- sec->section.type = SECTION_TYPE_INFO;
- sec->section.offset = sizeof(struct i40e_profile_section_header);
- sec->section.size = sizeof(struct i40e_profile_info);
- pinfo = (struct i40e_profile_info *)(profile_info_sec +
- sec->section.offset);
- pinfo->track_id = track_id;
- pinfo->version = profile->version;
- pinfo->op = I40E_DDP_ADD_TRACKID;
- memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE);
-
- status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
- track_id, &offset, &info, NULL);
-
- return status;
-}
-
-/**
* i40e_aq_add_cloud_filters
* @hw: pointer to the hardware structure
* @seid: VSI seid to add cloud filters from
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
index 8db1eb0c1768..352e957443fd 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
@@ -1491,19 +1491,6 @@ void i40e_dcb_hw_set_num_tc(struct i40e_hw *hw, u8 num_tc)
}
/**
- * i40e_dcb_hw_get_num_tc
- * @hw: pointer to the hw struct
- *
- * Returns number of traffic classes configured in HW
- **/
-u8 i40e_dcb_hw_get_num_tc(struct i40e_hw *hw)
-{
- u32 reg = rd32(hw, I40E_PRTDCB_GENC);
-
- return FIELD_GET(I40E_PRTDCB_GENC_NUMTC_MASK, reg);
-}
-
-/**
* i40e_dcb_hw_rx_ets_bw_config
* @hw: pointer to the hw struct
* @bw_share: Bandwidth share indexed per traffic class
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
index d76497566e40..d5662c639c41 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
@@ -253,7 +253,6 @@ void i40e_dcb_hw_rx_cmd_monitor_config(struct i40e_hw *hw,
void i40e_dcb_hw_pfc_config(struct i40e_hw *hw,
u8 pfc_en, u8 *prio_tc);
void i40e_dcb_hw_set_num_tc(struct i40e_hw *hw, u8 num_tc);
-u8 i40e_dcb_hw_get_num_tc(struct i40e_hw *hw);
void i40e_dcb_hw_rx_ets_bw_config(struct i40e_hw *hw, u8 *bw_share,
u8 *mode, u8 *prio_type);
void i40e_dcb_hw_rx_up2tc_config(struct i40e_hw *hw, u8 *prio_tc);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 208c2f0857b6..6cd9da662ae1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -722,7 +722,7 @@ static void i40e_dbg_dump_vf(struct i40e_pf *pf, int vf_id)
dev_info(&pf->pdev->dev, "vf %2d: VSI id=%d, seid=%d, qps=%d\n",
vf_id, vf->lan_vsi_id, vsi->seid, vf->num_queue_pairs);
dev_info(&pf->pdev->dev, " num MDD=%lld\n",
- vf->num_mdd_events);
+ vf->mdd_tx_events.count + vf->mdd_rx_events.count);
} else {
dev_info(&pf->pdev->dev, "invalid VF id %d\n", vf_id);
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index bce5b76f1e7a..8a7a83f83ee5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -459,6 +459,8 @@ static const struct i40e_priv_flags i40e_gstrings_priv_flags[] = {
I40E_PRIV_FLAG("base-r-fec", I40E_FLAG_BASE_R_FEC, 0),
I40E_PRIV_FLAG("vf-vlan-pruning",
I40E_FLAG_VF_VLAN_PRUNING_ENA, 0),
+ I40E_PRIV_FLAG("mdd-auto-reset-vf",
+ I40E_FLAG_MDD_AUTO_RESET_VF, 0),
};
#define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_gstrings_priv_flags)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index ab5febf83ec3..f1c9e575703e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -99,7 +99,7 @@ module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX)");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
-MODULE_IMPORT_NS(LIBIE);
+MODULE_IMPORT_NS("LIBIE");
MODULE_LICENSE("GPL v2");
static struct workqueue_struct *i40e_wq;
@@ -1666,9 +1666,8 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
* @vsi: VSI to remove from
* @f: the filter to remove from the list
*
- * This function should be called instead of i40e_del_filter only if you know
- * the exact filter you will remove already, such as via i40e_find_filter or
- * i40e_find_mac.
+ * This function requires you've found the exact filter you will remove
+ * already, such as via i40e_find_filter or i40e_find_mac.
*
* NOTE: This function is expected to be called with mac_filter_hash_lock
* being held.
@@ -1698,29 +1697,6 @@ void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f)
}
/**
- * i40e_del_filter - Remove a MAC/VLAN filter from the VSI
- * @vsi: the VSI to be searched
- * @macaddr: the MAC address
- * @vlan: the VLAN
- *
- * NOTE: This function is expected to be called with mac_filter_hash_lock
- * being held.
- * ANOTHER NOTE: This function MUST be called from within the context of
- * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
- * instead of list_for_each_entry().
- **/
-void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan)
-{
- struct i40e_mac_filter *f;
-
- if (!vsi || !macaddr)
- return;
-
- f = i40e_find_filter(vsi, macaddr, vlan);
- __i40e_del_filter(vsi, f);
-}
-
-/**
* i40e_add_mac_filter - Add a MAC filter for all active VLANs
* @vsi: the VSI to be searched
* @macaddr: the mac address to be filtered
@@ -9629,19 +9605,6 @@ static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
}
/**
- * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
- * @pf: board private structure
- **/
-u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
-{
- u32 val, fcnt_prog;
-
- val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
- fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
- return fcnt_prog;
-}
-
-/**
* i40e_get_current_fd_count - Get total FD filters programmed for this PF
* @pf: board private structure
**/
@@ -11217,6 +11180,67 @@ static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired)
}
/**
+ * i40e_print_vf_mdd_event - print VF Tx/Rx malicious driver detect event
+ * @pf: board private structure
+ * @vf: pointer to the VF structure
+ * @is_tx: true - for Tx event, false - for Rx
+ */
+static void i40e_print_vf_mdd_event(struct i40e_pf *pf, struct i40e_vf *vf,
+ bool is_tx)
+{
+ dev_err(&pf->pdev->dev, is_tx ?
+ "%lld Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pm. mdd-auto-reset-vfs=%s\n" :
+ "%lld Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pm. mdd-auto-reset-vfs=%s\n",
+ is_tx ? vf->mdd_tx_events.count : vf->mdd_rx_events.count,
+ pf->hw.pf_id,
+ vf->vf_id,
+ vf->default_lan_addr.addr,
+ str_on_off(test_bit(I40E_FLAG_MDD_AUTO_RESET_VF, pf->flags)));
+}
+
+/**
+ * i40e_print_vfs_mdd_events - print VFs' malicious driver detect events
+ * @pf: pointer to the PF structure
+ *
+ * Called from i40e_handle_mdd_event to rate-limit and print the VFs' MDD events.
+ */
+static void i40e_print_vfs_mdd_events(struct i40e_pf *pf)
+{
+ unsigned int i;
+
+ /* check that there are pending MDD events to print */
+ if (!test_and_clear_bit(__I40E_MDD_VF_PRINT_PENDING, pf->state))
+ return;
+
+ if (!__ratelimit(&pf->mdd_message_rate_limit))
+ return;
+
+ for (i = 0; i < pf->num_alloc_vfs; i++) {
+ struct i40e_vf *vf = &pf->vf[i];
+ bool is_printed = false;
+
+ /* only print Rx MDD event message if there are new events */
+ if (vf->mdd_rx_events.count != vf->mdd_rx_events.last_printed) {
+ vf->mdd_rx_events.last_printed = vf->mdd_rx_events.count;
+ i40e_print_vf_mdd_event(pf, vf, false);
+ is_printed = true;
+ }
+
+ /* only print Tx MDD event message if there are new events */
+ if (vf->mdd_tx_events.count != vf->mdd_tx_events.last_printed) {
+ vf->mdd_tx_events.last_printed = vf->mdd_tx_events.count;
+ i40e_print_vf_mdd_event(pf, vf, true);
+ is_printed = true;
+ }
+
+ if (is_printed && !test_bit(I40E_FLAG_MDD_AUTO_RESET_VF, pf->flags))
+ dev_info(&pf->pdev->dev,
+ "Use PF Control I/F to re-enable the VF #%d\n",
+ i);
+ }
+}
+
+/**
* i40e_handle_mdd_event
* @pf: pointer to the PF structure
*
@@ -11230,8 +11254,13 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
u32 reg;
int i;
- if (!test_bit(__I40E_MDD_EVENT_PENDING, pf->state))
+ if (!test_and_clear_bit(__I40E_MDD_EVENT_PENDING, pf->state)) {
+ /* Since the VF MDD event logging is rate limited, check if
+ * there are pending MDD events.
+ */
+ i40e_print_vfs_mdd_events(pf);
return;
+ }
/* find what triggered the MDD event */
reg = rd32(hw, I40E_GL_MDET_TX);
@@ -11275,36 +11304,48 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
/* see if one of the VFs needs its hand slapped */
for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
+ bool is_mdd_on_tx = false;
+ bool is_mdd_on_rx = false;
+
vf = &(pf->vf[i]);
reg = rd32(hw, I40E_VP_MDET_TX(i));
if (reg & I40E_VP_MDET_TX_VALID_MASK) {
+ set_bit(__I40E_MDD_VF_PRINT_PENDING, pf->state);
wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
- vf->num_mdd_events++;
- dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
- i);
- dev_info(&pf->pdev->dev,
- "Use PF Control I/F to re-enable the VF\n");
+ vf->mdd_tx_events.count++;
set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
+ is_mdd_on_tx = true;
}
reg = rd32(hw, I40E_VP_MDET_RX(i));
if (reg & I40E_VP_MDET_RX_VALID_MASK) {
+ set_bit(__I40E_MDD_VF_PRINT_PENDING, pf->state);
wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
- vf->num_mdd_events++;
- dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
- i);
- dev_info(&pf->pdev->dev,
- "Use PF Control I/F to re-enable the VF\n");
+ vf->mdd_rx_events.count++;
set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
+ is_mdd_on_rx = true;
+ }
+
+ if ((is_mdd_on_tx || is_mdd_on_rx) &&
+ test_bit(I40E_FLAG_MDD_AUTO_RESET_VF, pf->flags)) {
+ /* VF MDD event counters will be cleared by
+ * reset, so print the event prior to reset.
+ */
+ if (is_mdd_on_rx)
+ i40e_print_vf_mdd_event(pf, vf, false);
+ if (is_mdd_on_tx)
+ i40e_print_vf_mdd_event(pf, vf, true);
+
+ i40e_vc_reset_vf(vf, true);
}
}
- /* re-enable mdd interrupt cause */
- clear_bit(__I40E_MDD_EVENT_PENDING, pf->state);
reg = rd32(hw, I40E_PFINT_ICR0_ENA);
reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
wr32(hw, I40E_PFINT_ICR0_ENA, reg);
i40e_flush(hw);
+
+ i40e_print_vfs_mdd_events(pf);
}
/**
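The mdd_rx_events/mdd_tx_events handling above boils down to a "report only new events" pattern: a running counter plus a snapshot of that counter taken at the last report. A minimal, self-contained C sketch of the same idea (struct and function names here are illustrative, not driver code):

#include <stdio.h>

struct event_counter {
	unsigned long long count;        /* total events observed */
	unsigned long long last_printed; /* value of count at the last report */
};

static void report_new_events(struct event_counter *ec, const char *name)
{
	if (ec->count == ec->last_printed)
		return;                         /* nothing new since last time */

	printf("%llu %s events so far\n", ec->count, name);
	ec->last_printed = ec->count;           /* remember what was reported */
}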
@@ -11371,7 +11412,7 @@ static void i40e_service_task(struct work_struct *work)
**/
static void i40e_service_timer(struct timer_list *t)
{
- struct i40e_pf *pf = from_timer(pf, t, service_timer);
+ struct i40e_pf *pf = timer_container_of(pf, t, service_timer);
mod_timer(&pf->service_timer,
round_jiffies(jiffies + pf->service_timer_period));
@@ -12614,89 +12655,6 @@ int i40e_set_partition_bw_setting(struct i40e_pf *pf)
}
/**
- * i40e_commit_partition_bw_setting - Commit BW settings for this PF partition
- * @pf: board private structure
- **/
-int i40e_commit_partition_bw_setting(struct i40e_pf *pf)
-{
- /* Commit temporary BW setting to permanent NVM image */
- enum i40e_admin_queue_err last_aq_status;
- u16 nvm_word;
- int ret;
-
- if (pf->hw.partition_id != 1) {
- dev_info(&pf->pdev->dev,
- "Commit BW only works on partition 1! This is partition %d",
- pf->hw.partition_id);
- ret = -EOPNOTSUPP;
- goto bw_commit_out;
- }
-
- /* Acquire NVM for read access */
- ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
- last_aq_status = pf->hw.aq.asq_last_status;
- if (ret) {
- dev_info(&pf->pdev->dev,
- "Cannot acquire NVM for read access, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, last_aq_status));
- goto bw_commit_out;
- }
-
- /* Read word 0x10 of NVM - SW compatibility word 1 */
- ret = i40e_aq_read_nvm(&pf->hw,
- I40E_SR_NVM_CONTROL_WORD,
- 0x10, sizeof(nvm_word), &nvm_word,
- false, NULL);
- /* Save off last admin queue command status before releasing
- * the NVM
- */
- last_aq_status = pf->hw.aq.asq_last_status;
- i40e_release_nvm(&pf->hw);
- if (ret) {
- dev_info(&pf->pdev->dev, "NVM read error, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, last_aq_status));
- goto bw_commit_out;
- }
-
- /* Wait a bit for NVM release to complete */
- msleep(50);
-
- /* Acquire NVM for write access */
- ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE);
- last_aq_status = pf->hw.aq.asq_last_status;
- if (ret) {
- dev_info(&pf->pdev->dev,
- "Cannot acquire NVM for write access, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, last_aq_status));
- goto bw_commit_out;
- }
- /* Write it back out unchanged to initiate update NVM,
- * which will force a write of the shadow (alt) RAM to
- * the NVM - thus storing the bandwidth values permanently.
- */
- ret = i40e_aq_update_nvm(&pf->hw,
- I40E_SR_NVM_CONTROL_WORD,
- 0x10, sizeof(nvm_word),
- &nvm_word, true, 0, NULL);
- /* Save off last admin queue command status before releasing
- * the NVM
- */
- last_aq_status = pf->hw.aq.asq_last_status;
- i40e_release_nvm(&pf->hw);
- if (ret)
- dev_info(&pf->pdev->dev,
- "BW settings NOT SAVED, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, last_aq_status));
-bw_commit_out:
-
- return ret;
-}
-
-/**
* i40e_is_total_port_shutdown_enabled - read NVM and return value
* if total port shutdown feature is enabled for this PF
* @pf: board private structure
@@ -15998,6 +15956,9 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ERR_PTR(err),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ /* VF MDD event logs are rate limited to one second intervals */
+ ratelimit_state_init(&pf->mdd_message_rate_limit, 1 * HZ, 1);
+
/* Reconfigure hardware for allowing smaller MSS in the case
* of TSO, so that we avoid the MDD being fired and causing
* a reset in the case of small MSS+TSO.
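ratelimit_state_init() above arms a standard kernel rate limiter (here: one message per second with a burst of one). A short sketch of how such a limiter is typically consumed, assuming only <linux/ratelimit.h>; the structure and function names are illustrative:

#include <linux/ratelimit.h>
#include <linux/device.h>

static DEFINE_RATELIMIT_STATE(demo_rs, 1 * HZ, 1);	/* 1 msg/s, burst 1 */

static void demo_log(struct device *dev)
{
	/* __ratelimit() returns true while the budget lasts; callers that
	 * arrive within the same interval are silently suppressed.
	 */
	if (__ratelimit(&demo_rs))
		dev_info(dev, "rate-limited message\n");
}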
@@ -16421,7 +16382,7 @@ static int i40e_io_suspend(struct i40e_pf *pf)
set_bit(__I40E_DOWN, pf->state);
/* Ensure service task will not be running */
- del_timer_sync(&pf->service_timer);
+ timer_delete_sync(&pf->service_timer);
cancel_work_sync(&pf->service_task);
/* Client close must be called explicitly here because the timer
@@ -16620,7 +16581,7 @@ static void i40e_shutdown(struct pci_dev *pdev)
set_bit(__I40E_SUSPENDED, pf->state);
set_bit(__I40E_DOWN, pf->state);
- del_timer_sync(&pf->service_timer);
+ timer_delete_sync(&pf->service_timer);
cancel_work_sync(&pf->service_task);
i40e_cloud_filter_exit(pf);
i40e_fdir_teardown(pf);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index 5a0699ca7ce5..099bb8ab7d70 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -27,13 +27,6 @@ i40e_asq_send_command(struct i40e_hw *hw, struct i40e_aq_desc *desc,
void *buff, /* can be NULL */ u16 buff_size,
struct i40e_asq_cmd_details *cmd_details);
int
-i40e_asq_send_command_v2(struct i40e_hw *hw,
- struct i40e_aq_desc *desc,
- void *buff, /* can be NULL */
- u16 buff_size,
- struct i40e_asq_cmd_details *cmd_details,
- enum i40e_admin_queue_err *aq_status);
-int
i40e_asq_send_command_atomic(struct i40e_hw *hw, struct i40e_aq_desc *desc,
void *buff, /* can be NULL */ u16 buff_size,
struct i40e_asq_cmd_details *cmd_details,
@@ -72,8 +65,6 @@ int i40e_led_set_phy(struct i40e_hw *hw, bool on,
u16 led_addr, u32 mode);
int i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
u16 *val);
-int i40e_blink_phy_link_led(struct i40e_hw *hw,
- u32 time, u32 interval);
/* admin send queue commands */
@@ -141,9 +132,6 @@ int i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
int i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
u16 seid, bool enable, u16 vid,
struct i40e_asq_cmd_details *cmd_details);
-int i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
- u16 seid, bool enable,
- struct i40e_asq_cmd_details *cmd_details);
int i40e_aq_get_vsi_params(struct i40e_hw *hw,
struct i40e_vsi_context *vsi_ctx,
struct i40e_asq_cmd_details *cmd_details);
@@ -176,14 +164,6 @@ i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_remove_macvlan_element_data *mv_list,
u16 count, struct i40e_asq_cmd_details *cmd_details,
enum i40e_admin_queue_err *aq_status);
-int i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
- u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list,
- struct i40e_asq_cmd_details *cmd_details,
- u16 *rule_id, u16 *rules_used, u16 *rules_free);
-int i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
- u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list,
- struct i40e_asq_cmd_details *cmd_details,
- u16 *rules_used, u16 *rules_free);
int i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
@@ -220,9 +200,6 @@ int i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
u32 offset, u16 length, void *data,
bool last_command, u8 preservation_flags,
struct i40e_asq_cmd_details *cmd_details);
-int i40e_aq_rearrange_nvm(struct i40e_hw *hw,
- u8 rearrange_nvm,
- struct i40e_asq_cmd_details *cmd_details);
int i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
u8 mib_type, void *buff, u16 buff_size,
u16 *local_len, u16 *remote_len,
@@ -234,9 +211,6 @@ i40e_aq_set_lldp_mib(struct i40e_hw *hw,
int i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
bool enable_update,
struct i40e_asq_cmd_details *cmd_details);
-int
-i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
- struct i40e_asq_cmd_details *cmd_details);
int i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
bool persist,
struct i40e_asq_cmd_details *cmd_details);
@@ -458,13 +432,7 @@ int i40e_read_phy_register_clause45(struct i40e_hw *hw,
u8 page, u16 reg, u8 phy_addr, u16 *value);
int i40e_write_phy_register_clause45(struct i40e_hw *hw,
u8 page, u16 reg, u8 phy_addr, u16 value);
-int i40e_read_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
- u8 phy_addr, u16 *value);
-int i40e_write_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
- u8 phy_addr, u16 value);
u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num);
-int i40e_blink_phy_link_led(struct i40e_hw *hw,
- u32 time, u32 interval);
int i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
u16 buff_size, u32 track_id,
u32 *error_offset, u32 *error_info,
@@ -477,20 +445,12 @@ int i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
struct i40e_generic_seg_header *
i40e_find_segment_in_package(u32 segment_type,
struct i40e_package_header *pkg_header);
-struct i40e_profile_section_header *
-i40e_find_section_in_profile(u32 section_type,
- struct i40e_profile_segment *profile);
int
i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg,
u32 track_id);
int
i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg,
u32 track_id);
-int
-i40e_add_pinfo_to_list(struct i40e_hw *hw,
- struct i40e_profile_segment *profile,
- u8 *profile_info_sec, u32 track_id);
-
/* i40e_ddp */
int i40e_ddp_flash(struct net_device *netdev, struct ethtool_flash *flash);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index dfa785e39458..88e6bef69342 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -216,7 +216,7 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
* @notify_vf: notify vf about reset or not
* Reset VF handler.
**/
-static void i40e_vc_reset_vf(struct i40e_vf *vf, bool notify_vf)
+void i40e_vc_reset_vf(struct i40e_vf *vf, bool notify_vf)
{
struct i40e_pf *pf = vf->pf;
int i;
@@ -1546,8 +1546,8 @@ static void i40e_cleanup_reset_vf(struct i40e_vf *vf)
* @vf: pointer to the VF structure
* @flr: VFLR was issued or not
*
- * Returns true if the VF is in reset, resets successfully, or resets
- * are disabled and false otherwise.
+ * Return: True if reset was performed successfully or if resets are disabled.
+ * False if reset is already in progress.
**/
bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
{
@@ -1566,7 +1566,7 @@ bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
/* If VF is being reset already we don't need to continue. */
if (test_and_set_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
- return true;
+ return false;
i40e_trigger_vf_reset(vf, flr);
@@ -4328,7 +4328,10 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf)
reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
if (reg & BIT(bit_idx))
/* i40e_reset_vf will clear the bit in GLGEN_VFLRSTAT */
- i40e_reset_vf(vf, true);
+ if (!i40e_reset_vf(vf, true)) {
+ /* At least one VF did not finish resetting, retry next time */
+ set_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
+ }
}
return 0;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 66f95e2f3146..5cf74f16f433 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -64,6 +64,12 @@ struct i40evf_channel {
u64 max_tx_rate; /* bandwidth rate allocation for VSIs */
};
+struct i40e_mdd_vf_events {
+	u64 count; /* total count of Rx or Tx events */
+	/* value of count at the time of the last printed event */
+ u64 last_printed;
+};
+
/* VF information structure */
struct i40e_vf {
struct i40e_pf *pf;
@@ -92,7 +98,9 @@ struct i40e_vf {
u8 num_queue_pairs; /* num of qps assigned to VF vsis */
u8 num_req_queues; /* num of requested qps */
- u64 num_mdd_events; /* num of mdd events detected */
+ /* num of mdd tx and rx events detected */
+ struct i40e_mdd_vf_events mdd_rx_events;
+ struct i40e_mdd_vf_events mdd_tx_events;
unsigned long vf_caps; /* vf's adv. capabilities */
unsigned long vf_states; /* vf's runtime states */
@@ -120,6 +128,7 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs);
int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
u32 v_retval, u8 *msg, u16 msglen);
int i40e_vc_process_vflr_event(struct i40e_pf *pf);
+void i40e_vc_reset_vf(struct i40e_vf *vf, bool notify_vf);
bool i40e_reset_vf(struct i40e_vf *vf, bool flr);
bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr);
void i40e_vc_notify_vf_reset(struct i40e_vf *vf);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 4e885df789ef..9f47388eaba5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -2,6 +2,7 @@
/* Copyright(c) 2018 Intel Corporation. */
#include <linux/bpf_trace.h>
+#include <linux/unroll.h>
#include <net/xdp_sock_drv.h>
#include "i40e_txrx_common.h"
#include "i40e_xsk.h"
@@ -395,32 +396,6 @@ static void i40e_handle_xdp_result_zc(struct i40e_ring *rx_ring,
WARN_ON_ONCE(1);
}
-static int
-i40e_add_xsk_frag(struct i40e_ring *rx_ring, struct xdp_buff *first,
- struct xdp_buff *xdp, const unsigned int size)
-{
- struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(first);
-
- if (!xdp_buff_has_frags(first)) {
- sinfo->nr_frags = 0;
- sinfo->xdp_frags_size = 0;
- xdp_buff_set_frags_flag(first);
- }
-
- if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) {
- xsk_buff_free(first);
- return -ENOMEM;
- }
-
- __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++,
- virt_to_page(xdp->data_hard_start),
- XDP_PACKET_HEADROOM, size);
- sinfo->xdp_frags_size += size;
- xsk_buff_add_frag(xdp);
-
- return 0;
-}
-
/**
* i40e_clean_rx_irq_zc - Consumes Rx packets from the hardware ring
* @rx_ring: Rx ring
@@ -486,8 +461,10 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
if (!first)
first = bi;
- else if (i40e_add_xsk_frag(rx_ring, first, bi, size))
+ else if (!xsk_buff_add_frag(first, bi)) {
+ xsk_buff_free(first);
break;
+ }
if (++next_to_process == count)
next_to_process = 0;
@@ -553,7 +530,8 @@ static void i40e_xmit_pkt_batch(struct i40e_ring *xdp_ring, struct xdp_desc *des
dma_addr_t dma;
u32 i;
- loop_unrolled_for(i = 0; i < PKTS_PER_BATCH; i++) {
+ unrolled_count(PKTS_PER_BATCH)
+ for (i = 0; i < PKTS_PER_BATCH; i++) {
u32 cmd = I40E_TX_DESC_CMD_ICRC | xsk_is_eop_desc(&desc[i]);
dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc[i].addr);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.h b/drivers/net/ethernet/intel/i40e/i40e_xsk.h
index ef156fad52f2..dd16351a7af8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.h
@@ -6,7 +6,7 @@
#include <linux/types.h>
-/* This value should match the pragma in the loop_unrolled_for
+/* This value should match the pragma in the unrolled_count()
* macro. Why 4? It is strictly empirical. It seems to be a good
* compromise between the advantage of having simultaneous outstanding
* reads to the DMA array that can hide each others latency and the
@@ -14,14 +14,6 @@
*/
#define PKTS_PER_BATCH 4
-#ifdef __clang__
-#define loop_unrolled_for _Pragma("clang loop unroll_count(4)") for
-#elif __GNUC__ >= 8
-#define loop_unrolled_for _Pragma("GCC unroll 4") for
-#else
-#define loop_unrolled_for for
-#endif
-
struct i40e_ring;
struct i40e_vsi;
struct net_device;
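With the driver-local loop_unrolled_for macro gone, the batch loop relies on the generic unrolled_count() helper from <linux/unroll.h>, as used in the i40e_xsk.c hunk above. A minimal sketch of that usage (the function below is illustrative, not driver code):

#include <linux/unroll.h>

static void scale_by_two(int *v, int n)
{
	unrolled_count(4)		/* ask the compiler to unroll by 4 */
	for (int i = 0; i < n; i++)
		v[i] *= 2;
}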
diff --git a/drivers/net/ethernet/intel/iavf/Makefile b/drivers/net/ethernet/intel/iavf/Makefile
index 356ac9faa5bf..e13720a728ff 100644
--- a/drivers/net/ethernet/intel/iavf/Makefile
+++ b/drivers/net/ethernet/intel/iavf/Makefile
@@ -13,3 +13,5 @@ obj-$(CONFIG_IAVF) += iavf.o
iavf-y := iavf_main.o iavf_ethtool.o iavf_virtchnl.o iavf_fdir.o \
iavf_adv_rss.o iavf_txrx.o iavf_common.o iavf_adminq.o
+
+iavf-$(CONFIG_PTP_1588_CLOCK) += iavf_ptp.o
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index 532a0a595fe8..f7a98ff43a57 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -41,6 +41,7 @@
#include "iavf_txrx.h"
#include "iavf_fdir.h"
#include "iavf_adv_rss.h"
+#include "iavf_types.h"
#include <linux/bitmap.h>
#define DEFAULT_DEBUG_LEVEL_SHIFT 3
@@ -82,7 +83,7 @@ struct iavf_vsi {
#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
-#define IAVF_RX_DESC(R, i) (&(((union iavf_32byte_rx_desc *)((R)->desc))[i]))
+#define IAVF_RX_DESC(R, i) (&(((struct iavf_rx_desc *)((R)->desc))[i]))
#define IAVF_TX_DESC(R, i) (&(((struct iavf_tx_desc *)((R)->desc))[i]))
#define IAVF_TX_CTXTDESC(R, i) \
(&(((struct iavf_tx_context_desc *)((R)->desc))[i]))
@@ -267,10 +268,10 @@ struct iavf_adapter {
struct list_head vlan_filter_list;
int num_vlan_filters;
struct list_head mac_filter_list;
- struct mutex crit_lock;
/* Lock to protect accesses to MAC and VLAN lists */
spinlock_t mac_vlan_list_lock;
char misc_vector_name[IFNAMSIZ + 9];
+ u8 rxdid;
int num_active_queues;
int num_req_queues;
@@ -343,6 +344,17 @@ struct iavf_adapter {
#define IAVF_FLAG_AQ_CONFIGURE_QUEUES_BW BIT_ULL(39)
#define IAVF_FLAG_AQ_CFG_QUEUES_QUANTA_SIZE BIT_ULL(40)
#define IAVF_FLAG_AQ_GET_QOS_CAPS BIT_ULL(41)
+#define IAVF_FLAG_AQ_GET_SUPPORTED_RXDIDS BIT_ULL(42)
+#define IAVF_FLAG_AQ_GET_PTP_CAPS BIT_ULL(43)
+#define IAVF_FLAG_AQ_SEND_PTP_CMD BIT_ULL(44)
+
+ /* AQ messages that must be sent after IAVF_FLAG_AQ_GET_CONFIG, in
+	 * order to negotiate extended capabilities.
+ */
+#define IAVF_FLAG_AQ_EXTENDED_CAPS \
+ (IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS | \
+ IAVF_FLAG_AQ_GET_SUPPORTED_RXDIDS | \
+ IAVF_FLAG_AQ_GET_PTP_CAPS)
/* flags for processing extended capability messages during
* __IAVF_INIT_EXTENDED_CAPS. Each capability exchange requires
@@ -354,10 +366,18 @@ struct iavf_adapter {
u64 extended_caps;
#define IAVF_EXTENDED_CAP_SEND_VLAN_V2 BIT_ULL(0)
#define IAVF_EXTENDED_CAP_RECV_VLAN_V2 BIT_ULL(1)
+#define IAVF_EXTENDED_CAP_SEND_RXDID BIT_ULL(2)
+#define IAVF_EXTENDED_CAP_RECV_RXDID BIT_ULL(3)
+#define IAVF_EXTENDED_CAP_SEND_PTP BIT_ULL(4)
+#define IAVF_EXTENDED_CAP_RECV_PTP BIT_ULL(5)
#define IAVF_EXTENDED_CAPS \
(IAVF_EXTENDED_CAP_SEND_VLAN_V2 | \
- IAVF_EXTENDED_CAP_RECV_VLAN_V2)
+ IAVF_EXTENDED_CAP_RECV_VLAN_V2 | \
+ IAVF_EXTENDED_CAP_SEND_RXDID | \
+ IAVF_EXTENDED_CAP_RECV_RXDID | \
+ IAVF_EXTENDED_CAP_SEND_PTP | \
+ IAVF_EXTENDED_CAP_RECV_PTP)
/* Lock to prevent possible clobbering of
* current_netdev_promisc_flags
@@ -417,12 +437,18 @@ struct iavf_adapter {
VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF)
#define QOS_ALLOWED(_a) ((_a)->vf_res->vf_cap_flags & \
VIRTCHNL_VF_OFFLOAD_QOS)
+#define IAVF_RXDID_ALLOWED(a) \
+ ((a)->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
+#define IAVF_PTP_ALLOWED(a) \
+ ((a)->vf_res->vf_cap_flags & VIRTCHNL_VF_CAP_PTP)
struct virtchnl_vf_resource *vf_res; /* incl. all VSIs */
struct virtchnl_vsi_resource *vsi_res; /* our LAN VSI */
struct virtchnl_version_info pf_version;
#define PF_IS_V11(_a) (((_a)->pf_version.major == 1) && \
((_a)->pf_version.minor == 1))
struct virtchnl_vlan_caps vlan_v2_caps;
+ u64 supp_rxdids;
+ struct iavf_ptp ptp;
u16 msg_enable;
struct iavf_eth_stats current_stats;
struct virtchnl_qos_cap_list *qos_caps;
@@ -555,6 +581,10 @@ int iavf_send_vf_config_msg(struct iavf_adapter *adapter);
int iavf_get_vf_config(struct iavf_adapter *adapter);
int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter);
int iavf_send_vf_offload_vlan_v2_msg(struct iavf_adapter *adapter);
+int iavf_send_vf_supported_rxdids_msg(struct iavf_adapter *adapter);
+int iavf_get_vf_supported_rxdids(struct iavf_adapter *adapter);
+int iavf_send_vf_ptp_caps_msg(struct iavf_adapter *adapter);
+int iavf_get_vf_ptp_caps(struct iavf_adapter *adapter);
void iavf_set_queue_vlan_tag_loc(struct iavf_adapter *adapter);
u16 iavf_get_num_vlans_added(struct iavf_adapter *adapter);
void iavf_irq_enable(struct iavf_adapter *adapter, bool flush);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
index 74a1e9fe1821..2b2b315205b5 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
@@ -4,6 +4,8 @@
#include <linux/bitfield.h>
#include <linux/uaccess.h>
+#include <net/netdev_lock.h>
+
/* ethtool support for iavf */
#include "iavf.h"
@@ -1256,9 +1258,10 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
{
struct ethtool_rx_flow_spec *fsp = &cmd->fs;
struct iavf_fdir_fltr *fltr;
- int count = 50;
int err;
+ netdev_assert_locked(adapter->netdev);
+
if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
return -EOPNOTSUPP;
@@ -1277,14 +1280,6 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
if (!fltr)
return -ENOMEM;
- while (!mutex_trylock(&adapter->crit_lock)) {
- if (--count == 0) {
- kfree(fltr);
- return -EINVAL;
- }
- udelay(1);
- }
-
err = iavf_add_fdir_fltr_info(adapter, fsp, fltr);
if (!err)
err = iavf_fdir_add_fltr(adapter, fltr);
@@ -1292,7 +1287,6 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
if (err)
kfree(fltr);
- mutex_unlock(&adapter->crit_lock);
return err;
}
@@ -1435,11 +1429,13 @@ iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter,
{
struct iavf_adv_rss *rss_old, *rss_new;
bool rss_new_add = false;
- int count = 50, err = 0;
bool symm = false;
u64 hash_flds;
+ int err = 0;
u32 hdrs;
+ netdev_assert_locked(adapter->netdev);
+
if (!ADV_RSS_SUPPORT(adapter))
return -EOPNOTSUPP;
@@ -1463,15 +1459,6 @@ iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter,
return -EINVAL;
}
- while (!mutex_trylock(&adapter->crit_lock)) {
- if (--count == 0) {
- kfree(rss_new);
- return -EINVAL;
- }
-
- udelay(1);
- }
-
spin_lock_bh(&adapter->adv_rss_lock);
rss_old = iavf_find_adv_rss_cfg_by_hdrs(adapter, hdrs);
if (rss_old) {
@@ -1500,8 +1487,6 @@ iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter,
if (!err)
iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_ADV_RSS_CFG);
- mutex_unlock(&adapter->crit_lock);
-
if (!rss_new_add)
kfree(rss_new);
@@ -1808,7 +1793,7 @@ static int iavf_set_rxfh(struct net_device *netdev,
static const struct ethtool_ops iavf_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_USE_ADAPTIVE,
- .cap_rss_sym_xor_supported = true,
+ .supported_input_xfrm = RXH_XFRM_SYM_XOR,
.get_drvinfo = iavf_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_ringparam = iavf_get_ringparam,
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 12ef160425aa..81d7249d1149 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -2,8 +2,10 @@
/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include <linux/net/intel/libie/rx.h>
+#include <net/netdev_lock.h>
#include "iavf.h"
+#include "iavf_ptp.h"
#include "iavf_prototype.h"
/* All iavf tracepoints are defined by the include below, which must
* be included exactly once across the whole kernel with
@@ -46,8 +48,8 @@ MODULE_DEVICE_TABLE(pci, iavf_pci_tbl);
MODULE_ALIAS("i40evf");
MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver");
-MODULE_IMPORT_NS(LIBETH);
-MODULE_IMPORT_NS(LIBIE);
+MODULE_IMPORT_NS("LIBETH");
+MODULE_IMPORT_NS("LIBIE");
MODULE_LICENSE("GPL v2");
static const struct net_device_ops iavf_netdev_ops;
@@ -710,6 +712,47 @@ static void iavf_configure_tx(struct iavf_adapter *adapter)
}
/**
+ * iavf_select_rx_desc_format - Select Rx descriptor format
+ * @adapter: adapter private structure
+ *
+ * Select what Rx descriptor format based on availability and enabled
+ * features.
+ *
+ * Return: the desired RXDID to select for a given Rx queue, as defined by
+ * enum virtchnl_rxdid_format.
+ */
+static u8 iavf_select_rx_desc_format(const struct iavf_adapter *adapter)
+{
+ u64 rxdids = adapter->supp_rxdids;
+
+ /* If we did not negotiate VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC, we must
+ * stick with the default value of the legacy 32 byte format.
+ */
+ if (!IAVF_RXDID_ALLOWED(adapter))
+ return VIRTCHNL_RXDID_1_32B_BASE;
+
+ /* Rx timestamping requires the use of flexible NIC descriptors */
+ if (iavf_ptp_cap_supported(adapter, VIRTCHNL_1588_PTP_CAP_RX_TSTAMP)) {
+ if (rxdids & BIT(VIRTCHNL_RXDID_2_FLEX_SQ_NIC))
+ return VIRTCHNL_RXDID_2_FLEX_SQ_NIC;
+
+ pci_warn(adapter->pdev,
+ "Unable to negotiate flexible descriptor format\n");
+ }
+
+ /* Warn if the PF does not list support for the default legacy
+ * descriptor format. This shouldn't happen, as this is the format
+ * used if VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC is not supported. It is
+ * likely caused by a bug in the PF implementation failing to indicate
+ * support for the format.
+ */
+ if (!(rxdids & VIRTCHNL_RXDID_1_32B_BASE_M))
+ netdev_warn(adapter->netdev, "PF does not list support for default Rx descriptor format\n");
+
+ return VIRTCHNL_RXDID_1_32B_BASE;
+}
+
+/**
* iavf_configure_rx - Configure Receive Unit after Reset
* @adapter: board private structure
*
@@ -719,8 +762,12 @@ static void iavf_configure_rx(struct iavf_adapter *adapter)
{
struct iavf_hw *hw = &adapter->hw;
- for (u32 i = 0; i < adapter->num_active_queues; i++)
+ adapter->rxdid = iavf_select_rx_desc_format(adapter);
+
+ for (u32 i = 0; i < adapter->num_active_queues; i++) {
adapter->rx_rings[i].tail = hw->hw_addr + IAVF_QRX_TAIL1(i);
+ adapter->rx_rings[i].rxdid = adapter->rxdid;
+ }
}
/**
@@ -773,6 +820,11 @@ iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter,
f->state = IAVF_VLAN_ADD;
adapter->num_vlan_filters++;
iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_VLAN_FILTER);
+ } else if (f->state == IAVF_VLAN_REMOVE) {
+ /* IAVF_VLAN_REMOVE means that VLAN wasn't yet removed.
+ * We can safely only change the state here.
+ */
+ f->state = IAVF_VLAN_ACTIVE;
}
clearout:
@@ -793,8 +845,18 @@ static void iavf_del_vlan(struct iavf_adapter *adapter, struct iavf_vlan vlan)
f = iavf_find_vlan(adapter, vlan);
if (f) {
- f->state = IAVF_VLAN_REMOVE;
- iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_DEL_VLAN_FILTER);
+ /* IAVF_ADD_VLAN means that VLAN wasn't even added yet.
+ * Remove it from the list.
+ */
+ if (f->state == IAVF_VLAN_ADD) {
+ list_del(&f->list);
+ kfree(f);
+ adapter->num_vlan_filters--;
+ } else {
+ f->state = IAVF_VLAN_REMOVE;
+ iavf_schedule_aq_request(adapter,
+ IAVF_FLAG_AQ_DEL_VLAN_FILTER);
+ }
}
spin_unlock_bh(&adapter->mac_vlan_list_lock);
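The add/del changes above make the VLAN filter list behave like a small three-state machine: a filter whose removal is still pending is simply flipped back to active, and a filter whose addition was never sent to the PF is dropped from the list outright. A compact sketch of that state logic (illustrative, not driver code):

enum fstate { F_ADD, F_ACTIVE, F_REMOVE };

/* Re-adding a filter that is only queued for removal just cancels the removal. */
static enum fstate on_readd(enum fstate s)
{
	return (s == F_REMOVE) ? F_ACTIVE : s;
}

/* Removing a filter that was never sent to the PF means it can be freed
 * immediately; otherwise it must go through the normal REMOVE request.
 */
static int remove_frees_immediately(enum fstate s)
{
	return s == F_ADD;
}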
@@ -1180,7 +1242,7 @@ static void iavf_napi_enable_all(struct iavf_adapter *adapter)
q_vector = &adapter->q_vectors[q_idx];
napi = &q_vector->napi;
- napi_enable(napi);
+ napi_enable_locked(napi);
}
}
@@ -1196,7 +1258,7 @@ static void iavf_napi_disable_all(struct iavf_adapter *adapter)
for (q_idx = 0; q_idx < q_vectors; q_idx++) {
q_vector = &adapter->q_vectors[q_idx];
- napi_disable(&q_vector->napi);
+ napi_disable_locked(&q_vector->napi);
}
}
@@ -1225,11 +1287,11 @@ static void iavf_configure(struct iavf_adapter *adapter)
/**
* iavf_up_complete - Finish the last steps of bringing up a connection
* @adapter: board private structure
- *
- * Expects to be called while holding crit_lock.
- **/
+ */
static void iavf_up_complete(struct iavf_adapter *adapter)
{
+ netdev_assert_locked(adapter->netdev);
+
iavf_change_state(adapter, __IAVF_RUNNING);
clear_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
@@ -1348,13 +1410,13 @@ static void iavf_clear_adv_rss_conf(struct iavf_adapter *adapter)
/**
* iavf_down - Shutdown the connection processing
* @adapter: board private structure
- *
- * Expects to be called while holding crit_lock.
- **/
+ */
void iavf_down(struct iavf_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
+ netdev_assert_locked(netdev);
+
if (adapter->state <= __IAVF_DOWN_PENDING)
return;
@@ -1800,8 +1862,8 @@ static int iavf_alloc_q_vectors(struct iavf_adapter *adapter)
q_vector->v_idx = q_idx;
q_vector->reg_idx = q_idx;
cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
- netif_napi_add(adapter->netdev, &q_vector->napi,
- iavf_napi_poll);
+ netif_napi_add_locked(adapter->netdev, &q_vector->napi,
+ iavf_napi_poll);
}
return 0;
@@ -1827,7 +1889,7 @@ static void iavf_free_q_vectors(struct iavf_adapter *adapter)
for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
struct iavf_q_vector *q_vector = &adapter->q_vectors[q_idx];
- netif_napi_del(&q_vector->napi);
+ netif_napi_del_locked(&q_vector->napi);
}
kfree(adapter->q_vectors);
adapter->q_vectors = NULL;
@@ -1963,21 +2025,21 @@ err:
* iavf_finish_config - do all netdev work that needs RTNL
* @work: our work_struct
*
- * Do work that needs both RTNL and crit_lock.
- **/
+ * Do work that needs RTNL.
+ */
static void iavf_finish_config(struct work_struct *work)
{
struct iavf_adapter *adapter;
+ bool netdev_released = false;
int pairs, err;
adapter = container_of(work, struct iavf_adapter, finish_config);
/* Always take RTNL first to prevent circular lock dependency;
- * The dev->lock is needed to update the queue number
+ * the dev->lock (== netdev lock) is needed to update the queue number.
*/
rtnl_lock();
- mutex_lock(&adapter->netdev->lock);
- mutex_lock(&adapter->crit_lock);
+ netdev_lock(adapter->netdev);
if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES) &&
adapter->netdev->reg_state == NETREG_REGISTERED &&
@@ -1988,26 +2050,33 @@ static void iavf_finish_config(struct work_struct *work)
switch (adapter->state) {
case __IAVF_DOWN:
+ /* Set the real number of queues when reset occurs while
+ * state == __IAVF_DOWN
+ */
+ pairs = adapter->num_active_queues;
+ netif_set_real_num_rx_queues(adapter->netdev, pairs);
+ netif_set_real_num_tx_queues(adapter->netdev, pairs);
+
if (adapter->netdev->reg_state != NETREG_REGISTERED) {
+ netdev_unlock(adapter->netdev);
+ netdev_released = true;
err = register_netdevice(adapter->netdev);
if (err) {
dev_err(&adapter->pdev->dev, "Unable to register netdev (%d)\n",
err);
/* go back and try again.*/
+ netdev_lock(adapter->netdev);
iavf_free_rss(adapter);
iavf_free_misc_irq(adapter);
iavf_reset_interrupt_capability(adapter);
iavf_change_state(adapter,
__IAVF_INIT_CONFIG_ADAPTER);
+ netdev_unlock(adapter->netdev);
goto out;
}
}
-
- /* Set the real number of queues when reset occurs while
- * state == __IAVF_DOWN
- */
- fallthrough;
+ break;
case __IAVF_RUNNING:
pairs = adapter->num_active_queues;
netif_set_real_num_rx_queues(adapter->netdev, pairs);
@@ -2019,8 +2088,8 @@ static void iavf_finish_config(struct work_struct *work)
}
out:
- mutex_unlock(&adapter->crit_lock);
- mutex_unlock(&adapter->netdev->lock);
+ if (!netdev_released)
+ netdev_unlock(adapter->netdev);
rtnl_unlock();
}
@@ -2049,6 +2118,10 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter)
return iavf_send_vf_config_msg(adapter);
if (adapter->aq_required & IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS)
return iavf_send_vf_offload_vlan_v2_msg(adapter);
+ if (adapter->aq_required & IAVF_FLAG_AQ_GET_SUPPORTED_RXDIDS)
+ return iavf_send_vf_supported_rxdids_msg(adapter);
+ if (adapter->aq_required & IAVF_FLAG_AQ_GET_PTP_CAPS)
+ return iavf_send_vf_ptp_caps_msg(adapter);
if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_QUEUES) {
iavf_disable_queues(adapter);
return 0;
@@ -2213,7 +2286,10 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter)
iavf_enable_vlan_insertion_v2(adapter, ETH_P_8021AD);
return 0;
}
-
+ if (adapter->aq_required & IAVF_FLAG_AQ_SEND_PTP_CMD) {
+ iavf_virtchnl_send_ptp_cmd(adapter);
+ return IAVF_SUCCESS;
+ }
if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_STATS) {
iavf_request_stats(adapter);
return 0;
@@ -2578,6 +2654,112 @@ err:
}
/**
+ * iavf_init_send_supported_rxdids - part of querying for supported RXDID
+ * formats
+ * @adapter: board private structure
+ *
+ * Function processes send of the request for supported RXDIDs to the PF.
+ * Must clear IAVF_EXTENDED_CAP_RECV_RXDID if the message is not sent, e.g.
+ * due to the PF not negotiating VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC.
+ */
+static void iavf_init_send_supported_rxdids(struct iavf_adapter *adapter)
+{
+ int ret;
+
+ ret = iavf_send_vf_supported_rxdids_msg(adapter);
+ if (ret == -EOPNOTSUPP) {
+ /* PF does not support VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC. In this
+ * case, we did not send the capability exchange message and
+ * do not expect a response.
+ */
+ adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_RXDID;
+ }
+
+ /* We sent the message, so move on to the next step */
+ adapter->extended_caps &= ~IAVF_EXTENDED_CAP_SEND_RXDID;
+}
+
+/**
+ * iavf_init_recv_supported_rxdids - part of querying for supported RXDID
+ * formats
+ * @adapter: board private structure
+ *
+ * Function processes receipt of the supported RXDIDs message from the PF.
+ **/
+static void iavf_init_recv_supported_rxdids(struct iavf_adapter *adapter)
+{
+ int ret;
+
+ memset(&adapter->supp_rxdids, 0, sizeof(adapter->supp_rxdids));
+
+ ret = iavf_get_vf_supported_rxdids(adapter);
+ if (ret)
+ goto err;
+
+ /* We've processed the PF response to the
+ * VIRTCHNL_OP_GET_SUPPORTED_RXDIDS message we sent previously.
+ */
+ adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_RXDID;
+ return;
+
+err:
+ /* We didn't receive a reply. Make sure we try sending again when
+ * __IAVF_INIT_FAILED attempts to recover.
+ */
+ adapter->extended_caps |= IAVF_EXTENDED_CAP_SEND_RXDID;
+ iavf_change_state(adapter, __IAVF_INIT_FAILED);
+}
+
+/**
+ * iavf_init_send_ptp_caps - part of querying for extended PTP capabilities
+ * @adapter: board private structure
+ *
+ * Function processes send of the request for 1588 PTP capabilities to the PF.
+ * Must clear IAVF_EXTENDED_CAP_RECV_PTP if the message is not sent, e.g.
+ * due to the PF not negotiating VIRTCHNL_VF_CAP_PTP.
+ */
+static void iavf_init_send_ptp_caps(struct iavf_adapter *adapter)
+{
+ if (iavf_send_vf_ptp_caps_msg(adapter) == -EOPNOTSUPP) {
+		/* PF does not support VIRTCHNL_VF_CAP_PTP. In this case, we
+ * did not send the capability exchange message and do not
+ * expect a response.
+ */
+ adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_PTP;
+ }
+
+ /* We sent the message, so move on to the next step */
+ adapter->extended_caps &= ~IAVF_EXTENDED_CAP_SEND_PTP;
+}
+
+/**
+ * iavf_init_recv_ptp_caps - part of querying for supported PTP capabilities
+ * @adapter: board private structure
+ *
+ * Function processes receipt of the PTP capabilities supported on this VF.
+ **/
+static void iavf_init_recv_ptp_caps(struct iavf_adapter *adapter)
+{
+ memset(&adapter->ptp.hw_caps, 0, sizeof(adapter->ptp.hw_caps));
+
+ if (iavf_get_vf_ptp_caps(adapter))
+ goto err;
+
+ /* We've processed the PF response to the VIRTCHNL_OP_1588_PTP_GET_CAPS
+ * message we sent previously.
+ */
+ adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_PTP;
+ return;
+
+err:
+ /* We didn't receive a reply. Make sure we try sending again when
+ * __IAVF_INIT_FAILED attempts to recover.
+ */
+ adapter->extended_caps |= IAVF_EXTENDED_CAP_SEND_PTP;
+ iavf_change_state(adapter, __IAVF_INIT_FAILED);
+}
+
+/**
* iavf_init_process_extended_caps - Part of driver startup
* @adapter: board private structure
*
@@ -2601,6 +2783,24 @@ static void iavf_init_process_extended_caps(struct iavf_adapter *adapter)
return;
}
+ /* Process capability exchange for RXDID formats */
+ if (adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_RXDID) {
+ iavf_init_send_supported_rxdids(adapter);
+ return;
+ } else if (adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_RXDID) {
+ iavf_init_recv_supported_rxdids(adapter);
+ return;
+ }
+
+ /* Process capability exchange for PTP features */
+ if (adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_PTP) {
+ iavf_init_send_ptp_caps(adapter);
+ return;
+ } else if (adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_PTP) {
+ iavf_init_recv_ptp_caps(adapter);
+ return;
+ }
+
/* When we reach here, no further extended capabilities exchanges are
* necessary, so we finally transition into __IAVF_INIT_CONFIG_ADAPTER
*/
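Each extended capability above is tracked with a SEND bit and a RECV bit, and iavf_init_process_extended_caps() runs one step at a time until both are clear. A stripped-down sketch of that two-phase handshake (flag names and helpers here are illustrative):

#define CAP_SEND	0x1	/* request still has to be sent */
#define CAP_RECV	0x2	/* reply is still outstanding */

static void cap_step(unsigned long *caps)
{
	if (*caps & CAP_SEND) {
		/* send the request; if the peer does not support it,
		 * also clear CAP_RECV since no reply will ever arrive
		 */
		*caps &= ~CAP_SEND;
	} else if (*caps & CAP_RECV) {
		/* parse the reply; on failure set CAP_SEND again so the
		 * whole exchange is retried from the start
		 */
		*caps &= ~CAP_RECV;
	}
}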
@@ -2692,6 +2892,9 @@ static void iavf_init_config_adapter(struct iavf_adapter *adapter)
if (QOS_ALLOWED(adapter))
adapter->aq_required |= IAVF_FLAG_AQ_GET_QOS_CAPS;
+ /* Setup initial PTP configuration */
+ iavf_ptp_init(adapter);
+
iavf_schedule_finish_config(adapter);
return;
@@ -2704,24 +2907,15 @@ err:
iavf_change_state(adapter, __IAVF_INIT_FAILED);
}
-/**
- * iavf_watchdog_task - Periodic call-back task
- * @work: pointer to work_struct
- **/
-static void iavf_watchdog_task(struct work_struct *work)
+static const int IAVF_NO_RESCHED = -1;
+
+/* return: msec delay for requeueing itself */
+static int iavf_watchdog_step(struct iavf_adapter *adapter)
{
- struct iavf_adapter *adapter = container_of(work,
- struct iavf_adapter,
- watchdog_task.work);
struct iavf_hw *hw = &adapter->hw;
u32 reg_val;
- if (!mutex_trylock(&adapter->crit_lock)) {
- if (adapter->state == __IAVF_REMOVE)
- return;
-
- goto restart_watchdog;
- }
+ netdev_assert_locked(adapter->netdev);
if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
iavf_change_state(adapter, __IAVF_COMM_FAILED);
@@ -2729,34 +2923,19 @@ static void iavf_watchdog_task(struct work_struct *work)
switch (adapter->state) {
case __IAVF_STARTUP:
iavf_startup(adapter);
- mutex_unlock(&adapter->crit_lock);
- queue_delayed_work(adapter->wq, &adapter->watchdog_task,
- msecs_to_jiffies(30));
- return;
+ return 30;
case __IAVF_INIT_VERSION_CHECK:
iavf_init_version_check(adapter);
- mutex_unlock(&adapter->crit_lock);
- queue_delayed_work(adapter->wq, &adapter->watchdog_task,
- msecs_to_jiffies(30));
- return;
+ return 30;
case __IAVF_INIT_GET_RESOURCES:
iavf_init_get_resources(adapter);
- mutex_unlock(&adapter->crit_lock);
- queue_delayed_work(adapter->wq, &adapter->watchdog_task,
- msecs_to_jiffies(1));
- return;
+ return 1;
case __IAVF_INIT_EXTENDED_CAPS:
iavf_init_process_extended_caps(adapter);
- mutex_unlock(&adapter->crit_lock);
- queue_delayed_work(adapter->wq, &adapter->watchdog_task,
- msecs_to_jiffies(1));
- return;
+ return 1;
case __IAVF_INIT_CONFIG_ADAPTER:
iavf_init_config_adapter(adapter);
- mutex_unlock(&adapter->crit_lock);
- queue_delayed_work(adapter->wq, &adapter->watchdog_task,
- msecs_to_jiffies(1));
- return;
+ return 1;
case __IAVF_INIT_FAILED:
if (test_bit(__IAVF_IN_REMOVE_TASK,
&adapter->crit_section)) {
@@ -2764,24 +2943,18 @@ static void iavf_watchdog_task(struct work_struct *work)
* watchdog task, iavf_remove should handle this state
* as it can loop forever
*/
- mutex_unlock(&adapter->crit_lock);
- return;
+ return IAVF_NO_RESCHED;
}
if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) {
dev_err(&adapter->pdev->dev,
"Failed to communicate with PF; waiting before retry\n");
adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
iavf_shutdown_adminq(hw);
- mutex_unlock(&adapter->crit_lock);
- queue_delayed_work(adapter->wq,
- &adapter->watchdog_task, (5 * HZ));
- return;
+ return 5000;
}
/* Try again from failed step*/
iavf_change_state(adapter, adapter->last_state);
- mutex_unlock(&adapter->crit_lock);
- queue_delayed_work(adapter->wq, &adapter->watchdog_task, HZ);
- return;
+ return 1000;
case __IAVF_COMM_FAILED:
if (test_bit(__IAVF_IN_REMOVE_TASK,
&adapter->crit_section)) {
@@ -2791,8 +2964,7 @@ static void iavf_watchdog_task(struct work_struct *work)
*/
iavf_change_state(adapter, __IAVF_INIT_FAILED);
adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
- mutex_unlock(&adapter->crit_lock);
- return;
+ return IAVF_NO_RESCHED;
}
reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
@@ -2810,16 +2982,9 @@ static void iavf_watchdog_task(struct work_struct *work)
}
adapter->aq_required = 0;
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
- mutex_unlock(&adapter->crit_lock);
- queue_delayed_work(adapter->wq,
- &adapter->watchdog_task,
- msecs_to_jiffies(10));
- return;
+ return 10;
case __IAVF_RESETTING:
- mutex_unlock(&adapter->crit_lock);
- queue_delayed_work(adapter->wq, &adapter->watchdog_task,
- HZ * 2);
- return;
+ return 2000;
case __IAVF_DOWN:
case __IAVF_DOWN_PENDING:
case __IAVF_TESTING:
@@ -2846,8 +3011,7 @@ static void iavf_watchdog_task(struct work_struct *work)
break;
case __IAVF_REMOVE:
default:
- mutex_unlock(&adapter->crit_lock);
- return;
+ return IAVF_NO_RESCHED;
}
/* check for hw reset */
@@ -2857,22 +3021,29 @@ static void iavf_watchdog_task(struct work_struct *work)
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
iavf_schedule_reset(adapter, IAVF_FLAG_RESET_PENDING);
- mutex_unlock(&adapter->crit_lock);
- queue_delayed_work(adapter->wq,
- &adapter->watchdog_task, HZ * 2);
- return;
}
- mutex_unlock(&adapter->crit_lock);
-restart_watchdog:
+ return adapter->aq_required ? 20 : 2000;
+}
+
+static void iavf_watchdog_task(struct work_struct *work)
+{
+ struct iavf_adapter *adapter = container_of(work,
+ struct iavf_adapter,
+ watchdog_task.work);
+ struct net_device *netdev = adapter->netdev;
+ int msec_delay;
+
+ netdev_lock(netdev);
+ msec_delay = iavf_watchdog_step(adapter);
+	/* note that this schedules the separate adminq task, not the watchdog */
if (adapter->state >= __IAVF_DOWN)
queue_work(adapter->wq, &adapter->adminq_task);
- if (adapter->aq_required)
- queue_delayed_work(adapter->wq, &adapter->watchdog_task,
- msecs_to_jiffies(20));
- else
+
+ if (msec_delay != IAVF_NO_RESCHED)
queue_delayed_work(adapter->wq, &adapter->watchdog_task,
- HZ * 2);
+ msecs_to_jiffies(msec_delay));
+ netdev_unlock(netdev);
}
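The watchdog refactor above splits the work handler into a step function that returns the delay (in milliseconds) for its own rescheduling, with a negative value meaning "do not requeue". A generic sketch of that pattern, with illustrative names:

#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>

struct demo_dev {
	struct workqueue_struct *wq;
	struct delayed_work watchdog;
	struct mutex lock;
};

static int demo_step(struct demo_dev *dd);	/* one state transition */

static void demo_watchdog(struct work_struct *work)
{
	struct demo_dev *dd = container_of(work, struct demo_dev,
					   watchdog.work);
	int msecs;

	mutex_lock(&dd->lock);
	msecs = demo_step(dd);			/* returns delay or -1 */
	if (msecs >= 0)
		queue_delayed_work(dd->wq, &dd->watchdog,
				   msecs_to_jiffies(msecs));
	mutex_unlock(&dd->lock);
}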
/**
@@ -2880,14 +3051,15 @@ restart_watchdog:
* @adapter: board private structure
*
* Set communication failed flag and free all resources.
- * NOTE: This function is expected to be called with crit_lock being held.
- **/
+ */
static void iavf_disable_vf(struct iavf_adapter *adapter)
{
struct iavf_mac_filter *f, *ftmp;
struct iavf_vlan_filter *fv, *fvtmp;
struct iavf_cloud_filter *cf, *cftmp;
+ netdev_assert_locked(adapter->netdev);
+
adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
/* We don't use netif_running() because it may be true prior to
@@ -2987,17 +3159,7 @@ static void iavf_reset_task(struct work_struct *work)
int i = 0, err;
bool running;
- /* When device is being removed it doesn't make sense to run the reset
- * task, just return in such a case.
- */
- mutex_lock(&netdev->lock);
- if (!mutex_trylock(&adapter->crit_lock)) {
- if (adapter->state != __IAVF_REMOVE)
- queue_work(adapter->wq, &adapter->reset_task);
-
- mutex_unlock(&netdev->lock);
- return;
- }
+ netdev_lock(netdev);
iavf_misc_irq_disable(adapter);
if (adapter->flags & IAVF_FLAG_RESET_NEEDED) {
@@ -3042,12 +3204,22 @@ static void iavf_reset_task(struct work_struct *work)
dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
reg_val);
iavf_disable_vf(adapter);
- mutex_unlock(&adapter->crit_lock);
- mutex_unlock(&netdev->lock);
+ netdev_unlock(netdev);
return; /* Do not attempt to reinit. It's dead, Jim. */
}
continue_reset:
+ /* If we are still early in the state machine, just restart. */
+ if (adapter->state <= __IAVF_INIT_FAILED) {
+ iavf_shutdown_adminq(hw);
+ iavf_change_state(adapter, __IAVF_STARTUP);
+ iavf_startup(adapter);
+ queue_delayed_work(adapter->wq, &adapter->watchdog_task,
+ msecs_to_jiffies(30));
+ netdev_unlock(netdev);
+ return;
+ }
+
/* We don't use netif_running() because it may be true prior to
* ndo_open() returning, so we can't assume it means all our open
* tasks have finished, since we're not holding the rtnl_lock here.
@@ -3099,15 +3271,18 @@ continue_reset:
}
adapter->aq_required |= IAVF_FLAG_AQ_GET_CONFIG;
- /* always set since VIRTCHNL_OP_GET_VF_RESOURCES has not been
- * sent/received yet, so VLAN_V2_ALLOWED() cannot is not reliable here,
- * however the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS won't be sent until
- * VIRTCHNL_OP_GET_VF_RESOURCES and VIRTCHNL_VF_OFFLOAD_VLAN_V2 have
- * been successfully sent and negotiated
- */
- adapter->aq_required |= IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS;
adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
+ /* Certain capabilities require an extended negotiation process using
+ * extra messages that must be processed after getting the VF
+ * configuration. The related checks such as VLAN_V2_ALLOWED() are not
+ * reliable here, since the configuration has not yet been negotiated.
+ *
+	 * Always set these flags, since the related VIRTCHNL messages won't
+ * be sent until after VIRTCHNL_OP_GET_VF_RESOURCES.
+ */
+ adapter->aq_required |= IAVF_FLAG_AQ_EXTENDED_CAPS;
+
spin_lock_bh(&adapter->mac_vlan_list_lock);
/* Delete filter for the current MAC address, it could have
@@ -3183,8 +3358,7 @@ continue_reset:
adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
wake_up(&adapter->reset_waitqueue);
- mutex_unlock(&adapter->crit_lock);
- mutex_unlock(&netdev->lock);
+ netdev_unlock(netdev);
return;
reset_err:
@@ -3194,8 +3368,7 @@ reset_err:
}
iavf_disable_vf(adapter);
- mutex_unlock(&adapter->crit_lock);
- mutex_unlock(&netdev->lock);
+ netdev_unlock(netdev);
dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
}
@@ -3207,6 +3380,7 @@ static void iavf_adminq_task(struct work_struct *work)
{
struct iavf_adapter *adapter =
container_of(work, struct iavf_adapter, adminq_task);
+ struct net_device *netdev = adapter->netdev;
struct iavf_hw *hw = &adapter->hw;
struct iavf_arq_event_info event;
enum virtchnl_ops v_op;
@@ -3214,13 +3388,7 @@ static void iavf_adminq_task(struct work_struct *work)
u32 val, oldval;
u16 pending;
- if (!mutex_trylock(&adapter->crit_lock)) {
- if (adapter->state == __IAVF_REMOVE)
- return;
-
- queue_work(adapter->wq, &adapter->adminq_task);
- goto out;
- }
+ netdev_lock(netdev);
if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
goto unlock;
@@ -3287,8 +3455,7 @@ static void iavf_adminq_task(struct work_struct *work)
freedom:
kfree(event.msg_buf);
unlock:
- mutex_unlock(&adapter->crit_lock);
-out:
+ netdev_unlock(netdev);
/* re-enable Admin queue interrupt cause */
iavf_misc_irq_enable(adapter);
}
@@ -3667,10 +3834,8 @@ exit:
if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
return 0;
- mutex_lock(&netdev->lock);
netif_set_real_num_rx_queues(netdev, total_qps);
netif_set_real_num_tx_queues(netdev, total_qps);
- mutex_unlock(&netdev->lock);
return ret;
}
@@ -3983,8 +4148,8 @@ static int iavf_configure_clsflower(struct iavf_adapter *adapter,
struct flow_cls_offload *cls_flower)
{
int tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid);
- struct iavf_cloud_filter *filter = NULL;
- int err = -EINVAL, count = 50;
+ struct iavf_cloud_filter *filter;
+ int err;
if (tc < 0) {
dev_err(&adapter->pdev->dev, "Invalid traffic class\n");
@@ -3994,17 +4159,10 @@ static int iavf_configure_clsflower(struct iavf_adapter *adapter,
filter = kzalloc(sizeof(*filter), GFP_KERNEL);
if (!filter)
return -ENOMEM;
-
- while (!mutex_trylock(&adapter->crit_lock)) {
- if (--count == 0) {
- kfree(filter);
- return err;
- }
- udelay(1);
- }
-
filter->cookie = cls_flower->cookie;
+ netdev_lock(adapter->netdev);
+
/* bail out here if filter already exists */
spin_lock_bh(&adapter->cloud_filter_list_lock);
if (iavf_find_cf(adapter, &cls_flower->cookie)) {
@@ -4038,7 +4196,7 @@ err:
if (err)
kfree(filter);
- mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(adapter->netdev);
return err;
}
@@ -4335,33 +4493,20 @@ static int iavf_open(struct net_device *netdev)
struct iavf_adapter *adapter = netdev_priv(netdev);
int err;
+ netdev_assert_locked(netdev);
+
if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) {
dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n");
return -EIO;
}
- while (!mutex_trylock(&adapter->crit_lock)) {
- /* If we are in __IAVF_INIT_CONFIG_ADAPTER state the crit_lock
- * is already taken and iavf_open is called from an upper
- * device's notifier reacting on NETDEV_REGISTER event.
- * We have to leave here to avoid dead lock.
- */
- if (adapter->state == __IAVF_INIT_CONFIG_ADAPTER)
- return -EBUSY;
-
- usleep_range(500, 1000);
- }
-
- if (adapter->state != __IAVF_DOWN) {
- err = -EBUSY;
- goto err_unlock;
- }
+ if (adapter->state != __IAVF_DOWN)
+ return -EBUSY;
if (adapter->state == __IAVF_RUNNING &&
!test_bit(__IAVF_VSI_DOWN, adapter->vsi.state)) {
dev_dbg(&adapter->pdev->dev, "VF is already open.\n");
- err = 0;
- goto err_unlock;
+ return 0;
}
/* allocate transmit descriptors */
@@ -4380,9 +4525,7 @@ static int iavf_open(struct net_device *netdev)
goto err_req_irq;
spin_lock_bh(&adapter->mac_vlan_list_lock);
-
iavf_add_filter(adapter, adapter->hw.mac.addr);
-
spin_unlock_bh(&adapter->mac_vlan_list_lock);
/* Restore filters that were removed with IFF_DOWN */
@@ -4395,8 +4538,6 @@ static int iavf_open(struct net_device *netdev)
iavf_irq_enable(adapter, true);
- mutex_unlock(&adapter->crit_lock);
-
return 0;
err_req_irq:
@@ -4406,8 +4547,6 @@ err_setup_rx:
iavf_free_all_rx_resources(adapter);
err_setup_tx:
iavf_free_all_tx_resources(adapter);
-err_unlock:
- mutex_unlock(&adapter->crit_lock);
return err;
}
@@ -4429,12 +4568,10 @@ static int iavf_close(struct net_device *netdev)
u64 aq_to_restore;
int status;
- mutex_lock(&adapter->crit_lock);
+ netdev_assert_locked(netdev);
- if (adapter->state <= __IAVF_DOWN_PENDING) {
- mutex_unlock(&adapter->crit_lock);
+ if (adapter->state <= __IAVF_DOWN_PENDING)
return 0;
- }
set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
/* We cannot send IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS before
@@ -4465,7 +4602,7 @@ static int iavf_close(struct net_device *netdev)
iavf_change_state(adapter, __IAVF_DOWN_PENDING);
iavf_free_traffic_irqs(adapter);
- mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
/* We explicitly don't free resources here because the hardware is
* still active and can DMA into memory. Resources are cleared in
@@ -4483,10 +4620,10 @@ static int iavf_close(struct net_device *netdev)
msecs_to_jiffies(500));
if (!status)
netdev_warn(netdev, "Device resources not yet released\n");
+ netdev_lock(netdev);
- mutex_lock(&adapter->crit_lock);
adapter->aq_required |= aq_to_restore;
- mutex_unlock(&adapter->crit_lock);
+
return 0;
}
@@ -4948,6 +5085,25 @@ static netdev_features_t iavf_fix_features(struct net_device *netdev,
return iavf_fix_strip_features(adapter, features);
}
+static int iavf_hwstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
+{
+ struct iavf_adapter *adapter = netdev_priv(netdev);
+
+ *config = adapter->ptp.hwtstamp_config;
+
+ return 0;
+}
+
+static int iavf_hwstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ struct iavf_adapter *adapter = netdev_priv(netdev);
+
+ return iavf_ptp_set_ts_config(adapter, config, extack);
+}
+
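The new ndo_hwtstamp_get/set callbacks plug into the standard kernel timestamping plumbing, which userspace reaches through the SIOCSHWTSTAMP ioctl. A small user-space sketch of enabling Rx timestamps on an interface (error handling trimmed; not driver code):

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

static int enable_rx_tstamps(int sock, const char *ifname)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_OFF,
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	return ioctl(sock, SIOCSHWTSTAMP, &ifr);	/* 0 on success */
}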
static int
iavf_verify_shaper(struct net_shaper_binding *binding,
const struct net_shaper *shaper,
@@ -4976,15 +5132,16 @@ iavf_shaper_set(struct net_shaper_binding *binding,
struct iavf_adapter *adapter = netdev_priv(binding->netdev);
const struct net_shaper_handle *handle = &shaper->handle;
struct iavf_ring *tx_ring;
- int ret = 0;
+ int ret;
+
+ netdev_assert_locked(adapter->netdev);
- mutex_lock(&adapter->crit_lock);
if (handle->id >= adapter->num_active_queues)
- goto unlock;
+ return 0;
ret = iavf_verify_shaper(binding, shaper, extack);
if (ret)
- goto unlock;
+ return ret;
tx_ring = &adapter->tx_rings[handle->id];
@@ -4994,9 +5151,7 @@ iavf_shaper_set(struct net_shaper_binding *binding,
adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES_BW;
-unlock:
- mutex_unlock(&adapter->crit_lock);
- return ret;
+ return 0;
}
static int iavf_shaper_del(struct net_shaper_binding *binding,
@@ -5006,9 +5161,10 @@ static int iavf_shaper_del(struct net_shaper_binding *binding,
struct iavf_adapter *adapter = netdev_priv(binding->netdev);
struct iavf_ring *tx_ring;
- mutex_lock(&adapter->crit_lock);
+ netdev_assert_locked(adapter->netdev);
+
if (handle->id >= adapter->num_active_queues)
- goto unlock;
+ return 0;
tx_ring = &adapter->tx_rings[handle->id];
tx_ring->q_shaper.bw_min = 0;
@@ -5017,8 +5173,6 @@ static int iavf_shaper_del(struct net_shaper_binding *binding,
adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES_BW;
-unlock:
- mutex_unlock(&adapter->crit_lock);
return 0;
}
@@ -5056,6 +5210,8 @@ static const struct net_device_ops iavf_netdev_ops = {
.ndo_set_features = iavf_set_features,
.ndo_setup_tc = iavf_setup_tc,
.net_shaper_ops = &iavf_shaper_ops,
+ .ndo_hwtstamp_get = iavf_hwstamp_get,
+ .ndo_hwtstamp_set = iavf_hwstamp_set,
};
/**
@@ -5277,10 +5433,6 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_alloc_qos_cap;
}
- /* set up the locks for the AQ, do this only once in probe
- * and destroy them only once in remove
- */
- mutex_init(&adapter->crit_lock);
mutex_init(&hw->aq.asq_mutex);
mutex_init(&hw->aq.arq_mutex);
@@ -5310,6 +5462,10 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Setup the wait queue for indicating virtchannel events */
init_waitqueue_head(&adapter->vc_waitqueue);
+ INIT_LIST_HEAD(&adapter->ptp.aq_cmds);
+ init_waitqueue_head(&adapter->ptp.phc_time_waitqueue);
+ mutex_init(&adapter->ptp.aq_cmd_lock);
+
queue_delayed_work(adapter->wq, &adapter->watchdog_task,
msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
/* Initialization goes on in the work. Do not add more of it below. */
@@ -5339,20 +5495,24 @@ static int iavf_suspend(struct device *dev_d)
{
struct net_device *netdev = dev_get_drvdata(dev_d);
struct iavf_adapter *adapter = netdev_priv(netdev);
+ bool running;
netif_device_detach(netdev);
- mutex_lock(&adapter->crit_lock);
-
- if (netif_running(netdev)) {
+ running = netif_running(netdev);
+ if (running)
rtnl_lock();
+ netdev_lock(netdev);
+
+ if (running)
iavf_down(adapter);
- rtnl_unlock();
- }
+
iavf_free_misc_irq(adapter);
iavf_reset_interrupt_capability(adapter);
- mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
+ if (running)
+ rtnl_unlock();
return 0;
}
@@ -5429,20 +5589,20 @@ static void iavf_remove(struct pci_dev *pdev)
* There are flows where register/unregister netdev may race.
*/
while (1) {
- mutex_lock(&adapter->crit_lock);
+ netdev_lock(netdev);
if (adapter->state == __IAVF_RUNNING ||
adapter->state == __IAVF_DOWN ||
adapter->state == __IAVF_INIT_FAILED) {
- mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
break;
}
/* Simply return if we already went through iavf_shutdown */
if (adapter->state == __IAVF_REMOVE) {
- mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
return;
}
- mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
usleep_range(500, 1000);
}
cancel_delayed_work_sync(&adapter->watchdog_task);
@@ -5451,7 +5611,7 @@ static void iavf_remove(struct pci_dev *pdev)
if (netdev->reg_state == NETREG_REGISTERED)
unregister_netdev(netdev);
- mutex_lock(&adapter->crit_lock);
+ netdev_lock(netdev);
dev_info(&adapter->pdev->dev, "Removing device\n");
iavf_change_state(adapter, __IAVF_REMOVE);
@@ -5463,11 +5623,15 @@ static void iavf_remove(struct pci_dev *pdev)
msleep(50);
}
+ iavf_ptp_release(adapter);
+
iavf_misc_irq_disable(adapter);
/* Shut down all the garbage mashers on the detention level */
+ netdev_unlock(netdev);
cancel_work_sync(&adapter->reset_task);
cancel_delayed_work_sync(&adapter->watchdog_task);
cancel_work_sync(&adapter->adminq_task);
+ netdev_lock(netdev);
adapter->aq_required = 0;
adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
@@ -5485,8 +5649,7 @@ static void iavf_remove(struct pci_dev *pdev)
/* destroy the locks only once, here */
mutex_destroy(&hw->aq.arq_mutex);
mutex_destroy(&hw->aq.asq_mutex);
- mutex_unlock(&adapter->crit_lock);
- mutex_destroy(&adapter->crit_lock);
+ netdev_unlock(netdev);
iounmap(hw->hw_addr);
pci_release_regions(pdev);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ptp.c b/drivers/net/ethernet/intel/iavf/iavf_ptp.c
new file mode 100644
index 000000000000..b4d5eda2e84f
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/iavf_ptp.c
@@ -0,0 +1,485 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2024 Intel Corporation. */
+
+#include "iavf.h"
+#include "iavf_ptp.h"
+
+#define iavf_clock_to_adapter(info) \
+ container_of_const(info, struct iavf_adapter, ptp.info)
+
+/**
+ * iavf_ptp_disable_rx_tstamp - Disable timestamping in Rx rings
+ * @adapter: private adapter structure
+ *
+ * Disable timestamp reporting for all Rx rings.
+ */
+static void iavf_ptp_disable_rx_tstamp(struct iavf_adapter *adapter)
+{
+ for (u32 i = 0; i < adapter->num_active_queues; i++)
+ adapter->rx_rings[i].flags &= ~IAVF_TXRX_FLAGS_HW_TSTAMP;
+}
+
+/**
+ * iavf_ptp_enable_rx_tstamp - Enable timestamping in Rx rings
+ * @adapter: private adapter structure
+ *
+ * Enable timestamp reporting for all Rx rings.
+ */
+static void iavf_ptp_enable_rx_tstamp(struct iavf_adapter *adapter)
+{
+ for (u32 i = 0; i < adapter->num_active_queues; i++)
+ adapter->rx_rings[i].flags |= IAVF_TXRX_FLAGS_HW_TSTAMP;
+}
+
+/**
+ * iavf_ptp_set_timestamp_mode - Set device timestamping mode
+ * @adapter: private adapter structure
+ * @config: pointer to kernel_hwtstamp_config
+ *
+ * Set the timestamping mode requested from the userspace.
+ *
+ * Note: this function always translates Rx timestamp requests for any packet
+ * category into HWTSTAMP_FILTER_ALL.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int iavf_ptp_set_timestamp_mode(struct iavf_adapter *adapter,
+ struct kernel_hwtstamp_config *config)
+{
+ /* Reserved for future extensions. */
+ if (config->flags)
+ return -EINVAL;
+
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_OFF:
+ break;
+ case HWTSTAMP_TX_ON:
+ return -EOPNOTSUPP;
+ default:
+ return -ERANGE;
+ }
+
+ if (config->rx_filter == HWTSTAMP_FILTER_NONE) {
+ iavf_ptp_disable_rx_tstamp(adapter);
+ return 0;
+ } else if (config->rx_filter > HWTSTAMP_FILTER_NTP_ALL) {
+ return -ERANGE;
+ } else if (!(iavf_ptp_cap_supported(adapter,
+ VIRTCHNL_1588_PTP_CAP_RX_TSTAMP))) {
+ return -EOPNOTSUPP;
+ }
+
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
+ iavf_ptp_enable_rx_tstamp(adapter);
+
+ return 0;
+}
+
+/**
+ * iavf_ptp_set_ts_config - Set timestamping configuration
+ * @adapter: private adapter structure
+ * @config: pointer to kernel_hwtstamp_config structure
+ * @extack: pointer to netlink_ext_ack structure
+ *
+ * Program the requested timestamping configuration to the device.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int iavf_ptp_set_ts_config(struct iavf_adapter *adapter,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ err = iavf_ptp_set_timestamp_mode(adapter, config);
+ if (err)
+ return err;
+
+ /* Save successful settings for future reference */
+ adapter->ptp.hwtstamp_config = *config;
+
+ return 0;
+}
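For context (editor's illustration, not part of the patch): the configuration handled here reaches the driver through the standard hardware-timestamping control path (SIOCSHWTSTAMP, wired to ndo_hwtstamp_set further below). A minimal userspace sketch, assuming an interface named "eth0" and omitting error handling:

	/* Illustrative only: request Rx hardware timestamps on an interface.
	 * The interface name is a placeholder; iavf reports back
	 * HWTSTAMP_FILTER_ALL even for narrower filter requests.
	 */
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/net_tstamp.h>
	#include <linux/sockios.h>

	static int request_rx_tstamps(const char *ifname)
	{
		struct hwtstamp_config cfg = {
			.tx_type   = HWTSTAMP_TX_OFF,	/* Tx timestamping is not supported */
			.rx_filter = HWTSTAMP_FILTER_ALL,
		};
		struct ifreq ifr = {};
		int fd, err;

		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&cfg;

		fd = socket(AF_INET, SOCK_DGRAM, 0);
		if (fd < 0)
			return -1;

		err = ioctl(fd, SIOCSHWTSTAMP, &ifr);
		close(fd);
		return err;
	}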
+
+/**
+ * iavf_ptp_cap_supported - Check if a PTP capability is supported
+ * @adapter: private adapter structure
+ * @cap: the capability bitmask to check
+ *
+ * Return: true if every capability set in cap is also set in the enabled
+ * capabilities reported by the PF, false otherwise.
+ */
+bool iavf_ptp_cap_supported(const struct iavf_adapter *adapter, u32 cap)
+{
+ if (!IAVF_PTP_ALLOWED(adapter))
+ return false;
+
+ /* Only return true if every bit in cap is set in hw_caps.caps */
+ return (adapter->ptp.hw_caps.caps & cap) == cap;
+}
+
+/**
+ * iavf_allocate_ptp_cmd - Allocate a PTP command message structure
+ * @v_opcode: the virtchnl opcode
+ * @msglen: length in bytes of the associated virtchnl structure
+ *
+ * Allocates a PTP command message and pre-fills it with the provided message
+ * length and opcode.
+ *
+ * Return: allocated PTP command, or NULL on allocation failure.
+ */
+static struct iavf_ptp_aq_cmd *iavf_allocate_ptp_cmd(enum virtchnl_ops v_opcode,
+ u16 msglen)
+{
+ struct iavf_ptp_aq_cmd *cmd;
+
+ cmd = kzalloc(struct_size(cmd, msg, msglen), GFP_KERNEL);
+ if (!cmd)
+ return NULL;
+
+ cmd->v_opcode = v_opcode;
+ cmd->msglen = msglen;
+
+ return cmd;
+}
+
+/**
+ * iavf_queue_ptp_cmd - Queue PTP command for sending over virtchnl
+ * @adapter: private adapter structure
+ * @cmd: the command structure to send
+ *
+ * Queue the given command structure into the PTP virtchnl command queue to
+ * send to the PF.
+ */
+static void iavf_queue_ptp_cmd(struct iavf_adapter *adapter,
+ struct iavf_ptp_aq_cmd *cmd)
+{
+ mutex_lock(&adapter->ptp.aq_cmd_lock);
+ list_add_tail(&cmd->list, &adapter->ptp.aq_cmds);
+ mutex_unlock(&adapter->ptp.aq_cmd_lock);
+
+ adapter->aq_required |= IAVF_FLAG_AQ_SEND_PTP_CMD;
+ mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
+}
+
+/**
+ * iavf_send_phc_read - Send request to read PHC time
+ * @adapter: private adapter structure
+ *
+ * Send a request to obtain the PTP hardware clock time. This allocates a
+ * VIRTCHNL_OP_1588_PTP_GET_TIME message and queues it for sending, in order
+ * to indirectly read the PHC time.
+ *
+ * This function does not wait for the reply from the PF.
+ *
+ * Return: 0 if success, error code otherwise.
+ */
+static int iavf_send_phc_read(struct iavf_adapter *adapter)
+{
+ struct iavf_ptp_aq_cmd *cmd;
+
+ if (!adapter->ptp.clock)
+ return -EOPNOTSUPP;
+
+ cmd = iavf_allocate_ptp_cmd(VIRTCHNL_OP_1588_PTP_GET_TIME,
+ sizeof(struct virtchnl_phc_time));
+ if (!cmd)
+ return -ENOMEM;
+
+ iavf_queue_ptp_cmd(adapter, cmd);
+
+ return 0;
+}
+
+/**
+ * iavf_read_phc_indirect - Indirectly read the PHC time via virtchnl
+ * @adapter: private adapter structure
+ * @ts: storage for the timestamp value
+ * @sts: system timestamp values before and after the read
+ *
+ * Used when the device does not have direct register access to the PHC time.
+ * Indirectly reads the time via the VIRTCHNL_OP_1588_PTP_GET_TIME command, and waits
+ * for the reply from the PF.
+ *
+ * Based on some simple measurements using ftrace and phc2sys, this clock
+ * access method has a latency of roughly 110 usec even when the system is not
+ * under load. In order to achieve acceptable results when using phc2sys with
+ * the indirect clock access method, it is recommended to use more
+ * conservative proportional and integration constants with the P/I servo.
+ *
+ * Return: 0 if success, error code otherwise.
+ */
+static int iavf_read_phc_indirect(struct iavf_adapter *adapter,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ long ret;
+ int err;
+
+ adapter->ptp.phc_time_ready = false;
+
+ ptp_read_system_prets(sts);
+
+ err = iavf_send_phc_read(adapter);
+ if (err)
+ return err;
+
+ ret = wait_event_interruptible_timeout(adapter->ptp.phc_time_waitqueue,
+ adapter->ptp.phc_time_ready,
+ HZ);
+
+ ptp_read_system_postts(sts);
+
+ if (ret < 0)
+ return ret;
+ else if (!ret)
+ return -EBUSY;
+
+ *ts = ns_to_timespec64(adapter->ptp.cached_phc_time);
+
+ return 0;
+}
+
+static int iavf_ptp_gettimex64(struct ptp_clock_info *info,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct iavf_adapter *adapter = iavf_clock_to_adapter(info);
+
+ if (!adapter->ptp.clock)
+ return -EOPNOTSUPP;
+
+ return iavf_read_phc_indirect(adapter, ts, sts);
+}
+
+/**
+ * iavf_ptp_cache_phc_time - Cache PHC time for performing timestamp extension
+ * @adapter: private adapter structure
+ *
+ * Periodically cache the PHC time in order to allow for timestamp extension.
+ * This is required because the Tx and Rx timestamps only contain 32bits of
+ * nanoseconds. Timestamp extension allows calculating the corrected 64bit
+ * timestamp. This algorithm relies on the cached time being within ~1 second
+ * of the timestamp.
+ */
+static void iavf_ptp_cache_phc_time(struct iavf_adapter *adapter)
+{
+ if (!time_is_before_jiffies(adapter->ptp.cached_phc_updated + HZ))
+ return;
+
+ /* The response from virtchnl will store the time into
+ * cached_phc_time.
+ */
+ iavf_send_phc_read(adapter);
+}
+
+/**
+ * iavf_ptp_do_aux_work - Perform periodic work required for PTP support
+ * @info: PTP clock info structure
+ *
+ * Handler to take care of periodic work required for PTP operation. This
+ * includes the following tasks:
+ *
+ * 1) updating cached_phc_time
+ *
+ * cached_phc_time is used by the Tx and Rx timestamp flows in order to
+ * perform timestamp extension, by carefully comparing the timestamp
+ * 32bit nanosecond timestamps and determining the corrected 64bit
+ * timestamp value to report to userspace. This algorithm only works if
+ * the cached_phc_time is within ~1 second of the Tx or Rx timestamp
+ * event. This task periodically reads the PHC time and stores it, to
+ * ensure that timestamp extension operates correctly.
+ *
+ * Return: time in jiffies until the periodic task should be re-scheduled.
+ */
+static long iavf_ptp_do_aux_work(struct ptp_clock_info *info)
+{
+ struct iavf_adapter *adapter = iavf_clock_to_adapter(info);
+
+ iavf_ptp_cache_phc_time(adapter);
+
+ /* Check work about twice a second */
+ return msecs_to_jiffies(500);
+}
+
+/**
+ * iavf_ptp_register_clock - Register a new PTP clock for userspace
+ * @adapter: private adapter structure
+ *
+ * Allocate and register a new PTP clock device if necessary.
+ *
+ * Return: 0 if success, error otherwise.
+ */
+static int iavf_ptp_register_clock(struct iavf_adapter *adapter)
+{
+ struct ptp_clock_info *ptp_info = &adapter->ptp.info;
+ struct device *dev = &adapter->pdev->dev;
+ struct ptp_clock *clock;
+
+ snprintf(ptp_info->name, sizeof(ptp_info->name), "%s-%s-clk",
+ KBUILD_MODNAME, dev_name(dev));
+ ptp_info->owner = THIS_MODULE;
+ ptp_info->gettimex64 = iavf_ptp_gettimex64;
+ ptp_info->do_aux_work = iavf_ptp_do_aux_work;
+
+ clock = ptp_clock_register(ptp_info, dev);
+ if (IS_ERR(clock))
+ return PTR_ERR(clock);
+
+ adapter->ptp.clock = clock;
+
+ dev_dbg(&adapter->pdev->dev, "PTP clock %s registered\n",
+ adapter->ptp.info.name);
+
+ return 0;
+}
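For context (editor's illustration, not part of the patch): the registered clock appears to userspace as a /dev/ptpN character device, and reads through the dynamic POSIX clock interface land in iavf_ptp_gettimex64() above. A hedged sketch, assuming the device node is /dev/ptp0:

	/* Illustrative only: read the PHC via the dynamic POSIX clock API.
	 * "/dev/ptp0" is an assumed node; on a real system look it up with
	 * ethtool -T <iface> (PTP Hardware Clock index).
	 */
	#include <stdio.h>
	#include <fcntl.h>
	#include <time.h>
	#include <unistd.h>

	#define CLOCKFD			3
	#define FD_TO_CLOCKID(fd)	((~(clockid_t)(fd) << 3) | CLOCKFD)

	int main(void)
	{
		struct timespec ts;
		int fd = open("/dev/ptp0", O_RDONLY);

		if (fd < 0)
			return 1;

		if (!clock_gettime(FD_TO_CLOCKID(fd), &ts))
			printf("PHC time: %lld.%09ld\n",
			       (long long)ts.tv_sec, ts.tv_nsec);

		close(fd);
		return 0;
	}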
+
+/**
+ * iavf_ptp_init - Initialize PTP support if capability was negotiated
+ * @adapter: private adapter structure
+ *
+ * Initialize PTP functionality, based on the capabilities that the PF has
+ * enabled for this VF.
+ */
+void iavf_ptp_init(struct iavf_adapter *adapter)
+{
+ int err;
+
+ if (!iavf_ptp_cap_supported(adapter, VIRTCHNL_1588_PTP_CAP_READ_PHC)) {
+ pci_notice(adapter->pdev,
+ "Device does not have PTP clock support\n");
+ return;
+ }
+
+ err = iavf_ptp_register_clock(adapter);
+ if (err) {
+ pci_err(adapter->pdev,
+ "Failed to register PTP clock device (%p)\n",
+ ERR_PTR(err));
+ return;
+ }
+
+ for (int i = 0; i < adapter->num_active_queues; i++) {
+ struct iavf_ring *rx_ring = &adapter->rx_rings[i];
+
+ rx_ring->ptp = &adapter->ptp;
+ }
+
+ ptp_schedule_worker(adapter->ptp.clock, 0);
+}
+
+/**
+ * iavf_ptp_release - Disable PTP support
+ * @adapter: private adapter structure
+ *
+ * Release all PTP resources that were previously initialized.
+ */
+void iavf_ptp_release(struct iavf_adapter *adapter)
+{
+ struct iavf_ptp_aq_cmd *cmd, *tmp;
+
+ if (!adapter->ptp.clock)
+ return;
+
+ pci_dbg(adapter->pdev, "removing PTP clock %s\n",
+ adapter->ptp.info.name);
+ ptp_clock_unregister(adapter->ptp.clock);
+ adapter->ptp.clock = NULL;
+
+ /* Cancel any remaining uncompleted PTP clock commands */
+ mutex_lock(&adapter->ptp.aq_cmd_lock);
+ list_for_each_entry_safe(cmd, tmp, &adapter->ptp.aq_cmds, list) {
+ list_del(&cmd->list);
+ kfree(cmd);
+ }
+ adapter->aq_required &= ~IAVF_FLAG_AQ_SEND_PTP_CMD;
+ mutex_unlock(&adapter->ptp.aq_cmd_lock);
+
+ adapter->ptp.hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+ iavf_ptp_disable_rx_tstamp(adapter);
+}
+
+/**
+ * iavf_ptp_process_caps - Handle change in PTP capabilities
+ * @adapter: private adapter structure
+ *
+ * Handle any state changes necessary due to change in PTP capabilities, such
+ * as after a device reset or change in configuration from the PF.
+ */
+void iavf_ptp_process_caps(struct iavf_adapter *adapter)
+{
+ bool phc = iavf_ptp_cap_supported(adapter, VIRTCHNL_1588_PTP_CAP_READ_PHC);
+
+ /* Check if the device gained or lost necessary access to support the
+ * PTP hardware clock. If so, driver must respond appropriately by
+ * creating or destroying the PTP clock device.
+ */
+ if (adapter->ptp.clock && !phc)
+ iavf_ptp_release(adapter);
+ else if (!adapter->ptp.clock && phc)
+ iavf_ptp_init(adapter);
+
+ /* Check if the device lost access to Rx timestamp incoming packets */
+ if (!iavf_ptp_cap_supported(adapter, VIRTCHNL_1588_PTP_CAP_RX_TSTAMP)) {
+ adapter->ptp.hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+ iavf_ptp_disable_rx_tstamp(adapter);
+ }
+}
+
+/**
+ * iavf_ptp_extend_32b_timestamp - Convert a 32b nanoseconds timestamp to 64b
+ * nanoseconds
+ * @cached_phc_time: recently cached copy of PHC time
+ * @in_tstamp: Ingress/egress 32b nanoseconds timestamp value
+ *
+ * Hardware captures timestamps which contain only 32 bits of nominal
+ * nanoseconds, as opposed to the 64bit timestamps that the stack expects.
+ *
+ * Extend the 32bit nanosecond timestamp using the following algorithm and
+ * assumptions:
+ *
+ * 1) have a recently cached copy of the PHC time
+ * 2) assume that the in_tstamp was captured 2^31 nanoseconds (~2.1
+ * seconds) before or after the PHC time was captured.
+ * 3) calculate the delta between the cached time and the timestamp
+ * 4) if the delta is smaller than 2^31 nanoseconds, then the timestamp was
+ * captured after the PHC time. In this case, the full timestamp is just
+ * the cached PHC time plus the delta.
+ * 5) otherwise, if the delta is larger than 2^31 nanoseconds, then the
+ * timestamp was captured *before* the PHC time, i.e. because the PHC
+ * cache was updated after the timestamp was captured by hardware. In this
+ * case, the full timestamp is the cached time minus the inverse delta.
+ *
+ * This algorithm works even if the PHC time was updated after a Tx timestamp
+ * was requested, but before the Tx timestamp event was reported from
+ * hardware.
+ *
+ * This calculation primarily relies on keeping the cached PHC time up to
+ * date. If the timestamp was captured more than 2^31 nanoseconds after the
+ * PHC time, it is possible that the lower 32bits of PHC time have
+ * overflowed more than once, and we might generate an incorrect timestamp.
+ *
+ * This is prevented by (a) periodically updating the cached PHC time once
+ * a second, and (b) discarding any Tx timestamp packet if it has waited for
+ * a timestamp for more than one second.
+ *
+ * Return: extended timestamp (to 64b).
+ */
+u64 iavf_ptp_extend_32b_timestamp(u64 cached_phc_time, u32 in_tstamp)
+{
+ u32 low = lower_32_bits(cached_phc_time);
+ u32 delta = in_tstamp - low;
+ u64 ns;
+
+ /* Do not assume that the in_tstamp is always more recent than the
+ * cached PHC time. If the delta is large, it indicates that the
+ * in_tstamp was taken in the past, and should be converted
+ * forward.
+ */
+ if (delta > S32_MAX)
+ ns = cached_phc_time - (low - in_tstamp);
+ else
+ ns = cached_phc_time + delta;
+
+ return ns;
+}
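For context (editor's illustration, not part of the patch): a small self-contained sketch of the same extension arithmetic with made-up values, exercising both the forward case and the wraparound case described above:

	/* Illustrative only: mirrors iavf_ptp_extend_32b_timestamp() with
	 * arbitrary example values; not part of the driver.
	 */
	#include <stdint.h>
	#include <assert.h>

	static uint64_t extend_32b(uint64_t cached, uint32_t tstamp)
	{
		uint32_t low = (uint32_t)cached;
		uint32_t delta = tstamp - low;

		return delta > INT32_MAX ? cached - (uint32_t)(low - tstamp)
					 : cached + delta;
	}

	int main(void)
	{
		uint64_t cached = 0x0500000010000000ULL;	/* made-up PHC snapshot */

		/* captured just after the snapshot: small delta, added forward */
		assert(extend_32b(cached, 0x10000100) == cached + 0x100);

		/* captured just before the snapshot: large delta, walked back */
		assert(extend_32b(cached, 0x0FFFFF00) == cached - 0x100);

		return 0;
	}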
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ptp.h b/drivers/net/ethernet/intel/iavf/iavf_ptp.h
new file mode 100644
index 000000000000..783b8f287cd9
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/iavf_ptp.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2024 Intel Corporation. */
+
+#ifndef _IAVF_PTP_H_
+#define _IAVF_PTP_H_
+
+#include "iavf_types.h"
+
+/* bit indicating whether a 40bit timestamp is valid */
+#define IAVF_PTP_40B_TSTAMP_VALID BIT(24)
+
+#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
+void iavf_ptp_init(struct iavf_adapter *adapter);
+void iavf_ptp_release(struct iavf_adapter *adapter);
+void iavf_ptp_process_caps(struct iavf_adapter *adapter);
+bool iavf_ptp_cap_supported(const struct iavf_adapter *adapter, u32 cap);
+void iavf_virtchnl_send_ptp_cmd(struct iavf_adapter *adapter);
+int iavf_ptp_set_ts_config(struct iavf_adapter *adapter,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
+u64 iavf_ptp_extend_32b_timestamp(u64 cached_phc_time, u32 in_tstamp);
+#else /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
+static inline void iavf_ptp_init(struct iavf_adapter *adapter) { }
+static inline void iavf_ptp_release(struct iavf_adapter *adapter) { }
+static inline void iavf_ptp_process_caps(struct iavf_adapter *adapter) { }
+static inline bool iavf_ptp_cap_supported(const struct iavf_adapter *adapter,
+ u32 cap)
+{
+ return false;
+}
+
+static inline void iavf_virtchnl_send_ptp_cmd(struct iavf_adapter *adapter) { }
+static inline int iavf_ptp_set_ts_config(struct iavf_adapter *adapter,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ return -1;
+}
+
+static inline u64 iavf_ptp_extend_32b_timestamp(u64 cached_phc_time,
+ u32 in_tstamp)
+{
+ return 0;
+}
+
+#endif /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
+#endif /* _IAVF_PTP_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_trace.h b/drivers/net/ethernet/intel/iavf/iavf_trace.h
index 62212011c807..c5e4d1823886 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_trace.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_trace.h
@@ -112,7 +112,7 @@ DECLARE_EVENT_CLASS(
iavf_rx_template,
TP_PROTO(struct iavf_ring *ring,
- union iavf_32byte_rx_desc *desc,
+ struct iavf_rx_desc *desc,
struct sk_buff *skb),
TP_ARGS(ring, desc, skb),
@@ -140,7 +140,7 @@ DECLARE_EVENT_CLASS(
DEFINE_EVENT(
iavf_rx_template, iavf_clean_rx_irq,
TP_PROTO(struct iavf_ring *ring,
- union iavf_32byte_rx_desc *desc,
+ struct iavf_rx_desc *desc,
struct sk_buff *skb),
TP_ARGS(ring, desc, skb));
@@ -148,7 +148,7 @@ DEFINE_EVENT(
DEFINE_EVENT(
iavf_rx_template, iavf_clean_rx_irq_rx,
TP_PROTO(struct iavf_ring *ring,
- union iavf_32byte_rx_desc *desc,
+ struct iavf_rx_desc *desc,
struct sk_buff *skb),
TP_ARGS(ring, desc, skb));
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index 26b424fd6718..422312b8b54a 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -8,6 +8,26 @@
#include "iavf.h"
#include "iavf_trace.h"
#include "iavf_prototype.h"
+#include "iavf_ptp.h"
+
+/**
+ * iavf_is_descriptor_done - tests DD bit in Rx descriptor
+ * @qw1: quad word 1 from descriptor to get Descriptor Done field from
+ * @flex: is the descriptor flex or legacy
+ *
+ * This function tests the descriptor done bit in the specified descriptor.
+ * Because there are two types of descriptors (legacy and flex), the @flex
+ * parameter is used to distinguish between them.
+ *
+ * Return: true or false based on the state of the DD bit in the Rx descriptor.
+ */
+static bool iavf_is_descriptor_done(u64 qw1, bool flex)
+{
+ if (flex)
+ return FIELD_GET(IAVF_RXD_FLEX_DD_M, qw1);
+ else
+ return FIELD_GET(IAVF_RXD_LEGACY_DD_M, qw1);
+}
static __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
u32 td_tag)
@@ -766,7 +786,7 @@ int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring)
u64_stats_init(&rx_ring->syncp);
/* Round up to nearest 4K */
- rx_ring->size = rx_ring->count * sizeof(union iavf_32byte_rx_desc);
+ rx_ring->size = rx_ring->count * sizeof(struct iavf_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096);
rx_ring->desc = dma_alloc_coherent(fq.pp->p.dev, rx_ring->size,
&rx_ring->dma, GFP_KERNEL);
@@ -845,7 +865,7 @@ bool iavf_alloc_rx_buffers(struct iavf_ring *rx_ring, u16 cleaned_count)
.count = rx_ring->count,
};
u16 ntu = rx_ring->next_to_use;
- union iavf_rx_desc *rx_desc;
+ struct iavf_rx_desc *rx_desc;
/* do nothing if no valid netdev defined */
if (!rx_ring->netdev || !cleaned_count)
@@ -863,7 +883,7 @@ bool iavf_alloc_rx_buffers(struct iavf_ring *rx_ring, u16 cleaned_count)
/* Refresh the desc even if buffer_addrs didn't change
* because each write-back erases this info.
*/
- rx_desc->read.pkt_addr = cpu_to_le64(addr);
+ rx_desc->qw0 = cpu_to_le64(addr);
rx_desc++;
ntu++;
@@ -873,7 +893,7 @@ bool iavf_alloc_rx_buffers(struct iavf_ring *rx_ring, u16 cleaned_count)
}
/* clear the status bits for the next_to_use descriptor */
- rx_desc->wb.qword1.status_error_len = 0;
+ rx_desc->qw1 = 0;
cleaned_count--;
} while (cleaned_count);
@@ -896,60 +916,43 @@ no_buffers:
}
/**
- * iavf_rx_checksum - Indicate in skb if hw indicated a good cksum
+ * iavf_rx_csum - Indicate in skb if hw indicated a good checksum
* @vsi: the VSI we care about
* @skb: skb currently being received and modified
- * @rx_desc: the receive descriptor
+ * @decoded_pt: decoded ptype information
+ * @csum_bits: decoded Rx descriptor information
**/
-static void iavf_rx_checksum(struct iavf_vsi *vsi,
- struct sk_buff *skb,
- union iavf_rx_desc *rx_desc)
+static void iavf_rx_csum(const struct iavf_vsi *vsi, struct sk_buff *skb,
+ struct libeth_rx_pt decoded_pt,
+ struct libeth_rx_csum csum_bits)
{
- struct libeth_rx_pt decoded;
- u32 rx_error, rx_status;
bool ipv4, ipv6;
- u8 ptype;
- u64 qword;
skb->ip_summed = CHECKSUM_NONE;
- qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- ptype = FIELD_GET(IAVF_RXD_QW1_PTYPE_MASK, qword);
-
- decoded = libie_rx_pt_parse(ptype);
- if (!libeth_rx_pt_has_checksum(vsi->netdev, decoded))
- return;
-
- rx_error = FIELD_GET(IAVF_RXD_QW1_ERROR_MASK, qword);
- rx_status = FIELD_GET(IAVF_RXD_QW1_STATUS_MASK, qword);
-
/* did the hardware decode the packet and checksum? */
- if (!(rx_status & BIT(IAVF_RX_DESC_STATUS_L3L4P_SHIFT)))
+ if (unlikely(!csum_bits.l3l4p))
return;
- ipv4 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV4;
- ipv6 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV6;
+ ipv4 = libeth_rx_pt_get_ip_ver(decoded_pt) == LIBETH_RX_PT_OUTER_IPV4;
+ ipv6 = libeth_rx_pt_get_ip_ver(decoded_pt) == LIBETH_RX_PT_OUTER_IPV6;
- if (ipv4 &&
- (rx_error & (BIT(IAVF_RX_DESC_ERROR_IPE_SHIFT) |
- BIT(IAVF_RX_DESC_ERROR_EIPE_SHIFT))))
+ if (unlikely(ipv4 && (csum_bits.ipe || csum_bits.eipe)))
goto checksum_fail;
/* likely incorrect csum if alternate IP extension headers found */
- if (ipv6 &&
- rx_status & BIT(IAVF_RX_DESC_STATUS_IPV6EXADD_SHIFT))
- /* don't increment checksum err here, non-fatal err */
+ if (unlikely(ipv6 && csum_bits.ipv6exadd))
return;
/* there was some L4 error, count error and punt packet to the stack */
- if (rx_error & BIT(IAVF_RX_DESC_ERROR_L4E_SHIFT))
+ if (unlikely(csum_bits.l4e))
goto checksum_fail;
/* handle packets that were not able to be checksummed due
* to arrival speed, in this case the stack can compute
* the csum.
*/
- if (rx_error & BIT(IAVF_RX_DESC_ERROR_PPRS_SHIFT))
+ if (unlikely(csum_bits.pprs))
return;
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -960,52 +963,196 @@ checksum_fail:
}
/**
- * iavf_rx_hash - set the hash value in the skb
+ * iavf_legacy_rx_csum - Indicate in skb if hw indicated a good checksum
+ * @vsi: the VSI we care about
+ * @qw1: quad word 1
+ * @decoded_pt: decoded packet type
+ *
+ * This function only operates on the VIRTCHNL_RXDID_1_32B_BASE legacy 32byte
+ * descriptor writeback format.
+ *
+ * Return: decoded checksum bits.
+ **/
+static struct libeth_rx_csum
+iavf_legacy_rx_csum(const struct iavf_vsi *vsi, u64 qw1,
+ const struct libeth_rx_pt decoded_pt)
+{
+ struct libeth_rx_csum csum_bits = {};
+
+ if (!libeth_rx_pt_has_checksum(vsi->netdev, decoded_pt))
+ return csum_bits;
+
+ csum_bits.ipe = FIELD_GET(IAVF_RXD_LEGACY_IPE_M, qw1);
+ csum_bits.eipe = FIELD_GET(IAVF_RXD_LEGACY_EIPE_M, qw1);
+ csum_bits.l4e = FIELD_GET(IAVF_RXD_LEGACY_L4E_M, qw1);
+ csum_bits.pprs = FIELD_GET(IAVF_RXD_LEGACY_PPRS_M, qw1);
+ csum_bits.l3l4p = FIELD_GET(IAVF_RXD_LEGACY_L3L4P_M, qw1);
+ csum_bits.ipv6exadd = FIELD_GET(IAVF_RXD_LEGACY_IPV6EXADD_M, qw1);
+
+ return csum_bits;
+}
+
+/**
+ * iavf_flex_rx_csum - Indicate in skb if hw indicated a good checksum
+ * @vsi: the VSI we care about
+ * @qw1: quad word 1
+ * @decoded_pt: decoded packet type
+ *
+ * This function only operates on the VIRTCHNL_RXDID_2_FLEX_SQ_NIC flexible
+ * descriptor writeback format.
+ *
+ * Return: decoded checksum bits.
+ **/
+static struct libeth_rx_csum
+iavf_flex_rx_csum(const struct iavf_vsi *vsi, u64 qw1,
+ const struct libeth_rx_pt decoded_pt)
+{
+ struct libeth_rx_csum csum_bits = {};
+
+ if (!libeth_rx_pt_has_checksum(vsi->netdev, decoded_pt))
+ return csum_bits;
+
+ csum_bits.ipe = FIELD_GET(IAVF_RXD_FLEX_XSUM_IPE_M, qw1);
+ csum_bits.eipe = FIELD_GET(IAVF_RXD_FLEX_XSUM_EIPE_M, qw1);
+ csum_bits.l4e = FIELD_GET(IAVF_RXD_FLEX_XSUM_L4E_M, qw1);
+ csum_bits.eudpe = FIELD_GET(IAVF_RXD_FLEX_XSUM_EUDPE_M, qw1);
+ csum_bits.l3l4p = FIELD_GET(IAVF_RXD_FLEX_L3L4P_M, qw1);
+ csum_bits.ipv6exadd = FIELD_GET(IAVF_RXD_FLEX_IPV6EXADD_M, qw1);
+ csum_bits.nat = FIELD_GET(IAVF_RXD_FLEX_NAT_M, qw1);
+
+ return csum_bits;
+}
+
+/**
+ * iavf_legacy_rx_hash - set the hash value in the skb
+ * @ring: descriptor ring
+ * @qw0: quad word 0
+ * @qw1: quad word 1
+ * @skb: skb currently being received and modified
+ * @decoded_pt: decoded packet type
+ *
+ * This function only operates on the VIRTCHNL_RXDID_1_32B_BASE legacy 32byte
+ * descriptor writeback format.
+ **/
+static void iavf_legacy_rx_hash(const struct iavf_ring *ring, __le64 qw0,
+ __le64 qw1, struct sk_buff *skb,
+ const struct libeth_rx_pt decoded_pt)
+{
+ const __le64 rss_mask = cpu_to_le64(IAVF_RXD_LEGACY_FLTSTAT_M);
+ u32 hash;
+
+ if (!libeth_rx_pt_has_hash(ring->netdev, decoded_pt))
+ return;
+
+ if ((qw1 & rss_mask) == rss_mask) {
+ hash = le64_get_bits(qw0, IAVF_RXD_LEGACY_RSS_M);
+ libeth_rx_pt_set_hash(skb, hash, decoded_pt);
+ }
+}
+
+/**
+ * iavf_flex_rx_hash - set the hash value in the skb
* @ring: descriptor ring
- * @rx_desc: specific descriptor
+ * @qw1: quad word 1
* @skb: skb currently being received and modified
- * @rx_ptype: Rx packet type
+ * @decoded_pt: decoded packet type
+ *
+ * This function only operates on the VIRTCHNL_RXDID_2_FLEX_SQ_NIC flexible
+ * descriptor writeback format.
**/
-static void iavf_rx_hash(struct iavf_ring *ring,
- union iavf_rx_desc *rx_desc,
- struct sk_buff *skb,
- u8 rx_ptype)
+static void iavf_flex_rx_hash(const struct iavf_ring *ring, __le64 qw1,
+ struct sk_buff *skb,
+ const struct libeth_rx_pt decoded_pt)
{
- struct libeth_rx_pt decoded;
+ bool rss_valid;
u32 hash;
- const __le64 rss_mask =
- cpu_to_le64((u64)IAVF_RX_DESC_FLTSTAT_RSS_HASH <<
- IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT);
- decoded = libie_rx_pt_parse(rx_ptype);
- if (!libeth_rx_pt_has_hash(ring->netdev, decoded))
+ if (!libeth_rx_pt_has_hash(ring->netdev, decoded_pt))
return;
- if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
- hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
- libeth_rx_pt_set_hash(skb, hash, decoded);
+ rss_valid = le64_get_bits(qw1, IAVF_RXD_FLEX_RSS_VALID_M);
+ if (rss_valid) {
+ hash = le64_get_bits(qw1, IAVF_RXD_FLEX_RSS_HASH_M);
+ libeth_rx_pt_set_hash(skb, hash, decoded_pt);
}
}
/**
+ * iavf_flex_rx_tstamp - Capture Rx timestamp from the descriptor
+ * @rx_ring: descriptor ring
+ * @qw2: quad word 2 of descriptor
+ * @qw3: quad word 3 of descriptor
+ * @skb: skb currently being received
+ *
+ * Read the Rx timestamp value from the descriptor and pass it to the stack.
+ *
+ * This function only operates on the VIRTCHNL_RXDID_2_FLEX_SQ_NIC flexible
+ * descriptor writeback format.
+ */
+static void iavf_flex_rx_tstamp(const struct iavf_ring *rx_ring, __le64 qw2,
+ __le64 qw3, struct sk_buff *skb)
+{
+ u32 tstamp;
+ u64 ns;
+
+ /* Skip processing if timestamps aren't enabled */
+ if (!(rx_ring->flags & IAVF_TXRX_FLAGS_HW_TSTAMP))
+ return;
+
+ /* Check if this Rx descriptor has a valid timestamp */
+ if (!le64_get_bits(qw2, IAVF_PTP_40B_TSTAMP_VALID))
+ return;
+
+ /* the ts_low field only contains the valid bit and sub-nanosecond
+ * precision, so we don't need to extract it.
+ */
+ tstamp = le64_get_bits(qw3, IAVF_RXD_FLEX_QW3_TSTAMP_HIGH_M);
+
+ ns = iavf_ptp_extend_32b_timestamp(rx_ring->ptp->cached_phc_time,
+ tstamp);
+
+ *skb_hwtstamps(skb) = (struct skb_shared_hwtstamps) {
+ .hwtstamp = ns_to_ktime(ns),
+ };
+}
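For context (editor's illustration, not part of the patch): the value stored in skb_hwtstamps() here is what applications see via the SO_TIMESTAMPING control-message path. A hedged sketch, assuming an already-bound socket and omitting error handling:

	/* Illustrative only: read the hardware Rx timestamp delivered by the
	 * stack for a received packet.
	 */
	#include <stdio.h>
	#include <time.h>
	#include <sys/socket.h>
	#include <sys/uio.h>
	#include <linux/net_tstamp.h>
	#include <linux/errqueue.h>

	#ifndef SCM_TIMESTAMPING
	#define SCM_TIMESTAMPING SO_TIMESTAMPING
	#endif

	static void rx_with_hw_tstamp(int sock)
	{
		int flags = SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE;
		char data[2048], ctrl[512];
		struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
		struct msghdr msg = {
			.msg_iov = &iov, .msg_iovlen = 1,
			.msg_control = ctrl, .msg_controllen = sizeof(ctrl),
		};
		struct cmsghdr *cmsg;

		setsockopt(sock, SOL_SOCKET, SO_TIMESTAMPING, &flags, sizeof(flags));

		if (recvmsg(sock, &msg, 0) < 0)
			return;

		for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
			if (cmsg->cmsg_level == SOL_SOCKET &&
			    cmsg->cmsg_type == SCM_TIMESTAMPING) {
				/* ts[2] is the raw hardware timestamp */
				struct scm_timestamping *tss = (void *)CMSG_DATA(cmsg);

				printf("hw rx tstamp: %lld.%09ld\n",
				       (long long)tss->ts[2].tv_sec,
				       tss->ts[2].tv_nsec);
			}
		}
	}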
+
+/**
* iavf_process_skb_fields - Populate skb header fields from Rx descriptor
* @rx_ring: rx descriptor ring packet is being transacted on
* @rx_desc: pointer to the EOP Rx descriptor
* @skb: pointer to current skb being populated
- * @rx_ptype: the packet type decoded by hardware
+ * @ptype: the packet type decoded by hardware
+ * @flex: is the descriptor flex or legacy
*
* This function checks the ring, descriptor, and packet information in
* order to populate the hash, checksum, VLAN, protocol, and
* other fields within the skb.
**/
-static void
-iavf_process_skb_fields(struct iavf_ring *rx_ring,
- union iavf_rx_desc *rx_desc, struct sk_buff *skb,
- u8 rx_ptype)
+static void iavf_process_skb_fields(const struct iavf_ring *rx_ring,
+ const struct iavf_rx_desc *rx_desc,
+ struct sk_buff *skb, u32 ptype,
+ bool flex)
{
- iavf_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
-
- iavf_rx_checksum(rx_ring->vsi, skb, rx_desc);
+ struct libeth_rx_csum csum_bits;
+ struct libeth_rx_pt decoded_pt;
+ __le64 qw0 = rx_desc->qw0;
+ __le64 qw1 = rx_desc->qw1;
+ __le64 qw2 = rx_desc->qw2;
+ __le64 qw3 = rx_desc->qw3;
+
+ decoded_pt = libie_rx_pt_parse(ptype);
+
+ if (flex) {
+ iavf_flex_rx_hash(rx_ring, qw1, skb, decoded_pt);
+ iavf_flex_rx_tstamp(rx_ring, qw2, qw3, skb);
+ csum_bits = iavf_flex_rx_csum(rx_ring->vsi, le64_to_cpu(qw1),
+ decoded_pt);
+ } else {
+ iavf_legacy_rx_hash(rx_ring, qw0, qw1, skb, decoded_pt);
+ csum_bits = iavf_legacy_rx_csum(rx_ring->vsi, le64_to_cpu(qw1),
+ decoded_pt);
+ }
+ iavf_rx_csum(rx_ring->vsi, skb, decoded_pt, csum_bits);
skb_record_rx_queue(skb, rx_ring->queue_index);
@@ -1092,8 +1239,7 @@ static struct sk_buff *iavf_build_skb(const struct libeth_fqe *rx_buffer,
/**
* iavf_is_non_eop - process handling of non-EOP buffers
* @rx_ring: Rx ring being processed
- * @rx_desc: Rx descriptor for current buffer
- * @skb: Current socket buffer containing buffer in progress
+ * @fields: Rx descriptor extracted fields
*
* This function updates next to clean. If the buffer is an EOP buffer
* this function exits returning false, otherwise it will place the
@@ -1101,8 +1247,7 @@ static struct sk_buff *iavf_build_skb(const struct libeth_fqe *rx_buffer,
* that this is in fact a non-EOP buffer.
**/
static bool iavf_is_non_eop(struct iavf_ring *rx_ring,
- union iavf_rx_desc *rx_desc,
- struct sk_buff *skb)
+ struct libeth_rqe_info fields)
{
u32 ntc = rx_ring->next_to_clean + 1;
@@ -1113,8 +1258,7 @@ static bool iavf_is_non_eop(struct iavf_ring *rx_ring,
prefetch(IAVF_RX_DESC(rx_ring, ntc));
/* if we are the last buffer then there is nothing else to do */
-#define IAVF_RXD_EOF BIT(IAVF_RX_DESC_STATUS_EOF_SHIFT)
- if (likely(iavf_test_staterr(rx_desc, IAVF_RXD_EOF)))
+ if (likely(fields.eop))
return false;
rx_ring->rx_stats.non_eop_descs++;
@@ -1123,6 +1267,109 @@ static bool iavf_is_non_eop(struct iavf_ring *rx_ring,
}
/**
+ * iavf_extract_legacy_rx_fields - Extract fields from the Rx descriptor
+ * @rx_ring: rx descriptor ring
+ * @rx_desc: the descriptor to process
+ *
+ * Decode the Rx descriptor and extract relevant information including the
+ * size, VLAN tag, Rx packet type, end of packet field and RXE field value.
+ *
+ * This function only operates on the VIRTCHNL_RXDID_1_32B_BASE legacy 32byte
+ * descriptor writeback format.
+ *
+ * Return: fields extracted from the Rx descriptor.
+ */
+static struct libeth_rqe_info
+iavf_extract_legacy_rx_fields(const struct iavf_ring *rx_ring,
+ const struct iavf_rx_desc *rx_desc)
+{
+ u64 qw0 = le64_to_cpu(rx_desc->qw0);
+ u64 qw1 = le64_to_cpu(rx_desc->qw1);
+ u64 qw2 = le64_to_cpu(rx_desc->qw2);
+ struct libeth_rqe_info fields;
+ bool l2tag1p, l2tag2p;
+
+ fields.eop = FIELD_GET(IAVF_RXD_LEGACY_EOP_M, qw1);
+ fields.len = FIELD_GET(IAVF_RXD_LEGACY_LENGTH_M, qw1);
+
+ if (!fields.eop)
+ return fields;
+
+ fields.rxe = FIELD_GET(IAVF_RXD_LEGACY_RXE_M, qw1);
+ fields.ptype = FIELD_GET(IAVF_RXD_LEGACY_PTYPE_M, qw1);
+ fields.vlan = 0;
+
+ if (rx_ring->flags & IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1) {
+ l2tag1p = FIELD_GET(IAVF_RXD_LEGACY_L2TAG1P_M, qw1);
+ if (l2tag1p)
+ fields.vlan = FIELD_GET(IAVF_RXD_LEGACY_L2TAG1_M, qw0);
+ } else if (rx_ring->flags & IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2) {
+ l2tag2p = FIELD_GET(IAVF_RXD_LEGACY_L2TAG2P_M, qw2);
+ if (l2tag2p)
+ fields.vlan = FIELD_GET(IAVF_RXD_LEGACY_L2TAG2_M, qw2);
+ }
+
+ return fields;
+}
+
+/**
+ * iavf_extract_flex_rx_fields - Extract fields from the Rx descriptor
+ * @rx_ring: rx descriptor ring
+ * @rx_desc: the descriptor to process
+ *
+ * Decode the Rx descriptor and extract relevant information including the
+ * size, VLAN tag, Rx packet type, end of packet field and RXE field value.
+ *
+ * This function only operates on the VIRTCHNL_RXDID_2_FLEX_SQ_NIC flexible
+ * descriptor writeback format.
+ *
+ * Return: fields extracted from the Rx descriptor.
+ */
+static struct libeth_rqe_info
+iavf_extract_flex_rx_fields(const struct iavf_ring *rx_ring,
+ const struct iavf_rx_desc *rx_desc)
+{
+ struct libeth_rqe_info fields = {};
+ u64 qw0 = le64_to_cpu(rx_desc->qw0);
+ u64 qw1 = le64_to_cpu(rx_desc->qw1);
+ u64 qw2 = le64_to_cpu(rx_desc->qw2);
+ bool l2tag1p, l2tag2p;
+
+ fields.eop = FIELD_GET(IAVF_RXD_FLEX_EOP_M, qw1);
+ fields.len = FIELD_GET(IAVF_RXD_FLEX_PKT_LEN_M, qw0);
+
+ if (!fields.eop)
+ return fields;
+
+ fields.rxe = FIELD_GET(IAVF_RXD_FLEX_RXE_M, qw1);
+ fields.ptype = FIELD_GET(IAVF_RXD_FLEX_PTYPE_M, qw0);
+ fields.vlan = 0;
+
+ if (rx_ring->flags & IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1) {
+ l2tag1p = FIELD_GET(IAVF_RXD_FLEX_L2TAG1P_M, qw1);
+ if (l2tag1p)
+ fields.vlan = FIELD_GET(IAVF_RXD_FLEX_L2TAG1_M, qw1);
+ } else if (rx_ring->flags & IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2) {
+ l2tag2p = FIELD_GET(IAVF_RXD_FLEX_L2TAG2P_M, qw2);
+ if (l2tag2p)
+ fields.vlan = FIELD_GET(IAVF_RXD_FLEX_L2TAG2_2_M, qw2);
+ }
+
+ return fields;
+}
+
+static struct libeth_rqe_info
+iavf_extract_rx_fields(const struct iavf_ring *rx_ring,
+ const struct iavf_rx_desc *rx_desc,
+ bool flex)
+{
+ if (flex)
+ return iavf_extract_flex_rx_fields(rx_ring, rx_desc);
+ else
+ return iavf_extract_legacy_rx_fields(rx_ring, rx_desc);
+}
+
+/**
* iavf_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
* @rx_ring: rx descriptor ring to transact packets on
* @budget: Total limit on number of packets to process
@@ -1136,18 +1383,17 @@ static bool iavf_is_non_eop(struct iavf_ring *rx_ring,
**/
static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
{
+ bool flex = rx_ring->rxdid == VIRTCHNL_RXDID_2_FLEX_SQ_NIC;
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
struct sk_buff *skb = rx_ring->skb;
u16 cleaned_count = IAVF_DESC_UNUSED(rx_ring);
bool failure = false;
while (likely(total_rx_packets < (unsigned int)budget)) {
+ struct libeth_rqe_info fields;
struct libeth_fqe *rx_buffer;
- union iavf_rx_desc *rx_desc;
- unsigned int size;
- u16 vlan_tag = 0;
- u8 rx_ptype;
- u64 qword;
+ struct iavf_rx_desc *rx_desc;
+ u64 qw1;
/* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= IAVF_RX_BUFFER_WRITE) {
@@ -1158,35 +1404,32 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
rx_desc = IAVF_RX_DESC(rx_ring, rx_ring->next_to_clean);
- /* status_error_len will always be zero for unused descriptors
- * because it's cleared in cleanup, and overlaps with hdr_addr
- * which is always zero because packet split isn't used, if the
- * hardware wrote DD then the length will be non-zero
- */
- qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
-
/* This memory barrier is needed to keep us from reading
* any other fields out of the rx_desc until we have
* verified the descriptor has been written back.
*/
dma_rmb();
-#define IAVF_RXD_DD BIT(IAVF_RX_DESC_STATUS_DD_SHIFT)
- if (!iavf_test_staterr(rx_desc, IAVF_RXD_DD))
+
+ qw1 = le64_to_cpu(rx_desc->qw1);
+ /* If DD field (descriptor done) is unset then other fields are
+ * not valid
+ */
+ if (!iavf_is_descriptor_done(qw1, flex))
break;
- size = FIELD_GET(IAVF_RXD_QW1_LENGTH_PBUF_MASK, qword);
+ fields = iavf_extract_rx_fields(rx_ring, rx_desc, flex);
iavf_trace(clean_rx_irq, rx_ring, rx_desc, skb);
rx_buffer = &rx_ring->rx_fqes[rx_ring->next_to_clean];
- if (!libeth_rx_sync_for_cpu(rx_buffer, size))
+ if (!libeth_rx_sync_for_cpu(rx_buffer, fields.len))
goto skip_data;
/* retrieve a buffer from the ring */
if (skb)
- iavf_add_rx_frag(skb, rx_buffer, size);
+ iavf_add_rx_frag(skb, rx_buffer, fields.len);
else
- skb = iavf_build_skb(rx_buffer, size);
+ skb = iavf_build_skb(rx_buffer, fields.len);
/* exit if we failed to retrieve a buffer */
if (!skb) {
@@ -1197,15 +1440,14 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
skip_data:
cleaned_count++;
- if (iavf_is_non_eop(rx_ring, rx_desc, skb) || unlikely(!skb))
+ if (iavf_is_non_eop(rx_ring, fields) || unlikely(!skb))
continue;
- /* ERR_MASK will only have valid bits if EOP set, and
- * what we are doing here is actually checking
- * IAVF_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
- * the error field
+ /* The RXE field in the descriptor indicates MAC errors (CRC,
+ * alignment, oversize etc). If it is set, the packet should be
+ * dropped.
*/
- if (unlikely(iavf_test_staterr(rx_desc, BIT(IAVF_RXD_QW1_ERROR_SHIFT)))) {
+ if (unlikely(fields.rxe)) {
dev_kfree_skb_any(skb);
skb = NULL;
continue;
@@ -1219,22 +1461,11 @@ skip_data:
/* probably a little skewed due to removing CRC */
total_rx_bytes += skb->len;
- qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- rx_ptype = FIELD_GET(IAVF_RXD_QW1_PTYPE_MASK, qword);
-
/* populate checksum, VLAN, and protocol */
- iavf_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
-
- if (qword & BIT(IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT) &&
- rx_ring->flags & IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1)
- vlan_tag = le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1);
- if (rx_desc->wb.qword2.ext_status &
- cpu_to_le16(BIT(IAVF_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT)) &&
- rx_ring->flags & IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2)
- vlan_tag = le16_to_cpu(rx_desc->wb.qword2.l2tag2_2);
+ iavf_process_skb_fields(rx_ring, rx_desc, skb, fields.ptype, flex);
iavf_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
- iavf_receive_skb(rx_ring, skb, vlan_tag);
+ iavf_receive_skb(rx_ring, skb, fields.vlan);
skb = NULL;
/* update budget accounting */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.h b/drivers/net/ethernet/intel/iavf/iavf_txrx.h
index f97c702c0802..79ad554f2d53 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.h
@@ -80,25 +80,6 @@ enum iavf_dyn_idx_t {
BIT_ULL(IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
BIT_ULL(IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
-#define iavf_rx_desc iavf_32byte_rx_desc
-
-/**
- * iavf_test_staterr - tests bits in Rx descriptor status and error fields
- * @rx_desc: pointer to receive descriptor (in le64 format)
- * @stat_err_bits: value to mask
- *
- * This function does some fast chicanery in order to return the
- * value of the mask which is really only used for boolean tests.
- * The status_error_len doesn't need to be shifted because it begins
- * at offset zero.
- */
-static inline bool iavf_test_staterr(union iavf_rx_desc *rx_desc,
- const u64 stat_err_bits)
-{
- return !!(rx_desc->wb.qword1.status_error_len &
- cpu_to_le64(stat_err_bits));
-}
-
/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define IAVF_RX_INCREMENT(r, i) \
do { \
@@ -262,6 +243,8 @@ struct iavf_ring {
u16 next_to_use;
u16 next_to_clean;
+ u16 rxdid; /* Rx descriptor format */
+
u16 flags;
#define IAVF_TXR_FLAGS_WB_ON_ITR BIT(0)
#define IAVF_TXR_FLAGS_ARM_WB BIT(1)
@@ -269,6 +252,7 @@ struct iavf_ring {
#define IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 BIT(3)
#define IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2 BIT(4)
#define IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2 BIT(5)
+#define IAVF_TXRX_FLAGS_HW_TSTAMP BIT(6)
/* stats structs */
struct iavf_queue_stats stats;
@@ -295,6 +279,8 @@ struct iavf_ring {
* for this ring.
*/
+ struct iavf_ptp *ptp;
+
u32 rx_buf_len;
struct net_shaper q_shaper;
bool q_shaper_update;
diff --git a/drivers/net/ethernet/intel/iavf/iavf_type.h b/drivers/net/ethernet/intel/iavf/iavf_type.h
index f6b09e57abce..f9e1319620f4 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_type.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_type.h
@@ -178,110 +178,116 @@ struct iavf_hw {
char err_str[16];
};
-/* RX Descriptors */
-union iavf_16byte_rx_desc {
- struct {
- __le64 pkt_addr; /* Packet buffer address */
- __le64 hdr_addr; /* Header buffer address */
- } read;
- struct {
- struct {
- struct {
- union {
- __le16 mirroring_status;
- __le16 fcoe_ctx_id;
- } mirr_fcoe;
- __le16 l2tag1;
- } lo_dword;
- union {
- __le32 rss; /* RSS Hash */
- __le32 fd_id; /* Flow director filter id */
- __le32 fcoe_param; /* FCoE DDP Context id */
- } hi_dword;
- } qword0;
- struct {
- /* ext status/error/pktype/length */
- __le64 status_error_len;
- } qword1;
- } wb; /* writeback */
-};
-
-union iavf_32byte_rx_desc {
- struct {
- __le64 pkt_addr; /* Packet buffer address */
- __le64 hdr_addr; /* Header buffer address */
- /* bit 0 of hdr_buffer_addr is DD bit */
- __le64 rsvd1;
- __le64 rsvd2;
- } read;
- struct {
- struct {
- struct {
- union {
- __le16 mirroring_status;
- __le16 fcoe_ctx_id;
- } mirr_fcoe;
- __le16 l2tag1;
- } lo_dword;
- union {
- __le32 rss; /* RSS Hash */
- __le32 fcoe_param; /* FCoE DDP Context id */
- /* Flow director filter id in case of
- * Programming status desc WB
- */
- __le32 fd_id;
- } hi_dword;
- } qword0;
- struct {
- /* status/error/pktype/length */
- __le64 status_error_len;
- } qword1;
- struct {
- __le16 ext_status; /* extended status */
- __le16 rsvd;
- __le16 l2tag2_1;
- __le16 l2tag2_2;
- } qword2;
- struct {
- union {
- __le32 flex_bytes_lo;
- __le32 pe_status;
- } lo_dword;
- union {
- __le32 flex_bytes_hi;
- __le32 fd_id;
- } hi_dword;
- } qword3;
- } wb; /* writeback */
-};
-
-enum iavf_rx_desc_status_bits {
- /* Note: These are predefined bit offsets */
- IAVF_RX_DESC_STATUS_DD_SHIFT = 0,
- IAVF_RX_DESC_STATUS_EOF_SHIFT = 1,
- IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT = 2,
- IAVF_RX_DESC_STATUS_L3L4P_SHIFT = 3,
- IAVF_RX_DESC_STATUS_CRCP_SHIFT = 4,
- IAVF_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */
- IAVF_RX_DESC_STATUS_TSYNVALID_SHIFT = 7,
- /* Note: Bit 8 is reserved in X710 and XL710 */
- IAVF_RX_DESC_STATUS_EXT_UDP_0_SHIFT = 8,
- IAVF_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */
- IAVF_RX_DESC_STATUS_FLM_SHIFT = 11,
- IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT = 12, /* 2 BITS */
- IAVF_RX_DESC_STATUS_LPBK_SHIFT = 14,
- IAVF_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15,
- IAVF_RX_DESC_STATUS_RESERVED_SHIFT = 16, /* 2 BITS */
- /* Note: For non-tunnel packets INT_UDP_0 is the right status for
- * UDP header
- */
- IAVF_RX_DESC_STATUS_INT_UDP_0_SHIFT = 18,
- IAVF_RX_DESC_STATUS_LAST /* this entry must be last!!! */
-};
-
-#define IAVF_RXD_QW1_STATUS_SHIFT 0
-#define IAVF_RXD_QW1_STATUS_MASK ((BIT(IAVF_RX_DESC_STATUS_LAST) - 1) \
- << IAVF_RXD_QW1_STATUS_SHIFT)
+/**
+ * struct iavf_rx_desc - Receive descriptor (both legacy and flexible)
+ * @qw0: quad word 0 fields:
+ * Legacy: Descriptor Type; Mirror ID; L2TAG1P (S-TAG); Filter Status
+ * Flex: Descriptor Type; Mirror ID; UMBCAST; Packet Type; Flexible Flags
+ * Section 0; Packet Length; Header Length; Split Header Flag;
+ * Flexible Flags section 1 / Extended Status
+ * @qw1: quad word 1 fields:
+ * Legacy: Status Field; Error Field; Packet Type; Packet Length (packet,
+ * header, Split Header Flag)
+ * Flex: Status / Error 0 Field; L2TAG1P (S-TAG); Flexible Metadata
+ * Container #0; Flexible Metadata Container #1
+ * @qw2: quad word 2 fields:
+ * Legacy: Extended Status; 1st L2TAG2P (C-TAG); 2nd L2TAG2P (C-TAG)
+ * Flex: Status / Error 1 Field; Flexible Flags section 2; Timestamp Low;
+ * 1st L2TAG2 (C-TAG); 2nd L2TAG2 (C-TAG)
+ * @qw3: quad word 3 fields:
+ * Legacy: FD Filter ID / Flexible Bytes
+ * Flex: Flexible Metadata Container #2; Flexible Metadata Container #3;
+ * Flexible Metadata Container #4 / Timestamp High 0; Flexible
+ * Metadata Container #5 / Timestamp High 1;
+ */
+struct iavf_rx_desc {
+ aligned_le64 qw0;
+/* The hash signature (RSS) */
+#define IAVF_RXD_LEGACY_RSS_M GENMASK_ULL(63, 32)
+/* Stripped C-TAG VLAN from the receive packet */
+#define IAVF_RXD_LEGACY_L2TAG1_M GENMASK_ULL(31, 16)
+/* Packet type */
+#define IAVF_RXD_FLEX_PTYPE_M GENMASK_ULL(25, 16)
+/* Packet length */
+#define IAVF_RXD_FLEX_PKT_LEN_M GENMASK_ULL(45, 32)
+
+ aligned_le64 qw1;
+/* Descriptor done indication flag. */
+#define IAVF_RXD_LEGACY_DD_M BIT(0)
+/* End of packet. Set to 1 if this descriptor is the last one of the packet */
+#define IAVF_RXD_LEGACY_EOP_M BIT(1)
+/* L2 TAG 1 presence indication */
+#define IAVF_RXD_LEGACY_L2TAG1P_M BIT(2)
+/* Detectable L3 and L4 integrity check is processed by the HW */
+#define IAVF_RXD_LEGACY_L3L4P_M BIT(3)
+/* Set when an IPv6 packet contains a Destination Options Header or a Routing
+ * Header.
+ */
+#define IAVF_RXD_LEGACY_IPV6EXADD_M BIT(15)
+/* Receive MAC Errors: CRC; Alignment; Oversize; Undersize; Length error */
+#define IAVF_RXD_LEGACY_RXE_M BIT(19)
+/* Checksum reports:
+ * - IPE: IP checksum error
+ * - L4E: L4 integrity error
+ * - EIPE: External IP header (tunneled packets)
+ */
+#define IAVF_RXD_LEGACY_IPE_M BIT(22)
+#define IAVF_RXD_LEGACY_L4E_M BIT(23)
+#define IAVF_RXD_LEGACY_EIPE_M BIT(24)
+/* Set for packets that skip checksum calculation in pre-parser */
+#define IAVF_RXD_LEGACY_PPRS_M BIT(26)
+/* Indicates the content in the Filter Status field */
+#define IAVF_RXD_LEGACY_FLTSTAT_M GENMASK_ULL(13, 12)
+/* Packet type */
+#define IAVF_RXD_LEGACY_PTYPE_M GENMASK_ULL(37, 30)
+/* Packet length */
+#define IAVF_RXD_LEGACY_LENGTH_M GENMASK_ULL(51, 38)
+/* Descriptor done indication flag */
+#define IAVF_RXD_FLEX_DD_M BIT(0)
+/* End of packet. Set to 1 if this descriptor is the last one of the packet */
+#define IAVF_RXD_FLEX_EOP_M BIT(1)
+/* Detectable L3 and L4 integrity check is processed by the HW */
+#define IAVF_RXD_FLEX_L3L4P_M BIT(3)
+/* Checksum reports:
+ * - IPE: IP checksum error
+ * - L4E: L4 integrity error
+ * - EIPE: External IP header (tunneled packets)
+ * - EUDPE: External UDP checksum error (tunneled packets)
+ */
+#define IAVF_RXD_FLEX_XSUM_IPE_M BIT(4)
+#define IAVF_RXD_FLEX_XSUM_L4E_M BIT(5)
+#define IAVF_RXD_FLEX_XSUM_EIPE_M BIT(6)
+#define IAVF_RXD_FLEX_XSUM_EUDPE_M BIT(7)
+/* Set when an IPv6 packet contains a Destination Options Header or a Routing
+ * Header.
+ */
+#define IAVF_RXD_FLEX_IPV6EXADD_M BIT(9)
+/* Receive MAC Errors: CRC; Alignment; Oversize; Undersize; Length error */
+#define IAVF_RXD_FLEX_RXE_M BIT(10)
+/* Indicates that the RSS/HASH result is valid */
+#define IAVF_RXD_FLEX_RSS_VALID_M BIT(12)
+/* L2 TAG 1 presence indication */
+#define IAVF_RXD_FLEX_L2TAG1P_M BIT(13)
+/* Stripped L2 Tag from the receive packet */
+#define IAVF_RXD_FLEX_L2TAG1_M GENMASK_ULL(31, 16)
+/* The hash signature (RSS) */
+#define IAVF_RXD_FLEX_RSS_HASH_M GENMASK_ULL(63, 32)
+
+ aligned_le64 qw2;
+/* L2 Tag 2 Presence */
+#define IAVF_RXD_LEGACY_L2TAG2P_M BIT(0)
+/* Stripped S-TAG VLAN from the receive packet */
+#define IAVF_RXD_LEGACY_L2TAG2_M GENMASK_ULL(63, 32)
+/* Stripped S-TAG VLAN from the receive packet */
+#define IAVF_RXD_FLEX_L2TAG2_2_M GENMASK_ULL(63, 48)
+/* The packet is a UDP tunneled packet */
+#define IAVF_RXD_FLEX_NAT_M BIT(4)
+/* L2 Tag 2 Presence */
+#define IAVF_RXD_FLEX_L2TAG2P_M BIT(11)
+ aligned_le64 qw3;
+#define IAVF_RXD_FLEX_QW3_TSTAMP_HIGH_M GENMASK_ULL(63, 32)
+} __aligned(4 * sizeof(__le64));
+static_assert(sizeof(struct iavf_rx_desc) == 32);
#define IAVF_RXD_QW1_STATUS_TSYNINDX_SHIFT IAVF_RX_DESC_STATUS_TSYNINDX_SHIFT
#define IAVF_RXD_QW1_STATUS_TSYNINDX_MASK (0x3UL << \
@@ -298,22 +304,6 @@ enum iavf_rx_desc_fltstat_values {
IAVF_RX_DESC_FLTSTAT_RSS_HASH = 3,
};
-#define IAVF_RXD_QW1_ERROR_SHIFT 19
-#define IAVF_RXD_QW1_ERROR_MASK (0xFFUL << IAVF_RXD_QW1_ERROR_SHIFT)
-
-enum iavf_rx_desc_error_bits {
- /* Note: These are predefined bit offsets */
- IAVF_RX_DESC_ERROR_RXE_SHIFT = 0,
- IAVF_RX_DESC_ERROR_RECIPE_SHIFT = 1,
- IAVF_RX_DESC_ERROR_HBO_SHIFT = 2,
- IAVF_RX_DESC_ERROR_L3L4E_SHIFT = 3, /* 3 BITS */
- IAVF_RX_DESC_ERROR_IPE_SHIFT = 3,
- IAVF_RX_DESC_ERROR_L4E_SHIFT = 4,
- IAVF_RX_DESC_ERROR_EIPE_SHIFT = 5,
- IAVF_RX_DESC_ERROR_OVERSIZE_SHIFT = 6,
- IAVF_RX_DESC_ERROR_PPRS_SHIFT = 7
-};
-
enum iavf_rx_desc_error_l3l4e_fcoe_masks {
IAVF_RX_DESC_ERROR_L3L4E_NONE = 0,
IAVF_RX_DESC_ERROR_L3L4E_PROT = 1,
@@ -322,13 +312,6 @@ enum iavf_rx_desc_error_l3l4e_fcoe_masks {
IAVF_RX_DESC_ERROR_L3L4E_DMAC_WARN = 4
};
-#define IAVF_RXD_QW1_PTYPE_SHIFT 30
-#define IAVF_RXD_QW1_PTYPE_MASK (0xFFULL << IAVF_RXD_QW1_PTYPE_SHIFT)
-
-#define IAVF_RXD_QW1_LENGTH_PBUF_SHIFT 38
-#define IAVF_RXD_QW1_LENGTH_PBUF_MASK (0x3FFFULL << \
- IAVF_RXD_QW1_LENGTH_PBUF_SHIFT)
-
#define IAVF_RXD_QW1_LENGTH_HBUF_SHIFT 52
#define IAVF_RXD_QW1_LENGTH_HBUF_MASK (0x7FFULL << \
IAVF_RXD_QW1_LENGTH_HBUF_SHIFT)
@@ -347,6 +330,8 @@ enum iavf_rx_desc_ext_status_bits {
IAVF_RX_DESC_EXT_STATUS_PELONGB_SHIFT = 11,
};
+#define IAVF_RX_DESC_EXT_STATUS_L2TAG2P_M BIT(IAVF_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT)
+
enum iavf_rx_desc_pe_status_bits {
/* Note: These are predefined bit offsets */
IAVF_RX_DESC_PE_STATUS_QPID_SHIFT = 0, /* 18 BITS */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_types.h b/drivers/net/ethernet/intel/iavf/iavf_types.h
new file mode 100644
index 000000000000..a095855122bf
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/iavf_types.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2024 Intel Corporation. */
+
+#ifndef _IAVF_TYPES_H_
+#define _IAVF_TYPES_H_
+
+#include "iavf_types.h"
+
+#include <linux/avf/virtchnl.h>
+#include <linux/ptp_clock_kernel.h>
+
+/* structure used to queue PTP commands for processing */
+struct iavf_ptp_aq_cmd {
+ struct list_head list;
+ enum virtchnl_ops v_opcode:16;
+ u16 msglen;
+ u8 msg[] __counted_by(msglen);
+};
+
+struct iavf_ptp {
+ wait_queue_head_t phc_time_waitqueue;
+ struct virtchnl_ptp_caps hw_caps;
+ struct ptp_clock_info info;
+ struct ptp_clock *clock;
+ struct list_head aq_cmds;
+ u64 cached_phc_time;
+ unsigned long cached_phc_updated;
+ /* Lock protecting access to the AQ command list */
+ struct mutex aq_cmd_lock;
+ struct kernel_hwtstamp_config hwtstamp_config;
+ bool phc_time_ready:1;
+};
+
+#endif /* _IAVF_TYPES_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index 15d388b431c5..07f0d0a0f1e2 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -4,6 +4,7 @@
#include <linux/net/intel/libie/rx.h>
#include "iavf.h"
+#include "iavf_ptp.h"
#include "iavf_prototype.h"
/**
@@ -78,6 +79,23 @@ iavf_poll_virtchnl_msg(struct iavf_hw *hw, struct iavf_arq_event_info *event,
return iavf_status_to_errno(status);
received_op =
(enum virtchnl_ops)le32_to_cpu(event->desc.cookie_high);
+
+ if (received_op == VIRTCHNL_OP_EVENT) {
+ struct iavf_adapter *adapter = hw->back;
+ struct virtchnl_pf_event *vpe =
+ (struct virtchnl_pf_event *)event->msg_buf;
+
+ if (vpe->event != VIRTCHNL_EVENT_RESET_IMPENDING)
+ continue;
+
+ dev_info(&adapter->pdev->dev, "Reset indication received from the PF\n");
+ if (!(adapter->flags & IAVF_FLAG_RESET_PENDING))
+ iavf_schedule_reset(adapter,
+ IAVF_FLAG_RESET_PENDING);
+
+ return -EIO;
+ }
+
if (op_to_poll == received_op)
break;
}
@@ -144,9 +162,11 @@ int iavf_send_vf_config_msg(struct iavf_adapter *adapter)
VIRTCHNL_VF_OFFLOAD_ENCAP |
VIRTCHNL_VF_OFFLOAD_TC_U32 |
VIRTCHNL_VF_OFFLOAD_VLAN_V2 |
+ VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC |
VIRTCHNL_VF_OFFLOAD_CRC |
VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM |
VIRTCHNL_VF_OFFLOAD_REQ_QUEUES |
+ VIRTCHNL_VF_CAP_PTP |
VIRTCHNL_VF_OFFLOAD_ADQ |
VIRTCHNL_VF_OFFLOAD_USO |
VIRTCHNL_VF_OFFLOAD_FDIR_PF |
@@ -177,6 +197,54 @@ int iavf_send_vf_offload_vlan_v2_msg(struct iavf_adapter *adapter)
NULL, 0);
}
+int iavf_send_vf_supported_rxdids_msg(struct iavf_adapter *adapter)
+{
+ adapter->aq_required &= ~IAVF_FLAG_AQ_GET_SUPPORTED_RXDIDS;
+
+ if (!IAVF_RXDID_ALLOWED(adapter))
+ return -EOPNOTSUPP;
+
+ adapter->current_op = VIRTCHNL_OP_GET_SUPPORTED_RXDIDS;
+
+ return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_SUPPORTED_RXDIDS,
+ NULL, 0);
+}
+
+/**
+ * iavf_send_vf_ptp_caps_msg - Send request for PTP capabilities
+ * @adapter: private adapter structure
+ *
+ * Send the VIRTCHNL_OP_1588_PTP_GET_CAPS command to the PF to request the PTP
+ * capabilities available to this device. This includes the following
+ * potential access:
+ *
+ * * READ_PHC - access to read the PTP hardware clock time
+ * * RX_TSTAMP - access to request Rx timestamps on all received packets
+ *
+ * The PF will reply with the same opcode and a filled-out copy of the
+ * virtchnl_ptp_caps structure, which defines which features are accessible
+ * to this device.
+ *
+ * Return: 0 if success, error code otherwise.
+ */
+int iavf_send_vf_ptp_caps_msg(struct iavf_adapter *adapter)
+{
+ struct virtchnl_ptp_caps hw_caps = {
+ .caps = VIRTCHNL_1588_PTP_CAP_READ_PHC |
+ VIRTCHNL_1588_PTP_CAP_RX_TSTAMP
+ };
+
+ adapter->aq_required &= ~IAVF_FLAG_AQ_GET_PTP_CAPS;
+
+ if (!IAVF_PTP_ALLOWED(adapter))
+ return -EOPNOTSUPP;
+
+ adapter->current_op = VIRTCHNL_OP_1588_PTP_GET_CAPS;
+
+ return iavf_send_pf_msg(adapter, VIRTCHNL_OP_1588_PTP_GET_CAPS,
+ (u8 *)&hw_caps, sizeof(hw_caps));
+}
+
/**
* iavf_validate_num_queues
* @adapter: adapter structure
@@ -263,6 +331,40 @@ int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter)
return err;
}
+int iavf_get_vf_supported_rxdids(struct iavf_adapter *adapter)
+{
+ struct iavf_arq_event_info event;
+ u64 rxdids;
+ int err;
+
+ event.msg_buf = (u8 *)&rxdids;
+ event.buf_len = sizeof(rxdids);
+
+ err = iavf_poll_virtchnl_msg(&adapter->hw, &event,
+ VIRTCHNL_OP_GET_SUPPORTED_RXDIDS);
+ if (!err)
+ adapter->supp_rxdids = rxdids;
+
+ return err;
+}
+
+int iavf_get_vf_ptp_caps(struct iavf_adapter *adapter)
+{
+ struct virtchnl_ptp_caps caps = {};
+ struct iavf_arq_event_info event;
+ int err;
+
+ event.msg_buf = (u8 *)&caps;
+ event.buf_len = sizeof(caps);
+
+ err = iavf_poll_virtchnl_msg(&adapter->hw, &event,
+ VIRTCHNL_OP_1588_PTP_GET_CAPS);
+ if (!err)
+ adapter->ptp.hw_caps = caps;
+
+ return err;
+}
+
/**
* iavf_configure_queues
* @adapter: adapter structure
@@ -275,6 +377,7 @@ void iavf_configure_queues(struct iavf_adapter *adapter)
int pairs = adapter->num_active_queues;
struct virtchnl_queue_pair_info *vqpi;
u32 i, max_frame;
+ u8 rx_flags = 0;
size_t len;
max_frame = LIBIE_MAX_RX_FRM_LEN(adapter->rx_rings->pp->p.offset);
@@ -292,6 +395,9 @@ void iavf_configure_queues(struct iavf_adapter *adapter)
if (!vqci)
return;
+ if (iavf_ptp_cap_supported(adapter, VIRTCHNL_1588_PTP_CAP_RX_TSTAMP))
+ rx_flags |= VIRTCHNL_PTP_RX_TSTAMP;
+
vqci->vsi_id = adapter->vsi_res->vsi_id;
vqci->num_queue_pairs = pairs;
vqpi = vqci->qpair;
@@ -309,9 +415,12 @@ void iavf_configure_queues(struct iavf_adapter *adapter)
vqpi->rxq.dma_ring_addr = adapter->rx_rings[i].dma;
vqpi->rxq.max_pkt_size = max_frame;
vqpi->rxq.databuffer_size = adapter->rx_rings[i].rx_buf_len;
+ if (IAVF_RXDID_ALLOWED(adapter))
+ vqpi->rxq.rxdid = adapter->rxdid;
if (CRC_OFFLOAD_ALLOWED(adapter))
vqpi->rxq.crc_disable = !!(adapter->netdev->features &
NETIF_F_RXFCS);
+ vqpi->rxq.flags = rx_flags;
vqpi++;
}
@@ -1402,6 +1511,67 @@ void iavf_disable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid)
VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2);
}
+#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
+/**
+ * iavf_virtchnl_send_ptp_cmd - Send one queued PTP command
+ * @adapter: adapter private structure
+ *
+ * De-queue one PTP command request and send the command message to the PF.
+ * Clear IAVF_FLAG_AQ_SEND_PTP_CMD if no more messages are left to send.
+ */
+void iavf_virtchnl_send_ptp_cmd(struct iavf_adapter *adapter)
+{
+ struct iavf_ptp_aq_cmd *cmd;
+ int err;
+
+ if (!adapter->ptp.clock) {
+ /* This shouldn't be possible to hit, since no messages should
+ * be queued if PTP is not initialized.
+ */
+ pci_err(adapter->pdev, "PTP is not initialized\n");
+ adapter->aq_required &= ~IAVF_FLAG_AQ_SEND_PTP_CMD;
+ return;
+ }
+
+ mutex_lock(&adapter->ptp.aq_cmd_lock);
+ cmd = list_first_entry_or_null(&adapter->ptp.aq_cmds,
+ struct iavf_ptp_aq_cmd, list);
+ if (!cmd) {
+ /* no further PTP messages to send */
+ adapter->aq_required &= ~IAVF_FLAG_AQ_SEND_PTP_CMD;
+ goto out_unlock;
+ }
+
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ pci_err(adapter->pdev,
+ "Cannot send PTP command %d, command %d pending\n",
+ cmd->v_opcode, adapter->current_op);
+ goto out_unlock;
+ }
+
+ err = iavf_send_pf_msg(adapter, cmd->v_opcode, cmd->msg, cmd->msglen);
+ if (!err) {
+ /* Command was sent without errors, so we can remove it from
+ * the list and discard it.
+ */
+ list_del(&cmd->list);
+ kfree(cmd);
+ } else {
+ /* We failed to send the command, try again next cycle */
+ pci_err(adapter->pdev, "Failed to send PTP command %d\n",
+ cmd->v_opcode);
+ }
+
+ if (list_empty(&adapter->ptp.aq_cmds))
+ /* no further PTP messages to send */
+ adapter->aq_required &= ~IAVF_FLAG_AQ_SEND_PTP_CMD;
+
+out_unlock:
+ mutex_unlock(&adapter->ptp.aq_cmd_lock);
+}
+#endif /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
+
/**
* iavf_print_link_message - print link up or down
* @adapter: adapter structure
@@ -2098,6 +2268,37 @@ static void iavf_activate_fdir_filters(struct iavf_adapter *adapter)
}
/**
+ * iavf_virtchnl_ptp_get_time - Respond to VIRTCHNL_OP_1588_PTP_GET_TIME
+ * @adapter: private adapter structure
+ * @data: the message from the PF
+ * @len: length of the message from the PF
+ *
+ * Handle the VIRTCHNL_OP_1588_PTP_GET_TIME message from the PF. This message
+ * is sent by the PF in response to the same op as a request from the VF.
+ * Extract the 64-bit nanosecond time from the message and store it in
+ * cached_phc_time. Then, notify any thread that is waiting for the update via
+ * the wait queue.
+ */
+static void iavf_virtchnl_ptp_get_time(struct iavf_adapter *adapter,
+ void *data, u16 len)
+{
+ struct virtchnl_phc_time *msg = data;
+
+ if (len != sizeof(*msg)) {
+ dev_err_once(&adapter->pdev->dev,
+ "Invalid VIRTCHNL_OP_1588_PTP_GET_TIME from PF. Got size %u, expected %zu\n",
+ len, sizeof(*msg));
+ return;
+ }
+
+ adapter->ptp.cached_phc_time = msg->time;
+ adapter->ptp.cached_phc_updated = jiffies;
+ adapter->ptp.phc_time_ready = true;
+
+ wake_up(&adapter->ptp.phc_time_waitqueue);
+}
+
+/**
* iavf_virtchnl_completion
* @adapter: adapter structure
* @v_opcode: opcode sent by PF
@@ -2509,6 +2710,25 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
aq_required;
}
break;
+ case VIRTCHNL_OP_GET_SUPPORTED_RXDIDS:
+ if (msglen != sizeof(u64))
+ return;
+
+ adapter->supp_rxdids = *(u64 *)msg;
+
+ break;
+ case VIRTCHNL_OP_1588_PTP_GET_CAPS:
+ if (msglen != sizeof(adapter->ptp.hw_caps))
+ return;
+
+ adapter->ptp.hw_caps = *(struct virtchnl_ptp_caps *)msg;
+
+ /* process any state change needed due to new capabilities */
+ iavf_ptp_process_caps(adapter);
+ break;
+ case VIRTCHNL_OP_1588_PTP_GET_TIME:
+ iavf_virtchnl_ptp_get_time(adapter, msg, msglen);
+ break;
case VIRTCHNL_OP_ENABLE_QUEUES:
/* enable transmits */
iavf_irq_enable(adapter, true);
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index 3307d551f431..9e0d9f710441 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -32,7 +32,8 @@ ice-y := ice_main.o \
ice_parser_rt.o \
ice_idc.o \
devlink/devlink.o \
- devlink/devlink_port.o \
+ devlink/health.o \
+ devlink/port.o \
ice_sf_eth.o \
ice_sf_vsi_vlan_ops.o \
ice_ddp.o \
diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink.c b/drivers/net/ethernet/intel/ice/devlink/devlink.c
index 415445cefdb2..4af60e2f37df 100644
--- a/drivers/net/ethernet/intel/ice/devlink/devlink.c
+++ b/drivers/net/ethernet/intel/ice/devlink/devlink.c
@@ -6,7 +6,7 @@
#include "ice.h"
#include "ice_lib.h"
#include "devlink.h"
-#include "devlink_port.h"
+#include "port.h"
#include "ice_eswitch.h"
#include "ice_fw_update.h"
#include "ice_dcb_lib.h"
@@ -368,14 +368,18 @@ static int ice_devlink_info_get(struct devlink *devlink,
}
break;
case ICE_VERSION_RUNNING:
- err = devlink_info_version_running_put(req, key, ctx->buf);
+ err = devlink_info_version_running_put_ext(req, key,
+ ctx->buf,
+ DEVLINK_INFO_VERSION_TYPE_COMPONENT);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Unable to set running version");
goto out_free_ctx;
}
break;
case ICE_VERSION_STORED:
- err = devlink_info_version_stored_put(req, key, ctx->buf);
+ err = devlink_info_version_stored_put_ext(req, key,
+ ctx->buf,
+ DEVLINK_INFO_VERSION_TYPE_COMPONENT);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Unable to set stored version");
goto out_free_ctx;
@@ -977,6 +981,9 @@ static int ice_devlink_rate_node_new(struct devlink_rate *rate_node, void **priv
/* preallocate memory for ice_sched_node */
node = devm_kzalloc(ice_hw_to_dev(pi->hw), sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+
*priv = node;
return 0;
@@ -1198,6 +1205,25 @@ static int ice_devlink_set_parent(struct devlink_rate *devlink_rate,
return status;
}
+static void ice_set_min_max_msix(struct ice_pf *pf)
+{
+ struct devlink *devlink = priv_to_devlink(pf);
+ union devlink_param_value val;
+ int err;
+
+ err = devl_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN,
+ &val);
+ if (!err)
+ pf->msix.min = val.vu32;
+
+ err = devl_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX,
+ &val);
+ if (!err)
+ pf->msix.max = val.vu32;
+}
+
/**
* ice_devlink_reinit_up - do reinit of the given PF
* @pf: pointer to the PF struct
@@ -1207,9 +1233,18 @@ static int ice_devlink_reinit_up(struct ice_pf *pf)
struct ice_vsi *vsi = ice_get_main_vsi(pf);
int err;
+ err = ice_init_hw(&pf->hw);
+ if (err) {
+ dev_err(ice_pf_to_dev(pf), "ice_init_hw failed: %d\n", err);
+ return err;
+ }
+
+ /* load MSI-X values */
+ ice_set_min_max_msix(pf);
+
err = ice_init_dev(pf);
if (err)
- return err;
+ goto unroll_hw_init;
vsi->flags = ICE_VSI_FLAG_INIT;
@@ -1232,6 +1267,8 @@ err_load:
rtnl_unlock();
err_vsi_cfg:
ice_deinit_dev(pf);
+unroll_hw_init:
+ ice_deinit_hw(&pf->hw);
return err;
}
@@ -1302,8 +1339,13 @@ ice_devlink_enable_roce_get(struct devlink *devlink, u32 id,
struct devlink_param_gset_ctx *ctx)
{
struct ice_pf *pf = devlink_priv(devlink);
+ struct iidc_rdma_core_dev_info *cdev;
+
+ cdev = pf->cdev_info;
+ if (!cdev)
+ return -ENODEV;
- ctx->val.vbool = pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2 ? true : false;
+ ctx->val.vbool = !!(cdev->rdma_protocol & IIDC_RDMA_PROTOCOL_ROCEV2);
return 0;
}
@@ -1313,19 +1355,24 @@ static int ice_devlink_enable_roce_set(struct devlink *devlink, u32 id,
struct netlink_ext_ack *extack)
{
struct ice_pf *pf = devlink_priv(devlink);
+ struct iidc_rdma_core_dev_info *cdev;
bool roce_ena = ctx->val.vbool;
int ret;
+ cdev = pf->cdev_info;
+ if (!cdev)
+ return -ENODEV;
+
if (!roce_ena) {
ice_unplug_aux_dev(pf);
- pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_ROCEV2;
+ cdev->rdma_protocol &= ~IIDC_RDMA_PROTOCOL_ROCEV2;
return 0;
}
- pf->rdma_mode |= IIDC_RDMA_PROTOCOL_ROCEV2;
+ cdev->rdma_protocol |= IIDC_RDMA_PROTOCOL_ROCEV2;
ret = ice_plug_aux_dev(pf);
if (ret)
- pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_ROCEV2;
+ cdev->rdma_protocol &= ~IIDC_RDMA_PROTOCOL_ROCEV2;
return ret;
}
@@ -1336,11 +1383,16 @@ ice_devlink_enable_roce_validate(struct devlink *devlink, u32 id,
struct netlink_ext_ack *extack)
{
struct ice_pf *pf = devlink_priv(devlink);
+ struct iidc_rdma_core_dev_info *cdev;
+
+ cdev = pf->cdev_info;
+ if (!cdev)
+ return -ENODEV;
if (!test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
return -EOPNOTSUPP;
- if (pf->rdma_mode & IIDC_RDMA_PROTOCOL_IWARP) {
+ if (cdev->rdma_protocol & IIDC_RDMA_PROTOCOL_IWARP) {
NL_SET_ERR_MSG_MOD(extack, "iWARP is currently enabled. This device cannot enable iWARP and RoCEv2 simultaneously");
return -EOPNOTSUPP;
}
@@ -1353,8 +1405,13 @@ ice_devlink_enable_iw_get(struct devlink *devlink, u32 id,
struct devlink_param_gset_ctx *ctx)
{
struct ice_pf *pf = devlink_priv(devlink);
+ struct iidc_rdma_core_dev_info *cdev;
- ctx->val.vbool = pf->rdma_mode & IIDC_RDMA_PROTOCOL_IWARP;
+ cdev = pf->cdev_info;
+ if (!cdev)
+ return -ENODEV;
+
+ ctx->val.vbool = !!(cdev->rdma_protocol & IIDC_RDMA_PROTOCOL_IWARP);
return 0;
}
@@ -1364,19 +1421,24 @@ static int ice_devlink_enable_iw_set(struct devlink *devlink, u32 id,
struct netlink_ext_ack *extack)
{
struct ice_pf *pf = devlink_priv(devlink);
+ struct iidc_rdma_core_dev_info *cdev;
bool iw_ena = ctx->val.vbool;
int ret;
+ cdev = pf->cdev_info;
+ if (!cdev)
+ return -ENODEV;
+
if (!iw_ena) {
ice_unplug_aux_dev(pf);
- pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_IWARP;
+ cdev->rdma_protocol &= ~IIDC_RDMA_PROTOCOL_IWARP;
return 0;
}
- pf->rdma_mode |= IIDC_RDMA_PROTOCOL_IWARP;
+ cdev->rdma_protocol |= IIDC_RDMA_PROTOCOL_IWARP;
ret = ice_plug_aux_dev(pf);
if (ret)
- pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_IWARP;
+ cdev->rdma_protocol &= ~IIDC_RDMA_PROTOCOL_IWARP;
return ret;
}
@@ -1391,7 +1453,7 @@ ice_devlink_enable_iw_validate(struct devlink *devlink, u32 id,
if (!test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
return -EOPNOTSUPP;
- if (pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2) {
+ if (pf->cdev_info->rdma_protocol & IIDC_RDMA_PROTOCOL_ROCEV2) {
NL_SET_ERR_MSG_MOD(extack, "RoCEv2 is currently enabled. This device cannot enable iWARP and RoCEv2 simultaneously");
return -EOPNOTSUPP;
}
@@ -1518,6 +1580,43 @@ static int ice_devlink_local_fwd_validate(struct devlink *devlink, u32 id,
return 0;
}
+static int
+ice_devlink_msix_max_pf_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+
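+ /* Reject values above the number of MSI-X vectors the device advertises */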
+ if (val.vu32 > pf->hw.func_caps.common_cap.num_msix_vectors)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+ice_devlink_msix_min_pf_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ if (val.vu32 < ICE_MIN_MSIX)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ice_devlink_enable_rdma_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+ bool new_state = val.vbool;
+
+ if (new_state && !test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
enum ice_param_id {
ICE_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
ICE_DEVLINK_PARAM_ID_TX_SCHED_LAYERS,
@@ -1533,6 +1632,17 @@ static const struct devlink_param ice_dvl_rdma_params[] = {
ice_devlink_enable_iw_get,
ice_devlink_enable_iw_set,
ice_devlink_enable_iw_validate),
+ DEVLINK_PARAM_GENERIC(ENABLE_RDMA, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL, NULL, ice_devlink_enable_rdma_validate),
+};
+
+static const struct devlink_param ice_dvl_msix_params[] = {
+ DEVLINK_PARAM_GENERIC(MSIX_VEC_PER_PF_MAX,
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL, NULL, ice_devlink_msix_max_pf_validate),
+ DEVLINK_PARAM_GENERIC(MSIX_VEC_PER_PF_MIN,
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL, NULL, ice_devlink_msix_min_pf_validate),
};
static const struct devlink_param ice_dvl_sched_params[] = {
@@ -1636,6 +1746,7 @@ void ice_devlink_unregister(struct ice_pf *pf)
int ice_devlink_register_params(struct ice_pf *pf)
{
struct devlink *devlink = priv_to_devlink(pf);
+ union devlink_param_value value;
struct ice_hw *hw = &pf->hw;
int status;
@@ -1644,10 +1755,39 @@ int ice_devlink_register_params(struct ice_pf *pf)
if (status)
return status;
+ status = devl_params_register(devlink, ice_dvl_msix_params,
+ ARRAY_SIZE(ice_dvl_msix_params));
+ if (status)
+ goto unregister_rdma_params;
+
if (hw->func_caps.common_cap.tx_sched_topo_comp_mode_en)
status = devl_params_register(devlink, ice_dvl_sched_params,
ARRAY_SIZE(ice_dvl_sched_params));
+ if (status)
+ goto unregister_msix_params;
+
+ value.vu32 = pf->msix.max;
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX,
+ value);
+ value.vu32 = pf->msix.min;
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN,
+ value);
+
+ value.vbool = test_bit(ICE_FLAG_RDMA_ENA, pf->flags);
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA,
+ value);
+ return 0;
+
+unregister_msix_params:
+ devl_params_unregister(devlink, ice_dvl_msix_params,
+ ARRAY_SIZE(ice_dvl_msix_params));
+unregister_rdma_params:
+ devl_params_unregister(devlink, ice_dvl_rdma_params,
+ ARRAY_SIZE(ice_dvl_rdma_params));
return status;
}
@@ -1658,6 +1798,8 @@ void ice_devlink_unregister_params(struct ice_pf *pf)
devl_params_unregister(devlink, ice_dvl_rdma_params,
ARRAY_SIZE(ice_dvl_rdma_params));
+ devl_params_unregister(devlink, ice_dvl_msix_params,
+ ARRAY_SIZE(ice_dvl_msix_params));
if (hw->func_caps.common_cap.tx_sched_topo_comp_mode_en)
devl_params_unregister(devlink, ice_dvl_sched_params,
diff --git a/drivers/net/ethernet/intel/ice/devlink/health.c b/drivers/net/ethernet/intel/ice/devlink/health.c
new file mode 100644
index 000000000000..19c3d37aa768
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/devlink/health.c
@@ -0,0 +1,550 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024, Intel Corporation. */
+
+#include "ice.h"
+#include "ice_adminq_cmd.h" /* for enum ice_aqc_health_status_elem */
+#include "health.h"
+
+#define ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, obj, name) \
+ devlink_fmsg_put(fmsg, #name, (obj)->name)
+
+#define ICE_HEALTH_STATUS_DATA_SIZE 2
+
+struct ice_health_status {
+ enum ice_aqc_health_status code;
+ const char *description;
+ const char *solution;
+ const char *data_label[ICE_HEALTH_STATUS_DATA_SIZE];
+};
+
+/*
+ * In addition to the health status codes provided below, the firmware might
+ * generate Health Status Codes that are not pertinent to the end-user.
+ * For instance, Health Code 0x1002 is triggered when the command fails.
+ * Such codes should be disregarded by the end-user.
+ * The lookup table below must be kept sorted by code.
+ */
+
+static const char ice_common_port_solutions[] =
+ "Check your cable connection. Change or replace the module or cable. Manually set speed and duplex.";
+static const char ice_port_number_label[] = "Port Number";
+static const char ice_update_nvm_solution[] = "Update to the latest NVM image.";
+
+static const struct ice_health_status ice_health_status_lookup[] = {
+ {ICE_AQC_HEALTH_STATUS_ERR_UNKNOWN_MOD_STRICT, "An unsupported module was detected.",
+ ice_common_port_solutions, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_MOD_TYPE, "Module type is not supported.",
+ "Change or replace the module or cable.", {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_MOD_QUAL, "Module is not qualified.",
+ ice_common_port_solutions, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_MOD_COMM,
+ "Device cannot communicate with the module.",
+ "Check your cable connection. Change or replace the module or cable. Manually set speed and duplex.",
+ {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_MOD_CONFLICT, "Unresolved module conflict.",
+ "Manually set speed/duplex or change the port option. If the problem persists, use a cable/module that is found in the supported modules and cables list for this device.",
+ {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_MOD_NOT_PRESENT, "Module is not present.",
+ "Check that the module is inserted correctly. If the problem persists, use a cable/module that is found in the supported modules and cables list for this device.",
+ {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_INFO_MOD_UNDERUTILIZED, "Underutilized module.",
+ "Change or replace the module or cable. Change the port option.",
+ {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_UNKNOWN_MOD_LENIENT, "An unsupported module was detected.",
+ ice_common_port_solutions, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_INVALID_LINK_CFG, "Invalid link configuration.",
+ NULL, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_PORT_ACCESS, "Port hardware access error.",
+ ice_update_nvm_solution, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_PORT_UNREACHABLE, "A port is unreachable.",
+ "Change the port option. Update to the latest NVM image."},
+ {ICE_AQC_HEALTH_STATUS_INFO_PORT_SPEED_MOD_LIMITED, "Port speed is limited due to module.",
+ "Change the module or configure the port option to match the current module speed. Change the port option.",
+ {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_PARALLEL_FAULT,
+ "All configured link modes were attempted but failed to establish link. The device will restart the process to establish link.",
+ "Check link partner connection and configuration.",
+ {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_INFO_PORT_SPEED_PHY_LIMITED,
+ "Port speed is limited by PHY capabilities.",
+ "Change the module to align to port option.", {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_NETLIST_TOPO, "LOM topology netlist is corrupted.",
+ ice_update_nvm_solution, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_NETLIST, "Unrecoverable netlist error.",
+ ice_update_nvm_solution, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_TOPO_CONFLICT, "Port topology conflict.",
+ "Change the port option. Update to the latest NVM image."},
+ {ICE_AQC_HEALTH_STATUS_ERR_LINK_HW_ACCESS, "Unrecoverable hardware access error.",
+ ice_update_nvm_solution, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_LINK_RUNTIME, "Unrecoverable runtime error.",
+ ice_update_nvm_solution, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_DNL_INIT, "Link management engine failed to initialize.",
+ ice_update_nvm_solution, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_PHY_FW_LOAD,
+ "Failed to load the firmware image in the external PHY.",
+ ice_update_nvm_solution, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_INFO_RECOVERY, "The device is in firmware recovery mode.",
+ ice_update_nvm_solution, {"Extended Error"}},
+ {ICE_AQC_HEALTH_STATUS_ERR_FLASH_ACCESS, "The flash chip cannot be accessed.",
+ "If issue persists, call customer support.", {"Access Type"}},
+ {ICE_AQC_HEALTH_STATUS_ERR_NVM_AUTH, "NVM authentication failed.",
+ ice_update_nvm_solution},
+ {ICE_AQC_HEALTH_STATUS_ERR_OROM_AUTH, "Option ROM authentication failed.",
+ ice_update_nvm_solution},
+ {ICE_AQC_HEALTH_STATUS_ERR_DDP_AUTH, "DDP package authentication failed.",
+ "Update to latest base driver and DDP package."},
+ {ICE_AQC_HEALTH_STATUS_ERR_NVM_COMPAT, "NVM image is incompatible.",
+ ice_update_nvm_solution},
+ {ICE_AQC_HEALTH_STATUS_ERR_OROM_COMPAT, "Option ROM is incompatible.",
+ ice_update_nvm_solution, {"Expected PCI Device ID", "Expected Module ID"}},
+ {ICE_AQC_HEALTH_STATUS_ERR_DCB_MIB,
+ "Supplied MIB file is invalid. DCB reverted to default configuration.",
+ "Disable FW-LLDP and check DCBx system configuration.",
+ {ice_port_number_label, "MIB ID"}},
+};
+
+static int ice_health_status_lookup_compare(const void *a, const void *b)
+{
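+ /* bsearch() comparator: entries are ordered by ascending status code */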
+ return ((struct ice_health_status *)a)->code - ((struct ice_health_status *)b)->code;
+}
+
+static const struct ice_health_status *ice_get_health_status(u16 code)
+{
+ struct ice_health_status key = { .code = code };
+
+ return bsearch(&key, ice_health_status_lookup, ARRAY_SIZE(ice_health_status_lookup),
+ sizeof(struct ice_health_status), ice_health_status_lookup_compare);
+}
+
+static void ice_describe_status_code(struct devlink_fmsg *fmsg,
+ struct ice_aqc_health_status_elem *hse)
+{
+ static const char *const aux_label[] = { "Aux Data 1", "Aux Data 2" };
+ const struct ice_health_status *health_code;
+ u32 internal_data[2];
+ u16 status_code;
+
+ status_code = le16_to_cpu(hse->health_status_code);
+
+ devlink_fmsg_put(fmsg, "Syndrome", status_code);
+ if (status_code) {
+ internal_data[0] = le32_to_cpu(hse->internal_data1);
+ internal_data[1] = le32_to_cpu(hse->internal_data2);
+
+ health_code = ice_get_health_status(status_code);
+ if (!health_code)
+ return;
+
+ devlink_fmsg_string_pair_put(fmsg, "Description", health_code->description);
+ if (health_code->solution)
+ devlink_fmsg_string_pair_put(fmsg, "Possible Solution",
+ health_code->solution);
+
+ for (size_t i = 0; i < ICE_HEALTH_STATUS_DATA_SIZE; i++) {
+ if (internal_data[i] != ICE_AQC_HEALTH_STATUS_UNDEFINED_DATA)
+ devlink_fmsg_u32_pair_put(fmsg,
+ health_code->data_label[i] ?
+ health_code->data_label[i] :
+ aux_label[i],
+ internal_data[i]);
+ }
+ }
+}
+
+static int
+ice_port_reporter_diagnose(struct devlink_health_reporter *reporter, struct devlink_fmsg *fmsg,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_pf *pf = devlink_health_reporter_priv(reporter);
+
+ ice_describe_status_code(fmsg, &pf->health_reporters.port_status);
+ return 0;
+}
+
+static int
+ice_port_reporter_dump(struct devlink_health_reporter *reporter, struct devlink_fmsg *fmsg,
+ void *priv_ctx, struct netlink_ext_ack __always_unused *extack)
+{
+ struct ice_pf *pf = devlink_health_reporter_priv(reporter);
+
+ ice_describe_status_code(fmsg, &pf->health_reporters.port_status);
+ return 0;
+}
+
+static int
+ice_fw_reporter_diagnose(struct devlink_health_reporter *reporter, struct devlink_fmsg *fmsg,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_pf *pf = devlink_health_reporter_priv(reporter);
+
+ ice_describe_status_code(fmsg, &pf->health_reporters.fw_status);
+ return 0;
+}
+
+static int
+ice_fw_reporter_dump(struct devlink_health_reporter *reporter, struct devlink_fmsg *fmsg,
+ void *priv_ctx, struct netlink_ext_ack *extack)
+{
+ struct ice_pf *pf = devlink_health_reporter_priv(reporter);
+
+ ice_describe_status_code(fmsg, &pf->health_reporters.fw_status);
+ return 0;
+}
+
+static void ice_config_health_events(struct ice_pf *pf, bool enable)
+{
+ u8 enable_bits = 0;
+ int ret;
+
+ if (enable)
+ enable_bits = ICE_AQC_HEALTH_STATUS_SET_PF_SPECIFIC_MASK |
+ ICE_AQC_HEALTH_STATUS_SET_GLOBAL_MASK;
+
+ ret = ice_aq_set_health_status_cfg(&pf->hw, enable_bits);
+ if (ret)
+ dev_err(ice_pf_to_dev(pf), "Failed to %s firmware health events, err %d aq_err %s\n",
+ str_enable_disable(enable), ret,
+ ice_aq_str(pf->hw.adminq.sq_last_status));
+}
+
+/**
+ * ice_process_health_status_event - Process the health status event from FW
+ * @pf: pointer to the PF structure
+ * @event: event structure containing the Health Status Event opcode
+ *
+ * Decode the Health Status Events and print the associated messages
+ */
+void ice_process_health_status_event(struct ice_pf *pf, struct ice_rq_event_info *event)
+{
+ const struct ice_aqc_health_status_elem *health_info;
+ u16 count;
+
+ health_info = (struct ice_aqc_health_status_elem *)event->msg_buf;
+ count = le16_to_cpu(event->desc.params.get_health_status.health_status_count);
+
+ if (count > (event->buf_len / sizeof(*health_info))) {
+ dev_err(ice_pf_to_dev(pf), "Received a health status event with invalid element count\n");
+ return;
+ }
+
+ for (size_t i = 0; i < count; i++) {
+ const struct ice_health_status *health_code;
+ u16 status_code;
+
+ status_code = le16_to_cpu(health_info->health_status_code);
+ health_code = ice_get_health_status(status_code);
+
+ if (health_code) {
+ switch (le16_to_cpu(health_info->event_source)) {
+ case ICE_AQC_HEALTH_STATUS_GLOBAL:
+ pf->health_reporters.fw_status = *health_info;
+ devlink_health_report(pf->health_reporters.fw,
+ "FW syndrome reported", NULL);
+ break;
+ case ICE_AQC_HEALTH_STATUS_PF:
+ case ICE_AQC_HEALTH_STATUS_PORT:
+ pf->health_reporters.port_status = *health_info;
+ devlink_health_report(pf->health_reporters.port,
+ "Port syndrome reported", NULL);
+ break;
+ default:
+ dev_err(ice_pf_to_dev(pf), "Health code with unknown source\n");
+ }
+ } else {
+ u32 data1, data2;
+ u16 source;
+
+ source = le16_to_cpu(health_info->event_source);
+ data1 = le32_to_cpu(health_info->internal_data1);
+ data2 = le32_to_cpu(health_info->internal_data2);
+ dev_dbg(ice_pf_to_dev(pf),
+ "Received internal health status code 0x%08x, source: 0x%08x, data1: 0x%08x, data2: 0x%08x",
+ status_code, source, data1, data2);
+ }
+ health_info++;
+ }
+}
+
+/**
+ * ice_devlink_health_report - boilerplate to call given @reporter
+ *
+ * @reporter: devlink health reporter to call, do nothing on NULL
+ * @msg: message to pass up, "event name" is fine
+ * @priv_ctx: typically some event struct
+ */
+static void ice_devlink_health_report(struct devlink_health_reporter *reporter,
+ const char *msg, void *priv_ctx)
+{
+ if (!reporter)
+ return;
+
+ /* We do not do auto recovery, so the return value of the function below
+ * will always be 0, thus we ignore it.
+ */
+ devlink_health_report(reporter, msg, priv_ctx);
+}
+
+struct ice_mdd_event {
+ enum ice_mdd_src src;
+ u16 vf_num;
+ u16 queue;
+ u8 pf_num;
+ u8 event;
+};
+
+static const char *ice_mdd_src_to_str(enum ice_mdd_src src)
+{
+ switch (src) {
+ case ICE_MDD_SRC_TX_PQM:
+ return "tx_pqm";
+ case ICE_MDD_SRC_TX_TCLAN:
+ return "tx_tclan";
+ case ICE_MDD_SRC_TX_TDPU:
+ return "tx_tdpu";
+ case ICE_MDD_SRC_RX:
+ return "rx";
+ default:
+ return "invalid";
+ }
+}
+
+static int
+ice_mdd_reporter_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *priv_ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_mdd_event *mdd_event = priv_ctx;
+ const char *src;
+
+ if (!mdd_event)
+ return 0;
+
+ src = ice_mdd_src_to_str(mdd_event->src);
+
+ devlink_fmsg_obj_nest_start(fmsg);
+ devlink_fmsg_put(fmsg, "src", src);
+ ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, mdd_event, pf_num);
+ ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, mdd_event, vf_num);
+ ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, mdd_event, event);
+ ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, mdd_event, queue);
+ devlink_fmsg_obj_nest_end(fmsg);
+
+ return 0;
+}
+
+/**
+ * ice_report_mdd_event - Report an MDD event through devlink health
+ * @pf: the PF device structure
+ * @src: the HW block that was the source of this MDD event
+ * @pf_num: the pf_num on which the MDD event occurred
+ * @vf_num: the vf_num on which the MDD event occurred
+ * @event: the event type of the MDD event
+ * @queue: the queue on which the MDD event occurred
+ *
+ * Report an MDD event that has occurred on this PF.
+ */
+void ice_report_mdd_event(struct ice_pf *pf, enum ice_mdd_src src, u8 pf_num,
+ u16 vf_num, u8 event, u16 queue)
+{
+ struct ice_mdd_event ev = {
+ .src = src,
+ .pf_num = pf_num,
+ .vf_num = vf_num,
+ .event = event,
+ .queue = queue,
+ };
+
+ ice_devlink_health_report(pf->health_reporters.mdd, "MDD event", &ev);
+}
+
+/**
+ * ice_fmsg_put_ptr - put hex value of pointer into fmsg
+ *
+ * @fmsg: devlink fmsg under construction
+ * @name: name to pass
+ * @ptr: pointer value to print as hex and put into fmsg
+ */
+static void ice_fmsg_put_ptr(struct devlink_fmsg *fmsg, const char *name,
+ void *ptr)
+{
+ char buf[sizeof(ptr) * 3];
+
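+ /* 3 chars per pointer byte leaves room for the formatted pointer and NUL */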
+ sprintf(buf, "%p", ptr);
+ devlink_fmsg_put(fmsg, name, buf);
+}
+
+struct ice_tx_hang_event {
+ u32 head;
+ u32 intr;
+ u16 vsi_num;
+ u16 queue;
+ u16 next_to_clean;
+ u16 next_to_use;
+ struct ice_tx_ring *tx_ring;
+};
+
+static int ice_tx_hang_reporter_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *priv_ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_tx_hang_event *event = priv_ctx;
+ struct sk_buff *skb;
+
+ if (!event)
+ return 0;
+
+ skb = event->tx_ring->tx_buf->skb;
+ devlink_fmsg_obj_nest_start(fmsg);
+ ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, event, head);
+ ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, event, intr);
+ ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, event, vsi_num);
+ ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, event, queue);
+ ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, event, next_to_clean);
+ ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, event, next_to_use);
+ devlink_fmsg_put(fmsg, "irq-mapping", event->tx_ring->q_vector->name);
+ ice_fmsg_put_ptr(fmsg, "desc-ptr", event->tx_ring->desc);
+ ice_fmsg_put_ptr(fmsg, "dma-ptr", (void *)(long)event->tx_ring->dma);
+ ice_fmsg_put_ptr(fmsg, "skb-ptr", skb);
+ devlink_fmsg_binary_pair_put(fmsg, "desc", event->tx_ring->desc,
+ event->tx_ring->count * sizeof(struct ice_tx_desc));
+ devlink_fmsg_dump_skb(fmsg, skb);
+ devlink_fmsg_obj_nest_end(fmsg);
+
+ return 0;
+}
+
+void ice_prep_tx_hang_report(struct ice_pf *pf, struct ice_tx_ring *tx_ring,
+ u16 vsi_num, u32 head, u32 intr)
+{
+ struct ice_health_tx_hang_buf *buf = &pf->health_reporters.tx_hang_buf;
+
+ buf->tx_ring = tx_ring;
+ buf->vsi_num = vsi_num;
+ buf->head = head;
+ buf->intr = intr;
+}
+
+void ice_report_tx_hang(struct ice_pf *pf)
+{
+ struct ice_health_tx_hang_buf *buf = &pf->health_reporters.tx_hang_buf;
+ struct ice_tx_ring *tx_ring = buf->tx_ring;
+
+ struct ice_tx_hang_event ev = {
+ .head = buf->head,
+ .intr = buf->intr,
+ .vsi_num = buf->vsi_num,
+ .queue = tx_ring->q_index,
+ .next_to_clean = tx_ring->next_to_clean,
+ .next_to_use = tx_ring->next_to_use,
+ .tx_ring = tx_ring,
+ };
+
+ ice_devlink_health_report(pf->health_reporters.tx_hang, "Tx hang", &ev);
+}
+
+static struct devlink_health_reporter *
+ice_init_devlink_rep(struct ice_pf *pf,
+ const struct devlink_health_reporter_ops *ops)
+{
+ struct devlink *devlink = priv_to_devlink(pf);
+ struct devlink_health_reporter *rep;
+ const u64 graceful_period = 0;
+
+ rep = devl_health_reporter_create(devlink, ops, graceful_period, pf);
+ if (IS_ERR(rep)) {
+ struct device *dev = ice_pf_to_dev(pf);
+
+ dev_err(dev, "failed to create devlink %s health report er",
+ ops->name);
+ return NULL;
+ }
+ return rep;
+}
+
+#define ICE_HEALTH_REPORTER_OPS_FIELD(_name, _field) \
+ ._field = ice_##_name##_reporter_##_field,
+
+#define ICE_DEFINE_HEALTH_REPORTER_OPS_1(_name, _field1) \
+ static const struct devlink_health_reporter_ops ice_##_name##_reporter_ops = { \
+ .name = #_name, \
+ ICE_HEALTH_REPORTER_OPS_FIELD(_name, _field1) \
+ }
+
+#define ICE_DEFINE_HEALTH_REPORTER_OPS_2(_name, _field1, _field2) \
+ static const struct devlink_health_reporter_ops ice_##_name##_reporter_ops = { \
+ .name = #_name, \
+ ICE_HEALTH_REPORTER_OPS_FIELD(_name, _field1) \
+ ICE_HEALTH_REPORTER_OPS_FIELD(_name, _field2) \
+ }
+
+ICE_DEFINE_HEALTH_REPORTER_OPS_1(mdd, dump);
+ICE_DEFINE_HEALTH_REPORTER_OPS_1(tx_hang, dump);
+ICE_DEFINE_HEALTH_REPORTER_OPS_2(fw, dump, diagnose);
+ICE_DEFINE_HEALTH_REPORTER_OPS_2(port, dump, diagnose);
+
+/**
+ * ice_health_init - allocate and init all ice devlink health reporters and
+ * accompanying data
+ *
+ * @pf: PF struct
+ */
+void ice_health_init(struct ice_pf *pf)
+{
+ struct ice_health *reps = &pf->health_reporters;
+
+ reps->mdd = ice_init_devlink_rep(pf, &ice_mdd_reporter_ops);
+ reps->tx_hang = ice_init_devlink_rep(pf, &ice_tx_hang_reporter_ops);
+
+ if (ice_is_fw_health_report_supported(&pf->hw)) {
+ reps->fw = ice_init_devlink_rep(pf, &ice_fw_reporter_ops);
+ reps->port = ice_init_devlink_rep(pf, &ice_port_reporter_ops);
+ ice_config_health_events(pf, true);
+ }
+}
+
+/**
+ * ice_deinit_devl_reporter - destroy given devlink health reporter
+ * @reporter: reporter to destroy
+ */
+static void ice_deinit_devl_reporter(struct devlink_health_reporter *reporter)
+{
+ if (reporter)
+ devl_health_reporter_destroy(reporter);
+}
+
+/**
+ * ice_health_deinit - deallocate all ice devlink health reporters and
+ * accompanying data
+ *
+ * @pf: PF struct
+ */
+void ice_health_deinit(struct ice_pf *pf)
+{
+ ice_deinit_devl_reporter(pf->health_reporters.mdd);
+ ice_deinit_devl_reporter(pf->health_reporters.tx_hang);
+ if (ice_is_fw_health_report_supported(&pf->hw)) {
+ ice_deinit_devl_reporter(pf->health_reporters.fw);
+ ice_deinit_devl_reporter(pf->health_reporters.port);
+ ice_config_health_events(pf, false);
+ }
+}
+
+static
+void ice_health_assign_healthy_state(struct devlink_health_reporter *reporter)
+{
+ if (reporter)
+ devlink_health_reporter_state_update(reporter,
+ DEVLINK_HEALTH_REPORTER_STATE_HEALTHY);
+}
+
+/**
+ * ice_health_clear - clear devlink health issues after a reset
+ * @pf: the PF device structure
+ *
+ * Mark the PF in healthy state again after a reset has completed.
+ */
+void ice_health_clear(struct ice_pf *pf)
+{
+ ice_health_assign_healthy_state(pf->health_reporters.mdd);
+ ice_health_assign_healthy_state(pf->health_reporters.tx_hang);
+}
diff --git a/drivers/net/ethernet/intel/ice/devlink/health.h b/drivers/net/ethernet/intel/ice/devlink/health.h
new file mode 100644
index 000000000000..5edfc4d2adce
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/devlink/health.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2024, Intel Corporation. */
+
+#ifndef _HEALTH_H_
+#define _HEALTH_H_
+
+#include <linux/types.h>
+
+/**
+ * DOC: health.h
+ *
+ * This header file contains everything needed for the devlink health
+ * mechanism of the ice driver.
+ */
+
+struct ice_aqc_health_status_elem;
+struct ice_pf;
+struct ice_tx_ring;
+struct ice_rq_event_info;
+
+enum ice_mdd_src {
+ ICE_MDD_SRC_TX_PQM,
+ ICE_MDD_SRC_TX_TCLAN,
+ ICE_MDD_SRC_TX_TDPU,
+ ICE_MDD_SRC_RX,
+};
+
+/**
+ * struct ice_health - stores ice devlink health reporters and accompanying data
+ * @fw: devlink health reporter for FW Health Status events
+ * @mdd: devlink health reporter for MDD detection event
+ * @port: devlink health reporter for Port Health Status events
+ * @tx_hang: devlink health reporter for tx_hang event
+ * @tx_hang_buf: pre-allocated place to put info for Tx hang reporter from
+ * non-sleeping context
+ * @tx_ring: ring that the hang occurred on
+ * @head: descriptor head
+ * @intr: interrupt register value
+ * @vsi_num: VSI owning the queue that the hang occurred on
+ * @fw_status: buffer for last received FW Status event
+ * @port_status: buffer for last received Port Status event
+ */
+struct ice_health {
+ struct devlink_health_reporter *fw;
+ struct devlink_health_reporter *mdd;
+ struct devlink_health_reporter *port;
+ struct devlink_health_reporter *tx_hang;
+ struct_group_tagged(ice_health_tx_hang_buf, tx_hang_buf,
+ struct ice_tx_ring *tx_ring;
+ u32 head;
+ u32 intr;
+ u16 vsi_num;
+ );
+ struct ice_aqc_health_status_elem fw_status;
+ struct ice_aqc_health_status_elem port_status;
+};
+
+void ice_process_health_status_event(struct ice_pf *pf,
+ struct ice_rq_event_info *event);
+
+void ice_health_init(struct ice_pf *pf);
+void ice_health_deinit(struct ice_pf *pf);
+void ice_health_clear(struct ice_pf *pf);
+
+void ice_prep_tx_hang_report(struct ice_pf *pf, struct ice_tx_ring *tx_ring,
+ u16 vsi_num, u32 head, u32 intr);
+void ice_report_mdd_event(struct ice_pf *pf, enum ice_mdd_src src, u8 pf_num,
+ u16 vf_num, u8 event, u16 queue);
+void ice_report_tx_hang(struct ice_pf *pf);
+
+#endif /* _HEALTH_H_ */
diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink_port.c b/drivers/net/ethernet/intel/ice/devlink/port.c
index c6779d9dffff..767419a67fef 100644
--- a/drivers/net/ethernet/intel/ice/devlink/devlink_port.c
+++ b/drivers/net/ethernet/intel/ice/devlink/port.c
@@ -5,7 +5,7 @@
#include "ice.h"
#include "devlink.h"
-#include "devlink_port.h"
+#include "port.h"
#include "ice_lib.h"
#include "ice_fltr.h"
diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink_port.h b/drivers/net/ethernet/intel/ice/devlink/port.h
index d60efc340945..d60efc340945 100644
--- a/drivers/net/ethernet/intel/ice/devlink/devlink_port.h
+++ b/drivers/net/ethernet/intel/ice/devlink/port.h
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 2f5d6f974185..ddd0ad68185b 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -78,6 +78,7 @@
#include "ice_irq.h"
#include "ice_dpll.h"
#include "ice_adapter.h"
+#include "devlink/health.h"
#define ICE_BAR0 0
#define ICE_REQ_DESC_MULTIPLE 32
@@ -96,9 +97,6 @@
#define ICE_MIN_LAN_OICR_MSIX 1
#define ICE_MIN_MSIX (ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_LAN_OICR_MSIX)
#define ICE_FDIR_MSIX 2
-#define ICE_RDMA_NUM_AEQ_MSIX 4
-#define ICE_MIN_RDMA_MSIX 2
-#define ICE_ESWITCH_MSIX 1
#define ICE_NO_VSI 0xffff
#define ICE_VSI_MAP_CONTIG 0
#define ICE_VSI_MAP_SCATTER 1
@@ -195,14 +193,13 @@
#define ice_pf_to_dev(pf) (&((pf)->pdev->dev))
-#define ice_pf_src_tmr_owned(pf) ((pf)->hw.func_caps.ts_func_info.src_tmr_owned)
-
enum ice_feature {
ICE_F_DSCP,
ICE_F_PHY_RCLK,
ICE_F_SMA_CTRL,
ICE_F_CGU,
ICE_F_GNSS,
+ ICE_F_GCS,
ICE_F_ROCE_LAG,
ICE_F_SRIOV_LAG,
ICE_F_MBX_LIMIT,
@@ -402,7 +399,6 @@ struct ice_vsi {
u16 req_rxq; /* User requested Rx queues */
u16 num_rx_desc;
u16 num_tx_desc;
- u16 qset_handle[ICE_MAX_TRAFFIC_CLASS];
struct ice_tc_cfg tc_cfg;
struct bpf_prog *xdp_prog;
struct ice_tx_ring **xdp_rings; /* XDP ring array */
@@ -477,9 +473,6 @@ struct ice_q_vector {
struct ice_ring_container rx;
struct ice_ring_container tx;
- cpumask_t affinity_mask;
- struct irq_affinity_notify affinity_notify;
-
struct ice_channel *ch;
char name[ICE_INT_NAME_STR_LEN];
@@ -519,6 +512,7 @@ enum ice_pf_flags {
ICE_FLAG_MTU_CHANGED,
ICE_FLAG_GNSS, /* GNSS successfully initialized */
ICE_FLAG_DPLL, /* SyncE/PTP dplls initialized */
+ ICE_FLAG_LLDP_AQ_FLTR,
ICE_PF_FLAGS_NBITS /* must be last */
};
@@ -541,6 +535,14 @@ struct ice_agg_node {
u8 valid;
};
+struct ice_pf_msix {
+ u32 cur;
+ u32 min;
+ u32 max;
+ u32 total;
+ u32 rest;
+};
+
struct ice_pf {
struct pci_dev *pdev;
struct ice_adapter *adapter;
@@ -553,15 +555,8 @@ struct ice_pf {
struct devlink_port devlink_port;
/* OS reserved IRQ details */
- struct msix_entry *msix_entries;
struct ice_irq_tracker irq_tracker;
- /* First MSIX vector used by SR-IOV VFs. Calculated by subtracting the
- * number of MSIX vectors needed for all SR-IOV VFs from the number of
- * MSIX vectors allowed on this PF.
- */
- u16 sriov_base_vector;
- unsigned long *sriov_irq_bm; /* bitmap to track irq usage */
- u16 sriov_irq_size; /* size of the irq_bm bitmap */
+ struct ice_virt_irq_tracker virt_irq_tracker;
u16 ctrl_vsi_idx; /* control VSI index in pf->vsi array */
@@ -594,7 +589,6 @@ struct ice_pf {
struct gnss_serial *gnss_serial;
struct gnss_device *gnss_dev;
u16 num_rdma_msix; /* Total MSIX vectors for RDMA driver */
- u16 rdma_base_vector;
/* spinlock to protect the AdminQ wait list */
spinlock_t aq_wait_lock;
@@ -611,7 +605,7 @@ struct ice_pf {
struct msi_map ll_ts_irq; /* LL_TS interrupt MSIX vector */
u16 max_pf_txqs; /* Total Tx queues PF wide */
u16 max_pf_rxqs; /* Total Rx queues PF wide */
- u16 num_lan_msix; /* Total MSIX vectors for base driver */
+ struct ice_pf_msix msix;
u16 num_lan_tx; /* num LAN Tx queues setup */
u16 num_lan_rx; /* num LAN Rx queues setup */
u16 next_vsi; /* Next free slot in pf->vsi[] - 0-based! */
@@ -627,14 +621,12 @@ struct ice_pf {
struct ice_hw_port_stats stats_prev;
struct ice_hw hw;
u8 stat_prev_loaded:1; /* has previous stats been loaded */
- u8 rdma_mode;
u16 dcbx_cap;
u32 tx_timeout_count;
unsigned long tx_timeout_last_recovery;
u32 tx_timeout_recovery_level;
char int_name[ICE_INT_NAME_STR_LEN];
char int_name_ll_ts[ICE_INT_NAME_STR_LEN];
- struct auxiliary_device *adev;
int aux_idx;
u32 sw_int_count;
/* count of tc_flower filters specific to channel (aka where filter
@@ -665,6 +657,8 @@ struct ice_pf {
struct ice_agg_node vf_agg_node[ICE_MAX_VF_AGG_NODES];
struct ice_dplls dplls;
struct device *hwmon_dev;
+ struct ice_health health_reporters;
+ struct iidc_rdma_core_dev_info *cdev_info;
u8 num_quanta_prof_used;
};
@@ -1045,10 +1039,63 @@ static inline void ice_clear_rdma_cap(struct ice_pf *pf)
clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
}
-static inline enum ice_phy_model ice_get_phy_model(const struct ice_hw *hw)
+extern const struct xdp_metadata_ops ice_xdp_md_ops;
+
+/**
+ * ice_is_dual - Check if given config is multi-NAC
+ * @hw: pointer to HW structure
+ *
+ * Return: true if the device is running in a multi-NAC (Network
+ * Acceleration Complex) configuration, false otherwise
+ * (always false for non-E825 devices).
+ */
+static inline bool ice_is_dual(struct ice_hw *hw)
{
- return hw->ptp.phy_model;
+ return hw->mac_type == ICE_MAC_GENERIC_3K_E825 &&
+ (hw->dev_caps.nac_topo.mode & ICE_NAC_TOPO_DUAL_M);
}
-extern const struct xdp_metadata_ops ice_xdp_md_ops;
+/**
+ * ice_is_primary - Check if given device belongs to the primary complex
+ * @hw: pointer to HW structure
+ *
+ * Check if given PF/HW is running on primary complex in multi-NAC
+ * configuration.
+ *
+ * Return: true if the device belongs to the primary complex, false otherwise
+ * (always true for non-E825 devices).
+ */
+static inline bool ice_is_primary(struct ice_hw *hw)
+{
+ return hw->mac_type != ICE_MAC_GENERIC_3K_E825 ||
+ !ice_is_dual(hw) ||
+ (hw->dev_caps.nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M);
+}
+
+/**
+ * ice_pf_src_tmr_owned - Check if a primary timer is owned by PF
+ * @pf: pointer to PF structure
+ *
+ * Return: true if PF owns primary timer, false otherwise.
+ */
+static inline bool ice_pf_src_tmr_owned(struct ice_pf *pf)
+{
+ return pf->hw.func_caps.ts_func_info.src_tmr_owned &&
+ ice_is_primary(&pf->hw);
+}
+
+/**
+ * ice_get_primary_hw - Get pointer to primary ice_hw structure
+ * @pf: pointer to PF structure
+ *
+ * Return: A pointer to ice_hw structure with access to timesync
+ * register space.
+ */
+static inline struct ice_hw *ice_get_primary_hw(struct ice_pf *pf)
+{
+ if (!pf->adapter->ctrl_pf)
+ return &pf->hw;
+ else
+ return &pf->adapter->ctrl_pf->hw;
+}
#endif /* _ICE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_adapter.c b/drivers/net/ethernet/intel/ice/ice_adapter.c
index 01a08cfd0090..66e070095d1b 100644
--- a/drivers/net/ethernet/intel/ice/ice_adapter.c
+++ b/drivers/net/ethernet/intel/ice/ice_adapter.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: Copyright Red Hat
-#include <linux/bitfield.h>
#include <linux/cleanup.h>
#include <linux/mutex.h>
#include <linux/pci.h>
@@ -14,32 +13,16 @@
static DEFINE_XARRAY(ice_adapters);
static DEFINE_MUTEX(ice_adapters_mutex);
-/* PCI bus number is 8 bits. Slot is 5 bits. Domain can have the rest. */
-#define INDEX_FIELD_DOMAIN GENMASK(BITS_PER_LONG - 1, 13)
-#define INDEX_FIELD_DEV GENMASK(31, 16)
-#define INDEX_FIELD_BUS GENMASK(12, 5)
-#define INDEX_FIELD_SLOT GENMASK(4, 0)
-
-static unsigned long ice_adapter_index(const struct pci_dev *pdev)
+static unsigned long ice_adapter_index(u64 dsn)
{
- unsigned int domain = pci_domain_nr(pdev->bus);
-
- WARN_ON(domain > FIELD_MAX(INDEX_FIELD_DOMAIN));
-
- switch (pdev->device) {
- case ICE_DEV_ID_E825C_BACKPLANE:
- case ICE_DEV_ID_E825C_QSFP:
- case ICE_DEV_ID_E825C_SFP:
- case ICE_DEV_ID_E825C_SGMII:
- return FIELD_PREP(INDEX_FIELD_DEV, pdev->device);
- default:
- return FIELD_PREP(INDEX_FIELD_DOMAIN, domain) |
- FIELD_PREP(INDEX_FIELD_BUS, pdev->bus->number) |
- FIELD_PREP(INDEX_FIELD_SLOT, PCI_SLOT(pdev->devfn));
- }
+#if BITS_PER_LONG == 64
+ return dsn;
+#else
+ return (u32)dsn ^ (u32)(dsn >> 32);
+#endif
}
-static struct ice_adapter *ice_adapter_new(void)
+static struct ice_adapter *ice_adapter_new(u64 dsn)
{
struct ice_adapter *adapter;
@@ -47,6 +30,7 @@ static struct ice_adapter *ice_adapter_new(void)
if (!adapter)
return NULL;
+ adapter->device_serial_number = dsn;
spin_lock_init(&adapter->ptp_gltsyn_time_lock);
refcount_set(&adapter->refcount, 1);
@@ -77,23 +61,26 @@ static void ice_adapter_free(struct ice_adapter *adapter)
* Return: Pointer to ice_adapter on success.
* ERR_PTR() on error. -ENOMEM is the only possible error.
*/
-struct ice_adapter *ice_adapter_get(const struct pci_dev *pdev)
+struct ice_adapter *ice_adapter_get(struct pci_dev *pdev)
{
- unsigned long index = ice_adapter_index(pdev);
+ u64 dsn = pci_get_dsn(pdev);
struct ice_adapter *adapter;
+ unsigned long index;
int err;
+ index = ice_adapter_index(dsn);
scoped_guard(mutex, &ice_adapters_mutex) {
err = xa_insert(&ice_adapters, index, NULL, GFP_KERNEL);
if (err == -EBUSY) {
adapter = xa_load(&ice_adapters, index);
refcount_inc(&adapter->refcount);
+ WARN_ON_ONCE(adapter->device_serial_number != dsn);
return adapter;
}
if (err)
return ERR_PTR(err);
- adapter = ice_adapter_new();
+ adapter = ice_adapter_new(dsn);
if (!adapter)
return ERR_PTR(-ENOMEM);
xa_store(&ice_adapters, index, adapter, GFP_KERNEL);
@@ -110,11 +97,13 @@ struct ice_adapter *ice_adapter_get(const struct pci_dev *pdev)
*
* Context: Process, may sleep.
*/
-void ice_adapter_put(const struct pci_dev *pdev)
+void ice_adapter_put(struct pci_dev *pdev)
{
- unsigned long index = ice_adapter_index(pdev);
+ u64 dsn = pci_get_dsn(pdev);
struct ice_adapter *adapter;
+ unsigned long index;
+ index = ice_adapter_index(dsn);
scoped_guard(mutex, &ice_adapters_mutex) {
adapter = xa_load(&ice_adapters, index);
if (WARN_ON(!adapter))
diff --git a/drivers/net/ethernet/intel/ice/ice_adapter.h b/drivers/net/ethernet/intel/ice/ice_adapter.h
index e233225848b3..ac15c0d2bc1a 100644
--- a/drivers/net/ethernet/intel/ice/ice_adapter.h
+++ b/drivers/net/ethernet/intel/ice/ice_adapter.h
@@ -32,6 +32,7 @@ struct ice_port_list {
* @refcount: Reference count. struct ice_pf objects hold the references.
* @ctrl_pf: Control PF of the adapter
* @ports: Ports list
+ * @device_serial_number: DSN cached for collision detection on 32-bit systems
*/
struct ice_adapter {
refcount_t refcount;
@@ -40,9 +41,10 @@ struct ice_adapter {
struct ice_pf *ctrl_pf;
struct ice_port_list ports;
+ u64 device_serial_number;
};
-struct ice_adapter *ice_adapter_get(const struct pci_dev *pdev);
-void ice_adapter_put(const struct pci_dev *pdev);
+struct ice_adapter *ice_adapter_get(struct pci_dev *pdev);
+void ice_adapter_put(struct pci_dev *pdev);
#endif /* _ICE_ADAPTER_H */
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 1489a8ceec51..bdee499f991a 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -12,6 +12,13 @@
#define ICE_AQC_TOPO_MAX_LEVEL_NUM 0x9
#define ICE_AQ_SET_MAC_FRAME_SIZE_MAX 9728
+#define ICE_RXQ_CTX_SIZE_DWORDS 8
+#define ICE_RXQ_CTX_SZ (ICE_RXQ_CTX_SIZE_DWORDS * sizeof(u32))
+#define ICE_TXQ_CTX_SZ 22
+
+typedef struct __packed { u8 buf[ICE_RXQ_CTX_SZ]; } ice_rxq_ctx_buf_t;
+typedef struct __packed { u8 buf[ICE_TXQ_CTX_SZ]; } ice_txq_ctx_buf_t;
+
struct ice_aqc_generic {
__le32 param0;
__le32 param1;
@@ -1491,7 +1498,6 @@ struct ice_aqc_dnl_equa_param {
#define ICE_AQC_RX_EQU_POST1 (0x12 << ICE_AQC_RX_EQU_SHIFT)
#define ICE_AQC_RX_EQU_BFLF (0x13 << ICE_AQC_RX_EQU_SHIFT)
#define ICE_AQC_RX_EQU_BFHF (0x14 << ICE_AQC_RX_EQU_SHIFT)
-#define ICE_AQC_RX_EQU_DRATE (0x15 << ICE_AQC_RX_EQU_SHIFT)
#define ICE_AQC_RX_EQU_CTLE_GAINHF (0x20 << ICE_AQC_RX_EQU_SHIFT)
#define ICE_AQC_RX_EQU_CTLE_GAINLF (0x21 << ICE_AQC_RX_EQU_SHIFT)
#define ICE_AQC_RX_EQU_CTLE_GAINDC (0x22 << ICE_AQC_RX_EQU_SHIFT)
@@ -1665,6 +1671,7 @@ struct ice_aqc_get_port_options_elem {
#define ICE_AQC_PORT_OPT_MAX_LANE_25G 5
#define ICE_AQC_PORT_OPT_MAX_LANE_50G 6
#define ICE_AQC_PORT_OPT_MAX_LANE_100G 7
+#define ICE_AQC_PORT_OPT_MAX_LANE_200G 8
u8 global_scid[2];
u8 phy_scid[2];
@@ -1807,6 +1814,7 @@ struct ice_aqc_nvm_pass_comp_tbl {
#define ICE_AQ_NVM_PASS_COMP_CAN_BE_UPDATED 0x0
#define ICE_AQ_NVM_PASS_COMP_CAN_MAY_BE_UPDATEABLE 0x1
#define ICE_AQ_NVM_PASS_COMP_CAN_NOT_BE_UPDATED 0x2
+#define ICE_AQ_NVM_PASS_COMP_PARTIAL_CHECK 0x3
u8 component_response_code; /* Response only */
#define ICE_AQ_NVM_PASS_COMP_CAN_BE_UPDATED_CODE 0x0
#define ICE_AQ_NVM_PASS_COMP_STAMP_IDENTICAL_CODE 0x1
@@ -2084,10 +2092,10 @@ struct ice_aqc_add_txqs_perq {
__le16 txq_id;
u8 rsvd[2];
__le32 q_teid;
- u8 txq_ctx[22];
+ ice_txq_ctx_buf_t txq_ctx;
u8 rsvd2[2];
struct ice_aqc_txsched_elem info;
-};
+} __packed;
/* The format of the command buffer for Add Tx LAN Queues (0x0C30)
* is an array of the following structs. Please note that the length of
@@ -2264,6 +2272,8 @@ struct ice_aqc_get_pkg_info_resp {
struct ice_aqc_get_pkg_info pkg_info[];
};
+#define ICE_AQC_GET_CGU_MAX_PHASE_ADJ GENMASK(30, 0)
+
/* Get CGU abilities command response data structure (indirect 0x0C61) */
struct ice_aqc_get_cgu_abilities {
u8 num_inputs;
@@ -2508,6 +2518,87 @@ enum ice_aqc_fw_logging_mod {
ICE_AQC_FW_LOG_ID_MAX,
};
+enum ice_aqc_health_status_mask {
+ ICE_AQC_HEALTH_STATUS_SET_PF_SPECIFIC_MASK = BIT(0),
+ ICE_AQC_HEALTH_STATUS_SET_ALL_PF_MASK = BIT(1),
+ ICE_AQC_HEALTH_STATUS_SET_GLOBAL_MASK = BIT(2),
+};
+
+/* Set Health Status (direct 0xFF20) */
+struct ice_aqc_set_health_status_cfg {
+ u8 event_source;
+ u8 reserved[15];
+};
+
+enum ice_aqc_health_status {
+ ICE_AQC_HEALTH_STATUS_ERR_UNKNOWN_MOD_STRICT = 0x101,
+ ICE_AQC_HEALTH_STATUS_ERR_MOD_TYPE = 0x102,
+ ICE_AQC_HEALTH_STATUS_ERR_MOD_QUAL = 0x103,
+ ICE_AQC_HEALTH_STATUS_ERR_MOD_COMM = 0x104,
+ ICE_AQC_HEALTH_STATUS_ERR_MOD_CONFLICT = 0x105,
+ ICE_AQC_HEALTH_STATUS_ERR_MOD_NOT_PRESENT = 0x106,
+ ICE_AQC_HEALTH_STATUS_INFO_MOD_UNDERUTILIZED = 0x107,
+ ICE_AQC_HEALTH_STATUS_ERR_UNKNOWN_MOD_LENIENT = 0x108,
+ ICE_AQC_HEALTH_STATUS_ERR_MOD_DIAGNOSTIC_FEATURE = 0x109,
+ ICE_AQC_HEALTH_STATUS_ERR_INVALID_LINK_CFG = 0x10B,
+ ICE_AQC_HEALTH_STATUS_ERR_PORT_ACCESS = 0x10C,
+ ICE_AQC_HEALTH_STATUS_ERR_PORT_UNREACHABLE = 0x10D,
+ ICE_AQC_HEALTH_STATUS_INFO_PORT_SPEED_MOD_LIMITED = 0x10F,
+ ICE_AQC_HEALTH_STATUS_ERR_PARALLEL_FAULT = 0x110,
+ ICE_AQC_HEALTH_STATUS_INFO_PORT_SPEED_PHY_LIMITED = 0x111,
+ ICE_AQC_HEALTH_STATUS_ERR_NETLIST_TOPO = 0x112,
+ ICE_AQC_HEALTH_STATUS_ERR_NETLIST = 0x113,
+ ICE_AQC_HEALTH_STATUS_ERR_TOPO_CONFLICT = 0x114,
+ ICE_AQC_HEALTH_STATUS_ERR_LINK_HW_ACCESS = 0x115,
+ ICE_AQC_HEALTH_STATUS_ERR_LINK_RUNTIME = 0x116,
+ ICE_AQC_HEALTH_STATUS_ERR_DNL_INIT = 0x117,
+ ICE_AQC_HEALTH_STATUS_ERR_PHY_NVM_PROG = 0x120,
+ ICE_AQC_HEALTH_STATUS_ERR_PHY_FW_LOAD = 0x121,
+ ICE_AQC_HEALTH_STATUS_INFO_RECOVERY = 0x500,
+ ICE_AQC_HEALTH_STATUS_ERR_FLASH_ACCESS = 0x501,
+ ICE_AQC_HEALTH_STATUS_ERR_NVM_AUTH = 0x502,
+ ICE_AQC_HEALTH_STATUS_ERR_OROM_AUTH = 0x503,
+ ICE_AQC_HEALTH_STATUS_ERR_DDP_AUTH = 0x504,
+ ICE_AQC_HEALTH_STATUS_ERR_NVM_COMPAT = 0x505,
+ ICE_AQC_HEALTH_STATUS_ERR_OROM_COMPAT = 0x506,
+ ICE_AQC_HEALTH_STATUS_ERR_NVM_SEC_VIOLATION = 0x507,
+ ICE_AQC_HEALTH_STATUS_ERR_OROM_SEC_VIOLATION = 0x508,
+ ICE_AQC_HEALTH_STATUS_ERR_DCB_MIB = 0x509,
+ ICE_AQC_HEALTH_STATUS_ERR_MNG_TIMEOUT = 0x50A,
+ ICE_AQC_HEALTH_STATUS_ERR_BMC_RESET = 0x50B,
+ ICE_AQC_HEALTH_STATUS_ERR_LAST_MNG_FAIL = 0x50C,
+ ICE_AQC_HEALTH_STATUS_ERR_RESOURCE_ALLOC_FAIL = 0x50D,
+ ICE_AQC_HEALTH_STATUS_ERR_FW_LOOP = 0x1000,
+ ICE_AQC_HEALTH_STATUS_ERR_FW_PFR_FAIL = 0x1001,
+ ICE_AQC_HEALTH_STATUS_ERR_LAST_FAIL_AQ = 0x1002,
+};
+
+/* Get Health Status (indirect 0xFF22) */
+struct ice_aqc_get_health_status {
+ __le16 health_status_count;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+enum ice_aqc_health_status_scope {
+ ICE_AQC_HEALTH_STATUS_PF = 0x1,
+ ICE_AQC_HEALTH_STATUS_PORT = 0x2,
+ ICE_AQC_HEALTH_STATUS_GLOBAL = 0x3,
+};
+
+#define ICE_AQC_HEALTH_STATUS_UNDEFINED_DATA 0xDEADBEEF
+
+/* Get Health Status event buffer entry (0xFF22),
+ * repeated per reported health status.
+ */
+struct ice_aqc_health_status_elem {
+ __le16 health_status_code;
+ __le16 event_source;
+ __le32 internal_data1;
+ __le32 internal_data2;
+};
+
/* Set FW Logging configuration (indirect 0xFF30)
* Register for FW Logging (indirect 0xFF31)
* Query FW Logging (indirect 0xFF32)
@@ -2648,6 +2739,8 @@ struct ice_aq_desc {
struct ice_aqc_get_link_status get_link_status;
struct ice_aqc_event_lan_overflow lan_overflow;
struct ice_aqc_get_link_topo get_link_topo;
+ struct ice_aqc_set_health_status_cfg set_health_status_cfg;
+ struct ice_aqc_get_health_status get_health_status;
struct ice_aqc_dnl_call_command dnl_call;
struct ice_aqc_i2c read_write_i2c;
struct ice_aqc_read_i2c_resp read_i2c_resp;
@@ -2850,6 +2943,10 @@ enum ice_adminq_opc {
/* Standalone Commands/Events */
ice_aqc_opc_event_lan_overflow = 0x1001,
+ /* System Diagnostic commands */
+ ice_aqc_opc_set_health_status_cfg = 0xFF20,
+ ice_aqc_opc_get_health_status = 0xFF22,
+
/* FW Logging Commands */
ice_aqc_opc_fw_logs_config = 0xFF30,
ice_aqc_opc_fw_logs_register = 0xFF31,
diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.c b/drivers/net/ethernet/intel/ice/ice_arfs.c
index 7cee365cc7d1..1f7834c03550 100644
--- a/drivers/net/ethernet/intel/ice/ice_arfs.c
+++ b/drivers/net/ethernet/intel/ice/ice_arfs.c
@@ -378,6 +378,50 @@ ice_arfs_is_perfect_flow_set(struct ice_hw *hw, __be16 l3_proto, u8 l4_proto)
}
/**
+ * ice_arfs_cmp - Check if aRFS filter matches this flow.
+ * @fltr_info: filter info of the saved ARFS entry.
+ * @fk: flow dissector keys.
+ * @n_proto: One of htons(ETH_P_IP) or htons(ETH_P_IPV6).
+ * @ip_proto: One of IPPROTO_TCP or IPPROTO_UDP.
+ *
+ * Since this function assumes limited values for n_proto and ip_proto, it
+ * is meant to be called only from ice_rx_flow_steer().
+ *
+ * Return:
+ * * true - fltr_info refers to the same flow as fk.
+ * * false - fltr_info and fk refer to different flows.
+ */
+static bool
+ice_arfs_cmp(const struct ice_fdir_fltr *fltr_info, const struct flow_keys *fk,
+ __be16 n_proto, u8 ip_proto)
+{
+ /* Determine if the filter is for IPv4 or IPv6 based on flow_type,
+ * which is one of ICE_FLTR_PTYPE_NONF_IPV{4,6}_{TCP,UDP}.
+ */
+ bool is_v4 = fltr_info->flow_type == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
+ fltr_info->flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP;
+
+ /* The following checks compare the quickest and most discriminating
+ * fields first so that mismatches fail early.
+ */
+ if (is_v4)
+ return n_proto == htons(ETH_P_IP) &&
+ fltr_info->ip.v4.src_port == fk->ports.src &&
+ fltr_info->ip.v4.dst_port == fk->ports.dst &&
+ fltr_info->ip.v4.src_ip == fk->addrs.v4addrs.src &&
+ fltr_info->ip.v4.dst_ip == fk->addrs.v4addrs.dst &&
+ fltr_info->ip.v4.proto == ip_proto;
+
+ return fltr_info->ip.v6.src_port == fk->ports.src &&
+ fltr_info->ip.v6.dst_port == fk->ports.dst &&
+ fltr_info->ip.v6.proto == ip_proto &&
+ !memcmp(&fltr_info->ip.v6.src_ip, &fk->addrs.v6addrs.src,
+ sizeof(struct in6_addr)) &&
+ !memcmp(&fltr_info->ip.v6.dst_ip, &fk->addrs.v6addrs.dst,
+ sizeof(struct in6_addr));
+}
+
+/**
* ice_rx_flow_steer - steer the Rx flow to where application is being run
* @netdev: ptr to the netdev being adjusted
* @skb: buffer with required header information
@@ -448,6 +492,10 @@ ice_rx_flow_steer(struct net_device *netdev, const struct sk_buff *skb,
continue;
fltr_info = &arfs_entry->fltr_info;
+
+ if (!ice_arfs_cmp(fltr_info, &fk, n_proto, ip_proto))
+ continue;
+
ret = fltr_info->fltr_id;
if (fltr_info->q_index == rxq_idx ||
@@ -511,7 +559,7 @@ void ice_init_arfs(struct ice_vsi *vsi)
struct hlist_head *arfs_fltr_list;
unsigned int i;
- if (!vsi || vsi->type != ICE_VSI_PF)
+ if (!vsi || vsi->type != ICE_VSI_PF || ice_is_arfs_active(vsi))
return;
arfs_fltr_list = kcalloc(ICE_MAX_ARFS_LIST, sizeof(*arfs_fltr_list),
@@ -571,25 +619,6 @@ void ice_clear_arfs(struct ice_vsi *vsi)
}
/**
- * ice_free_cpu_rx_rmap - free setup CPU reverse map
- * @vsi: the VSI to be forwarded to
- */
-void ice_free_cpu_rx_rmap(struct ice_vsi *vsi)
-{
- struct net_device *netdev;
-
- if (!vsi || vsi->type != ICE_VSI_PF)
- return;
-
- netdev = vsi->netdev;
- if (!netdev || !netdev->rx_cpu_rmap)
- return;
-
- free_irq_cpu_rmap(netdev->rx_cpu_rmap);
- netdev->rx_cpu_rmap = NULL;
-}
-
-/**
* ice_set_cpu_rx_rmap - setup CPU reverse map for each queue
* @vsi: the VSI to be forwarded to
*/
@@ -597,7 +626,6 @@ int ice_set_cpu_rx_rmap(struct ice_vsi *vsi)
{
struct net_device *netdev;
struct ice_pf *pf;
- int i;
if (!vsi || vsi->type != ICE_VSI_PF)
return 0;
@@ -610,18 +638,7 @@ int ice_set_cpu_rx_rmap(struct ice_vsi *vsi)
netdev_dbg(netdev, "Setup CPU RMAP: vsi type 0x%x, ifname %s, q_vectors %d\n",
vsi->type, netdev->name, vsi->num_q_vectors);
- netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(vsi->num_q_vectors);
- if (unlikely(!netdev->rx_cpu_rmap))
- return -EINVAL;
-
- ice_for_each_q_vector(vsi, i)
- if (irq_cpu_rmap_add(netdev->rx_cpu_rmap,
- vsi->q_vectors[i]->irq.virq)) {
- ice_free_cpu_rx_rmap(vsi);
- return -EINVAL;
- }
-
- return 0;
+ return netif_enable_cpu_rmap(netdev, vsi->num_q_vectors);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.h b/drivers/net/ethernet/intel/ice/ice_arfs.h
index 9669ad9bf7b5..9706293128c3 100644
--- a/drivers/net/ethernet/intel/ice/ice_arfs.h
+++ b/drivers/net/ethernet/intel/ice/ice_arfs.h
@@ -45,7 +45,6 @@ int
ice_rx_flow_steer(struct net_device *netdev, const struct sk_buff *skb,
u16 rxq_idx, u32 flow_id);
void ice_clear_arfs(struct ice_vsi *vsi);
-void ice_free_cpu_rx_rmap(struct ice_vsi *vsi);
void ice_init_arfs(struct ice_vsi *vsi);
void ice_sync_arfs_fltrs(struct ice_pf *pf);
int ice_set_cpu_rx_rmap(struct ice_vsi *vsi);
@@ -56,7 +55,6 @@ ice_is_arfs_using_perfect_flow(struct ice_hw *hw,
enum ice_fltr_ptype flow_type);
#else
static inline void ice_clear_arfs(struct ice_vsi *vsi) { }
-static inline void ice_free_cpu_rx_rmap(struct ice_vsi *vsi) { }
static inline void ice_init_arfs(struct ice_vsi *vsi) { }
static inline void ice_sync_arfs_fltrs(struct ice_pf *pf) { }
static inline void ice_remove_arfs(struct ice_pf *pf) { }
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index 82a9cd4ec7ae..6db4ad8fc70b 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -147,10 +147,6 @@ skip_alloc:
q_vector->reg_idx = q_vector->irq.index;
q_vector->vf_reg_idx = q_vector->irq.index;
- /* only set affinity_mask if the CPU is online */
- if (cpu_online(v_idx))
- cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
-
/* This will not be called in the driver load path because the netdev
 * will not be created yet. All other cases will register the NAPI
* handler here (i.e. resume, reset/rebuild, etc.)
@@ -276,7 +272,8 @@ static void ice_cfg_xps_tx_ring(struct ice_tx_ring *ring)
if (test_and_set_bit(ICE_TX_XPS_INIT_DONE, ring->xps_state))
return;
- netif_set_xps_queue(ring->netdev, &ring->q_vector->affinity_mask,
+ netif_set_xps_queue(ring->netdev,
+ &ring->q_vector->napi.config->affinity_mask,
ring->q_index);
}
@@ -454,6 +451,9 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
/* Rx queue threshold in units of 64 */
rlan_ctx.lrxqthresh = 1;
+ /* Enable descriptor prefetch */
+ rlan_ctx.prefena = 1;
+
/* PF acts as uplink for switchdev; set flex descriptor with src_vsi
* metadata and flags to allow redirecting to PR netdev
*/
@@ -470,9 +470,6 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
*/
if (vsi->type != ICE_VSI_VF)
ice_write_qrxflxp_cntxt(hw, pf_q, rxdid, 0x3, true);
- else
- ice_write_qrxflxp_cntxt(hw, pf_q, ICE_RXDID_LEGACY_1, 0x3,
- false);
/* Absolute queue number out of 2K needs to be passed */
err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
@@ -798,13 +795,11 @@ int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
return 0;
err_out:
- while (v_idx--)
- ice_free_q_vector(vsi, v_idx);
- dev_err(dev, "Failed to allocate %d q_vector for VSI %d, ret=%d\n",
- vsi->num_q_vectors, vsi->vsi_num, err);
- vsi->num_q_vectors = 0;
- return err;
+ dev_info(dev, "Failed to allocate %d q_vectors for VSI %d, new value %d",
+ vsi->num_q_vectors, vsi->vsi_num, v_idx);
+ vsi->num_q_vectors = v_idx;
+ return v_idx ? 0 : err;
}
/**
@@ -910,8 +905,7 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
ice_setup_tx_ctx(ring, &tlan_ctx, pf_q);
/* copy context contents into the qg_buf */
qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
- ice_set_ctx(hw, (u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
- ice_tlan_ctx_info);
+ ice_pack_txq_ctx(&tlan_ctx, &qg_buf->txqs[0].txq_ctx);
/* init queue specific tail reg. It is referred as
* transmit comm scheduler queue doorbell.
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index b22e71dc59d4..4fedf0181c4e 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -6,6 +6,7 @@
#include "ice_adminq_cmd.h"
#include "ice_flow.h"
#include "ice_ptp_hw.h"
+#include <linux/packing.h>
#define ICE_PF_RESET_WAIT_COUNT 300
#define ICE_MAX_NETLIST_SIZE 10
@@ -185,7 +186,7 @@ static int ice_set_mac_type(struct ice_hw *hw)
* ice_is_generic_mac - check if device's mac_type is generic
* @hw: pointer to the hardware structure
*
- * Return: true if mac_type is generic (with SBQ support), false if not
+ * Return: true if mac_type is ICE_MAC_GENERIC*, false otherwise.
*/
bool ice_is_generic_mac(struct ice_hw *hw)
{
@@ -194,117 +195,39 @@ bool ice_is_generic_mac(struct ice_hw *hw)
}
/**
- * ice_is_e810
- * @hw: pointer to the hardware structure
- *
- * returns true if the device is E810 based, false if not.
- */
-bool ice_is_e810(struct ice_hw *hw)
-{
- return hw->mac_type == ICE_MAC_E810;
-}
-
-/**
- * ice_is_e810t
- * @hw: pointer to the hardware structure
+ * ice_is_pf_c827 - check if pf contains c827 phy
+ * @hw: pointer to the hw struct
*
- * returns true if the device is E810T based, false if not.
+ * Return: true if the device has a C827 PHY, false otherwise.
*/
-bool ice_is_e810t(struct ice_hw *hw)
+static bool ice_is_pf_c827(struct ice_hw *hw)
{
- switch (hw->device_id) {
- case ICE_DEV_ID_E810C_SFP:
- switch (hw->subsystem_device_id) {
- case ICE_SUBDEV_ID_E810T:
- case ICE_SUBDEV_ID_E810T2:
- case ICE_SUBDEV_ID_E810T3:
- case ICE_SUBDEV_ID_E810T4:
- case ICE_SUBDEV_ID_E810T6:
- case ICE_SUBDEV_ID_E810T7:
- return true;
- }
- break;
- case ICE_DEV_ID_E810C_QSFP:
- switch (hw->subsystem_device_id) {
- case ICE_SUBDEV_ID_E810T2:
- case ICE_SUBDEV_ID_E810T3:
- case ICE_SUBDEV_ID_E810T5:
- return true;
- }
- break;
- default:
- break;
- }
-
- return false;
-}
+ struct ice_aqc_get_link_topo cmd = {};
+ u8 node_part_number;
+ u16 node_handle;
+ int status;
-/**
- * ice_is_e822 - Check if a device is E822 family device
- * @hw: pointer to the hardware structure
- *
- * Return: true if the device is E822 based, false if not.
- */
-bool ice_is_e822(struct ice_hw *hw)
-{
- switch (hw->device_id) {
- case ICE_DEV_ID_E822C_BACKPLANE:
- case ICE_DEV_ID_E822C_QSFP:
- case ICE_DEV_ID_E822C_SFP:
- case ICE_DEV_ID_E822C_10G_BASE_T:
- case ICE_DEV_ID_E822C_SGMII:
- case ICE_DEV_ID_E822L_BACKPLANE:
- case ICE_DEV_ID_E822L_SFP:
- case ICE_DEV_ID_E822L_10G_BASE_T:
- case ICE_DEV_ID_E822L_SGMII:
- return true;
- default:
+ if (hw->mac_type != ICE_MAC_E810)
return false;
- }
-}
-/**
- * ice_is_e823
- * @hw: pointer to the hardware structure
- *
- * returns true if the device is E823-L or E823-C based, false if not.
- */
-bool ice_is_e823(struct ice_hw *hw)
-{
- switch (hw->device_id) {
- case ICE_DEV_ID_E823L_BACKPLANE:
- case ICE_DEV_ID_E823L_SFP:
- case ICE_DEV_ID_E823L_10G_BASE_T:
- case ICE_DEV_ID_E823L_1GBE:
- case ICE_DEV_ID_E823L_QSFP:
- case ICE_DEV_ID_E823C_BACKPLANE:
- case ICE_DEV_ID_E823C_QSFP:
- case ICE_DEV_ID_E823C_SFP:
- case ICE_DEV_ID_E823C_10G_BASE_T:
- case ICE_DEV_ID_E823C_SGMII:
+ if (hw->device_id != ICE_DEV_ID_E810C_QSFP)
return true;
- default:
+
+ cmd.addr.topo_params.node_type_ctx =
+ FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M, ICE_AQC_LINK_TOPO_NODE_TYPE_PHY) |
+ FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M, ICE_AQC_LINK_TOPO_NODE_CTX_PORT);
+ cmd.addr.topo_params.index = 0;
+
+ status = ice_aq_get_netlist_node(hw, &cmd, &node_part_number,
+ &node_handle);
+
+ if (status || node_part_number != ICE_AQC_GET_LINK_TOPO_NODE_NR_C827)
return false;
- }
-}
-/**
- * ice_is_e825c - Check if a device is E825C family device
- * @hw: pointer to the hardware structure
- *
- * Return: true if the device is E825-C based, false if not.
- */
-bool ice_is_e825c(struct ice_hw *hw)
-{
- switch (hw->device_id) {
- case ICE_DEV_ID_E825C_BACKPLANE:
- case ICE_DEV_ID_E825C_QSFP:
- case ICE_DEV_ID_E825C_SFP:
- case ICE_DEV_ID_E825C_SGMII:
+ if (node_handle == E810C_QSFP_C827_0_HANDLE || node_handle == E810C_QSFP_C827_1_HANDLE)
return true;
- default:
- return false;
- }
+
+ return false;
}
/**
@@ -542,7 +465,8 @@ ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd,
/**
* ice_find_netlist_node
* @hw: pointer to the hw struct
- * @node_type_ctx: type of netlist node to look for
+ * @node_type: type of netlist node to look for
+ * @ctx: context of the search
* @node_part_number: node part number to look for
* @node_handle: output parameter if node found - optional
*
@@ -552,10 +476,12 @@ ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd,
* valid if the function returns zero, and should be ignored on any non-zero
* return value.
*
- * Returns: 0 if the node is found, -ENOENT if no handle was found, and
- * a negative error code on failure to access the AQ.
+ * Return:
+ * * 0 if the node is found,
+ * * -ENOENT if no handle was found,
+ * * negative error code on failure to access the AQ.
*/
-static int ice_find_netlist_node(struct ice_hw *hw, u8 node_type_ctx,
+static int ice_find_netlist_node(struct ice_hw *hw, u8 node_type, u8 ctx,
u8 node_part_number, u16 *node_handle)
{
u8 idx;
@@ -566,8 +492,8 @@ static int ice_find_netlist_node(struct ice_hw *hw, u8 node_type_ctx,
int status;
cmd.addr.topo_params.node_type_ctx =
- FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M,
- node_type_ctx);
+ FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M, node_type) |
+ FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M, ctx);
cmd.addr.topo_params.index = idx;
status = ice_aq_get_netlist_node(hw, &cmd,
@@ -1022,6 +948,33 @@ static void ice_get_itr_intrl_gran(struct ice_hw *hw)
}
/**
+ * ice_wait_for_fw - wait for full FW readiness
+ * @hw: pointer to the hardware structure
+ * @timeout: milliseconds that can elapse before timing out
+ *
+ * Return: 0 on success, -ETIMEDOUT on timeout.
+ */
+static int ice_wait_for_fw(struct ice_hw *hw, u32 timeout)
+{
+ int fw_loading;
+ u32 elapsed = 0;
+
+ while (elapsed <= timeout) {
+ fw_loading = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_LOADING_M;
+
+ /* firmware was not yet loaded, we have to wait more */
+ if (fw_loading) {
+ elapsed += 100;
+ msleep(100);
+ continue;
+ }
+ return 0;
+ }
+
+ return -ETIMEDOUT;
+}
+
+/**
* ice_init_hw - main hardware initialization routine
* @hw: pointer to the hardware structure
*/
@@ -1170,8 +1123,21 @@ int ice_init_hw(struct ice_hw *hw)
mutex_init(&hw->tnl_lock);
ice_init_chk_recipe_reuse_support(hw);
- return 0;
+ /* Some cards require longer initialization times
+ * due to the need to load FW from an external source.
+ * This can take up to half a minute.
+ */
+ if (ice_is_pf_c827(hw)) {
+ status = ice_wait_for_fw(hw, 30000);
+ if (status) {
+ dev_err(ice_hw_to_dev(hw), "ice_wait_for_fw timed out");
+ goto err_unroll_fltr_mgmt_struct;
+ }
+ }
+
+ hw->lane_num = ice_get_phy_lane_number(hw);
+ return 0;
err_unroll_fltr_mgmt_struct:
ice_cleanup_fltr_mgmt_struct(hw);
err_unroll_sched:
@@ -1357,39 +1323,31 @@ int ice_reset(struct ice_hw *hw, enum ice_reset_req req)
}
/**
- * ice_copy_rxq_ctx_to_hw
+ * ice_copy_rxq_ctx_to_hw - Copy packed Rx queue context to HW registers
* @hw: pointer to the hardware structure
- * @ice_rxq_ctx: pointer to the rxq context
+ * @rxq_ctx: pointer to the packed Rx queue context
* @rxq_index: the index of the Rx queue
- *
- * Copies rxq context from dense structure to HW register space
*/
-static int
-ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
+static void ice_copy_rxq_ctx_to_hw(struct ice_hw *hw,
+ const ice_rxq_ctx_buf_t *rxq_ctx,
+ u32 rxq_index)
{
- u8 i;
-
- if (!ice_rxq_ctx)
- return -EINVAL;
-
- if (rxq_index > QRX_CTRL_MAX_INDEX)
- return -EINVAL;
-
/* Copy each dword separately to HW */
- for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
- wr32(hw, QRX_CONTEXT(i, rxq_index),
- *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
+ for (int i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
+ u32 ctx = ((const u32 *)rxq_ctx)[i];
- ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
- *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
- }
+ wr32(hw, QRX_CONTEXT(i, rxq_index), ctx);
- return 0;
+ ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i, ctx);
+ }
}
+#define ICE_CTX_STORE(struct_name, struct_field, width, lsb) \
+ PACKED_FIELD((lsb) + (width) - 1, (lsb), struct struct_name, struct_field)
+
/* LAN Rx Queue Context */
-static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
- /* Field Width LSB */
+static const struct packed_field_u8 ice_rlan_ctx_fields[] = {
+ /* Field Width LSB */
ICE_CTX_STORE(ice_rlan_ctx, head, 13, 0),
ICE_CTX_STORE(ice_rlan_ctx, cpuid, 8, 13),
ICE_CTX_STORE(ice_rlan_ctx, base, 57, 32),
@@ -1410,35 +1368,50 @@ static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena, 1, 196),
ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh, 3, 198),
ICE_CTX_STORE(ice_rlan_ctx, prefena, 1, 201),
- { 0 }
};
/**
- * ice_write_rxq_ctx
+ * ice_pack_rxq_ctx - Pack Rx queue context into a HW buffer
+ * @ctx: the Rx queue context to pack
+ * @buf: the HW buffer to pack into
+ *
+ * Pack the Rx queue context from the CPU-friendly unpacked buffer into its
+ * bit-packed HW layout.
+ */
+static void ice_pack_rxq_ctx(const struct ice_rlan_ctx *ctx,
+ ice_rxq_ctx_buf_t *buf)
+{
+ pack_fields(buf, sizeof(*buf), ctx, ice_rlan_ctx_fields,
+ QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);
+}
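
The helper above is a thin wrapper around the generic <linux/packing.h> API. A minimal, self-contained sketch of the same pattern with a hypothetical two-field context (the struct, field table and buffer type here are illustrative, not driver definitions):

#include <linux/packing.h>
#include <linux/types.h>

struct example_ctx {
	u16 head;	/* occupies bits 12..0 of the packed layout */
	u8 cpuid;	/* occupies bits 20..13 of the packed layout */
};

/* Packed HW buffer; packed_field_u8 descriptors suit buffers < 256 bytes */
typedef struct __packed { u8 buf[8]; } example_ctx_buf_t;

static const struct packed_field_u8 example_ctx_fields[] = {
	/* MSB LSB (i.e. lsb + width - 1, lsb) */
	PACKED_FIELD(12, 0, struct example_ctx, head),
	PACKED_FIELD(20, 13, struct example_ctx, cpuid),
};

static void example_pack_ctx(const struct example_ctx *ctx,
			     example_ctx_buf_t *buf)
{
	/* Same quirks as the Rx/Tx queue context packing above */
	pack_fields(buf, sizeof(*buf), ctx, example_ctx_fields,
		    QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);
}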
+
+/**
+ * ice_write_rxq_ctx - Write Rx Queue context to hardware
* @hw: pointer to the hardware structure
- * @rlan_ctx: pointer to the rxq context
+ * @rlan_ctx: pointer to the unpacked Rx queue context
* @rxq_index: the index of the Rx queue
*
- * Converts rxq context from sparse to dense structure and then writes
- * it to HW register space and enables the hardware to prefetch descriptors
- * instead of only fetching them on demand
+ * Pack the sparse Rx Queue context into dense hardware format and write it
+ * into the HW register space.
+ *
+ * Return: 0 on success, or -EINVAL if the Rx queue index is invalid.
*/
int ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
u32 rxq_index)
{
- u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
+ ice_rxq_ctx_buf_t buf = {};
- if (!rlan_ctx)
+ if (rxq_index > QRX_CTRL_MAX_INDEX)
return -EINVAL;
- rlan_ctx->prefena = 1;
+ ice_pack_rxq_ctx(rlan_ctx, &buf);
+ ice_copy_rxq_ctx_to_hw(hw, &buf, rxq_index);
- ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
- return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
+ return 0;
}
/* LAN Tx Queue Context */
-const struct ice_ctx_ele ice_tlan_ctx_info[] = {
+static const struct packed_field_u8 ice_tlan_ctx_fields[] = {
/* Field Width LSB */
ICE_CTX_STORE(ice_tlan_ctx, base, 57, 0),
ICE_CTX_STORE(ice_tlan_ctx, port_num, 3, 57),
@@ -1467,10 +1440,22 @@ const struct ice_ctx_ele ice_tlan_ctx_info[] = {
ICE_CTX_STORE(ice_tlan_ctx, drop_ena, 1, 165),
ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx, 2, 166),
ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx, 3, 168),
- ICE_CTX_STORE(ice_tlan_ctx, int_q_state, 122, 171),
- { 0 }
};
+/**
+ * ice_pack_txq_ctx - Pack Tx queue context into a HW buffer
+ * @ctx: the Tx queue context to pack
+ * @buf: the HW buffer to pack into
+ *
+ * Pack the Tx queue context from the CPU-friendly unpacked buffer into its
+ * bit-packed HW layout.
+ */
+void ice_pack_txq_ctx(const struct ice_tlan_ctx *ctx, ice_txq_ctx_buf_t *buf)
+{
+ pack_fields(buf, sizeof(*buf), ctx, ice_tlan_ctx_fields,
+ QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);
+}
+
/* Sideband Queue command wrappers */
/**
@@ -2174,7 +2159,8 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
caps->nvm_unified_update);
break;
case ICE_AQC_CAPS_RDMA:
- caps->rdma = (number == 1);
+ if (IS_ENABLED(CONFIG_INFINIBAND_IRDMA))
+ caps->rdma = (number == 1);
ice_debug(hw, ICE_DBG_INIT, "%s: rdma = %d\n", prefix, caps->rdma);
break;
case ICE_AQC_CAPS_MAX_MTU:
@@ -2311,7 +2297,7 @@ ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
info->tmr_index_owned = ((number & ICE_TS_TMR_IDX_OWND_M) != 0);
info->tmr_index_assoc = ((number & ICE_TS_TMR_IDX_ASSOC_M) != 0);
- if (!ice_is_e825c(hw)) {
+ if (hw->mac_type != ICE_MAC_GENERIC_3K_E825) {
info->clk_freq = FIELD_GET(ICE_TS_CLK_FREQ_M, number);
info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0);
} else {
@@ -2544,6 +2530,7 @@ ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
info->ts_ll_read = ((number & ICE_TS_LL_TX_TS_READ_M) != 0);
info->ts_ll_int_read = ((number & ICE_TS_LL_TX_TS_INT_READ_M) != 0);
+ info->ll_phy_tmr_update = ((number & ICE_TS_LL_PHY_TMR_UPDATE_M) != 0);
info->ena_ports = logical_id;
info->tmr_own_map = phys_id;
@@ -2566,6 +2553,8 @@ ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
info->ts_ll_read);
ice_debug(hw, ICE_DBG_INIT, "dev caps: ts_ll_int_read = %u\n",
info->ts_ll_int_read);
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: ll_phy_tmr_update = %u\n",
+ info->ll_phy_tmr_update);
ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 ena_ports = %u\n",
info->ena_ports);
ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr_own_map = %u\n",
@@ -2706,40 +2695,6 @@ ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
}
/**
- * ice_is_pf_c827 - check if pf contains c827 phy
- * @hw: pointer to the hw struct
- */
-bool ice_is_pf_c827(struct ice_hw *hw)
-{
- struct ice_aqc_get_link_topo cmd = {};
- u8 node_part_number;
- u16 node_handle;
- int status;
-
- if (hw->mac_type != ICE_MAC_E810)
- return false;
-
- if (hw->device_id != ICE_DEV_ID_E810C_QSFP)
- return true;
-
- cmd.addr.topo_params.node_type_ctx =
- FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M, ICE_AQC_LINK_TOPO_NODE_TYPE_PHY) |
- FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M, ICE_AQC_LINK_TOPO_NODE_CTX_PORT);
- cmd.addr.topo_params.index = 0;
-
- status = ice_aq_get_netlist_node(hw, &cmd, &node_part_number,
- &node_handle);
-
- if (status || node_part_number != ICE_AQC_GET_LINK_TOPO_NODE_NR_C827)
- return false;
-
- if (node_handle == E810C_QSFP_C827_0_HANDLE || node_handle == E810C_QSFP_C827_1_HANDLE)
- return true;
-
- return false;
-}
-
-/**
* ice_is_phy_rclk_in_netlist
* @hw: pointer to the hw struct
*
@@ -2747,9 +2702,11 @@ bool ice_is_pf_c827(struct ice_hw *hw)
*/
bool ice_is_phy_rclk_in_netlist(struct ice_hw *hw)
{
- if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL,
+ if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_PHY,
+ ICE_AQC_LINK_TOPO_NODE_CTX_PORT,
ICE_AQC_GET_LINK_TOPO_NODE_NR_C827, NULL) &&
- ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL,
+ ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_PHY,
+ ICE_AQC_LINK_TOPO_NODE_CTX_PORT,
ICE_AQC_GET_LINK_TOPO_NODE_NR_E822_PHY, NULL))
return false;
@@ -2765,6 +2722,7 @@ bool ice_is_phy_rclk_in_netlist(struct ice_hw *hw)
bool ice_is_clock_mux_in_netlist(struct ice_hw *hw)
{
if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_MUX,
+ ICE_AQC_LINK_TOPO_NODE_CTX_GLOBAL,
ICE_AQC_GET_LINK_TOPO_NODE_NR_GEN_CLK_MUX,
NULL))
return false;
@@ -2785,12 +2743,14 @@ bool ice_is_clock_mux_in_netlist(struct ice_hw *hw)
bool ice_is_cgu_in_netlist(struct ice_hw *hw)
{
if (!ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL,
+ ICE_AQC_LINK_TOPO_NODE_CTX_GLOBAL,
ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032,
NULL)) {
hw->cgu_part_number = ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032;
return true;
} else if (!ice_find_netlist_node(hw,
ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL,
+ ICE_AQC_LINK_TOPO_NODE_CTX_GLOBAL,
ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384,
NULL)) {
hw->cgu_part_number = ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384;
@@ -2809,6 +2769,7 @@ bool ice_is_cgu_in_netlist(struct ice_hw *hw)
bool ice_is_gps_in_netlist(struct ice_hw *hw)
{
if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_GPS,
+ ICE_AQC_LINK_TOPO_NODE_CTX_GLOBAL,
ICE_AQC_GET_LINK_TOPO_NODE_NR_GEN_GPS, NULL))
return false;
@@ -3475,7 +3436,7 @@ int ice_aq_get_fec_stats(struct ice_hw *hw, u16 pcs_quad, u16 pcs_port,
msg.msg_addr_low = lower_16_bits(reg_offset);
msg.msg_addr_high = receiver_id;
msg.opcode = ice_sbq_msg_rd;
- msg.dest_dev = rmn_0;
+ msg.dest_dev = ice_sbq_dev_phy_0;
err = ice_sbq_rw_reg(hw, &msg, flag);
if (err)
@@ -4087,6 +4048,59 @@ ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid,
}
/**
+ * ice_get_phy_lane_number - Get PHY lane number for current adapter
+ * @hw: pointer to the hw struct
+ *
+ * Return: PHY lane number on success, negative error code otherwise.
+ */
+int ice_get_phy_lane_number(struct ice_hw *hw)
+{
+ struct ice_aqc_get_port_options_elem *options;
+ unsigned int lport = 0;
+ unsigned int lane;
+ int err;
+
+ options = kcalloc(ICE_AQC_PORT_OPT_MAX, sizeof(*options), GFP_KERNEL);
+ if (!options)
+ return -ENOMEM;
+
+ for (lane = 0; lane < ICE_MAX_PORT_PER_PCI_DEV; lane++) {
+ u8 options_count = ICE_AQC_PORT_OPT_MAX;
+ u8 speed, active_idx, pending_idx;
+ bool active_valid, pending_valid;
+
+ err = ice_aq_get_port_options(hw, options, &options_count, lane,
+ true, &active_idx, &active_valid,
+ &pending_idx, &pending_valid);
+ if (err)
+ goto err;
+
+ if (!active_valid)
+ continue;
+
+ speed = options[active_idx].max_lane_speed;
+ /* If we don't get speed for this lane, it's unoccupied */
+ if (speed > ICE_AQC_PORT_OPT_MAX_LANE_200G)
+ continue;
+
+ if (hw->pf_id == lport) {
+ if (hw->mac_type == ICE_MAC_GENERIC_3K_E825 &&
+ ice_is_dual(hw) && !ice_is_primary(hw))
+ lane += ICE_PORTS_PER_QUAD;
+ kfree(options);
+ return lane;
+ }
+ lport++;
+ }
+
+ /* PHY lane not found */
+ err = -ENXIO;
+err:
+ kfree(options);
+ return err;
+}
+
+/**
* ice_aq_sff_eeprom
* @hw: pointer to the HW struct
* @lport: bits [7:0] = logical port, bit [8] = logical port valid
@@ -4549,205 +4563,6 @@ ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps,
/* End of FW Admin Queue command wrappers */
/**
- * ice_pack_ctx_byte - write a byte to a packed context structure
- * @src_ctx: unpacked source context structure
- * @dest_ctx: packed destination context data
- * @ce_info: context element description
- */
-static void ice_pack_ctx_byte(u8 *src_ctx, u8 *dest_ctx,
- const struct ice_ctx_ele *ce_info)
-{
- u8 src_byte, dest_byte, mask;
- u8 *from, *dest;
- u16 shift_width;
-
- /* copy from the next struct field */
- from = src_ctx + ce_info->offset;
-
- /* prepare the bits and mask */
- shift_width = ce_info->lsb % 8;
- mask = GENMASK(ce_info->width - 1 + shift_width, shift_width);
-
- src_byte = *from;
- src_byte <<= shift_width;
- src_byte &= mask;
-
- /* get the current bits from the target bit string */
- dest = dest_ctx + (ce_info->lsb / 8);
-
- memcpy(&dest_byte, dest, sizeof(dest_byte));
-
- dest_byte &= ~mask; /* get the bits not changing */
- dest_byte |= src_byte; /* add in the new bits */
-
- /* put it all back */
- memcpy(dest, &dest_byte, sizeof(dest_byte));
-}
-
-/**
- * ice_pack_ctx_word - write a word to a packed context structure
- * @src_ctx: unpacked source context structure
- * @dest_ctx: packed destination context data
- * @ce_info: context element description
- */
-static void ice_pack_ctx_word(u8 *src_ctx, u8 *dest_ctx,
- const struct ice_ctx_ele *ce_info)
-{
- u16 src_word, mask;
- __le16 dest_word;
- u8 *from, *dest;
- u16 shift_width;
-
- /* copy from the next struct field */
- from = src_ctx + ce_info->offset;
-
- /* prepare the bits and mask */
- shift_width = ce_info->lsb % 8;
- mask = GENMASK(ce_info->width - 1 + shift_width, shift_width);
-
- /* don't swizzle the bits until after the mask because the mask bits
- * will be in a different bit position on big endian machines
- */
- src_word = *(u16 *)from;
- src_word <<= shift_width;
- src_word &= mask;
-
- /* get the current bits from the target bit string */
- dest = dest_ctx + (ce_info->lsb / 8);
-
- memcpy(&dest_word, dest, sizeof(dest_word));
-
- dest_word &= ~(cpu_to_le16(mask)); /* get the bits not changing */
- dest_word |= cpu_to_le16(src_word); /* add in the new bits */
-
- /* put it all back */
- memcpy(dest, &dest_word, sizeof(dest_word));
-}
-
-/**
- * ice_pack_ctx_dword - write a dword to a packed context structure
- * @src_ctx: unpacked source context structure
- * @dest_ctx: packed destination context data
- * @ce_info: context element description
- */
-static void ice_pack_ctx_dword(u8 *src_ctx, u8 *dest_ctx,
- const struct ice_ctx_ele *ce_info)
-{
- u32 src_dword, mask;
- __le32 dest_dword;
- u8 *from, *dest;
- u16 shift_width;
-
- /* copy from the next struct field */
- from = src_ctx + ce_info->offset;
-
- /* prepare the bits and mask */
- shift_width = ce_info->lsb % 8;
- mask = GENMASK(ce_info->width - 1 + shift_width, shift_width);
-
- /* don't swizzle the bits until after the mask because the mask bits
- * will be in a different bit position on big endian machines
- */
- src_dword = *(u32 *)from;
- src_dword <<= shift_width;
- src_dword &= mask;
-
- /* get the current bits from the target bit string */
- dest = dest_ctx + (ce_info->lsb / 8);
-
- memcpy(&dest_dword, dest, sizeof(dest_dword));
-
- dest_dword &= ~(cpu_to_le32(mask)); /* get the bits not changing */
- dest_dword |= cpu_to_le32(src_dword); /* add in the new bits */
-
- /* put it all back */
- memcpy(dest, &dest_dword, sizeof(dest_dword));
-}
-
-/**
- * ice_pack_ctx_qword - write a qword to a packed context structure
- * @src_ctx: unpacked source context structure
- * @dest_ctx: packed destination context data
- * @ce_info: context element description
- */
-static void ice_pack_ctx_qword(u8 *src_ctx, u8 *dest_ctx,
- const struct ice_ctx_ele *ce_info)
-{
- u64 src_qword, mask;
- __le64 dest_qword;
- u8 *from, *dest;
- u16 shift_width;
-
- /* copy from the next struct field */
- from = src_ctx + ce_info->offset;
-
- /* prepare the bits and mask */
- shift_width = ce_info->lsb % 8;
- mask = GENMASK_ULL(ce_info->width - 1 + shift_width, shift_width);
-
- /* don't swizzle the bits until after the mask because the mask bits
- * will be in a different bit position on big endian machines
- */
- src_qword = *(u64 *)from;
- src_qword <<= shift_width;
- src_qword &= mask;
-
- /* get the current bits from the target bit string */
- dest = dest_ctx + (ce_info->lsb / 8);
-
- memcpy(&dest_qword, dest, sizeof(dest_qword));
-
- dest_qword &= ~(cpu_to_le64(mask)); /* get the bits not changing */
- dest_qword |= cpu_to_le64(src_qword); /* add in the new bits */
-
- /* put it all back */
- memcpy(dest, &dest_qword, sizeof(dest_qword));
-}
-
-/**
- * ice_set_ctx - set context bits in packed structure
- * @hw: pointer to the hardware structure
- * @src_ctx: pointer to a generic non-packed context structure
- * @dest_ctx: pointer to memory for the packed structure
- * @ce_info: List of Rx context elements
- */
-int ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
- const struct ice_ctx_ele *ce_info)
-{
- int f;
-
- for (f = 0; ce_info[f].width; f++) {
- /* We have to deal with each element of the FW response
- * using the correct size so that we are correct regardless
- * of the endianness of the machine.
- */
- if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) {
- ice_debug(hw, ICE_DBG_QCTX, "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n",
- f, ce_info[f].width, ce_info[f].size_of);
- continue;
- }
- switch (ce_info[f].size_of) {
- case sizeof(u8):
- ice_pack_ctx_byte(src_ctx, dest_ctx, &ce_info[f]);
- break;
- case sizeof(u16):
- ice_pack_ctx_word(src_ctx, dest_ctx, &ce_info[f]);
- break;
- case sizeof(u32):
- ice_pack_ctx_dword(src_ctx, dest_ctx, &ce_info[f]);
- break;
- case sizeof(u64):
- ice_pack_ctx_qword(src_ctx, dest_ctx, &ce_info[f]);
- break;
- default:
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
-/**
* ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
* @hw: pointer to the HW struct
* @vsi_handle: software VSI handle
@@ -5841,6 +5656,96 @@ ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
}
/**
+ * ice_get_pca9575_handle - find and return the PCA9575 controller
+ * @hw: pointer to the hw struct
+ * @pca9575_handle: GPIO controller's handle
+ *
+ * Find and return the GPIO controller's handle in the netlist.
+ * Once found, the handle is cached in the hw structure and subsequent
+ * calls return the cached value.
+ *
+ * Return: 0 on success, -ENXIO when there's no PCA9575 present.
+ */
+int ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle)
+{
+ struct ice_aqc_get_link_topo *cmd;
+ struct ice_aq_desc desc;
+ int err;
+ u8 idx;
+
+ /* If the handle was read previously, return the cached value */
+ if (hw->io_expander_handle) {
+ *pca9575_handle = hw->io_expander_handle;
+ return 0;
+ }
+
+#define SW_PCA9575_SFP_TOPO_IDX 2
+#define SW_PCA9575_QSFP_TOPO_IDX 1
+
+ /* Check if the SW IO expander controlling SMA exists in the netlist. */
+ if (hw->device_id == ICE_DEV_ID_E810C_SFP)
+ idx = SW_PCA9575_SFP_TOPO_IDX;
+ else if (hw->device_id == ICE_DEV_ID_E810C_QSFP)
+ idx = SW_PCA9575_QSFP_TOPO_IDX;
+ else
+ return -ENXIO;
+
+ /* If handle was not detected read it from the netlist */
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
+ cmd = &desc.params.get_link_topo;
+ cmd->addr.topo_params.node_type_ctx =
+ ICE_AQC_LINK_TOPO_NODE_TYPE_GPIO_CTRL;
+ cmd->addr.topo_params.index = idx;
+
+ err = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+ if (err)
+ return -ENXIO;
+
+ /* Verify if we found the right IO expander type */
+ if (desc.params.get_link_topo.node_part_num !=
+ ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575)
+ return -ENXIO;
+
+ /* If present save the handle and return it */
+ hw->io_expander_handle =
+ le16_to_cpu(desc.params.get_link_topo.addr.handle);
+ *pca9575_handle = hw->io_expander_handle;
+
+ return 0;
+}
+
+/**
+ * ice_read_pca9575_reg - read the register from the PCA9575 controller
+ * @hw: pointer to the hw struct
+ * @offset: GPIO controller register offset
+ * @data: pointer to data to be read from the GPIO controller
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int ice_read_pca9575_reg(struct ice_hw *hw, u8 offset, u8 *data)
+{
+ struct ice_aqc_link_topo_addr link_topo;
+ __le16 addr;
+ u16 handle;
+ int err;
+
+ memset(&link_topo, 0, sizeof(link_topo));
+
+ err = ice_get_pca9575_handle(hw, &handle);
+ if (err)
+ return err;
+
+ link_topo.handle = cpu_to_le16(handle);
+ link_topo.topo_params.node_type_ctx =
+ FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M,
+ ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED);
+
+ addr = cpu_to_le16((u16)offset);
+
+ return ice_aq_read_i2c(hw, link_topo, 0, addr, 1, data, NULL);
+}
+
+/**
* ice_aq_set_gpio
* @hw: pointer to the hw struct
* @gpio_ctrl_handle: GPIO controller node handle
@@ -6023,6 +5928,44 @@ bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
}
/**
+ * ice_is_fw_health_report_supported - checks if firmware supports health events
+ * @hw: pointer to the hardware structure
+ *
+ * Return: true if firmware supports health status reports,
+ * false otherwise
+ */
+bool ice_is_fw_health_report_supported(struct ice_hw *hw)
+{
+ return ice_is_fw_api_min_ver(hw, ICE_FW_API_HEALTH_REPORT_MAJ,
+ ICE_FW_API_HEALTH_REPORT_MIN,
+ ICE_FW_API_HEALTH_REPORT_PATCH);
+}
+
+/**
+ * ice_aq_set_health_status_cfg - Configure FW health events
+ * @hw: pointer to the HW struct
+ * @event_source: type of diagnostic events to enable
+ *
+ * Configure the health status event types that the firmware will send to this
+ * PF. The supported event types are: PF-specific, all PFs, and global.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int ice_aq_set_health_status_cfg(struct ice_hw *hw, u8 event_source)
+{
+ struct ice_aqc_set_health_status_cfg *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.set_health_status_cfg;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_health_status_cfg);
+
+ cmd->event_source = event_source;
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+}
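
A minimal sketch of how the two helpers above are intended to be combined at init time; the wrapper function and the debug message are illustrative, and event_source stands for one of the PF-specific, all-PF or global masks mentioned in the kernel-doc:

/* Illustrative only: enable FW health events when the FW API is new enough */
static void example_enable_health_events(struct ice_hw *hw, u8 event_source)
{
	int err;

	if (!ice_is_fw_health_report_supported(hw))
		return;

	err = ice_aq_set_health_status_cfg(hw, event_source);
	if (err)
		ice_debug(hw, ICE_DBG_INIT,
			  "failed to configure health events, err %d\n", err);
}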
+
+/**
* ice_aq_set_lldp_mib - Set the LLDP MIB
* @hw: pointer to the HW struct
* @mib_type: Local, Remote or both Local and Remote MIBs
@@ -6072,15 +6015,21 @@ bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
/**
* ice_lldp_fltr_add_remove - add or remove a LLDP Rx switch filter
* @hw: pointer to HW struct
- * @vsi_num: absolute HW index for VSI
+ * @vsi: VSI to add the filter to
* @add: boolean for if adding or removing a filter
+ *
+ * Return: 0 on success, -EOPNOTSUPP if the operation cannot be performed
+ * with this HW or VSI, otherwise an error corresponding to
+ * the AQ transaction result.
*/
-int
-ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
+int ice_lldp_fltr_add_remove(struct ice_hw *hw, struct ice_vsi *vsi, bool add)
{
struct ice_aqc_lldp_filter_ctrl *cmd;
struct ice_aq_desc desc;
+ if (vsi->type != ICE_VSI_PF || !ice_fw_supports_lldp_fltr_ctrl(hw))
+ return -EOPNOTSUPP;
+
cmd = &desc.params.lldp_filter_ctrl;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_filter_ctrl);
@@ -6090,7 +6039,7 @@ ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
else
cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_DELETE;
- cmd->vsi_num = cpu_to_le16(vsi_num);
+ cmd->vsi_num = cpu_to_le16(vsi->vsi_num);
return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}
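
A minimal before/after sketch of the call shape change (the caller context is illustrative; vsi->vsi_num is the absolute HW index the old variant expected):

/* Old shape: caller pre-checked FW support and passed the HW VSI index */
if (ice_fw_supports_lldp_fltr_ctrl(&pf->hw))
	err = ice_lldp_fltr_add_remove(&pf->hw, vsi->vsi_num, true);

/* New shape: the helper validates the VSI type and FW support itself */
err = ice_lldp_fltr_add_remove(&pf->hw, vsi, true);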
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index 27208a60cece..64c530b39191 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -92,9 +92,8 @@ ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq);
int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading);
void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode);
-extern const struct ice_ctx_ele ice_tlan_ctx_info[];
-int ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
- const struct ice_ctx_ele *ce_info);
+
+void ice_pack_txq_ctx(const struct ice_tlan_ctx *ctx, ice_txq_ctx_buf_t *buf);
extern struct mutex ice_global_cfg_lock_sw;
@@ -113,7 +112,6 @@ int
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
struct ice_aqc_get_phy_caps_data *caps,
struct ice_sq_cd *cd);
-bool ice_is_pf_c827(struct ice_hw *hw);
bool ice_is_phy_rclk_in_netlist(struct ice_hw *hw);
bool ice_is_clock_mux_in_netlist(struct ice_hw *hw);
bool ice_is_cgu_in_netlist(struct ice_hw *hw);
@@ -133,7 +131,6 @@ int
ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
struct ice_sq_cd *cd);
bool ice_is_generic_mac(struct ice_hw *hw);
-bool ice_is_e810(struct ice_hw *hw);
int ice_clear_pf_cfg(struct ice_hw *hw);
int
ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
@@ -143,6 +140,8 @@ int
ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
struct ice_port_info *pi);
bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps);
+bool ice_is_fw_health_report_supported(struct ice_hw *hw);
+int ice_aq_set_health_status_cfg(struct ice_hw *hw, u8 event_source);
int ice_aq_get_phy_equalization(struct ice_hw *hw, u16 data_in, u16 op_code,
u8 serdes_num, int *output);
int
@@ -193,6 +192,7 @@ ice_aq_get_port_options(struct ice_hw *hw,
int
ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid,
u8 new_option);
+int ice_get_phy_lane_number(struct ice_hw *hw);
int
ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
@@ -275,10 +275,6 @@ ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
void
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
u64 *prev_stat, u64 *cur_stat);
-bool ice_is_e810t(struct ice_hw *hw);
-bool ice_is_e822(struct ice_hw *hw);
-bool ice_is_e823(struct ice_hw *hw);
-bool ice_is_e825c(struct ice_hw *hw);
int
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
struct ice_aqc_txsched_elem_data *buf);
@@ -294,8 +290,7 @@ int
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
struct ice_sq_cd *cd);
bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw);
-int
-ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add);
+int ice_lldp_fltr_add_remove(struct ice_hw *hw, struct ice_vsi *vsi, bool add);
int ice_lldp_execute_pending_mib(struct ice_hw *hw);
int
ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
@@ -305,5 +300,7 @@ int
ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
u16 bus_addr, __le16 addr, u8 params, const u8 *data,
struct ice_sq_cd *cd);
+int ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle);
+int ice_read_pca9575_reg(struct ice_hw *hw, u8 offset, u8 *data);
bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw);
#endif /* _ICE_COMMON_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c
index 74418c445cc4..64737fc62306 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb.c
@@ -1288,7 +1288,7 @@ ice_add_dscp_up_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
tlv->ouisubtype = htonl(ouisubtype);
/* bytes 0 - 63 - IPv4 DSCP2UP LUT */
- for (i = 0; i < ICE_DSCP_NUM_VAL; i++) {
+ for (i = 0; i < DSCP_MAX; i++) {
/* IPv4 mapping */
buf[i] = dcbcfg->dscp_map[i];
/* IPv6 mapping */
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index a7c510832824..533eb8930aa8 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -352,8 +352,8 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
struct ice_aqc_port_ets_elem buf = { 0 };
struct ice_dcbx_cfg *old_cfg, *curr_cfg;
struct device *dev = ice_pf_to_dev(pf);
+ struct iidc_rdma_event *event;
int ret = ICE_DCB_NO_HW_CHG;
- struct iidc_event *event;
struct ice_vsi *pf_vsi;
curr_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
@@ -405,7 +405,7 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
goto free_cfg;
}
- set_bit(IIDC_EVENT_BEFORE_TC_CHANGE, event->type);
+ set_bit(IIDC_RDMA_EVENT_BEFORE_TC_CHANGE, event->type);
ice_send_event_to_aux(pf, event);
kfree(event);
@@ -740,7 +740,9 @@ static int ice_dcb_noncontig_cfg(struct ice_pf *pf)
void ice_pf_dcb_recfg(struct ice_pf *pf, bool locked)
{
struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
- struct iidc_event *event;
+ struct iidc_rdma_priv_dev_info *privd;
+ struct iidc_rdma_core_dev_info *cdev;
+ struct iidc_rdma_event *event;
u8 tc_map = 0;
int v, ret;
@@ -783,13 +785,17 @@ void ice_pf_dcb_recfg(struct ice_pf *pf, bool locked)
if (vsi->type == ICE_VSI_PF)
ice_dcbnl_set_all(vsi);
}
- if (!locked) {
+
+ cdev = pf->cdev_info;
+ if (cdev && !locked) {
+ privd = cdev->iidc_priv;
+ ice_setup_dcb_qos_info(pf, &privd->qos_info);
/* Notify the AUX drivers that TC change is finished */
event = kzalloc(sizeof(*event), GFP_KERNEL);
if (!event)
return;
- set_bit(IIDC_EVENT_AFTER_TC_CHANGE, event->type);
+ set_bit(IIDC_RDMA_EVENT_AFTER_TC_CHANGE, event->type);
ice_send_event_to_aux(pf, event);
kfree(event);
}
@@ -846,7 +852,7 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked)
goto dcb_init_err;
}
- ice_cfg_sw_lldp(pf_vsi, false, true);
+ ice_cfg_sw_rx_lldp(pf, true);
pf->dcbx_cap = ice_dcb_get_mode(port_info, true);
return 0;
@@ -945,6 +951,37 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_tx_ring *tx_ring,
}
/**
+ * ice_setup_dcb_qos_info - Setup DCB QoS information
+ * @pf: ptr to ice_pf
+ * @qos_info: QoS param instance
+ */
+void ice_setup_dcb_qos_info(struct ice_pf *pf, struct iidc_rdma_qos_params *qos_info)
+{
+ struct ice_dcbx_cfg *dcbx_cfg;
+ unsigned int i;
+ u32 up2tc;
+
+ if (!pf || !qos_info)
+ return;
+
+ dcbx_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
+ up2tc = rd32(&pf->hw, PRTDCB_TUP2TC);
+
+ qos_info->num_tc = ice_dcb_get_num_tc(dcbx_cfg);
+
+ for (i = 0; i < IIDC_MAX_USER_PRIORITY; i++)
+ qos_info->up2tc[i] = (up2tc >> (i * 3)) & 0x7;
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+ qos_info->tc_info[i].rel_bw = dcbx_cfg->etscfg.tcbwtable[i];
+
+ qos_info->pfc_mode = dcbx_cfg->pfc_mode;
+ if (qos_info->pfc_mode == IIDC_DSCP_PFC_MODE)
+ for (i = 0; i < DSCP_MAX; i++)
+ qos_info->dscp_map[i] = dcbx_cfg->dscp_map[i];
+}
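
For reference, PRTDCB_TUP2TC packs one traffic class per user priority in 3-bit fields, which is what the shift-and-mask above decodes. A small worked example with a hypothetical register value:

/* Hypothetical read-back value mapping UP0->TC0, UP1->TC1, UP2->TC2 */
u32 up2tc = 0x00000088;			/* 0b...010_001_000 */
u8 up1 = (up2tc >> (1 * 3)) & 0x7;	/* == 1 */
u8 up2 = (up2tc >> (2 * 3)) & 0x7;	/* == 2 */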
+
+/**
* ice_dcb_is_mib_change_pending - Check if MIB change is pending
* @state: MIB change state
*/
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
index 800879a88c5e..da9ba814b4e8 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
@@ -31,6 +31,9 @@ void
ice_tx_prepare_vlan_flags_dcb(struct ice_tx_ring *tx_ring,
struct ice_tx_buf *first);
void
+ice_setup_dcb_qos_info(struct ice_pf *pf,
+ struct iidc_rdma_qos_params *qos_info);
+void
ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
struct ice_rq_event_info *event);
/**
@@ -134,5 +137,11 @@ static inline void ice_update_dcb_stats(struct ice_pf *pf) { }
static inline void
ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf, struct ice_rq_event_info *event) { }
static inline void ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, u8 dcb_tc) { }
+static inline void
+ice_setup_dcb_qos_info(struct ice_pf *pf, struct iidc_rdma_qos_params *qos_info)
+{
+ qos_info->num_tc = 1;
+ qos_info->tc_info[0].rel_bw = 100;
+}
#endif /* CONFIG_DCB */
#endif /* _ICE_DCB_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
index 6d50b90a7359..a10c1c8d8697 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
@@ -754,7 +754,7 @@ static int ice_dcbnl_setapp(struct net_device *netdev, struct dcb_app *app)
if (!ice_is_feature_supported(pf, ICE_F_DSCP))
return -EOPNOTSUPP;
- if (app->protocol >= ICE_DSCP_NUM_VAL) {
+ if (app->protocol >= DSCP_MAX) {
netdev_err(netdev, "DSCP value 0x%04X out of range\n",
app->protocol);
return -EINVAL;
@@ -931,7 +931,7 @@ static int ice_dcbnl_delapp(struct net_device *netdev, struct dcb_app *app)
/* if the last DSCP mapping just got deleted, need to switch
* to L2 VLAN QoS mode
*/
- if (bitmap_empty(new_cfg->dscp_mapped, ICE_DSCP_NUM_VAL) &&
+ if (bitmap_empty(new_cfg->dscp_mapped, DSCP_MAX) &&
new_cfg->pfc_mode == ICE_QOS_MODE_DSCP) {
ret = ice_aq_set_pfc_mode(&pf->hw,
ICE_AQC_PFC_VLAN_BASED_PFC,
diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.c b/drivers/net/ethernet/intel/ice/ice_ddp.c
index 03988be03729..59323c019544 100644
--- a/drivers/net/ethernet/intel/ice/ice_ddp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ddp.c
@@ -2345,15 +2345,15 @@ ice_get_set_tx_topo(struct ice_hw *hw, u8 *buf, u16 buf_size,
cmd->set_flags |= ICE_AQC_TX_TOPO_FLAGS_SRC_RAM |
ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW;
- if (ice_is_e825c(hw))
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
} else {
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_tx_topo);
cmd->get_flags = ICE_AQC_TX_TOPO_GET_RAM;
- }
- if (!ice_is_e825c(hw))
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ if (hw->mac_type == ICE_MAC_E810 ||
+ hw->mac_type == ICE_MAC_GENERIC)
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ }
status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
if (status)
diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.c b/drivers/net/ethernet/intel/ice/ice_dpll.c
index d5ad6d84007c..bce3ad6ca2a6 100644
--- a/drivers/net/ethernet/intel/ice/ice_dpll.c
+++ b/drivers/net/ethernet/intel/ice/ice_dpll.c
@@ -95,7 +95,7 @@ ice_dpll_pin_freq_set(struct ice_pf *pf, struct ice_dpll_pin *pin,
}
if (ret) {
NL_SET_ERR_MSG_FMT(extack,
- "err:%d %s failed to set pin freq:%u on pin:%u\n",
+ "err:%d %s failed to set pin freq:%u on pin:%u",
ret,
ice_aq_str(pf->hw.adminq.sq_last_status),
freq, pin->idx);
@@ -322,7 +322,7 @@ ice_dpll_pin_enable(struct ice_hw *hw, struct ice_dpll_pin *pin,
}
if (ret)
NL_SET_ERR_MSG_FMT(extack,
- "err:%d %s failed to enable %s pin:%u\n",
+ "err:%d %s failed to enable %s pin:%u",
ret, ice_aq_str(hw->adminq.sq_last_status),
pin_type_name[pin_type], pin->idx);
@@ -367,7 +367,7 @@ ice_dpll_pin_disable(struct ice_hw *hw, struct ice_dpll_pin *pin,
}
if (ret)
NL_SET_ERR_MSG_FMT(extack,
- "err:%d %s failed to disable %s pin:%u\n",
+ "err:%d %s failed to disable %s pin:%u",
ret, ice_aq_str(hw->adminq.sq_last_status),
pin_type_name[pin_type], pin->idx);
@@ -479,7 +479,7 @@ ice_dpll_pin_state_update(struct ice_pf *pf, struct ice_dpll_pin *pin,
err:
if (extack)
NL_SET_ERR_MSG_FMT(extack,
- "err:%d %s failed to update %s pin:%u\n",
+ "err:%d %s failed to update %s pin:%u",
ret,
ice_aq_str(pf->hw.adminq.sq_last_status),
pin_type_name[pin_type], pin->idx);
@@ -518,7 +518,7 @@ ice_dpll_hw_input_prio_set(struct ice_pf *pf, struct ice_dpll *dpll,
(u8)prio);
if (ret)
NL_SET_ERR_MSG_FMT(extack,
- "err:%d %s failed to set pin prio:%u on pin:%u\n",
+ "err:%d %s failed to set pin prio:%u on pin:%u",
ret,
ice_aq_str(pf->hw.adminq.sq_last_status),
prio, pin->idx);
@@ -1004,7 +1004,7 @@ ice_dpll_pin_phase_adjust_set(const struct dpll_pin *pin, void *pin_priv,
mutex_unlock(&pf->dplls.lock);
if (ret)
NL_SET_ERR_MSG_FMT(extack,
- "err:%d %s failed to set pin phase_adjust:%d for pin:%u on dpll:%u\n",
+ "err:%d %s failed to set pin phase_adjust:%d for pin:%u on dpll:%u",
ret,
ice_aq_str(pf->hw.adminq.sq_last_status),
phase_adjust, p->idx, d->dpll_idx);
@@ -1362,7 +1362,7 @@ ice_dpll_rclk_state_on_pin_set(const struct dpll_pin *pin, void *pin_priv,
&p->freq);
if (ret)
NL_SET_ERR_MSG_FMT(extack,
- "err:%d %s failed to set pin state:%u for pin:%u on parent:%u\n",
+ "err:%d %s failed to set pin state:%u for pin:%u on parent:%u",
ret,
ice_aq_str(pf->hw.adminq.sq_last_status),
state, p->idx, parent->idx);
@@ -2053,7 +2053,7 @@ static int ice_dpll_init_worker(struct ice_pf *pf)
struct kthread_worker *kworker;
kthread_init_delayed_work(&d->work, ice_dpll_periodic_work);
- kworker = kthread_create_worker(0, "ice-dplls-%s",
+ kworker = kthread_run_worker(0, "ice-dplls-%s",
dev_name(ice_pf_to_dev(pf)));
if (IS_ERR(kworker))
return PTR_ERR(kworker);
@@ -2065,6 +2065,18 @@ static int ice_dpll_init_worker(struct ice_pf *pf)
}
/**
+ * ice_dpll_phase_range_set - initialize phase adjust range helper
+ * @range: pointer to phase adjust range struct to be initialized
+ * @phase_adj: a value to be used as min(-)/max(+) boundary
+ */
+static void ice_dpll_phase_range_set(struct dpll_pin_phase_adjust_range *range,
+ u32 phase_adj)
+{
+ range->min = -phase_adj;
+ range->max = phase_adj;
+}
+
+/**
* ice_dpll_init_info_pins_generic - initializes generic pins info
* @pf: board private structure
* @input: if input pins initialized
@@ -2105,8 +2117,8 @@ static int ice_dpll_init_info_pins_generic(struct ice_pf *pf, bool input)
for (i = 0; i < pin_num; i++) {
pins[i].idx = i;
pins[i].prop.board_label = labels[i];
- pins[i].prop.phase_range.min = phase_adj_max;
- pins[i].prop.phase_range.max = -phase_adj_max;
+ ice_dpll_phase_range_set(&pins[i].prop.phase_range,
+ phase_adj_max);
pins[i].prop.capabilities = cap;
pins[i].pf = pf;
ret = ice_dpll_pin_state_update(pf, &pins[i], pin_type, NULL);
@@ -2152,6 +2164,7 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf,
struct ice_hw *hw = &pf->hw;
struct ice_dpll_pin *pins;
unsigned long caps;
+ u32 phase_adj_max;
u8 freq_supp_num;
bool input;
@@ -2159,11 +2172,13 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf,
case ICE_DPLL_PIN_TYPE_INPUT:
pins = pf->dplls.inputs;
num_pins = pf->dplls.num_inputs;
+ phase_adj_max = pf->dplls.input_phase_adj_max;
input = true;
break;
case ICE_DPLL_PIN_TYPE_OUTPUT:
pins = pf->dplls.outputs;
num_pins = pf->dplls.num_outputs;
+ phase_adj_max = pf->dplls.output_phase_adj_max;
input = false;
break;
default:
@@ -2188,19 +2203,13 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf,
return ret;
caps |= (DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE |
DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE);
- pins[i].prop.phase_range.min =
- pf->dplls.input_phase_adj_max;
- pins[i].prop.phase_range.max =
- -pf->dplls.input_phase_adj_max;
} else {
- pins[i].prop.phase_range.min =
- pf->dplls.output_phase_adj_max;
- pins[i].prop.phase_range.max =
- -pf->dplls.output_phase_adj_max;
ret = ice_cgu_get_output_pin_state_caps(hw, i, &caps);
if (ret)
return ret;
}
+ ice_dpll_phase_range_set(&pins[i].prop.phase_range,
+ phase_adj_max);
pins[i].prop.capabilities = caps;
ret = ice_dpll_pin_state_update(pf, &pins[i], pin_type, NULL);
if (ret)
@@ -2308,8 +2317,10 @@ static int ice_dpll_init_info(struct ice_pf *pf, bool cgu)
dp->dpll_idx = abilities.pps_dpll_idx;
d->num_inputs = abilities.num_inputs;
d->num_outputs = abilities.num_outputs;
- d->input_phase_adj_max = le32_to_cpu(abilities.max_in_phase_adj);
- d->output_phase_adj_max = le32_to_cpu(abilities.max_out_phase_adj);
+ d->input_phase_adj_max = le32_to_cpu(abilities.max_in_phase_adj) &
+ ICE_AQC_GET_CGU_MAX_PHASE_ADJ;
+ d->output_phase_adj_max = le32_to_cpu(abilities.max_out_phase_adj) &
+ ICE_AQC_GET_CGU_MAX_PHASE_ADJ;
alloc_size = sizeof(*d->inputs) * d->num_inputs;
d->inputs = kzalloc(alloc_size, GFP_KERNEL);
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c
index fb527434b58b..2e4f0969035f 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.c
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c
@@ -29,6 +29,7 @@ static int ice_eswitch_setup_env(struct ice_pf *pf)
return -ENODEV;
ice_remove_vsi_fltr(&pf->hw, uplink_vsi->idx);
+ ice_vsi_cfg_sw_lldp(uplink_vsi, true, false);
netif_addr_lock_bh(netdev);
__dev_uc_unsync(netdev, NULL);
@@ -38,8 +39,7 @@ static int ice_eswitch_setup_env(struct ice_pf *pf)
if (ice_vsi_add_vlan_zero(uplink_vsi))
goto err_vlan_zero;
- if (ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, true,
- ICE_FLTR_RX))
+ if (ice_set_dflt_vsi(uplink_vsi))
goto err_def_rx;
if (ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, true,
@@ -50,9 +50,6 @@ static int ice_eswitch_setup_env(struct ice_pf *pf)
if (vlan_ops->dis_rx_filtering(uplink_vsi))
goto err_vlan_filtering;
- if (ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_set_allow_override))
- goto err_override_uplink;
-
if (ice_vsi_update_local_lb(uplink_vsi, true))
goto err_override_local_lb;
@@ -64,8 +61,6 @@ static int ice_eswitch_setup_env(struct ice_pf *pf)
err_up:
ice_vsi_update_local_lb(uplink_vsi, false);
err_override_local_lb:
- ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
-err_override_uplink:
vlan_ops->ena_rx_filtering(uplink_vsi);
err_vlan_filtering:
ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, false,
@@ -251,6 +246,10 @@ ice_eswitch_set_target_vsi(struct sk_buff *skb,
u64 cd_cmd, dst_vsi;
if (!dst) {
+ struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
+
+ if (unlikely(eth->h_proto == htons(ETH_P_LLDP)))
+ return;
cd_cmd = ICE_TX_CTX_DESC_SWTCH_UPLINK << ICE_TXD_CTX_QW1_CMD_S;
off->cd_qw1 |= (cd_cmd | ICE_TX_DESC_DTYPE_CTX);
} else {
@@ -276,7 +275,6 @@ static void ice_eswitch_release_env(struct ice_pf *pf)
vlan_ops = ice_get_compat_vsi_vlan_ops(uplink_vsi);
ice_vsi_update_local_lb(uplink_vsi, false);
- ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
vlan_ops->ena_rx_filtering(uplink_vsi);
ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, false,
ICE_FLTR_TX);
@@ -285,6 +283,7 @@ static void ice_eswitch_release_env(struct ice_pf *pf)
ice_fltr_add_mac_and_broadcast(uplink_vsi,
uplink_vsi->port_info->mac.perm_addr,
ICE_FWD_TO_VSI);
+ ice_vsi_cfg_sw_lldp(uplink_vsi, true, true);
}
/**
@@ -509,10 +508,14 @@ err_create_repr:
*/
int ice_eswitch_attach_vf(struct ice_pf *pf, struct ice_vf *vf)
{
- struct ice_repr *repr = ice_repr_create_vf(vf);
struct devlink *devlink = priv_to_devlink(pf);
+ struct ice_repr *repr;
int err;
+ if (!ice_is_eswitch_mode_switchdev(pf))
+ return 0;
+
+ repr = ice_repr_create_vf(vf);
if (IS_ERR(repr))
return PTR_ERR(repr);
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.h b/drivers/net/ethernet/intel/ice/ice_eswitch.h
index ac7db100e2cd..5c7dcf21b222 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.h
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.h
@@ -5,7 +5,7 @@
#define _ICE_ESWITCH_H_
#include <net/devlink.h>
-#include "devlink/devlink_port.h"
+#include "devlink/port.h"
#ifdef CONFIG_ICE_SWITCHDEV
void ice_eswitch_detach_vf(struct ice_pf *pf, struct ice_vf *vf);
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 3072634bf049..bbf9e6fd315b 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -710,7 +710,6 @@ static int ice_get_tx_rx_equa(struct ice_hw *hw, u8 serdes_num,
{ ICE_AQC_RX_EQU_POST1, rx, &ptr->rx_equ_post1 },
{ ICE_AQC_RX_EQU_BFLF, rx, &ptr->rx_equ_bflf },
{ ICE_AQC_RX_EQU_BFHF, rx, &ptr->rx_equ_bfhf },
- { ICE_AQC_RX_EQU_DRATE, rx, &ptr->rx_equ_drate },
{ ICE_AQC_RX_EQU_CTLE_GAINHF, rx, &ptr->rx_equ_ctle_gainhf },
{ ICE_AQC_RX_EQU_CTLE_GAINLF, rx, &ptr->rx_equ_ctle_gainlf },
{ ICE_AQC_RX_EQU_CTLE_GAINDC, rx, &ptr->rx_equ_ctle_gaindc },
@@ -1819,7 +1818,7 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
/* Remove rule to direct LLDP packets to default VSI.
* The FW LLDP engine will now be consuming them.
*/
- ice_cfg_sw_lldp(vsi, false, false);
+ ice_cfg_sw_rx_lldp(vsi->back, false);
/* AQ command to start FW LLDP agent will return an
* error if the agent is already started
@@ -3789,8 +3788,7 @@ ice_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *info)
*/
static int ice_get_max_txq(struct ice_pf *pf)
{
- return min3(pf->num_lan_msix, (u16)num_online_cpus(),
- (u16)pf->hw.func_caps.common_cap.num_txq);
+ return min(num_online_cpus(), pf->hw.func_caps.common_cap.num_txq);
}
/**
@@ -3799,8 +3797,7 @@ static int ice_get_max_txq(struct ice_pf *pf)
*/
static int ice_get_max_rxq(struct ice_pf *pf)
{
- return min3(pf->num_lan_msix, (u16)num_online_cpus(),
- (u16)pf->hw.func_caps.common_cap.num_rxq);
+ return min(num_online_cpus(), pf->hw.func_caps.common_cap.num_rxq);
}
/**
@@ -3818,8 +3815,7 @@ static u32 ice_get_combined_cnt(struct ice_vsi *vsi)
ice_for_each_q_vector(vsi, q_idx) {
struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
- if (q_vector->rx.rx_ring && q_vector->tx.tx_ring)
- combined++;
+ combined += min(q_vector->num_ring_tx, q_vector->num_ring_rx);
}
return combined;
@@ -3968,11 +3964,11 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch)
return -EINVAL;
}
- if (pf->adev) {
+ if (pf->cdev_info && pf->cdev_info->adev) {
mutex_lock(&pf->adev_mutex);
- device_lock(&pf->adev->dev);
+ device_lock(&pf->cdev_info->adev->dev);
locked = true;
- if (pf->adev->dev.driver) {
+ if (pf->cdev_info->adev->dev.driver) {
netdev_err(dev, "Cannot change channels when RDMA is active\n");
ret = -EBUSY;
goto adev_unlock;
@@ -3991,7 +3987,7 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch)
adev_unlock:
if (locked) {
- device_unlock(&pf->adev->dev);
+ device_unlock(&pf->cdev_info->adev->dev);
mutex_unlock(&pf->adev_mutex);
}
return ret;
@@ -4774,7 +4770,7 @@ static const struct ethtool_ops ice_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_USE_ADAPTIVE |
ETHTOOL_COALESCE_RX_USECS_HIGH,
- .cap_rss_sym_xor_supported = true,
+ .supported_input_xfrm = RXH_XFRM_SYM_XOR,
.rxfh_per_ctx_key = true,
.get_link_ksettings = ice_get_link_ksettings,
.set_link_ksettings = ice_set_link_ksettings,
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.h b/drivers/net/ethernet/intel/ice/ice_ethtool.h
index 8f2ad1c172c0..23b2cfbc9684 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.h
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.h
@@ -15,7 +15,6 @@ struct ice_serdes_equalization_to_ethtool {
int rx_equ_post1;
int rx_equ_bflf;
int rx_equ_bfhf;
- int rx_equ_drate;
int rx_equ_ctle_gainhf;
int rx_equ_ctle_gainlf;
int rx_equ_ctle_gaindc;
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
index ee9862ddfe15..aceec184e89b 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
@@ -1605,22 +1605,19 @@ void ice_fdir_replay_fltrs(struct ice_pf *pf)
*/
int ice_fdir_create_dflt_rules(struct ice_pf *pf)
{
+ static const enum ice_fltr_ptype dflt_rules[] = {
+ ICE_FLTR_PTYPE_NONF_IPV4_TCP, ICE_FLTR_PTYPE_NONF_IPV4_UDP,
+ ICE_FLTR_PTYPE_NONF_IPV6_TCP, ICE_FLTR_PTYPE_NONF_IPV6_UDP,
+ };
int err;
/* Create perfect TCP and UDP rules in hardware. */
- err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV4_TCP);
- if (err)
- return err;
-
- err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV4_UDP);
- if (err)
- return err;
+ for (int i = 0; i < ARRAY_SIZE(dflt_rules); i++) {
+ err = ice_create_init_fdir_rule(pf, dflt_rules[i]);
- err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV6_TCP);
- if (err)
- return err;
-
- err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV6_UDP);
+ if (err)
+ break;
+ }
return err;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_fw_update.c b/drivers/net/ethernet/intel/ice/ice_fw_update.c
index 2702a0da5c3e..70c201f569ce 100644
--- a/drivers/net/ethernet/intel/ice/ice_fw_update.c
+++ b/drivers/net/ethernet/intel/ice/ice_fw_update.c
@@ -6,6 +6,7 @@
#include <linux/crc32.h>
#include <linux/pldmfw.h>
#include "ice.h"
+#include "ice_lib.h"
#include "ice_fw_update.h"
struct ice_fwu_priv {
@@ -125,6 +126,10 @@ ice_check_component_response(struct ice_pf *pf, u16 id, u8 response, u8 code,
case ICE_AQ_NVM_PASS_COMP_CAN_NOT_BE_UPDATED:
dev_info(dev, "firmware has rejected updating %s\n", component);
break;
+ case ICE_AQ_NVM_PASS_COMP_PARTIAL_CHECK:
+ if (ice_is_recovery_mode(&pf->hw))
+ return 0;
+ break;
}
switch (code) {
@@ -1004,13 +1009,20 @@ int ice_devlink_flash_update(struct devlink *devlink,
return -EOPNOTSUPP;
}
- if (!hw->dev_caps.common_cap.nvm_unified_update) {
+ if (!hw->dev_caps.common_cap.nvm_unified_update && !ice_is_recovery_mode(hw)) {
NL_SET_ERR_MSG_MOD(extack, "Current firmware does not support unified update");
return -EOPNOTSUPP;
}
memset(&priv, 0, sizeof(priv));
+ if (params->component && strcmp(params->component, "fw.mgmt") == 0) {
+ priv.context.mode = PLDMFW_UPDATE_MODE_SINGLE_COMPONENT;
+ priv.context.component_identifier = NVM_COMP_ID_NVM;
+ } else if (params->component) {
+ return -EOPNOTSUPP;
+ }
+
/* the E822 device needs a slightly different ops */
if (hw->mac_type == ICE_MAC_GENERIC)
priv.context.ops = &ice_fwu_ops_e822;
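
For the ice_devlink_flash_update() hunk above, the component handling reduces to: only "fw.mgmt" may be flashed on its own, any other explicitly named component is rejected, and no component means a full unified update. Restated as a sketch (constants as shown in the hunk):

    if (params->component) {
            if (strcmp(params->component, "fw.mgmt"))
                    return -EOPNOTSUPP;     /* only the management FW component is supported */
            priv.context.mode = PLDMFW_UPDATE_MODE_SINGLE_COMPONENT;
            priv.context.component_identifier = NVM_COMP_ID_NVM;
    }

From user space this path would be reached by something like "devlink dev flash pci/0000:18:00.0 file ice.bin component fw.mgmt" (bus address illustrative).
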
diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.c b/drivers/net/ethernet/intel/ice/ice_gnss.c
index f02e8ca55375..6b26290452d4 100644
--- a/drivers/net/ethernet/intel/ice/ice_gnss.c
+++ b/drivers/net/ethernet/intel/ice/ice_gnss.c
@@ -182,7 +182,7 @@ static struct gnss_serial *ice_gnss_struct_init(struct ice_pf *pf)
pf->gnss_serial = gnss;
kthread_init_delayed_work(&gnss->read_work, ice_gnss_read);
- kworker = kthread_create_worker(0, "ice-gnss-%s", dev_name(dev));
+ kworker = kthread_run_worker(0, "ice-gnss-%s", dev_name(dev));
if (IS_ERR(kworker)) {
kfree(gnss);
return NULL;
@@ -381,32 +381,23 @@ void ice_gnss_exit(struct ice_pf *pf)
}
/**
- * ice_gnss_is_gps_present - Check if GPS HW is present
+ * ice_gnss_is_module_present - Check if GNSS HW is present
* @hw: pointer to HW struct
+ *
+ * Return: true when GNSS is present, false otherwise.
*/
-bool ice_gnss_is_gps_present(struct ice_hw *hw)
+bool ice_gnss_is_module_present(struct ice_hw *hw)
{
- if (!hw->func_caps.ts_func_info.src_tmr_owned)
- return false;
+ int err;
+ u8 data;
- if (!ice_is_gps_in_netlist(hw))
+ if (!hw->func_caps.ts_func_info.src_tmr_owned ||
+ !ice_is_gps_in_netlist(hw))
return false;
-#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
- if (ice_is_e810t(hw)) {
- int err;
- u8 data;
-
- err = ice_read_pca9575_reg(hw, ICE_PCA9575_P0_IN, &data);
- if (err || !!(data & ICE_P0_GNSS_PRSNT_N))
- return false;
- } else {
- return false;
- }
-#else
- if (!ice_is_e810t(hw))
+ err = ice_read_pca9575_reg(hw, ICE_PCA9575_P0_IN, &data);
+ if (err || !!(data & ICE_P0_GNSS_PRSNT_N))
return false;
-#endif /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
return true;
}
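
The renamed helper is consumed later in this same diff by feature detection; the caller-side shape is simply:

    /* from the ice_init_feature_support() hunk further down */
    if (ice_gnss_is_module_present(&pf->hw))
            ice_set_feature_support(pf, ICE_F_GNSS);
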
diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.h b/drivers/net/ethernet/intel/ice/ice_gnss.h
index 75e567ad7059..15daf603ed7b 100644
--- a/drivers/net/ethernet/intel/ice/ice_gnss.h
+++ b/drivers/net/ethernet/intel/ice/ice_gnss.h
@@ -37,11 +37,11 @@ struct gnss_serial {
#if IS_ENABLED(CONFIG_GNSS)
void ice_gnss_init(struct ice_pf *pf);
void ice_gnss_exit(struct ice_pf *pf);
-bool ice_gnss_is_gps_present(struct ice_hw *hw);
+bool ice_gnss_is_module_present(struct ice_hw *hw);
#else
static inline void ice_gnss_init(struct ice_pf *pf) { }
static inline void ice_gnss_exit(struct ice_pf *pf) { }
-static inline bool ice_gnss_is_gps_present(struct ice_hw *hw)
+static inline bool ice_gnss_is_module_present(struct ice_hw *hw)
{
return false;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index dc88aea9f473..aa4bfbcf85d2 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -541,10 +541,22 @@
#define PFPM_WUS_MAG_M BIT(1)
#define PFPM_WUS_MNG_M BIT(3)
#define PFPM_WUS_FW_RST_WK_M BIT(31)
+#define E830_PRTMAC_TS_TX_MEM_VALID_H 0x001E2020
+#define E830_PRTMAC_TS_TX_MEM_VALID_L 0x001E2000
#define E830_PRTMAC_CL01_PS_QNT 0x001E32A0
#define E830_PRTMAC_CL01_PS_QNT_CL0_M GENMASK(15, 0)
#define E830_PRTMAC_CL01_QNT_THR 0x001E3320
#define E830_PRTMAC_CL01_QNT_THR_CL0_M GENMASK(15, 0)
+#define E830_PRTTSYN_TXTIME_H(_i) (0x001E5800 + ((_i) * 32))
+#define E830_PRTTSYN_TXTIME_L(_i) (0x001E5000 + ((_i) * 32))
+#define E830_GLPTM_ART_CTL 0x00088B50
+#define E830_GLPTM_ART_CTL_ACTIVE_M BIT(0)
+#define E830_GLPTM_ART_TIME_H 0x00088B54
+#define E830_GLPTM_ART_TIME_L 0x00088B58
+#define E830_GLTSYN_PTMTIME_H(_i) (0x00088B48 + ((_i) * 4))
+#define E830_GLTSYN_PTMTIME_L(_i) (0x00088B40 + ((_i) * 4))
+#define E830_PFPTM_SEM 0x00088B00
+#define E830_PFPTM_SEM_BUSY_M BIT(0)
#define VFINT_DYN_CTLN(_i) (0x00003800 + ((_i) * 4))
#define VFINT_DYN_CTLN_CLEARPBA_M BIT(1)
#define E830_MBX_PF_IN_FLIGHT_VF_MSGS_THRESH 0x00234000
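
The new E830 timestamp/PTM registers above are indexed, with the index scaled into the address by the macro. A short worked expansion plus a hedged read sketch (the 64-bit high/low combination is an assumption for illustration, not taken from this hunk):

    /* E830_PRTTSYN_TXTIME_L(0) == 0x001E5000
     * E830_PRTTSYN_TXTIME_L(3) == 0x001E5000 + 3 * 32 == 0x001E5060
     * E830_GLTSYN_PTMTIME_L(1) == 0x00088B40 + 1 * 4  == 0x00088B44
     */
    u32 lo = rd32(hw, E830_PRTTSYN_TXTIME_L(idx));
    u32 hi = rd32(hw, E830_PRTTSYN_TXTIME_H(idx));
    u64 ts = ((u64)hi << 32) | lo;      /* assumed layout; sketch only */
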
diff --git a/drivers/net/ethernet/intel/ice/ice_idc.c b/drivers/net/ethernet/intel/ice/ice_idc.c
index 145b27f2a4ce..6ab53e430f91 100644
--- a/drivers/net/ethernet/intel/ice/ice_idc.c
+++ b/drivers/net/ethernet/intel/ice/ice_idc.c
@@ -9,22 +9,25 @@
static DEFINE_XARRAY_ALLOC1(ice_aux_id);
/**
- * ice_get_auxiliary_drv - retrieve iidc_auxiliary_drv struct
- * @pf: pointer to PF struct
+ * ice_get_auxiliary_drv - retrieve iidc_rdma_core_auxiliary_drv struct
+ * @cdev: pointer to iidc_rdma_core_dev_info struct
*
* This function has to be called with a device_lock on the
- * pf->adev.dev to avoid race conditions.
+ * cdev->adev.dev to avoid race conditions.
+ *
+ * Return: pointer to the matched auxiliary driver struct
*/
-static struct iidc_auxiliary_drv *ice_get_auxiliary_drv(struct ice_pf *pf)
+static struct iidc_rdma_core_auxiliary_drv *
+ice_get_auxiliary_drv(struct iidc_rdma_core_dev_info *cdev)
{
struct auxiliary_device *adev;
- adev = pf->adev;
+ adev = cdev->adev;
if (!adev || !adev->dev.driver)
return NULL;
- return container_of(adev->dev.driver, struct iidc_auxiliary_drv,
- adrv.driver);
+ return container_of(adev->dev.driver,
+ struct iidc_rdma_core_auxiliary_drv, adrv.driver);
}
/**
@@ -32,44 +35,54 @@ static struct iidc_auxiliary_drv *ice_get_auxiliary_drv(struct ice_pf *pf)
* @pf: pointer to PF struct
* @event: event struct
*/
-void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_event *event)
+void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_rdma_event *event)
{
- struct iidc_auxiliary_drv *iadrv;
+ struct iidc_rdma_core_auxiliary_drv *iadrv;
+ struct iidc_rdma_core_dev_info *cdev;
if (WARN_ON_ONCE(!in_task()))
return;
+ cdev = pf->cdev_info;
+ if (!cdev)
+ return;
+
mutex_lock(&pf->adev_mutex);
- if (!pf->adev)
+ if (!cdev->adev)
goto finish;
- device_lock(&pf->adev->dev);
- iadrv = ice_get_auxiliary_drv(pf);
+ device_lock(&cdev->adev->dev);
+ iadrv = ice_get_auxiliary_drv(cdev);
if (iadrv && iadrv->event_handler)
- iadrv->event_handler(pf, event);
- device_unlock(&pf->adev->dev);
+ iadrv->event_handler(cdev, event);
+ device_unlock(&cdev->adev->dev);
finish:
mutex_unlock(&pf->adev_mutex);
}
/**
* ice_add_rdma_qset - Add Leaf Node for RDMA Qset
- * @pf: PF struct
+ * @cdev: pointer to iidc_rdma_core_dev_info struct
* @qset: Resource to be allocated
+ *
+ * Return: Zero on success or error code encountered
*/
-int ice_add_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset)
+int ice_add_rdma_qset(struct iidc_rdma_core_dev_info *cdev,
+ struct iidc_rdma_qset_params *qset)
{
u16 max_rdmaqs[ICE_MAX_TRAFFIC_CLASS];
struct ice_vsi *vsi;
struct device *dev;
+ struct ice_pf *pf;
u32 qset_teid;
u16 qs_handle;
int status;
int i;
- if (WARN_ON(!pf || !qset))
+ if (WARN_ON(!cdev || !qset))
return -EINVAL;
+ pf = pci_get_drvdata(cdev->pdev);
dev = ice_pf_to_dev(pf);
if (!ice_is_rdma_ena(pf))
@@ -100,7 +113,6 @@ int ice_add_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset)
dev_err(dev, "Failed VSI RDMA Qset enable\n");
return status;
}
- vsi->qset_handle[qset->tc] = qset->qs_handle;
qset->teid = qset_teid;
return 0;
@@ -109,18 +121,23 @@ EXPORT_SYMBOL_GPL(ice_add_rdma_qset);
/**
* ice_del_rdma_qset - Delete leaf node for RDMA Qset
- * @pf: PF struct
+ * @cdev: pointer to iidc_rdma_core_dev_info struct
* @qset: Resource to be freed
+ *
+ * Return: Zero on success, error code on failure
*/
-int ice_del_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset)
+int ice_del_rdma_qset(struct iidc_rdma_core_dev_info *cdev,
+ struct iidc_rdma_qset_params *qset)
{
struct ice_vsi *vsi;
+ struct ice_pf *pf;
u32 teid;
u16 q_id;
- if (WARN_ON(!pf || !qset))
+ if (WARN_ON(!cdev || !qset))
return -EINVAL;
+ pf = pci_get_drvdata(cdev->pdev);
vsi = ice_find_vsi(pf, qset->vport_id);
if (!vsi) {
dev_err(ice_pf_to_dev(pf), "RDMA Invalid VSI\n");
@@ -130,36 +147,36 @@ int ice_del_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset)
q_id = qset->qs_handle;
teid = qset->teid;
- vsi->qset_handle[qset->tc] = 0;
-
return ice_dis_vsi_rdma_qset(vsi->port_info, 1, &teid, &q_id);
}
EXPORT_SYMBOL_GPL(ice_del_rdma_qset);
/**
* ice_rdma_request_reset - accept request from RDMA to perform a reset
- * @pf: struct for PF
+ * @cdev: pointer to iidc_rdma_core_dev_info struct
* @reset_type: type of reset
+ *
+ * Return: Zero on success, error code on failure
*/
-int ice_rdma_request_reset(struct ice_pf *pf, enum iidc_reset_type reset_type)
+int ice_rdma_request_reset(struct iidc_rdma_core_dev_info *cdev,
+ enum iidc_rdma_reset_type reset_type)
{
enum ice_reset_req reset;
+ struct ice_pf *pf;
- if (WARN_ON(!pf))
+ if (WARN_ON(!cdev))
return -EINVAL;
+ pf = pci_get_drvdata(cdev->pdev);
+
switch (reset_type) {
- case IIDC_PFR:
+ case IIDC_FUNC_RESET:
reset = ICE_RESET_PFR;
break;
- case IIDC_CORER:
+ case IIDC_DEV_RESET:
reset = ICE_RESET_CORER;
break;
- case IIDC_GLOBR:
- reset = ICE_RESET_GLOBR;
- break;
default:
- dev_err(ice_pf_to_dev(pf), "incorrect reset request\n");
return -EINVAL;
}
@@ -169,18 +186,23 @@ EXPORT_SYMBOL_GPL(ice_rdma_request_reset);
/**
* ice_rdma_update_vsi_filter - update main VSI filters for RDMA
- * @pf: pointer to struct for PF
+ * @cdev: pointer to iidc_rdma_core_dev_info struct
* @vsi_id: VSI HW idx to update filter on
* @enable: bool whether to enable or disable filters
+ *
+ * Return: Zero on success, error code on failure
*/
-int ice_rdma_update_vsi_filter(struct ice_pf *pf, u16 vsi_id, bool enable)
+int ice_rdma_update_vsi_filter(struct iidc_rdma_core_dev_info *cdev,
+ u16 vsi_id, bool enable)
{
struct ice_vsi *vsi;
+ struct ice_pf *pf;
int status;
- if (WARN_ON(!pf))
+ if (WARN_ON(!cdev))
return -EINVAL;
+ pf = pci_get_drvdata(cdev->pdev);
vsi = ice_find_vsi(pf, vsi_id);
if (!vsi)
return -EINVAL;
@@ -201,88 +223,54 @@ int ice_rdma_update_vsi_filter(struct ice_pf *pf, u16 vsi_id, bool enable)
EXPORT_SYMBOL_GPL(ice_rdma_update_vsi_filter);
/**
- * ice_get_qos_params - parse QoS params for RDMA consumption
- * @pf: pointer to PF struct
- * @qos: set of QoS values
- */
-void ice_get_qos_params(struct ice_pf *pf, struct iidc_qos_params *qos)
-{
- struct ice_dcbx_cfg *dcbx_cfg;
- unsigned int i;
- u32 up2tc;
-
- dcbx_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
- up2tc = rd32(&pf->hw, PRTDCB_TUP2TC);
-
- qos->num_tc = ice_dcb_get_num_tc(dcbx_cfg);
- for (i = 0; i < IIDC_MAX_USER_PRIORITY; i++)
- qos->up2tc[i] = (up2tc >> (i * 3)) & 0x7;
-
- for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
- qos->tc_info[i].rel_bw = dcbx_cfg->etscfg.tcbwtable[i];
-
- qos->pfc_mode = dcbx_cfg->pfc_mode;
- if (qos->pfc_mode == IIDC_DSCP_PFC_MODE)
- for (i = 0; i < IIDC_MAX_DSCP_MAPPING; i++)
- qos->dscp_map[i] = dcbx_cfg->dscp_map[i];
-}
-EXPORT_SYMBOL_GPL(ice_get_qos_params);
-
-/**
- * ice_alloc_rdma_qvectors - Allocate vector resources for RDMA driver
- * @pf: board private structure to initialize
+ * ice_alloc_rdma_qvector - alloc vector resources reserved for RDMA driver
+ * @cdev: pointer to iidc_rdma_core_dev_info struct
+ * @entry: MSI-X entry to be filled with the allocated vector details
+ *
+ * Return: Zero on success, error code on failure
*/
-static int ice_alloc_rdma_qvectors(struct ice_pf *pf)
+int ice_alloc_rdma_qvector(struct iidc_rdma_core_dev_info *cdev,
+ struct msix_entry *entry)
{
- if (ice_is_rdma_ena(pf)) {
- int i;
-
- pf->msix_entries = kcalloc(pf->num_rdma_msix,
- sizeof(*pf->msix_entries),
- GFP_KERNEL);
- if (!pf->msix_entries)
- return -ENOMEM;
+ struct msi_map map;
+ struct ice_pf *pf;
- /* RDMA is the only user of pf->msix_entries array */
- pf->rdma_base_vector = 0;
+ if (WARN_ON(!cdev))
+ return -EINVAL;
- for (i = 0; i < pf->num_rdma_msix; i++) {
- struct msix_entry *entry = &pf->msix_entries[i];
- struct msi_map map;
+ pf = pci_get_drvdata(cdev->pdev);
+ map = ice_alloc_irq(pf, true);
+ if (map.index < 0)
+ return -ENOMEM;
- map = ice_alloc_irq(pf, false);
- if (map.index < 0)
- break;
+ entry->entry = map.index;
+ entry->vector = map.virq;
- entry->entry = map.index;
- entry->vector = map.virq;
- }
- }
return 0;
}
+EXPORT_SYMBOL_GPL(ice_alloc_rdma_qvector);
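
ice_alloc_rdma_qvector()/ice_free_rdma_qvector() are now exported and hand out one vector at a time instead of a preallocated array. A hedged sketch of how an auxiliary (RDMA) driver might consume them; the surrounding context is hypothetical:

    struct msix_entry entry;
    int err;

    err = ice_alloc_rdma_qvector(cdev_info, &entry);
    if (err)
            return err;

    /* entry.entry  - index within the PF's MSI-X table
     * entry.vector - Linux IRQ number, usable with request_irq()
     */

    /* ... request_irq()/free_irq() against entry.vector ... */

    ice_free_rdma_qvector(cdev_info, &entry);
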
/**
* ice_free_rdma_qvector - free vector resources reserved for RDMA driver
- * @pf: board private structure to initialize
+ * @cdev: pointer to iidc_rdma_core_dev_info struct
+ * @entry: MSI-X entry to be removed
*/
-static void ice_free_rdma_qvector(struct ice_pf *pf)
+void ice_free_rdma_qvector(struct iidc_rdma_core_dev_info *cdev,
+ struct msix_entry *entry)
{
- int i;
+ struct msi_map map;
+ struct ice_pf *pf;
- if (!pf->msix_entries)
+ if (WARN_ON(!cdev || !entry))
return;
- for (i = 0; i < pf->num_rdma_msix; i++) {
- struct msi_map map;
-
- map.index = pf->msix_entries[i].entry;
- map.virq = pf->msix_entries[i].vector;
- ice_free_irq(pf, map);
- }
+ pf = pci_get_drvdata(cdev->pdev);
- kfree(pf->msix_entries);
- pf->msix_entries = NULL;
+ map.index = entry->entry;
+ map.virq = entry->vector;
+ ice_free_irq(pf, map);
}
+EXPORT_SYMBOL_GPL(ice_free_rdma_qvector);
/**
* ice_adev_release - function to be mapped to AUX dev's release op
@@ -290,19 +278,23 @@ static void ice_free_rdma_qvector(struct ice_pf *pf)
*/
static void ice_adev_release(struct device *dev)
{
- struct iidc_auxiliary_dev *iadev;
+ struct iidc_rdma_core_auxiliary_dev *iadev;
- iadev = container_of(dev, struct iidc_auxiliary_dev, adev.dev);
+ iadev = container_of(dev, struct iidc_rdma_core_auxiliary_dev,
+ adev.dev);
kfree(iadev);
}
/**
* ice_plug_aux_dev - allocate and register AUX device
* @pf: pointer to pf struct
+ *
+ * Return: Zero on success, error code on failure
*/
int ice_plug_aux_dev(struct ice_pf *pf)
{
- struct iidc_auxiliary_dev *iadev;
+ struct iidc_rdma_core_auxiliary_dev *iadev;
+ struct iidc_rdma_core_dev_info *cdev;
struct auxiliary_device *adev;
int ret;
@@ -312,17 +304,22 @@ int ice_plug_aux_dev(struct ice_pf *pf)
if (!ice_is_rdma_ena(pf))
return 0;
+ cdev = pf->cdev_info;
+ if (!cdev)
+ return -ENODEV;
+
iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
if (!iadev)
return -ENOMEM;
adev = &iadev->adev;
- iadev->pf = pf;
+ iadev->cdev_info = cdev;
adev->id = pf->aux_idx;
adev->dev.release = ice_adev_release;
adev->dev.parent = &pf->pdev->dev;
- adev->name = pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2 ? "roce" : "iwarp";
+ adev->name = cdev->rdma_protocol & IIDC_RDMA_PROTOCOL_ROCEV2 ?
+ "roce" : "iwarp";
ret = auxiliary_device_init(adev);
if (ret) {
@@ -337,7 +334,7 @@ int ice_plug_aux_dev(struct ice_pf *pf)
}
mutex_lock(&pf->adev_mutex);
- pf->adev = adev;
+ cdev->adev = adev;
mutex_unlock(&pf->adev_mutex);
return 0;
@@ -351,8 +348,8 @@ void ice_unplug_aux_dev(struct ice_pf *pf)
struct auxiliary_device *adev;
mutex_lock(&pf->adev_mutex);
- adev = pf->adev;
- pf->adev = NULL;
+ adev = pf->cdev_info->adev;
+ pf->cdev_info->adev = NULL;
mutex_unlock(&pf->adev_mutex);
if (adev) {
@@ -367,7 +364,9 @@ void ice_unplug_aux_dev(struct ice_pf *pf)
*/
int ice_init_rdma(struct ice_pf *pf)
{
+ struct iidc_rdma_priv_dev_info *privd;
struct device *dev = &pf->pdev->dev;
+ struct iidc_rdma_core_dev_info *cdev;
int ret;
if (!ice_is_rdma_ena(pf)) {
@@ -375,30 +374,50 @@ int ice_init_rdma(struct ice_pf *pf)
return 0;
}
+ cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
+ if (!cdev)
+ return -ENOMEM;
+
+ pf->cdev_info = cdev;
+
+ privd = kzalloc(sizeof(*privd), GFP_KERNEL);
+ if (!privd) {
+ ret = -ENOMEM;
+ goto err_privd_alloc;
+ }
+
+ privd->pf_id = pf->hw.pf_id;
ret = xa_alloc(&ice_aux_id, &pf->aux_idx, NULL, XA_LIMIT(1, INT_MAX),
GFP_KERNEL);
if (ret) {
dev_err(dev, "Failed to allocate device ID for AUX driver\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_alloc_xa;
}
- /* Reserve vector resources */
- ret = ice_alloc_rdma_qvectors(pf);
- if (ret < 0) {
- dev_err(dev, "failed to reserve vectors for RDMA\n");
- goto err_reserve_rdma_qvector;
- }
- pf->rdma_mode |= IIDC_RDMA_PROTOCOL_ROCEV2;
+ cdev->iidc_priv = privd;
+ privd->netdev = pf->vsi[0]->netdev;
+
+ privd->hw_addr = (u8 __iomem *)pf->hw.hw_addr;
+ cdev->pdev = pf->pdev;
+ privd->vport_id = pf->vsi[0]->vsi_num;
+
+ pf->cdev_info->rdma_protocol |= IIDC_RDMA_PROTOCOL_ROCEV2;
+ ice_setup_dcb_qos_info(pf, &privd->qos_info);
ret = ice_plug_aux_dev(pf);
if (ret)
goto err_plug_aux_dev;
return 0;
err_plug_aux_dev:
- ice_free_rdma_qvector(pf);
-err_reserve_rdma_qvector:
- pf->adev = NULL;
+ pf->cdev_info->adev = NULL;
xa_erase(&ice_aux_id, pf->aux_idx);
+err_alloc_xa:
+ kfree(privd);
+err_privd_alloc:
+ kfree(cdev);
+ pf->cdev_info = NULL;
+
return ret;
}
@@ -412,6 +431,8 @@ void ice_deinit_rdma(struct ice_pf *pf)
return;
ice_unplug_aux_dev(pf);
- ice_free_rdma_qvector(pf);
xa_erase(&ice_aux_id, pf->aux_idx);
+ kfree(pf->cdev_info->iidc_priv);
+ kfree(pf->cdev_info);
+ pf->cdev_info = NULL;
}
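
ice_init_rdma() above now owns two allocations plus the xarray ID, and the new labels unwind them in reverse order of acquisition. The same pattern in isolation (struct foo/bar and the ids xarray are placeholders, not the driver's types):

    struct foo *cdev;       /* hypothetical resources */
    struct bar *privd;
    u32 id;
    int ret;

    cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
    if (!cdev)
            return -ENOMEM;

    privd = kzalloc(sizeof(*privd), GFP_KERNEL);
    if (!privd) {
            ret = -ENOMEM;
            goto err_privd_alloc;
    }

    ret = xa_alloc(&ids, &id, NULL, XA_LIMIT(1, INT_MAX), GFP_KERNEL);
    if (ret)
            goto err_alloc_xa;

    return 0;

    err_alloc_xa:
            kfree(privd);
    err_privd_alloc:
            kfree(cdev);
            return ret;
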
diff --git a/drivers/net/ethernet/intel/ice/ice_idc_int.h b/drivers/net/ethernet/intel/ice/ice_idc_int.h
index 4b0c86757df9..17dbfcfb6a2a 100644
--- a/drivers/net/ethernet/intel/ice/ice_idc_int.h
+++ b/drivers/net/ethernet/intel/ice/ice_idc_int.h
@@ -4,10 +4,11 @@
#ifndef _ICE_IDC_INT_H_
#define _ICE_IDC_INT_H_
-#include <linux/net/intel/iidc.h>
+#include <linux/net/intel/iidc_rdma.h>
+#include <linux/net/intel/iidc_rdma_ice.h>
struct ice_pf;
-void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_event *event);
+void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_rdma_event *event);
#endif /* !_ICE_IDC_INT_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_irq.c b/drivers/net/ethernet/intel/ice/ice_irq.c
index ad82ff7d1995..30801fd375f0 100644
--- a/drivers/net/ethernet/intel/ice/ice_irq.c
+++ b/drivers/net/ethernet/intel/ice/ice_irq.c
@@ -20,6 +20,19 @@ ice_init_irq_tracker(struct ice_pf *pf, unsigned int max_vectors,
xa_init_flags(&pf->irq_tracker.entries, XA_FLAGS_ALLOC);
}
+static int
+ice_init_virt_irq_tracker(struct ice_pf *pf, u32 base, u32 num_entries)
+{
+ pf->virt_irq_tracker.bm = bitmap_zalloc(num_entries, GFP_KERNEL);
+ if (!pf->virt_irq_tracker.bm)
+ return -ENOMEM;
+
+ pf->virt_irq_tracker.num_entries = num_entries;
+ pf->virt_irq_tracker.base = base;
+
+ return 0;
+}
+
/**
* ice_deinit_irq_tracker - free xarray tracker
* @pf: board private structure
@@ -29,6 +42,11 @@ static void ice_deinit_irq_tracker(struct ice_pf *pf)
xa_destroy(&pf->irq_tracker.entries);
}
+static void ice_deinit_virt_irq_tracker(struct ice_pf *pf)
+{
+ bitmap_free(pf->virt_irq_tracker.bm);
+}
+
/**
* ice_free_irq_res - free a block of resources
* @pf: board private structure
@@ -45,7 +63,7 @@ static void ice_free_irq_res(struct ice_pf *pf, u16 index)
/**
* ice_get_irq_res - get an interrupt resource
* @pf: board private structure
- * @dyn_only: force entry to be dynamically allocated
+ * @dyn_allowed: allow entry to be dynamically allocated
*
* Allocate new irq entry in the free slot of the tracker. Since xarray
* is used, always allocate new entry at the lowest possible index. Set
@@ -53,11 +71,12 @@ static void ice_free_irq_res(struct ice_pf *pf, u16 index)
*
* Returns allocated irq entry or NULL on failure.
*/
-static struct ice_irq_entry *ice_get_irq_res(struct ice_pf *pf, bool dyn_only)
+static struct ice_irq_entry *ice_get_irq_res(struct ice_pf *pf,
+ bool dyn_allowed)
{
- struct xa_limit limit = { .max = pf->irq_tracker.num_entries,
+ struct xa_limit limit = { .max = pf->irq_tracker.num_entries - 1,
.min = 0 };
- unsigned int num_static = pf->irq_tracker.num_static;
+ unsigned int num_static = pf->irq_tracker.num_static - 1;
struct ice_irq_entry *entry;
unsigned int index;
int ret;
@@ -66,9 +85,9 @@ static struct ice_irq_entry *ice_get_irq_res(struct ice_pf *pf, bool dyn_only)
if (!entry)
return NULL;
- /* skip preallocated entries if the caller says so */
- if (dyn_only)
- limit.min = num_static;
+ /* restrict to preallocated entries if dynamic allocation is not allowed */
+ if (!dyn_allowed)
+ limit.max = num_static;
ret = xa_alloc(&pf->irq_tracker.entries, &index, entry, limit,
GFP_KERNEL);
@@ -78,161 +97,18 @@ static struct ice_irq_entry *ice_get_irq_res(struct ice_pf *pf, bool dyn_only)
entry = NULL;
} else {
entry->index = index;
- entry->dynamic = index >= num_static;
+ entry->dynamic = index > num_static;
}
return entry;
}
-/**
- * ice_reduce_msix_usage - Reduce usage of MSI-X vectors
- * @pf: board private structure
- * @v_remain: number of remaining MSI-X vectors to be distributed
- *
- * Reduce the usage of MSI-X vectors when entire request cannot be fulfilled.
- * pf->num_lan_msix and pf->num_rdma_msix values are set based on number of
- * remaining vectors.
- */
-static void ice_reduce_msix_usage(struct ice_pf *pf, int v_remain)
-{
- int v_rdma;
-
- if (!ice_is_rdma_ena(pf)) {
- pf->num_lan_msix = v_remain;
- return;
- }
-
- /* RDMA needs at least 1 interrupt in addition to AEQ MSIX */
- v_rdma = ICE_RDMA_NUM_AEQ_MSIX + 1;
-
- if (v_remain < ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_RDMA_MSIX) {
- dev_warn(ice_pf_to_dev(pf), "Not enough MSI-X vectors to support RDMA.\n");
- clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
-
- pf->num_rdma_msix = 0;
- pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX;
- } else if ((v_remain < ICE_MIN_LAN_TXRX_MSIX + v_rdma) ||
- (v_remain - v_rdma < v_rdma)) {
- /* Support minimum RDMA and give remaining vectors to LAN MSIX
- */
- pf->num_rdma_msix = ICE_MIN_RDMA_MSIX;
- pf->num_lan_msix = v_remain - ICE_MIN_RDMA_MSIX;
- } else {
- /* Split remaining MSIX with RDMA after accounting for AEQ MSIX
- */
- pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 +
- ICE_RDMA_NUM_AEQ_MSIX;
- pf->num_lan_msix = v_remain - pf->num_rdma_msix;
- }
-}
-
-/**
- * ice_ena_msix_range - Request a range of MSIX vectors from the OS
- * @pf: board private structure
- *
- * Compute the number of MSIX vectors wanted and request from the OS. Adjust
- * device usage if there are not enough vectors. Return the number of vectors
- * reserved or negative on failure.
- */
-static int ice_ena_msix_range(struct ice_pf *pf)
+#define ICE_RDMA_AEQ_MSIX 1
+static int ice_get_default_msix_amount(struct ice_pf *pf)
{
- int num_cpus, hw_num_msix, v_other, v_wanted, v_actual;
- struct device *dev = ice_pf_to_dev(pf);
- int err;
-
- hw_num_msix = pf->hw.func_caps.common_cap.num_msix_vectors;
- num_cpus = num_online_cpus();
-
- /* LAN miscellaneous handler */
- v_other = ICE_MIN_LAN_OICR_MSIX;
-
- /* Flow Director */
- if (test_bit(ICE_FLAG_FD_ENA, pf->flags))
- v_other += ICE_FDIR_MSIX;
-
- /* switchdev */
- v_other += ICE_ESWITCH_MSIX;
-
- v_wanted = v_other;
-
- /* LAN traffic */
- pf->num_lan_msix = num_cpus;
- v_wanted += pf->num_lan_msix;
-
- /* RDMA auxiliary driver */
- if (ice_is_rdma_ena(pf)) {
- pf->num_rdma_msix = num_cpus + ICE_RDMA_NUM_AEQ_MSIX;
- v_wanted += pf->num_rdma_msix;
- }
-
- if (v_wanted > hw_num_msix) {
- int v_remain;
-
- dev_warn(dev, "not enough device MSI-X vectors. wanted = %d, available = %d\n",
- v_wanted, hw_num_msix);
-
- if (hw_num_msix < ICE_MIN_MSIX) {
- err = -ERANGE;
- goto exit_err;
- }
-
- v_remain = hw_num_msix - v_other;
- if (v_remain < ICE_MIN_LAN_TXRX_MSIX) {
- v_other = ICE_MIN_MSIX - ICE_MIN_LAN_TXRX_MSIX;
- v_remain = ICE_MIN_LAN_TXRX_MSIX;
- }
-
- ice_reduce_msix_usage(pf, v_remain);
- v_wanted = pf->num_lan_msix + pf->num_rdma_msix + v_other;
-
- dev_notice(dev, "Reducing request to %d MSI-X vectors for LAN traffic.\n",
- pf->num_lan_msix);
- if (ice_is_rdma_ena(pf))
- dev_notice(dev, "Reducing request to %d MSI-X vectors for RDMA.\n",
- pf->num_rdma_msix);
- }
-
- /* actually reserve the vectors */
- v_actual = pci_alloc_irq_vectors(pf->pdev, ICE_MIN_MSIX, v_wanted,
- PCI_IRQ_MSIX);
- if (v_actual < 0) {
- dev_err(dev, "unable to reserve MSI-X vectors\n");
- err = v_actual;
- goto exit_err;
- }
-
- if (v_actual < v_wanted) {
- dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
- v_wanted, v_actual);
-
- if (v_actual < ICE_MIN_MSIX) {
- /* error if we can't get minimum vectors */
- pci_free_irq_vectors(pf->pdev);
- err = -ERANGE;
- goto exit_err;
- } else {
- int v_remain = v_actual - v_other;
-
- if (v_remain < ICE_MIN_LAN_TXRX_MSIX)
- v_remain = ICE_MIN_LAN_TXRX_MSIX;
-
- ice_reduce_msix_usage(pf, v_remain);
-
- dev_notice(dev, "Enabled %d MSI-X vectors for LAN traffic.\n",
- pf->num_lan_msix);
-
- if (ice_is_rdma_ena(pf))
- dev_notice(dev, "Enabled %d MSI-X vectors for RDMA.\n",
- pf->num_rdma_msix);
- }
- }
-
- return v_actual;
-
-exit_err:
- pf->num_rdma_msix = 0;
- pf->num_lan_msix = 0;
- return err;
+ return ICE_MIN_LAN_OICR_MSIX + num_online_cpus() +
+ (test_bit(ICE_FLAG_FD_ENA, pf->flags) ? ICE_FDIR_MSIX : 0) +
+ (ice_is_rdma_ena(pf) ? num_online_cpus() + ICE_RDMA_AEQ_MSIX : 0);
}
/**
@@ -243,6 +119,7 @@ void ice_clear_interrupt_scheme(struct ice_pf *pf)
{
pci_free_irq_vectors(pf->pdev);
ice_deinit_irq_tracker(pf);
+ ice_deinit_virt_irq_tracker(pf);
}
/**
@@ -252,27 +129,38 @@ void ice_clear_interrupt_scheme(struct ice_pf *pf)
int ice_init_interrupt_scheme(struct ice_pf *pf)
{
int total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
- int vectors, max_vectors;
+ int vectors;
- vectors = ice_ena_msix_range(pf);
+ /* load default PF MSI-X range */
+ if (!pf->msix.min)
+ pf->msix.min = ICE_MIN_MSIX;
- if (vectors < 0)
- return -ENOMEM;
+ if (!pf->msix.max)
+ pf->msix.max = min(total_vectors,
+ ice_get_default_msix_amount(pf));
+
+ pf->msix.total = total_vectors;
+ pf->msix.rest = total_vectors - pf->msix.max;
if (pci_msix_can_alloc_dyn(pf->pdev))
- max_vectors = total_vectors;
+ vectors = pf->msix.min;
else
- max_vectors = vectors;
+ vectors = pf->msix.max;
+
+ vectors = pci_alloc_irq_vectors(pf->pdev, pf->msix.min, vectors,
+ PCI_IRQ_MSIX);
+ if (vectors < 0)
+ return vectors;
- ice_init_irq_tracker(pf, max_vectors, vectors);
+ ice_init_irq_tracker(pf, pf->msix.max, vectors);
- return 0;
+ return ice_init_virt_irq_tracker(pf, pf->msix.max, pf->msix.rest);
}
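
The rewritten ice_init_interrupt_scheme() captures the new policy: with dynamic MSI-X support only pf->msix.min vectors are reserved up front and the rest are added on demand; without it, the full budget must be allocated immediately. A reduced sketch of the allocation call (variable names simplified):

    /* pci_alloc_irq_vectors() returns the number of vectors actually
     * obtained (at least 'min') or a negative errno.
     */
    int want = pci_msix_can_alloc_dyn(pdev) ? msix_min : msix_max;
    int got  = pci_alloc_irq_vectors(pdev, msix_min, want, PCI_IRQ_MSIX);

    if (got < 0)
            return got;

    /* later, ice_alloc_irq() grows the table one entry at a time: */
    struct msi_map map = pci_msix_alloc_irq_at(pdev, entry_index, NULL);
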
/**
* ice_alloc_irq - Allocate new interrupt vector
* @pf: board private structure
- * @dyn_only: force dynamic allocation of the interrupt
+ * @dyn_allowed: allow dynamic allocation of the interrupt
*
* Allocate new interrupt vector for a given owner id.
* return struct msi_map with interrupt details and track
@@ -285,27 +173,22 @@ int ice_init_interrupt_scheme(struct ice_pf *pf)
* interrupt will be allocated with pci_msix_alloc_irq_at.
*
* Some callers may only support dynamically allocated interrupts.
- * This is indicated with dyn_only flag.
+ * This is indicated with dyn_allowed flag.
*
* On failure, return map with negative .index. The caller
* is expected to check returned map index.
*
*/
-struct msi_map ice_alloc_irq(struct ice_pf *pf, bool dyn_only)
+struct msi_map ice_alloc_irq(struct ice_pf *pf, bool dyn_allowed)
{
- int sriov_base_vector = pf->sriov_base_vector;
struct msi_map map = { .index = -ENOENT };
struct device *dev = ice_pf_to_dev(pf);
struct ice_irq_entry *entry;
- entry = ice_get_irq_res(pf, dyn_only);
+ entry = ice_get_irq_res(pf, dyn_allowed);
if (!entry)
return map;
- /* fail if we're about to violate SRIOV vectors space */
- if (sriov_base_vector && entry->index >= sriov_base_vector)
- goto exit_free_res;
-
if (pci_msix_can_alloc_dyn(pf->pdev) && entry->dynamic) {
map = pci_msix_alloc_irq_at(pf->pdev, entry->index, NULL);
if (map.index < 0)
@@ -353,26 +236,40 @@ void ice_free_irq(struct ice_pf *pf, struct msi_map map)
}
/**
- * ice_get_max_used_msix_vector - Get the max used interrupt vector
- * @pf: board private structure
+ * ice_virt_get_irqs - get irqs for SR-IOV use case
+ * @pf: pointer to PF structure
+ * @needed: number of irqs to get
*
- * Return index of maximum used interrupt vectors with respect to the
- * beginning of the MSIX table. Take into account that some interrupts
- * may have been dynamically allocated after MSIX was initially enabled.
+ * Reserve @needed contiguous MSI-X vectors from the pool set aside for
+ * SR-IOV and return the index of the first one in PF space. That index is
+ * what a VF uses when accessing PF-relative registers such as
+ * GLINT_VECT2FUNC and GLINT_DYN_CTL.
*/
-int ice_get_max_used_msix_vector(struct ice_pf *pf)
+int ice_virt_get_irqs(struct ice_pf *pf, u32 needed)
{
- unsigned long start, index, max_idx;
- void *entry;
+ int res = bitmap_find_next_zero_area(pf->virt_irq_tracker.bm,
+ pf->virt_irq_tracker.num_entries,
+ 0, needed, 0);
- /* Treat all preallocated interrupts as used */
- start = pf->irq_tracker.num_static;
- max_idx = start - 1;
+ if (res >= pf->virt_irq_tracker.num_entries)
+ return -ENOENT;
- xa_for_each_start(&pf->irq_tracker.entries, index, entry, start) {
- if (index > max_idx)
- max_idx = index;
- }
+ bitmap_set(pf->virt_irq_tracker.bm, res, needed);
+
+ /* conversion from number in bitmap to global irq index */
+ return res + pf->virt_irq_tracker.base;
+}
- return max_idx;
+/**
+ * ice_virt_free_irqs - free irqs used by the VF
+ * @pf: pointer to PF structure
+ * @index: first index to be freed
+ * @irqs: number of irqs to free
+ */
+void ice_virt_free_irqs(struct ice_pf *pf, u32 index, u32 irqs)
+{
+ bitmap_clear(pf->virt_irq_tracker.bm, index - pf->virt_irq_tracker.base,
+ irqs);
}
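
The SR-IOV vector pool is now a plain bitmap allocator: find a run of free bits, mark it used, and translate the bitmap offset into a global vector index by adding the pool base. The core of the idiom, extracted from the two helpers above:

    unsigned long off;

    off = bitmap_find_next_zero_area(bm, num_entries, 0, needed, 0);
    if (off >= num_entries)
            return -ENOENT;         /* no contiguous run of 'needed' free vectors */

    bitmap_set(bm, off, needed);
    first_irq = off + base;         /* index in PF MSI-X space */

    /* free path mirrors it: */
    bitmap_clear(bm, first_irq - base, needed);
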
diff --git a/drivers/net/ethernet/intel/ice/ice_irq.h b/drivers/net/ethernet/intel/ice/ice_irq.h
index f35efc08575e..b2f9dbafd57e 100644
--- a/drivers/net/ethernet/intel/ice/ice_irq.h
+++ b/drivers/net/ethernet/intel/ice/ice_irq.h
@@ -15,11 +15,22 @@ struct ice_irq_tracker {
u16 num_static; /* preallocated entries */
};
+struct ice_virt_irq_tracker {
+ unsigned long *bm; /* bitmap to track irq usage */
+ u32 num_entries;
+ /* First MSIX vector used by SR-IOV VFs. Calculated by subtracting the
+ * number of MSIX vectors needed for all SR-IOV VFs from the number of
+ * MSIX vectors allowed on this PF.
+ */
+ u32 base;
+};
+
int ice_init_interrupt_scheme(struct ice_pf *pf);
void ice_clear_interrupt_scheme(struct ice_pf *pf);
struct msi_map ice_alloc_irq(struct ice_pf *pf, bool dyn_only);
void ice_free_irq(struct ice_pf *pf, struct msi_map map);
-int ice_get_max_used_msix_vector(struct ice_pf *pf);
+int ice_virt_get_irqs(struct ice_pf *pf, u32 needed);
+void ice_virt_free_irqs(struct ice_pf *pf, u32 index, u32 irqs);
#endif
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c
index 1ccb572ce285..2410aee59fb2 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.c
+++ b/drivers/net/ethernet/intel/ice/ice_lag.c
@@ -1001,6 +1001,28 @@ static void ice_lag_link(struct ice_lag *lag)
}
/**
+ * ice_lag_config_eswitch - configure eswitch to work with LAG
+ * @lag: lag info struct
+ * @netdev: active network interface device struct
+ *
+ * Updates all port representors in eswitch to use @netdev for Tx.
+ *
+ * Configures the netdev to keep dst metadata (also used in representor Tx).
+ * This is required for an uplink without switchdev mode configured.
+ */
+static void ice_lag_config_eswitch(struct ice_lag *lag,
+ struct net_device *netdev)
+{
+ struct ice_repr *repr;
+ unsigned long id;
+
+ xa_for_each(&lag->pf->eswitch.reprs, id, repr)
+ repr->dst->u.port_info.lower_dev = netdev;
+
+ netif_keep_dst(netdev);
+}
+
+/**
* ice_lag_unlink - handle unlink event
* @lag: LAG info struct
*/
@@ -1021,6 +1043,9 @@ static void ice_lag_unlink(struct ice_lag *lag)
ice_lag_move_vf_nodes(lag, act_port, pri_port);
lag->primary = false;
lag->active_port = ICE_LAG_INVALID_PORT;
+
+ /* Config primary's eswitch back to normal operation. */
+ ice_lag_config_eswitch(lag, lag->netdev);
} else {
struct ice_lag *primary_lag;
@@ -1296,12 +1321,18 @@ static void ice_lag_changeupper_event(struct ice_lag *lag, void *ptr)
*/
if (!primary_lag) {
lag->primary = true;
+ if (!ice_is_switchdev_running(lag->pf))
+ return;
+
/* Configure primary's SWID to be shared */
ice_lag_primary_swid(lag, true);
primary_lag = lag;
} else {
u16 swid;
+ if (!ice_is_switchdev_running(primary_lag->pf))
+ return;
+
swid = primary_lag->pf->hw.port_info->sw_id;
ice_lag_set_swid(swid, lag, true);
ice_lag_add_prune_list(primary_lag, lag->pf);
@@ -1419,6 +1450,7 @@ static void ice_lag_monitor_active(struct ice_lag *lag, void *ptr)
ice_lag_move_vf_nodes(lag, prim_port,
event_port);
lag->active_port = event_port;
+ ice_lag_config_eswitch(lag, event_netdev);
return;
}
@@ -1428,6 +1460,7 @@ static void ice_lag_monitor_active(struct ice_lag *lag, void *ptr)
/* new active port */
ice_lag_move_vf_nodes(lag, lag->active_port, event_port);
lag->active_port = event_port;
+ ice_lag_config_eswitch(lag, event_netdev);
} else {
/* port not set as currently active (e.g. new active port
* has already claimed the nodes and filters
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index 611577ebc29d..77ba26538b07 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -229,7 +229,7 @@ struct ice_32b_rx_flex_desc_nic {
__le16 status_error1;
u8 flexi_flags2;
u8 ts_low;
- __le16 l2tag2_1st;
+ __le16 raw_csum;
__le16 l2tag2_2nd;
/* Qword 3 */
@@ -371,29 +371,21 @@ enum ice_rx_flex_desc_status_error_1_bits {
ICE_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! */
};
-#define ICE_RXQ_CTX_SIZE_DWORDS 8
-#define ICE_RXQ_CTX_SZ (ICE_RXQ_CTX_SIZE_DWORDS * sizeof(u32))
#define ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS 22
#define ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS 5
#define GLTCLAN_CQ_CNTX(i, CQ) (GLTCLAN_CQ_CNTX0(CQ) + ((i) * 0x0800))
-/* RLAN Rx queue context data
- *
- * The sizes of the variables may be larger than needed due to crossing byte
- * boundaries. If we do not have the width of the variable set to the correct
- * size then we could end up shifting bits off the top of the variable when the
- * variable is at the top of a byte and crosses over into the next byte.
- */
+/* RLAN Rx queue context data */
struct ice_rlan_ctx {
u16 head;
- u16 cpuid; /* bigger than needed, see above for reason */
+ u8 cpuid;
#define ICE_RLAN_BASE_S 7
u64 base;
u16 qlen;
#define ICE_RLAN_CTX_DBUF_S 7
- u16 dbuf; /* bigger than needed, see above for reason */
+ u8 dbuf;
#define ICE_RLAN_CTX_HBUF_S 6
- u16 hbuf; /* bigger than needed, see above for reason */
+ u8 hbuf;
u8 dtype;
u8 dsize;
u8 crcstrip;
@@ -401,29 +393,15 @@ struct ice_rlan_ctx {
u8 hsplit_0;
u8 hsplit_1;
u8 showiv;
- u32 rxmax; /* bigger than needed, see above for reason */
+ u16 rxmax;
u8 tphrdesc_ena;
u8 tphwdesc_ena;
u8 tphdata_ena;
u8 tphhead_ena;
- u16 lrxqthresh; /* bigger than needed, see above for reason */
+ u8 lrxqthresh;
u8 prefena; /* NOTE: normally must be set to 1 at init */
};
-struct ice_ctx_ele {
- u16 offset;
- u16 size_of;
- u16 width;
- u16 lsb;
-};
-
-#define ICE_CTX_STORE(_struct, _ele, _width, _lsb) { \
- .offset = offsetof(struct _struct, _ele), \
- .size_of = sizeof_field(struct _struct, _ele), \
- .width = _width, \
- .lsb = _lsb, \
-}
-
/* for hsplit_0 field of Rx RLAN context */
enum ice_rlan_ctx_rx_hsplit_0 {
ICE_RLAN_RX_HSPLIT_0_NO_SPLIT = 0,
@@ -500,10 +478,15 @@ enum ice_tx_desc_len_fields {
struct ice_tx_ctx_desc {
__le32 tunneling_params;
__le16 l2tag2;
- __le16 rsvd;
+ __le16 gcs;
__le64 qw1;
};
+#define ICE_TX_GCS_DESC_START_M GENMASK(7, 0)
+#define ICE_TX_GCS_DESC_OFFSET_M GENMASK(11, 8)
+#define ICE_TX_GCS_DESC_TYPE_M GENMASK(14, 12)
+#define ICE_TX_GCS_DESC_CSUM_PSH 1
+
#define ICE_TXD_CTX_QW1_CMD_S 4
#define ICE_TXD_CTX_QW1_CMD_M (0x7FUL << ICE_TXD_CTX_QW1_CMD_S)
@@ -551,18 +534,12 @@ enum ice_tx_ctx_desc_eipt_offload {
#define ICE_LAN_TXQ_MAX_QGRPS 127
#define ICE_LAN_TXQ_MAX_QDIS 1023
-/* Tx queue context data
- *
- * The sizes of the variables may be larger than needed due to crossing byte
- * boundaries. If we do not have the width of the variable set to the correct
- * size then we could end up shifting bits off the top of the variable when the
- * variable is at the top of a byte and crosses over into the next byte.
- */
+/* Tx queue context data */
struct ice_tlan_ctx {
#define ICE_TLAN_CTX_BASE_S 7
u64 base; /* base is defined in 128-byte units */
u8 port_num;
- u16 cgd_num; /* bigger than needed, see above for reason */
+ u8 cgd_num;
u8 pf_num;
u16 vmvf_num;
u8 vmvf_type;
@@ -573,7 +550,7 @@ struct ice_tlan_ctx {
u8 tsyn_ena;
u8 internal_usage_flag;
u8 alt_vlan;
- u16 cpuid; /* bigger than needed, see above for reason */
+ u8 cpuid;
u8 wb_mode;
u8 tphrd_desc;
u8 tphrd;
@@ -582,7 +559,7 @@ struct ice_tlan_ctx {
u16 qnum_in_func;
u8 itr_notification_mode;
u8 adjust_prof_id;
- u32 qlen; /* bigger than needed, see above for reason */
+ u16 qlen;
u8 quanta_prof_idx;
u8 tso_ena;
u16 tso_qnum;
@@ -590,7 +567,6 @@ struct ice_tlan_ctx {
u8 drop_ena;
u8 cache_prof_idx;
u8 pkt_shaper_prof_idx;
- u8 int_q_state; /* width not needed - internal - DO NOT WRITE!!! */
};
#endif /* _ICE_LAN_TX_RX_H_ */
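
The reserved word in struct ice_tx_ctx_desc becomes a generic checksum (GCS) field, packed against the three masks defined above. A hedged packing sketch; the start/offset values are placeholders supplied by the checksum offload path elsewhere in the series:

    #include <linux/bitfield.h>

    u16 gcs = FIELD_PREP(ICE_TX_GCS_DESC_START_M, csum_start) |
              FIELD_PREP(ICE_TX_GCS_DESC_OFFSET_M, csum_offset) |
              FIELD_PREP(ICE_TX_GCS_DESC_TYPE_M, ICE_TX_GCS_DESC_CSUM_PSH);

    ctx_desc->gcs = cpu_to_le16(gcs);
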
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index a7d45a8ce7ac..03bb16191237 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -157,6 +157,16 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
}
}
+static u16 ice_get_rxq_count(struct ice_pf *pf)
+{
+ return min(ice_get_avail_rxq_count(pf), num_online_cpus());
+}
+
+static u16 ice_get_txq_count(struct ice_pf *pf)
+{
+ return min(ice_get_avail_txq_count(pf), num_online_cpus());
+}
+
/**
* ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI
* @vsi: the VSI being configured
@@ -178,9 +188,7 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
vsi->alloc_txq = vsi->req_txq;
vsi->num_txq = vsi->req_txq;
} else {
- vsi->alloc_txq = min3(pf->num_lan_msix,
- ice_get_avail_txq_count(pf),
- (u16)num_online_cpus());
+ vsi->alloc_txq = ice_get_txq_count(pf);
}
pf->num_lan_tx = vsi->alloc_txq;
@@ -193,17 +201,13 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
vsi->alloc_rxq = vsi->req_rxq;
vsi->num_rxq = vsi->req_rxq;
} else {
- vsi->alloc_rxq = min3(pf->num_lan_msix,
- ice_get_avail_rxq_count(pf),
- (u16)num_online_cpus());
+ vsi->alloc_rxq = ice_get_rxq_count(pf);
}
}
pf->num_lan_rx = vsi->alloc_rxq;
- vsi->num_q_vectors = min_t(int, pf->num_lan_msix,
- max_t(int, vsi->alloc_rxq,
- vsi->alloc_txq));
+ vsi->num_q_vectors = max(vsi->alloc_rxq, vsi->alloc_txq);
break;
case ICE_VSI_SF:
vsi->alloc_txq = 1;
@@ -567,6 +571,8 @@ ice_vsi_alloc_def(struct ice_vsi *vsi, struct ice_channel *ch)
return -ENOMEM;
}
+ vsi->irq_dyn_alloc = pci_msix_can_alloc_dyn(vsi->back->pdev);
+
switch (vsi->type) {
case ICE_VSI_PF:
case ICE_VSI_SF:
@@ -827,7 +833,13 @@ bool ice_is_safe_mode(struct ice_pf *pf)
*/
bool ice_is_rdma_ena(struct ice_pf *pf)
{
- return test_bit(ICE_FLAG_RDMA_ENA, pf->flags);
+ union devlink_param_value value;
+ int err;
+
+ err = devl_param_driverinit_value_get(priv_to_devlink(pf),
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA,
+ &value);
+ return err ? test_bit(ICE_FLAG_RDMA_ENA, pf->flags) : value.vbool;
}
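
ice_is_rdma_ena() now reads the generic "enable_rdma" devlink driverinit parameter and only falls back to the flag when the parameter is not registered (for example in safe mode). A hedged sketch of the registration side, which this hunk assumes happens in the driver's devlink setup:

    static const struct devlink_param params[] = {
            DEVLINK_PARAM_GENERIC(ENABLE_RDMA,
                                  BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
                                  NULL, NULL, NULL),
    };

    err = devl_params_register(devlink, params, ARRAY_SIZE(params));

User space would then toggle it with something like "devlink dev param set pci/0000:18:00.0 name enable_rdma value false cmode driverinit" followed by a devlink reload (bus address illustrative).
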
/**
@@ -1173,12 +1185,11 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
static void
ice_chnl_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
{
- struct ice_pf *pf = vsi->back;
u16 qcount, qmap;
u8 offset = 0;
int pow;
- qcount = min_t(int, vsi->num_rxq, pf->num_lan_msix);
+ qcount = vsi->num_rxq;
pow = order_base_2(qcount);
qmap = FIELD_PREP(ICE_AQ_VSI_TC_Q_OFFSET_M, offset);
@@ -1420,6 +1431,10 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
ring->dev = dev;
ring->count = vsi->num_rx_desc;
ring->cached_phctime = pf->ptp.cached_phc_time;
+
+ if (ice_is_feature_supported(pf, ICE_F_GCS))
+ ring->flags |= ICE_RX_FLAGS_RING_GCS;
+
WRITE_ONCE(vsi->rx_rings[i], ring);
}
@@ -1700,6 +1715,12 @@ bool ice_pf_state_is_nominal(struct ice_pf *pf)
return true;
}
+#define ICE_FW_MODE_REC_M BIT(1)
+bool ice_is_recovery_mode(struct ice_hw *hw)
+{
+ return rd32(hw, GL_MNG_FWSM) & ICE_FW_MODE_REC_M;
+}
+
/**
* ice_update_eth_stats - Update VSI-specific ethernet statistics counters
* @vsi: the VSI to be updated
@@ -1758,9 +1779,8 @@ void ice_update_eth_stats(struct ice_vsi *vsi)
* @prio: priority for the RXDID for this queue
* @ena_ts: true to enable timestamp and false to disable timestamp
*/
-void
-ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio,
- bool ena_ts)
+void ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio,
+ bool ena_ts)
{
int regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
@@ -2045,12 +2065,15 @@ static void ice_vsi_set_tc_cfg(struct ice_vsi *vsi)
}
/**
- * ice_cfg_sw_lldp - Config switch rules for LLDP packet handling
+ * ice_vsi_cfg_sw_lldp - Config switch rules for LLDP packet handling
* @vsi: the VSI being configured
* @tx: bool to determine Tx or Rx rule
* @create: bool to determine create or remove Rule
+ *
+ * Adding an ethtype Tx rule to the uplink VSI results in it being applied
+ * to the whole port, so LLDP transmission for VFs will be blocked too.
*/
-void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
+void ice_vsi_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
{
int (*eth_fltr)(struct ice_vsi *v, u16 type, u16 flag,
enum ice_sw_fwd_act_type act);
@@ -2065,19 +2088,59 @@ void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_TX,
ICE_DROP_PACKET);
} else {
- if (ice_fw_supports_lldp_fltr_ctrl(&pf->hw)) {
- status = ice_lldp_fltr_add_remove(&pf->hw, vsi->vsi_num,
- create);
- } else {
+ if (!test_bit(ICE_FLAG_LLDP_AQ_FLTR, pf->flags)) {
status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_RX,
ICE_FWD_TO_VSI);
+ if (!status || !create)
+ goto report;
+
+ dev_info(dev,
+ "Failed to add generic LLDP Rx filter on VSI %i error: %d, falling back to specialized AQ control\n",
+ vsi->vsi_num, status);
}
+
+ status = ice_lldp_fltr_add_remove(&pf->hw, vsi, create);
+ if (!status)
+ set_bit(ICE_FLAG_LLDP_AQ_FLTR, pf->flags);
+
}
+report:
if (status)
- dev_dbg(dev, "Fail %s %s LLDP rule on VSI %i error: %d\n",
- create ? "adding" : "removing", tx ? "TX" : "RX",
- vsi->vsi_num, status);
+ dev_warn(dev, "Failed to %s %s LLDP rule on VSI %i error: %d\n",
+ create ? "add" : "remove", tx ? "Tx" : "Rx",
+ vsi->vsi_num, status);
+}
+
+/**
+ * ice_cfg_sw_rx_lldp - Enable/disable software handling of LLDP
+ * @pf: the PF being configured
+ * @enable: enable or disable
+ *
+ * Configure switch rules to enable/disable LLDP handling by software
+ * across the PF.
+ */
+void ice_cfg_sw_rx_lldp(struct ice_pf *pf, bool enable)
+{
+ struct ice_vsi *vsi;
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ vsi = ice_get_main_vsi(pf);
+ ice_vsi_cfg_sw_lldp(vsi, false, enable);
+
+ if (!test_bit(ICE_FLAG_SRIOV_ENA, pf->flags))
+ return;
+
+ ice_for_each_vf(pf, bkt, vf) {
+ vsi = ice_get_vf_vsi(vf);
+
+ if (WARN_ON(!vsi))
+ continue;
+
+ if (ice_vf_is_lldp_ena(vf))
+ ice_vsi_cfg_sw_lldp(vsi, false, enable);
+ }
}
/**
@@ -2508,7 +2571,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params)
if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF) {
ice_fltr_add_eth(vsi, ETH_P_PAUSE, ICE_FLTR_TX,
ICE_DROP_PACKET);
- ice_cfg_sw_lldp(vsi, true, true);
+ ice_vsi_cfg_sw_lldp(vsi, true, true);
}
if (!vsi->agg_node)
@@ -2576,7 +2639,6 @@ void ice_vsi_free_irq(struct ice_vsi *vsi)
return;
vsi->irqs_ready = false;
- ice_free_cpu_rx_rmap(vsi);
ice_for_each_q_vector(vsi, i) {
int irq_num;
@@ -2589,12 +2651,6 @@ void ice_vsi_free_irq(struct ice_vsi *vsi)
vsi->q_vectors[i]->num_ring_rx))
continue;
- /* clear the affinity notifier in the IRQ descriptor */
- if (!IS_ENABLED(CONFIG_RFS_ACCEL))
- irq_set_affinity_notifier(irq_num, NULL);
-
- /* clear the affinity_hint in the IRQ descriptor */
- irq_update_affinity_hint(irq_num, NULL);
synchronize_irq(irq_num);
devm_free_irq(ice_pf_to_dev(pf), irq_num, vsi->q_vectors[i]);
}
@@ -2749,11 +2805,18 @@ void ice_vsi_set_napi_queues(struct ice_vsi *vsi)
void ice_vsi_clear_napi_queues(struct ice_vsi *vsi)
{
struct net_device *netdev = vsi->netdev;
- int q_idx;
+ int q_idx, v_idx;
if (!netdev)
return;
+ /* Clear the NAPI's interrupt number */
+ ice_for_each_q_vector(vsi, v_idx) {
+ struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
+
+ netif_napi_set_irq(&q_vector->napi, -1);
+ }
+
ice_for_each_txq(vsi, q_idx)
netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_TX, NULL);
@@ -2805,9 +2868,11 @@ int ice_vsi_release(struct ice_vsi *vsi)
/* The Rx rule will only exist to remove if the LLDP FW
* engine is currently stopped
*/
- if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF &&
- !test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
- ice_cfg_sw_lldp(vsi, false, false);
+ if (!ice_is_safe_mode(pf) &&
+ !test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags) &&
+ (vsi->type == ICE_VSI_PF || (vsi->type == ICE_VSI_VF &&
+ ice_vf_is_lldp_ena(vsi->vf))))
+ ice_vsi_cfg_sw_lldp(vsi, false, false);
ice_vsi_decfg(vsi);
@@ -3876,15 +3941,17 @@ void ice_init_feature_support(struct ice_pf *pf)
ice_set_feature_support(pf, ICE_F_CGU);
if (ice_is_clock_mux_in_netlist(&pf->hw))
ice_set_feature_support(pf, ICE_F_SMA_CTRL);
- if (ice_gnss_is_gps_present(&pf->hw))
+ if (ice_gnss_is_module_present(&pf->hw))
ice_set_feature_support(pf, ICE_F_GNSS);
break;
default:
break;
}
- if (pf->hw.mac_type == ICE_MAC_E830)
+ if (pf->hw.mac_type == ICE_MAC_E830) {
ice_set_feature_support(pf, ICE_F_MBX_LIMIT);
+ ice_set_feature_support(pf, ICE_F_GCS);
+ }
}
/**
@@ -3931,24 +3998,6 @@ void ice_vsi_ctx_clear_antispoof(struct ice_vsi_ctx *ctx)
}
/**
- * ice_vsi_ctx_set_allow_override - allow destination override on VSI
- * @ctx: pointer to VSI ctx structure
- */
-void ice_vsi_ctx_set_allow_override(struct ice_vsi_ctx *ctx)
-{
- ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
-}
-
-/**
- * ice_vsi_ctx_clear_allow_override - turn off destination override on VSI
- * @ctx: pointer to VSI ctx structure
- */
-void ice_vsi_ctx_clear_allow_override(struct ice_vsi_ctx *ctx)
-{
- ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
-}
-
-/**
* ice_vsi_update_local_lb - update sw block in VSI with local loopback bit
* @vsi: pointer to VSI structure
* @set: set or unset the bit
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index 10d6fc479a32..654516c5fc3e 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -29,7 +29,8 @@ ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi);
-void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create);
+void ice_vsi_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create);
+void ice_cfg_sw_rx_lldp(struct ice_pf *pf, bool enable);
int ice_set_link(struct ice_vsi *vsi, bool ena);
@@ -90,6 +91,7 @@ void ice_set_q_vector_intrl(struct ice_q_vector *q_vector);
bool ice_is_safe_mode(struct ice_pf *pf);
bool ice_is_rdma_ena(struct ice_pf *pf);
+bool ice_is_recovery_mode(struct ice_hw *hw);
bool ice_is_dflt_vsi_in_use(struct ice_port_info *pi);
bool ice_is_vsi_dflt_vsi(struct ice_vsi *vsi);
int ice_set_dflt_vsi(struct ice_vsi *vsi);
@@ -104,10 +106,6 @@ ice_vsi_update_security(struct ice_vsi *vsi, void (*fill)(struct ice_vsi_ctx *))
void ice_vsi_ctx_set_antispoof(struct ice_vsi_ctx *ctx);
void ice_vsi_ctx_clear_antispoof(struct ice_vsi_ctx *ctx);
-
-void ice_vsi_ctx_set_allow_override(struct ice_vsi_ctx *ctx);
-
-void ice_vsi_ctx_clear_allow_override(struct ice_vsi_ctx *ctx);
int ice_vsi_update_local_lb(struct ice_vsi *vsi, bool set);
int ice_vsi_add_vlan_zero(struct ice_vsi *vsi);
int ice_vsi_del_vlan_zero(struct ice_vsi *vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 1eaa4428fd24..0a11b4281092 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -14,7 +14,7 @@
#include "ice_dcb_lib.h"
#include "ice_dcb_nl.h"
#include "devlink/devlink.h"
-#include "devlink/devlink_port.h"
+#include "devlink/port.h"
#include "ice_sf_eth.h"
#include "ice_hwmon.h"
/* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the
@@ -37,7 +37,7 @@ static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";
#define ICE_DDP_PKG_FILE ICE_DDP_PKG_PATH "ice.pkg"
MODULE_DESCRIPTION(DRV_SUMMARY);
-MODULE_IMPORT_NS(LIBIE);
+MODULE_IMPORT_NS("LIBIE");
MODULE_LICENSE("GPL v2");
MODULE_FIRMWARE(ICE_DDP_PKG_FILE);
@@ -1144,7 +1144,7 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
if (link_up == old_link && link_speed == old_link_speed)
return 0;
- ice_ptp_link_change(pf, pf->hw.pf_id, link_up);
+ ice_ptp_link_change(pf, link_up);
if (ice_is_dcb_active(pf)) {
if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
@@ -1567,6 +1567,9 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
case ice_aqc_opc_lldp_set_mib_change:
ice_dcb_process_lldp_set_mib_change(pf, &event);
break;
+ case ice_aqc_opc_get_health_status:
+ ice_process_health_status_event(pf, &event);
+ break;
default:
dev_dbg(dev, "%s Receive Queue unknown event 0x%04x ignored\n",
qtype, opcode);
@@ -1714,7 +1717,7 @@ static int ice_service_task_stop(struct ice_pf *pf)
ret = test_and_set_bit(ICE_SERVICE_DIS, pf->state);
if (pf->serv_tmr.function)
- del_timer_sync(&pf->serv_tmr);
+ timer_delete_sync(&pf->serv_tmr);
if (pf->serv_task.func)
cancel_work_sync(&pf->serv_task);
@@ -1740,7 +1743,7 @@ static void ice_service_task_restart(struct ice_pf *pf)
*/
static void ice_service_timer(struct timer_list *t)
{
- struct ice_pf *pf = from_timer(pf, t, serv_tmr);
+ struct ice_pf *pf = timer_container_of(pf, t, serv_tmr);
mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies));
ice_service_task_schedule(pf);
@@ -1816,6 +1819,8 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
if (netif_msg_tx_err(pf))
dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
event, queue, pf_num, vf_num);
+ ice_report_mdd_event(pf, ICE_MDD_SRC_TX_PQM, pf_num, vf_num,
+ event, queue);
wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
}
@@ -1829,6 +1834,8 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
if (netif_msg_tx_err(pf))
dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
event, queue, pf_num, vf_num);
+ ice_report_mdd_event(pf, ICE_MDD_SRC_TX_TCLAN, pf_num, vf_num,
+ event, queue);
wr32(hw, GL_MDET_TX_TCLAN_BY_MAC(hw), U32_MAX);
}
@@ -1842,6 +1849,8 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
if (netif_msg_rx_err(pf))
dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
event, queue, pf_num, vf_num);
+ ice_report_mdd_event(pf, ICE_MDD_SRC_RX, pf_num, vf_num, event,
+ queue);
wr32(hw, GL_MDET_RX, 0xffffffff);
}
@@ -2355,6 +2364,18 @@ static void ice_check_media_subtask(struct ice_pf *pf)
}
}
+static void ice_service_task_recovery_mode(struct work_struct *work)
+{
+ struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
+
+ set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
+ ice_clean_adminq_subtask(pf);
+
+ ice_service_task_complete(pf);
+
+ mod_timer(&pf->serv_tmr, jiffies + msecs_to_jiffies(100));
+}
+
/**
* ice_service_task - manage and run subtasks
* @work: pointer to work_struct contained by the PF struct
@@ -2364,9 +2385,11 @@ static void ice_service_task(struct work_struct *work)
struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
unsigned long start_time = jiffies;
- /* subtasks */
+ if (pf->health_reporters.tx_hang_buf.tx_ring) {
+ ice_report_tx_hang(pf);
+ pf->health_reporters.tx_hang_buf.tx_ring = NULL;
+ }
- /* process reset requests first */
ice_reset_subtask(pf);
/* bail if a reset/recovery cycle is pending or rebuild failed */
@@ -2378,11 +2401,11 @@ static void ice_service_task(struct work_struct *work)
}
if (test_and_clear_bit(ICE_AUX_ERR_PENDING, pf->state)) {
- struct iidc_event *event;
+ struct iidc_rdma_event *event;
event = kzalloc(sizeof(*event), GFP_KERNEL);
if (event) {
- set_bit(IIDC_EVENT_CRIT_ERR, event->type);
+ set_bit(IIDC_RDMA_EVENT_CRIT_ERR, event->type);
/* report the entire OICR value to AUX driver */
swap(event->reg, pf->oicr_err_reg);
ice_send_event_to_aux(pf, event);
@@ -2401,11 +2424,11 @@ static void ice_service_task(struct work_struct *work)
ice_plug_aux_dev(pf);
if (test_and_clear_bit(ICE_FLAG_MTU_CHANGED, pf->flags)) {
- struct iidc_event *event;
+ struct iidc_rdma_event *event;
event = kzalloc(sizeof(*event), GFP_KERNEL);
if (event) {
- set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type);
+ set_bit(IIDC_RDMA_EVENT_AFTER_MTU_CHANGE, event->type);
ice_send_event_to_aux(pf, event);
kfree(event);
}
@@ -2505,34 +2528,6 @@ int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset)
}
/**
- * ice_irq_affinity_notify - Callback for affinity changes
- * @notify: context as to what irq was changed
- * @mask: the new affinity mask
- *
- * This is a callback function used by the irq_set_affinity_notifier function
- * so that we may register to receive changes to the irq affinity masks.
- */
-static void
-ice_irq_affinity_notify(struct irq_affinity_notify *notify,
- const cpumask_t *mask)
-{
- struct ice_q_vector *q_vector =
- container_of(notify, struct ice_q_vector, affinity_notify);
-
- cpumask_copy(&q_vector->affinity_mask, mask);
-}
-
-/**
- * ice_irq_affinity_release - Callback for affinity notifier release
- * @ref: internal core kernel usage
- *
- * This is a callback function used by the irq_set_affinity_notifier function
- * to inform the current notification subscriber that they will no longer
- * receive notifications.
- */
-static void ice_irq_affinity_release(struct kref __always_unused *ref) {}
-
-/**
* ice_vsi_ena_irq - Enable IRQ for the given VSI
* @vsi: the VSI being configured
*/
@@ -2595,19 +2590,6 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
err);
goto free_q_irqs;
}
-
- /* register for affinity change notifications */
- if (!IS_ENABLED(CONFIG_RFS_ACCEL)) {
- struct irq_affinity_notify *affinity_notify;
-
- affinity_notify = &q_vector->affinity_notify;
- affinity_notify->notify = ice_irq_affinity_notify;
- affinity_notify->release = ice_irq_affinity_release;
- irq_set_affinity_notifier(irq_num, affinity_notify);
- }
-
- /* assign the mask for this irq */
- irq_update_affinity_hint(irq_num, &q_vector->affinity_mask);
}
err = ice_set_cpu_rx_rmap(vsi);
@@ -2623,9 +2605,6 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
free_q_irqs:
while (vector--) {
irq_num = vsi->q_vectors[vector]->irq.virq;
- if (!IS_ENABLED(CONFIG_RFS_ACCEL))
- irq_set_affinity_notifier(irq_num, NULL);
- irq_update_affinity_hint(irq_num, NULL);
devm_free_irq(dev, irq_num, &vsi->q_vectors[vector]);
}
return err;
@@ -2762,6 +2741,27 @@ void ice_map_xdp_rings(struct ice_vsi *vsi)
}
/**
+ * ice_unmap_xdp_rings - Unmap XDP rings from interrupt vectors
+ * @vsi: the VSI with XDP rings being unmapped
+ */
+static void ice_unmap_xdp_rings(struct ice_vsi *vsi)
+{
+ int v_idx;
+
+ ice_for_each_q_vector(vsi, v_idx) {
+ struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
+ struct ice_tx_ring *ring;
+
+ ice_for_each_tx_ring(ring, q_vector->tx)
+ if (!ring->tx_buf || !ice_ring_is_xdp(ring))
+ break;
+
+ /* restore the value of last node prior to XDP setup */
+ q_vector->tx.tx_ring = ring;
+ }
+}
+
+/**
* ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
* @vsi: VSI to bring up Tx rings used by XDP
* @prog: bpf program that will be assigned to VSI
@@ -2824,7 +2824,7 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
if (status) {
dev_err(dev, "Failed VSI LAN queue config for XDP, error: %d\n",
status);
- goto clear_xdp_rings;
+ goto unmap_xdp_rings;
}
/* assign the prog only when it's not already present on VSI;
@@ -2840,6 +2840,8 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
ice_vsi_assign_bpf_prog(vsi, prog);
return 0;
+unmap_xdp_rings:
+ ice_unmap_xdp_rings(vsi);
clear_xdp_rings:
ice_for_each_xdp_txq(vsi, i)
if (vsi->xdp_rings[i]) {
@@ -2856,6 +2858,8 @@ err_map_xdp:
mutex_unlock(&pf->avail_q_mutex);
devm_kfree(dev, vsi->xdp_rings);
+ vsi->xdp_rings = NULL;
+
return -ENOMEM;
}
@@ -2871,7 +2875,7 @@ int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
struct ice_pf *pf = vsi->back;
- int i, v_idx;
+ int i;
/* q_vectors are freed in reset path so there's no point in detaching
* rings
@@ -2879,17 +2883,7 @@ int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type)
if (cfg_type == ICE_XDP_CFG_PART)
goto free_qmap;
- ice_for_each_q_vector(vsi, v_idx) {
- struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
- struct ice_tx_ring *ring;
-
- ice_for_each_tx_ring(ring, q_vector->tx)
- if (!ring->tx_buf || !ice_ring_is_xdp(ring))
- break;
-
- /* restore the value of last node prior to XDP setup */
- q_vector->tx.tx_ring = ring;
- }
+ ice_unmap_xdp_rings(vsi);
free_qmap:
mutex_lock(&pf->avail_q_mutex);
@@ -3034,11 +3028,14 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
xdp_ring_err = ice_vsi_determine_xdp_res(vsi);
if (xdp_ring_err) {
NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP");
+ goto resume_if;
} else {
xdp_ring_err = ice_prepare_xdp_rings(vsi, prog,
ICE_XDP_CFG_FULL);
- if (xdp_ring_err)
+ if (xdp_ring_err) {
NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
+ goto resume_if;
+ }
}
xdp_features_set_redirect_target(vsi->netdev, true);
/* reallocate Rx queues that are used for zero-copy */
@@ -3056,6 +3053,7 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Rx resources failed");
}
+resume_if:
if (if_running)
ret = ice_up(vsi);
@@ -3281,22 +3279,8 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
if (oicr & PFINT_OICR_TSYN_TX_M) {
ena_mask &= ~PFINT_OICR_TSYN_TX_M;
- if (ice_pf_state_is_nominal(pf) &&
- pf->hw.dev_caps.ts_dev_info.ts_ll_int_read) {
- struct ice_ptp_tx *tx = &pf->ptp.port.tx;
- unsigned long flags;
- u8 idx;
-
- spin_lock_irqsave(&tx->lock, flags);
- idx = find_next_bit_wrap(tx->in_use, tx->len,
- tx->last_ll_ts_idx_read + 1);
- if (idx != tx->len)
- ice_ptp_req_tx_single_tstamp(tx, idx);
- spin_unlock_irqrestore(&tx->lock, flags);
- } else if (ice_ptp_pf_handles_tx_interrupt(pf)) {
- set_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread);
- ret = IRQ_WAKE_THREAD;
- }
+
+ ret = ice_ptp_ts_irq(pf);
}
if (oicr & PFINT_OICR_TSYN_EVNT_M) {
@@ -3666,6 +3650,15 @@ void ice_set_netdev_features(struct net_device *netdev)
*/
netdev->hw_features |= NETIF_F_RXFCS;
+ /* Allow core to manage IRQs affinity */
+ netif_set_affinity_auto(netdev);
+
+ /* Mutual exclusivity for TSO and GCS is enforced by the set features
+ * ndo callback.
+ */
+ if (ice_is_feature_supported(pf, ICE_F_GCS))
+ netdev->hw_features |= NETIF_F_HW_CSUM;
+
netif_set_tso_max_size(netdev, ICE_MAX_TSO_SIZE);
}
@@ -4043,8 +4036,7 @@ static void ice_set_pf_caps(struct ice_pf *pf)
}
clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
- if (func_caps->common_cap.ieee_1588 &&
- !(pf->hw.mac_type == ICE_MAC_E830))
+ if (func_caps->common_cap.ieee_1588)
set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
pf->max_pf_txqs = func_caps->common_cap.num_txq;
@@ -4741,55 +4733,12 @@ static void ice_decfg_netdev(struct ice_vsi *vsi)
vsi->netdev = NULL;
}
-/**
- * ice_wait_for_fw - wait for full FW readiness
- * @hw: pointer to the hardware structure
- * @timeout: milliseconds that can elapse before timing out
- */
-static int ice_wait_for_fw(struct ice_hw *hw, u32 timeout)
-{
- int fw_loading;
- u32 elapsed = 0;
-
- while (elapsed <= timeout) {
- fw_loading = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_LOADING_M;
-
- /* firmware was not yet loaded, we have to wait more */
- if (fw_loading) {
- elapsed += 100;
- msleep(100);
- continue;
- }
- return 0;
- }
-
- return -ETIMEDOUT;
-}
-
int ice_init_dev(struct ice_pf *pf)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
int err;
- err = ice_init_hw(hw);
- if (err) {
- dev_err(dev, "ice_init_hw failed: %d\n", err);
- return err;
- }
-
- /* Some cards require longer initialization times
- * due to necessity of loading FW from an external source.
- * This can take even half a minute.
- */
- if (ice_is_pf_c827(hw)) {
- err = ice_wait_for_fw(hw, 30000);
- if (err) {
- dev_err(dev, "ice_wait_for_fw timed out");
- return err;
- }
- }
-
ice_init_feature_support(pf);
err = ice_init_ddp_config(hw, pf);
@@ -4810,7 +4759,7 @@ int ice_init_dev(struct ice_pf *pf)
err = ice_init_pf(pf);
if (err) {
dev_err(dev, "ice_init_pf failed: %d\n", err);
- goto err_init_pf;
+ return err;
}
pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port;
@@ -4834,7 +4783,7 @@ int ice_init_dev(struct ice_pf *pf)
if (err) {
dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
err = -EIO;
- goto err_init_interrupt_scheme;
+ goto unroll_pf_init;
}
/* In case of MSIX we are going to setup the misc vector right here
@@ -4845,17 +4794,15 @@ int ice_init_dev(struct ice_pf *pf)
err = ice_req_irq_msix_misc(pf);
if (err) {
dev_err(dev, "setup of misc vector failed: %d\n", err);
- goto err_req_irq_msix_misc;
+ goto unroll_irq_scheme_init;
}
return 0;
-err_req_irq_msix_misc:
+unroll_irq_scheme_init:
ice_clear_interrupt_scheme(pf);
-err_init_interrupt_scheme:
+unroll_pf_init:
ice_deinit_pf(pf);
-err_init_pf:
- ice_deinit_hw(hw);
return err;
}
@@ -5088,12 +5035,14 @@ static int ice_init_devlink(struct ice_pf *pf)
ice_devlink_init_regions(pf);
ice_devlink_register(pf);
+ ice_health_init(pf);
return 0;
}
static void ice_deinit_devlink(struct ice_pf *pf)
{
+ ice_health_deinit(pf);
ice_devlink_unregister(pf);
ice_devlink_destroy_regions(pf);
ice_devlink_unregister_params(pf);
@@ -5107,6 +5056,12 @@ static int ice_init(struct ice_pf *pf)
if (err)
return err;
+ if (pf->hw.mac_type == ICE_MAC_E830) {
+ err = pci_enable_ptm(pf->pdev, NULL);
+ if (err)
+ dev_dbg(ice_pf_to_dev(pf), "PCIe PTM not supported by PCIe bus/controller\n");
+ }
+
err = ice_alloc_vsis(pf);
if (err)
goto err_alloc_vsis;
@@ -5206,11 +5161,12 @@ int ice_load(struct ice_pf *pf)
ice_napi_add(vsi);
+ ice_init_features(pf);
+
err = ice_init_rdma(pf);
if (err)
goto err_init_rdma;
- ice_init_features(pf);
ice_service_task_restart(pf);
clear_bit(ICE_DOWN, pf->state);
@@ -5218,6 +5174,7 @@ int ice_load(struct ice_pf *pf)
return 0;
err_init_rdma:
+ ice_deinit_features(pf);
ice_tc_indir_block_unregister(vsi);
err_tc_indir_block_register:
ice_unregister_netdev(vsi);
@@ -5241,14 +5198,44 @@ void ice_unload(struct ice_pf *pf)
devl_assert_locked(priv_to_devlink(pf));
- ice_deinit_features(pf);
ice_deinit_rdma(pf);
+ ice_deinit_features(pf);
ice_tc_indir_block_unregister(vsi);
ice_unregister_netdev(vsi);
ice_devlink_destroy_pf_port(pf);
ice_decfg_netdev(vsi);
}
+static int ice_probe_recovery_mode(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ int err;
+
+ dev_err(dev, "Firmware recovery mode detected. Limiting functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode\n");
+
+ INIT_HLIST_HEAD(&pf->aq_wait_list);
+ spin_lock_init(&pf->aq_wait_lock);
+ init_waitqueue_head(&pf->aq_wait_queue);
+
+ timer_setup(&pf->serv_tmr, ice_service_timer, 0);
+ pf->serv_tmr_period = HZ;
+ INIT_WORK(&pf->serv_task, ice_service_task_recovery_mode);
+ clear_bit(ICE_SERVICE_SCHED, pf->state);
+ err = ice_create_all_ctrlq(&pf->hw);
+ if (err)
+ return err;
+
+ scoped_guard(devl, priv_to_devlink(pf)) {
+ err = ice_init_devlink(pf);
+ if (err)
+ return err;
+ }
+
+ ice_service_task_restart(pf);
+
+ return 0;
+}
+
/**
* ice_probe - Device initialization routine
* @pdev: PCI device information struct
@@ -5312,13 +5299,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
}
pci_set_master(pdev);
-
- adapter = ice_adapter_get(pdev);
- if (IS_ERR(adapter))
- return PTR_ERR(adapter);
-
pf->pdev = pdev;
- pf->adapter = adapter;
pci_set_drvdata(pdev, pf);
set_bit(ICE_DOWN, pf->state);
/* Disable service task until DOWN bit is cleared */
@@ -5346,29 +5327,47 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
hw->debug_mask = debug;
#endif
+ if (ice_is_recovery_mode(hw))
+ return ice_probe_recovery_mode(pf);
+
+ err = ice_init_hw(hw);
+ if (err) {
+ dev_err(dev, "ice_init_hw failed: %d\n", err);
+ return err;
+ }
+
+ adapter = ice_adapter_get(pdev);
+ if (IS_ERR(adapter)) {
+ err = PTR_ERR(adapter);
+ goto unroll_hw_init;
+ }
+ pf->adapter = adapter;
+
err = ice_init(pf);
if (err)
- goto err_init;
+ goto unroll_adapter;
devl_lock(priv_to_devlink(pf));
err = ice_load(pf);
if (err)
- goto err_load;
+ goto unroll_init;
err = ice_init_devlink(pf);
if (err)
- goto err_init_devlink;
+ goto unroll_load;
devl_unlock(priv_to_devlink(pf));
return 0;
-err_init_devlink:
+unroll_load:
ice_unload(pf);
-err_load:
+unroll_init:
devl_unlock(priv_to_devlink(pf));
ice_deinit(pf);
-err_init:
+unroll_adapter:
ice_adapter_put(pdev);
+unroll_hw_init:
+ ice_deinit_hw(hw);
return err;
}
@@ -5448,6 +5447,14 @@ static void ice_remove(struct pci_dev *pdev)
msleep(100);
}
+ if (ice_is_recovery_mode(&pf->hw)) {
+ ice_service_task_stop(pf);
+ scoped_guard(devl, priv_to_devlink(pf)) {
+ ice_deinit_devlink(pf);
+ }
+ return;
+ }
+
if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
set_bit(ICE_VF_RESETS_DISABLED, pf->state);
ice_free_vfs(pf);
@@ -6408,10 +6415,12 @@ ice_set_vlan_filtering_features(struct ice_vsi *vsi, netdev_features_t features)
int err = 0;
/* support Single VLAN Mode (SVM) and Double VLAN Mode (DVM) by checking
- * if either bit is set
+ * if either bit is set. In switchdev mode Rx filtering should never be
+ * enabled.
*/
- if (features &
- (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER))
+ if ((features &
+ (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)) &&
+ !ice_is_eswitch_mode_switchdev(vsi->back))
err = vlan_ops->ena_rx_filtering(vsi);
else
err = vlan_ops->dis_rx_filtering(vsi);
@@ -6565,6 +6574,18 @@ ice_set_features(struct net_device *netdev, netdev_features_t features)
if (changed & NETIF_F_LOOPBACK)
ret = ice_set_loopback(vsi, !!(features & NETIF_F_LOOPBACK));
+ /* Due to E830 hardware limitations, TSO (NETIF_F_ALL_TSO) with GCS
+ * (NETIF_F_HW_CSUM) is not supported.
+ */
+ if (ice_is_feature_supported(pf, ICE_F_GCS) &&
+ ((features & NETIF_F_HW_CSUM) && (features & NETIF_F_ALL_TSO))) {
+ if (netdev->features & NETIF_F_HW_CSUM)
+ dev_err(ice_pf_to_dev(pf), "To enable TSO, you must first disable HW checksum.\n");
+ else
+ dev_err(ice_pf_to_dev(pf), "To enable HW checksum, you must first disable TSO.\n");
+ return -EIO;
+ }
+
return ret;
}
@@ -6788,7 +6809,7 @@ static int ice_up_complete(struct ice_vsi *vsi)
ice_print_link_msg(vsi, true);
netif_tx_start_all_queues(vsi->netdev);
netif_carrier_on(vsi->netdev);
- ice_ptp_link_change(pf, pf->hw.pf_id, true);
+ ice_ptp_link_change(pf, true);
}
/* Perform an initial read of the statistics registers now to
@@ -7258,7 +7279,7 @@ int ice_down(struct ice_vsi *vsi)
if (vsi->netdev) {
vlan_err = ice_vsi_del_vlan_zero(vsi);
- ice_ptp_link_change(vsi->back, vsi->back->hw.pf_id, false);
+ ice_ptp_link_change(vsi->back, false);
netif_carrier_off(vsi->netdev);
netif_tx_disable(vsi->netdev);
}
@@ -7791,6 +7812,8 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
/* if we get here, reset flow is successful */
clear_bit(ICE_RESET_FAILED, pf->state);
+ ice_health_clear(pf);
+
ice_plug_aux_dev(pf);
if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG))
ice_lag_rebuild(pf);
@@ -8281,16 +8304,18 @@ void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
if (tx_ring) {
struct ice_hw *hw = &pf->hw;
- u32 head, val = 0;
+ u32 head, intr = 0;
head = FIELD_GET(QTX_COMM_HEAD_HEAD_M,
rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])));
/* Read interrupt register */
- val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
+ intr = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %u, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
vsi->vsi_num, txqueue, tx_ring->next_to_clean,
- head, tx_ring->next_to_use, val);
+ head, tx_ring->next_to_use, intr);
+
+ ice_prep_tx_hang_report(pf, tx_ring, vsi->vsi_num, head, intr);
}
pf->tx_timeout_last_recovery = jiffies;
@@ -8324,11 +8349,16 @@ void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
* @np: net device to configure
* @filter_dev: device on which filter is added
* @cls_flower: offload data
+ * @ingress: if the rule is added to an ingress block
+ *
+ * Return: 0 if the flower was successfully added or deleted,
+ * negative error code otherwise.
*/
static int
ice_setup_tc_cls_flower(struct ice_netdev_priv *np,
struct net_device *filter_dev,
- struct flow_cls_offload *cls_flower)
+ struct flow_cls_offload *cls_flower,
+ bool ingress)
{
struct ice_vsi *vsi = np->vsi;
@@ -8337,7 +8367,7 @@ ice_setup_tc_cls_flower(struct ice_netdev_priv *np,
switch (cls_flower->command) {
case FLOW_CLS_REPLACE:
- return ice_add_cls_flower(filter_dev, vsi, cls_flower);
+ return ice_add_cls_flower(filter_dev, vsi, cls_flower, ingress);
case FLOW_CLS_DESTROY:
return ice_del_cls_flower(vsi, cls_flower);
default:
@@ -8346,20 +8376,46 @@ ice_setup_tc_cls_flower(struct ice_netdev_priv *np,
}
/**
- * ice_setup_tc_block_cb - callback handler registered for TC block
+ * ice_setup_tc_block_cb_ingress - callback handler for ingress TC block
* @type: TC SETUP type
* @type_data: TC flower offload data that contains user input
* @cb_priv: netdev private data
+ *
+ * Return: 0 if the setup was successful, negative error code otherwise.
*/
static int
-ice_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
+ice_setup_tc_block_cb_ingress(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
{
struct ice_netdev_priv *np = cb_priv;
switch (type) {
case TC_SETUP_CLSFLOWER:
return ice_setup_tc_cls_flower(np, np->vsi->netdev,
- type_data);
+ type_data, true);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/**
+ * ice_setup_tc_block_cb_egress - callback handler for egress TC block
+ * @type: TC SETUP type
+ * @type_data: TC flower offload data that contains user input
+ * @cb_priv: netdev private data
+ *
+ * Return: 0 if the setup was successful, negative error code otherwise.
+ */
+static int
+ice_setup_tc_block_cb_egress(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
+{
+ struct ice_netdev_priv *np = cb_priv;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return ice_setup_tc_cls_flower(np, np->vsi->netdev,
+ type_data, false);
default:
return -EOPNOTSUPP;
}
@@ -9304,27 +9360,45 @@ ice_setup_tc(struct net_device *netdev, enum tc_setup_type type,
void *type_data)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
+ enum flow_block_binder_type binder_type;
+ struct iidc_rdma_core_dev_info *cdev;
struct ice_pf *pf = np->vsi->back;
+ flow_setup_cb_t *flower_handler;
bool locked = false;
int err;
switch (type) {
case TC_SETUP_BLOCK:
+ binder_type =
+ ((struct flow_block_offload *)type_data)->binder_type;
+
+ switch (binder_type) {
+ case FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS:
+ flower_handler = ice_setup_tc_block_cb_ingress;
+ break;
+ case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS:
+ flower_handler = ice_setup_tc_block_cb_egress;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
return flow_block_cb_setup_simple(type_data,
&ice_block_cb_list,
- ice_setup_tc_block_cb,
- np, np, true);
+ flower_handler,
+ np, np, false);
case TC_SETUP_QDISC_MQPRIO:
if (ice_is_eswitch_mode_switchdev(pf)) {
netdev_err(netdev, "TC MQPRIO offload not supported, switchdev is enabled\n");
return -EOPNOTSUPP;
}
- if (pf->adev) {
+ cdev = pf->cdev_info;
+ if (cdev && cdev->adev) {
mutex_lock(&pf->adev_mutex);
- device_lock(&pf->adev->dev);
+ device_lock(&cdev->adev->dev);
locked = true;
- if (pf->adev->dev.driver) {
+ if (cdev->adev->dev.driver) {
netdev_err(netdev, "Cannot change qdisc when RDMA is active\n");
err = -EBUSY;
goto adev_unlock;
@@ -9338,7 +9412,7 @@ ice_setup_tc(struct net_device *netdev, enum tc_setup_type type,
adev_unlock:
if (locked) {
- device_unlock(&pf->adev->dev);
+ device_unlock(&cdev->adev->dev);
mutex_unlock(&pf->adev_mutex);
}
return err;
@@ -9374,7 +9448,7 @@ ice_indr_setup_block_cb(enum tc_setup_type type, void *type_data,
case TC_SETUP_CLSFLOWER:
return ice_setup_tc_cls_flower(np, priv->netdev,
(struct flow_cls_offload *)
- type_data);
+ type_data, false);
default:
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_parser.h b/drivers/net/ethernet/intel/ice/ice_parser.h
index 6509d807627c..4f56d53d56b9 100644
--- a/drivers/net/ethernet/intel/ice/ice_parser.h
+++ b/drivers/net/ethernet/intel/ice/ice_parser.h
@@ -257,7 +257,6 @@ ice_pg_nm_cam_match(struct ice_pg_nm_cam_item *table, int size,
/*** ICE_SID_RXPARSER_BOOST_TCAM and ICE_SID_LBL_RXPARSER_TMEM sections ***/
#define ICE_BST_TCAM_TABLE_SIZE 256
#define ICE_BST_TCAM_KEY_SIZE 20
-#define ICE_BST_KEY_TCAM_SIZE 19
/* Boost TCAM item */
struct ice_bst_tcam_item {
@@ -401,7 +400,6 @@ u16 ice_xlt_kb_flag_get(struct ice_xlt_kb *kb, u64 pkt_flag);
#define ICE_PARSER_GPR_NUM 128
#define ICE_PARSER_FLG_NUM 64
#define ICE_PARSER_ERR_NUM 16
-#define ICE_BST_KEY_SIZE 10
#define ICE_MARKER_ID_SIZE 9
#define ICE_MARKER_MAX_SIZE \
(ICE_MARKER_ID_SIZE * BITS_PER_BYTE - 1)
@@ -431,13 +429,13 @@ struct ice_parser_rt {
u8 pkt_buf[ICE_PARSER_MAX_PKT_LEN + ICE_PARSER_PKT_REV];
u16 pkt_len;
u16 po;
- u8 bst_key[ICE_BST_KEY_SIZE];
+ u8 bst_key[ICE_BST_TCAM_KEY_SIZE];
struct ice_pg_cam_key pg_key;
+ u8 pg_prio;
struct ice_alu *alu0;
struct ice_alu *alu1;
struct ice_alu *alu2;
struct ice_pg_cam_action *action;
- u8 pg_prio;
struct ice_gpr_pu pu;
u8 markers[ICE_MARKER_ID_SIZE];
bool protocols[ICE_PO_PAIR_SIZE];
diff --git a/drivers/net/ethernet/intel/ice/ice_parser_rt.c b/drivers/net/ethernet/intel/ice/ice_parser_rt.c
index dedf5e854e4b..3995d662e050 100644
--- a/drivers/net/ethernet/intel/ice/ice_parser_rt.c
+++ b/drivers/net/ethernet/intel/ice/ice_parser_rt.c
@@ -125,22 +125,20 @@ static void ice_bst_key_init(struct ice_parser_rt *rt,
else
key[idd] = imem->b_kb.prio;
- idd = ICE_BST_KEY_TCAM_SIZE - 1;
+ idd = ICE_BST_TCAM_KEY_SIZE - 2;
for (i = idd; i >= 0; i--) {
int j;
j = ho + idd - i;
if (j < ICE_PARSER_MAX_PKT_LEN)
- key[i] = rt->pkt_buf[ho + idd - i];
+ key[i] = rt->pkt_buf[j];
else
key[i] = 0;
}
- ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Generated Boost TCAM Key:\n");
- ice_debug(rt->psr->hw, ICE_DBG_PARSER, "%02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n",
- key[0], key[1], key[2], key[3], key[4],
- key[5], key[6], key[7], key[8], key[9]);
- ice_debug(rt->psr->hw, ICE_DBG_PARSER, "\n");
+ ice_debug_array_w_prefix(rt->psr->hw, ICE_DBG_PARSER,
+ KBUILD_MODNAME ": Generated Boost TCAM Key",
+ key, ICE_BST_TCAM_KEY_SIZE);
}
static u16 ice_bit_rev_u16(u16 v, int len)
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index a999fface272..55cad824c5b9 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -16,28 +16,28 @@ static const char ice_pin_names[][64] = {
};
static const struct ice_ptp_pin_desc ice_pin_desc_e82x[] = {
- /* name, gpio */
- { TIME_SYNC, { 4, -1 }},
- { ONE_PPS, { -1, 5 }},
+ /* name, gpio, delay */
+ { TIME_SYNC, { 4, -1 }, { 0, 0 }},
+ { ONE_PPS, { -1, 5 }, { 0, 11 }},
};
static const struct ice_ptp_pin_desc ice_pin_desc_e825c[] = {
- /* name, gpio */
- { SDP0, { 0, 0 }},
- { SDP1, { 1, 1 }},
- { SDP2, { 2, 2 }},
- { SDP3, { 3, 3 }},
- { TIME_SYNC, { 4, -1 }},
- { ONE_PPS, { -1, 5 }},
+ /* name, gpio, delay */
+ { SDP0, { 0, 0 }, { 15, 14 }},
+ { SDP1, { 1, 1 }, { 15, 14 }},
+ { SDP2, { 2, 2 }, { 15, 14 }},
+ { SDP3, { 3, 3 }, { 15, 14 }},
+ { TIME_SYNC, { 4, -1 }, { 11, 0 }},
+ { ONE_PPS, { -1, 5 }, { 0, 9 }},
};
static const struct ice_ptp_pin_desc ice_pin_desc_e810[] = {
- /* name, gpio */
- { SDP0, { 0, 0 }},
- { SDP1, { 1, 1 }},
- { SDP2, { 2, 2 }},
- { SDP3, { 3, 3 }},
- { ONE_PPS, { -1, 5 }},
+ /* name, gpio, delay */
+ { SDP0, { 0, 0 }, { 0, 1 }},
+ { SDP1, { 1, 1 }, { 0, 1 }},
+ { SDP2, { 2, 2 }, { 0, 1 }},
+ { SDP3, { 3, 3 }, { 0, 1 }},
+ { ONE_PPS, { -1, 5 }, { 0, 1 }},
};
static const char ice_pin_names_nvm[][64] = {
@@ -49,12 +49,12 @@ static const char ice_pin_names_nvm[][64] = {
};
static const struct ice_ptp_pin_desc ice_pin_desc_e810_sma[] = {
- /* name, gpio */
- { GNSS, { 1, -1 }},
- { SMA1, { 1, 0 }},
- { UFL1, { -1, 0 }},
- { SMA2, { 3, 2 }},
- { UFL2, { 3, -1 }},
+ /* name, gpio, delay */
+ { GNSS, { 1, -1 }, { 0, 0 }},
+ { SMA1, { 1, 0 }, { 0, 1 }},
+ { UFL1, { -1, 0 }, { 0, 1 }},
+ { SMA2, { 3, 2 }, { 0, 1 }},
+ { UFL2, { 3, -1 }, { 0, 0 }},
};
static struct ice_pf *ice_get_ctrl_pf(struct ice_pf *pf)
@@ -298,18 +298,30 @@ void ice_ptp_restore_timestamp_mode(struct ice_pf *pf)
* @sts: Optional parameter for holding a pair of system timestamps from
* the system clock. Will be ignored if NULL is given.
*/
-static u64
-ice_ptp_read_src_clk_reg(struct ice_pf *pf, struct ptp_system_timestamp *sts)
+u64 ice_ptp_read_src_clk_reg(struct ice_pf *pf,
+ struct ptp_system_timestamp *sts)
{
struct ice_hw *hw = &pf->hw;
u32 hi, lo, lo2;
u8 tmr_idx;
+ if (!ice_is_primary(hw))
+ hw = ice_get_primary_hw(pf);
+
tmr_idx = ice_get_ptp_src_clock_index(hw);
guard(spinlock)(&pf->adapter->ptp_gltsyn_time_lock);
/* Read the system timestamp pre PHC read */
ptp_read_system_prets(sts);
+ if (hw->mac_type == ICE_MAC_E830) {
+ u64 clk_time = rd64(hw, E830_GLTSYN_TIME_L(tmr_idx));
+
+ /* Read the system timestamp post PHC read */
+ ptp_read_system_postts(sts);
+
+ return clk_time;
+ }
+
lo = rd32(hw, GLTSYN_TIME_L(tmr_idx));
/* Read the system timestamp post PHC read */
@@ -464,7 +476,9 @@ ice_ptp_is_tx_tracker_up(struct ice_ptp_tx *tx)
*/
void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx)
{
+ struct ice_e810_params *params;
struct ice_ptp_port *ptp_port;
+ unsigned long flags;
struct sk_buff *skb;
struct ice_pf *pf;
@@ -473,6 +487,7 @@ void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx)
ptp_port = container_of(tx, struct ice_ptp_port, tx);
pf = ptp_port_to_pf(ptp_port);
+ params = &pf->hw.ptp.phy.e810;
/* Drop packets which have waited for more than 2 seconds */
if (time_is_before_jiffies(tx->tstamps[idx].start + 2 * HZ)) {
@@ -489,11 +504,17 @@ void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx)
ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx);
+ spin_lock_irqsave(&params->atqbal_wq.lock, flags);
+
+ params->atqbal_flags |= ATQBAL_FLAGS_INTR_IN_PROGRESS;
+
/* Write TS index to read to the PF register so the FW can read it */
- wr32(&pf->hw, PF_SB_ATQBAL,
- TS_LL_READ_TS_INTR | FIELD_PREP(TS_LL_READ_TS_IDX, idx) |
- TS_LL_READ_TS);
+ wr32(&pf->hw, REG_LL_PROXY_H,
+ REG_LL_PROXY_H_TS_INTR_ENA | FIELD_PREP(REG_LL_PROXY_H_TS_IDX, idx) |
+ REG_LL_PROXY_H_EXEC);
tx->last_ll_ts_idx_read = idx;
+
+ spin_unlock_irqrestore(&params->atqbal_wq.lock, flags);
}
/**
@@ -504,35 +525,52 @@ void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx)
{
struct skb_shared_hwtstamps shhwtstamps = {};
u8 idx = tx->last_ll_ts_idx_read;
+ struct ice_e810_params *params;
struct ice_ptp_port *ptp_port;
u64 raw_tstamp, tstamp;
bool drop_ts = false;
struct sk_buff *skb;
+ unsigned long flags;
+ struct device *dev;
struct ice_pf *pf;
- u32 val;
+ u32 reg_ll_high;
if (!tx->init || tx->last_ll_ts_idx_read < 0)
return;
ptp_port = container_of(tx, struct ice_ptp_port, tx);
pf = ptp_port_to_pf(ptp_port);
+ dev = ice_pf_to_dev(pf);
+ params = &pf->hw.ptp.phy.e810;
ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx);
- val = rd32(&pf->hw, PF_SB_ATQBAL);
+ spin_lock_irqsave(&params->atqbal_wq.lock, flags);
+
+ if (!(params->atqbal_flags & ATQBAL_FLAGS_INTR_IN_PROGRESS))
+ dev_dbg(dev, "%s: low latency interrupt request not in progress?\n",
+ __func__);
+
+ /* Read the low 32 bit value */
+ raw_tstamp = rd32(&pf->hw, REG_LL_PROXY_L);
+ /* Read the status together with high TS part */
+ reg_ll_high = rd32(&pf->hw, REG_LL_PROXY_H);
+
+ /* Wake up threads waiting on low latency interface */
+ params->atqbal_flags &= ~ATQBAL_FLAGS_INTR_IN_PROGRESS;
+
+ wake_up_locked(&params->atqbal_wq);
+
+ spin_unlock_irqrestore(&params->atqbal_wq.lock, flags);
/* When the bit is cleared, the TS is ready in the register */
- if (val & TS_LL_READ_TS) {
+ if (reg_ll_high & REG_LL_PROXY_H_EXEC) {
dev_err(ice_pf_to_dev(pf), "Failed to get the Tx tstamp - FW not ready");
return;
}
/* High 8 bit value of the TS is on the bits 16:23 */
- raw_tstamp = FIELD_GET(TS_LL_READ_TS_HIGH, val);
- raw_tstamp <<= 32;
-
- /* Read the low 32 bit value */
- raw_tstamp |= (u64)rd32(&pf->hw, PF_SB_ATQBAH);
+ raw_tstamp |= ((u64)FIELD_GET(REG_LL_PROXY_H_TS_HIGH, reg_ll_high)) << 32;
/* Devices using this interface always verify the timestamp differs
* relative to the last cached timestamp value.
@@ -946,28 +984,6 @@ ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
}
/**
- * ice_ptp_init_tx_eth56g - Initialize tracking for Tx timestamps
- * @pf: Board private structure
- * @tx: the Tx tracking structure to initialize
- * @port: the port this structure tracks
- *
- * Initialize the Tx timestamp tracker for this port. ETH56G PHYs
- * have independent memory blocks for all ports.
- *
- * Return: 0 for success, -ENOMEM when failed to allocate Tx tracker
- */
-static int ice_ptp_init_tx_eth56g(struct ice_pf *pf, struct ice_ptp_tx *tx,
- u8 port)
-{
- tx->block = port;
- tx->offset = 0;
- tx->len = INDEX_PER_PORT_ETH56G;
- tx->has_ready_bitmap = 1;
-
- return ice_ptp_alloc_tx_tracker(tx);
-}
-
-/**
* ice_ptp_init_tx_e82x - Initialize tracking for Tx timestamps
* @pf: Board private structure
* @tx: the Tx tracking structure to initialize
@@ -977,9 +993,11 @@ static int ice_ptp_init_tx_eth56g(struct ice_pf *pf, struct ice_ptp_tx *tx,
* the timestamp block is shared for all ports in the same quad. To avoid
* ports using the same timestamp index, logically break the block of
* registers into chunks based on the port number.
+ *
+ * Return: 0 on success, -ENOMEM when out of memory
*/
-static int
-ice_ptp_init_tx_e82x(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
+static int ice_ptp_init_tx_e82x(struct ice_pf *pf, struct ice_ptp_tx *tx,
+ u8 port)
{
tx->block = ICE_GET_QUAD_NUM(port);
tx->offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT_E82X;
@@ -990,24 +1008,27 @@ ice_ptp_init_tx_e82x(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
}
/**
- * ice_ptp_init_tx_e810 - Initialize tracking for Tx timestamps
+ * ice_ptp_init_tx - Initialize tracking for Tx timestamps
* @pf: Board private structure
* @tx: the Tx tracking structure to initialize
+ * @port: the port this structure tracks
+ *
+ * Initialize the Tx timestamp tracker for this PF. For all PHYs except E82X,
+ * each port has its own block of timestamps, independent of the other ports.
*
- * Initialize the Tx timestamp tracker for this PF. For E810 devices, each
- * port has its own block of timestamps, independent of the other ports.
+ * Return: 0 on success, -ENOMEM when out of memory
*/
-static int
-ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx)
+static int ice_ptp_init_tx(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
{
- tx->block = pf->hw.port_info->lport;
+ tx->block = port;
tx->offset = 0;
- tx->len = INDEX_PER_PORT_E810;
+ tx->len = INDEX_PER_PORT;
+
/* The E810 PHY does not provide a timestamp ready bitmap. Instead,
* verify new timestamps against cached copy of the last read
* timestamp.
*/
- tx->has_ready_bitmap = 0;
+ tx->has_ready_bitmap = pf->hw.mac_type != ICE_MAC_E810;
return ice_ptp_alloc_tx_tracker(tx);
}
@@ -1292,20 +1313,21 @@ ice_ptp_port_phy_stop(struct ice_ptp_port *ptp_port)
struct ice_hw *hw = &pf->hw;
int err;
- if (ice_is_e810(hw))
- return 0;
-
mutex_lock(&ptp_port->ps_lock);
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- err = ice_stop_phy_timer_eth56g(hw, port, true);
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
+ case ICE_MAC_E830:
+ err = 0;
break;
- case ICE_PHY_E82X:
+ case ICE_MAC_GENERIC:
kthread_cancel_delayed_work_sync(&ptp_port->ov_work);
err = ice_stop_phy_timer_e82x(hw, port, true);
break;
+ case ICE_MAC_GENERIC_3K_E825:
+ err = ice_stop_phy_timer_eth56g(hw, port, true);
+ break;
default:
err = -ENODEV;
}
@@ -1335,19 +1357,17 @@ ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port)
unsigned long flags;
int err;
- if (ice_is_e810(hw))
- return 0;
-
if (!ptp_port->link_up)
return ice_ptp_port_phy_stop(ptp_port);
mutex_lock(&ptp_port->ps_lock);
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- err = ice_start_phy_timer_eth56g(hw, port);
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
+ case ICE_MAC_E830:
+ err = 0;
break;
- case ICE_PHY_E82X:
+ case ICE_MAC_GENERIC:
/* Start the PHY timer in Vernier mode */
kthread_cancel_delayed_work_sync(&ptp_port->ov_work);
@@ -1372,6 +1392,9 @@ ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port)
kthread_queue_delayed_work(pf->ptp.kworker, &ptp_port->ov_work,
0);
break;
+ case ICE_MAC_GENERIC_3K_E825:
+ err = ice_start_phy_timer_eth56g(hw, port);
+ break;
default:
err = -ENODEV;
}
@@ -1388,10 +1411,9 @@ ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port)
/**
* ice_ptp_link_change - Reconfigure PTP after link status change
* @pf: Board private structure
- * @port: Port for which the PHY start is set
* @linkup: Link is up or down
*/
-void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
+void ice_ptp_link_change(struct ice_pf *pf, bool linkup)
{
struct ice_ptp_port *ptp_port;
struct ice_hw *hw = &pf->hw;
@@ -1399,14 +1421,7 @@ void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
if (pf->ptp.state != ICE_PTP_READY)
return;
- if (WARN_ON_ONCE(port >= hw->ptp.num_lports))
- return;
-
ptp_port = &pf->ptp.port;
- if (ice_is_e825c(hw) && hw->ptp.is_2x50g_muxed_topo)
- port *= 2;
- if (WARN_ON_ONCE(ptp_port->port_num != port))
- return;
/* Update cached link status for this port immediately */
ptp_port->link_up = linkup;
@@ -1414,12 +1429,14 @@ void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
/* Skip HW writes if reset is in progress */
if (pf->hw.reset_ongoing)
return;
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_E810:
- /* Do not reconfigure E810 PHY */
+
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
+ case ICE_MAC_E830:
+ /* Do not reconfigure E810 or E830 PHY */
return;
- case ICE_PHY_ETH56G:
- case ICE_PHY_E82X:
+ case ICE_MAC_GENERIC:
+ case ICE_MAC_GENERIC_3K_E825:
ice_ptp_port_phy_restart(ptp_port);
return;
default:
@@ -1447,46 +1464,45 @@ static int ice_ptp_cfg_phy_interrupt(struct ice_pf *pf, bool ena, u32 threshold)
ice_ptp_reset_ts_memory(hw);
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G: {
- int port;
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
+ case ICE_MAC_E830:
+ return 0;
+ case ICE_MAC_GENERIC: {
+ int quad;
- for (port = 0; port < hw->ptp.num_lports; port++) {
+ for (quad = 0; quad < ICE_GET_QUAD_NUM(hw->ptp.num_lports);
+ quad++) {
int err;
- err = ice_phy_cfg_intr_eth56g(hw, port, ena, threshold);
+ err = ice_phy_cfg_intr_e82x(hw, quad, ena, threshold);
if (err) {
- dev_err(dev, "Failed to configure PHY interrupt for port %d, err %d\n",
- port, err);
+ dev_err(dev, "Failed to configure PHY interrupt for quad %d, err %d\n",
+ quad, err);
return err;
}
}
return 0;
}
- case ICE_PHY_E82X: {
- int quad;
+ case ICE_MAC_GENERIC_3K_E825: {
+ int port;
- for (quad = 0; quad < ICE_GET_QUAD_NUM(hw->ptp.num_lports);
- quad++) {
+ for (port = 0; port < hw->ptp.num_lports; port++) {
int err;
- err = ice_phy_cfg_intr_e82x(hw, quad, ena, threshold);
+ err = ice_phy_cfg_intr_eth56g(hw, port, ena, threshold);
if (err) {
- dev_err(dev, "Failed to configure PHY interrupt for quad %d, err %d\n",
- quad, err);
+ dev_err(dev, "Failed to configure PHY interrupt for port %d, err %d\n",
+ port, err);
return err;
}
}
return 0;
}
- case ICE_PHY_E810:
- return 0;
- case ICE_PHY_UNSUP:
+ case ICE_MAC_UNKNOWN:
default:
- dev_warn(dev, "%s: Unexpected PHY model %d\n", __func__,
- ice_get_phy_model(hw));
return -EOPNOTSUPP;
}
}
@@ -1566,18 +1582,29 @@ void ice_ptp_extts_event(struct ice_pf *pf)
* Event is defined in GLTSYN_EVNT_0 register
*/
for (chan = 0; chan < GLTSYN_EVNT_H_IDX_MAX; chan++) {
+ int pin_desc_idx;
+
/* Check if channel is enabled */
- if (pf->ptp.ext_ts_irq & (1 << chan)) {
- lo = rd32(hw, GLTSYN_EVNT_L(chan, tmr_idx));
- hi = rd32(hw, GLTSYN_EVNT_H(chan, tmr_idx));
- event.timestamp = (((u64)hi) << 32) | lo;
- event.type = PTP_CLOCK_EXTTS;
- event.index = chan;
-
- /* Fire event */
- ptp_clock_event(pf->ptp.clock, &event);
- pf->ptp.ext_ts_irq &= ~(1 << chan);
+ if (!(pf->ptp.ext_ts_irq & (1 << chan)))
+ continue;
+
+ lo = rd32(hw, GLTSYN_EVNT_L(chan, tmr_idx));
+ hi = rd32(hw, GLTSYN_EVNT_H(chan, tmr_idx));
+ event.timestamp = (u64)hi << 32 | lo;
+
+ /* Add delay compensation */
+ pin_desc_idx = ice_ptp_find_pin_idx(pf, PTP_PF_EXTTS, chan);
+ if (pin_desc_idx >= 0) {
+ const struct ice_ptp_pin_desc *desc;
+
+ desc = &pf->ptp.ice_pin_desc[pin_desc_idx];
+ event.timestamp -= desc->delay[0];
}
+
+ event.type = PTP_CLOCK_EXTTS;
+ event.index = chan;
+ pf->ptp.ext_ts_irq &= ~(1 << chan);
+ ptp_clock_event(pf->ptp.clock, &event);
}
}
@@ -1600,14 +1627,6 @@ static int ice_ptp_cfg_extts(struct ice_pf *pf, struct ptp_extts_request *rq,
int pin_desc_idx;
u8 tmr_idx;
- /* Reject requests with unsupported flags */
-
- if (rq->flags & ~(PTP_ENABLE_FEATURE |
- PTP_RISING_EDGE |
- PTP_FALLING_EDGE |
- PTP_STRICT_FLAGS))
- return -EOPNOTSUPP;
-
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
chan = rq->index;
@@ -1711,7 +1730,7 @@ static int ice_ptp_write_perout(struct ice_hw *hw, unsigned int chan,
/* 0. Reset mode & out_en in AUX_OUT */
wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), 0);
- if (ice_is_e825c(hw)) {
+ if (hw->mac_type == ICE_MAC_GENERIC_3K_E825) {
int err;
/* Enable/disable CGU 1PPS output for E825C */
@@ -1754,6 +1773,7 @@ static int ice_ptp_write_perout(struct ice_hw *hw, unsigned int chan,
8 + chan + (tmr_idx * 4));
wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val);
+ ice_flush(hw);
return 0;
}
@@ -1772,19 +1792,17 @@ static int ice_ptp_write_perout(struct ice_hw *hw, unsigned int chan,
static int ice_ptp_cfg_perout(struct ice_pf *pf, struct ptp_perout_request *rq,
int on)
{
+ unsigned int gpio_pin, prop_delay_ns;
u64 clk, period, start, phase;
struct ice_hw *hw = &pf->hw;
- unsigned int gpio_pin;
int pin_desc_idx;
- if (rq->flags & ~PTP_PEROUT_PHASE)
- return -EOPNOTSUPP;
-
pin_desc_idx = ice_ptp_find_pin_idx(pf, PTP_PF_PEROUT, rq->index);
if (pin_desc_idx < 0)
return -EIO;
gpio_pin = pf->ptp.ice_pin_desc[pin_desc_idx].gpio[1];
+ prop_delay_ns = pf->ptp.ice_pin_desc[pin_desc_idx].delay[1];
period = rq->period.sec * NSEC_PER_SEC + rq->period.nsec;
/* If we're disabling the output or period is 0, clear out CLKO and TGT
@@ -1794,7 +1812,7 @@ static int ice_ptp_cfg_perout(struct ice_pf *pf, struct ptp_perout_request *rq,
return ice_ptp_write_perout(hw, rq->index, gpio_pin, 0, 0);
if (strncmp(pf->ptp.pin_desc[pin_desc_idx].name, "1PPS", 64) == 0 &&
- period != NSEC_PER_SEC && hw->ptp.phy_model == ICE_PHY_E82X) {
+ period != NSEC_PER_SEC && hw->mac_type == ICE_MAC_GENERIC) {
dev_err(ice_pf_to_dev(pf), "1PPS pin supports only 1 s period\n");
return -EOPNOTSUPP;
}
@@ -1813,14 +1831,15 @@ static int ice_ptp_cfg_perout(struct ice_pf *pf, struct ptp_perout_request *rq,
div64_u64_rem(start, period, &phase);
/* If we have only phase or start time is in the past, start the timer
- * at the next multiple of period, maintaining phase.
+ * at the next multiple of period, maintaining phase, at least 0.5 seconds
+ * from now, so we have time to write it to HW.
*/
- clk = ice_ptp_read_src_clk_reg(pf, NULL);
- if (rq->flags & PTP_PEROUT_PHASE || start <= clk - ice_prop_delay(hw))
+ clk = ice_ptp_read_src_clk_reg(pf, NULL) + NSEC_PER_MSEC * 500;
+ if (rq->flags & PTP_PEROUT_PHASE || start <= clk - prop_delay_ns)
start = div64_u64(clk + period - 1, period) * period + phase;
/* Compensate for propagation delay from the generator to the pin. */
- start -= ice_prop_delay(hw);
+ start -= prop_delay_ns;
return ice_ptp_write_perout(hw, rq->index, gpio_pin, start, period);
}
@@ -2048,7 +2067,7 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
/* For Vernier mode on E82X, we need to recalibrate after new settime.
* Start with marking timestamps as invalid.
*/
- if (ice_get_phy_model(hw) == ICE_PHY_E82X) {
+ if (hw->mac_type == ICE_MAC_GENERIC) {
err = ice_ptp_clear_phy_offset_ready_e82x(hw);
if (err)
dev_warn(ice_pf_to_dev(pf), "Failed to mark timestamps as invalid before settime\n");
@@ -2072,7 +2091,7 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
ice_ptp_enable_all_perout(pf);
/* Recalibrate and re-enable timestamp blocks for E822/E823 */
- if (ice_get_phy_model(hw) == ICE_PHY_E82X)
+ if (hw->mac_type == ICE_MAC_GENERIC)
ice_ptp_restart_all_phy(pf);
exit:
if (err) {
@@ -2150,93 +2169,158 @@ static int ice_ptp_adjtime(struct ptp_clock_info *info, s64 delta)
return 0;
}
+/**
+ * struct ice_crosststamp_cfg - Device cross timestamp configuration
+ * @lock_reg: The hardware semaphore lock to use
+ * @lock_busy: Bit in the semaphore lock indicating the lock is busy
+ * @ctl_reg: The hardware register to request cross timestamp
+ * @ctl_active: Bit in the control register to request cross timestamp
+ * @art_time_l: Lower 32-bits of ART system time
+ * @art_time_h: Upper 32-bits of ART system time
+ * @dev_time_l: Lower 32-bits of device time (per timer index)
+ * @dev_time_h: Upper 32-bits of device time (per timer index)
+ */
+struct ice_crosststamp_cfg {
+ /* HW semaphore lock register */
+ u32 lock_reg;
+ u32 lock_busy;
+
+ /* Capture control register */
+ u32 ctl_reg;
+ u32 ctl_active;
+
+ /* Time storage */
+ u32 art_time_l;
+ u32 art_time_h;
+ u32 dev_time_l[2];
+ u32 dev_time_h[2];
+};
+
+static const struct ice_crosststamp_cfg ice_crosststamp_cfg_e82x = {
+ .lock_reg = PFHH_SEM,
+ .lock_busy = PFHH_SEM_BUSY_M,
+ .ctl_reg = GLHH_ART_CTL,
+ .ctl_active = GLHH_ART_CTL_ACTIVE_M,
+ .art_time_l = GLHH_ART_TIME_L,
+ .art_time_h = GLHH_ART_TIME_H,
+ .dev_time_l[0] = GLTSYN_HHTIME_L(0),
+ .dev_time_h[0] = GLTSYN_HHTIME_H(0),
+ .dev_time_l[1] = GLTSYN_HHTIME_L(1),
+ .dev_time_h[1] = GLTSYN_HHTIME_H(1),
+};
+
#ifdef CONFIG_ICE_HWTS
+static const struct ice_crosststamp_cfg ice_crosststamp_cfg_e830 = {
+ .lock_reg = E830_PFPTM_SEM,
+ .lock_busy = E830_PFPTM_SEM_BUSY_M,
+ .ctl_reg = E830_GLPTM_ART_CTL,
+ .ctl_active = E830_GLPTM_ART_CTL_ACTIVE_M,
+ .art_time_l = E830_GLPTM_ART_TIME_L,
+ .art_time_h = E830_GLPTM_ART_TIME_H,
+ .dev_time_l[0] = E830_GLTSYN_PTMTIME_L(0),
+ .dev_time_h[0] = E830_GLTSYN_PTMTIME_H(0),
+ .dev_time_l[1] = E830_GLTSYN_PTMTIME_L(1),
+ .dev_time_h[1] = E830_GLTSYN_PTMTIME_H(1),
+};
+
+#endif /* CONFIG_ICE_HWTS */
+/**
+ * struct ice_crosststamp_ctx - Device cross timestamp context
+ * @snapshot: snapshot of system clocks for historic interpolation
+ * @pf: pointer to the PF private structure
+ * @cfg: pointer to hardware configuration for cross timestamp
+ */
+struct ice_crosststamp_ctx {
+ struct system_time_snapshot snapshot;
+ struct ice_pf *pf;
+ const struct ice_crosststamp_cfg *cfg;
+};
+
/**
- * ice_ptp_get_syncdevicetime - Get the cross time stamp info
+ * ice_capture_crosststamp - Capture a device/system cross timestamp
* @device: Current device time
* @system: System counter value read synchronously with device time
- * @ctx: Context provided by timekeeping code
+ * @__ctx: Context passed from ice_ptp_getcrosststamp
*
* Read device and system (ART) clock simultaneously and return the corrected
* clock values in ns.
+ *
+ * Return: zero on success, or a negative error code on failure.
*/
-static int
-ice_ptp_get_syncdevicetime(ktime_t *device,
- struct system_counterval_t *system,
- void *ctx)
+static int ice_capture_crosststamp(ktime_t *device,
+ struct system_counterval_t *system,
+ void *__ctx)
{
- struct ice_pf *pf = (struct ice_pf *)ctx;
- struct ice_hw *hw = &pf->hw;
- u32 hh_lock, hh_art_ctl;
- int i;
+ struct ice_crosststamp_ctx *ctx = __ctx;
+ const struct ice_crosststamp_cfg *cfg;
+ u32 lock, ctl, ts_lo, ts_hi, tmr_idx;
+ struct ice_pf *pf;
+ struct ice_hw *hw;
+ int err;
+ u64 ts;
-#define MAX_HH_HW_LOCK_TRIES 5
-#define MAX_HH_CTL_LOCK_TRIES 100
+ cfg = ctx->cfg;
+ pf = ctx->pf;
+ hw = &pf->hw;
- for (i = 0; i < MAX_HH_HW_LOCK_TRIES; i++) {
- /* Get the HW lock */
- hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
- if (hh_lock & PFHH_SEM_BUSY_M) {
- usleep_range(10000, 15000);
- continue;
- }
- break;
- }
- if (hh_lock & PFHH_SEM_BUSY_M) {
- dev_err(ice_pf_to_dev(pf), "PTP failed to get hh lock\n");
+ tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
+ if (tmr_idx > 1)
+ return -EINVAL;
+
+ /* Poll until we obtain the cross-timestamp hardware semaphore */
+ err = rd32_poll_timeout(hw, cfg->lock_reg, lock,
+ !(lock & cfg->lock_busy),
+ 10 * USEC_PER_MSEC, 50 * USEC_PER_MSEC);
+ if (err) {
+ dev_err(ice_pf_to_dev(pf), "PTP failed to get cross timestamp lock\n");
return -EBUSY;
}
+ /* Snapshot system time for historic interpolation */
+ ktime_get_snapshot(&ctx->snapshot);
+
/* Program cmd to master timer */
ice_ptp_src_cmd(hw, ICE_PTP_READ_TIME);
/* Start the ART and device clock sync sequence */
- hh_art_ctl = rd32(hw, GLHH_ART_CTL);
- hh_art_ctl = hh_art_ctl | GLHH_ART_CTL_ACTIVE_M;
- wr32(hw, GLHH_ART_CTL, hh_art_ctl);
-
- for (i = 0; i < MAX_HH_CTL_LOCK_TRIES; i++) {
- /* Wait for sync to complete */
- hh_art_ctl = rd32(hw, GLHH_ART_CTL);
- if (hh_art_ctl & GLHH_ART_CTL_ACTIVE_M) {
- udelay(1);
- continue;
- } else {
- u32 hh_ts_lo, hh_ts_hi, tmr_idx;
- u64 hh_ts;
-
- tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
- /* Read ART time */
- hh_ts_lo = rd32(hw, GLHH_ART_TIME_L);
- hh_ts_hi = rd32(hw, GLHH_ART_TIME_H);
- hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo;
- system->cycles = hh_ts;
- system->cs_id = CSID_X86_ART;
- /* Read Device source clock time */
- hh_ts_lo = rd32(hw, GLTSYN_HHTIME_L(tmr_idx));
- hh_ts_hi = rd32(hw, GLTSYN_HHTIME_H(tmr_idx));
- hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo;
- *device = ns_to_ktime(hh_ts);
- break;
- }
- }
+ ctl = rd32(hw, cfg->ctl_reg);
+ ctl |= cfg->ctl_active;
+ wr32(hw, cfg->ctl_reg, ctl);
+ /* Poll until hardware completes the capture */
+ err = rd32_poll_timeout(hw, cfg->ctl_reg, ctl, !(ctl & cfg->ctl_active),
+ 5, 20 * USEC_PER_MSEC);
+ if (err)
+ goto err_timeout;
+
+ /* Read ART system time */
+ ts_lo = rd32(hw, cfg->art_time_l);
+ ts_hi = rd32(hw, cfg->art_time_h);
+ ts = ((u64)ts_hi << 32) | ts_lo;
+ system->cycles = ts;
+ system->cs_id = CSID_X86_ART;
+ system->use_nsecs = true;
+
+ /* Read Device source clock time */
+ ts_lo = rd32(hw, cfg->dev_time_l[tmr_idx]);
+ ts_hi = rd32(hw, cfg->dev_time_h[tmr_idx]);
+ ts = ((u64)ts_hi << 32) | ts_lo;
+ *device = ns_to_ktime(ts);
+
+err_timeout:
/* Clear the master timer */
ice_ptp_src_cmd(hw, ICE_PTP_NOP);
/* Release HW lock */
- hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
- hh_lock = hh_lock & ~PFHH_SEM_BUSY_M;
- wr32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id), hh_lock);
+ lock = rd32(hw, cfg->lock_reg);
+ lock &= ~cfg->lock_busy;
+ wr32(hw, cfg->lock_reg, lock);
- if (i == MAX_HH_CTL_LOCK_TRIES)
- return -ETIMEDOUT;
-
- return 0;
+ return err;
}
/**
- * ice_ptp_getcrosststamp_e82x - Capture a device cross timestamp
+ * ice_ptp_getcrosststamp - Capture a device cross timestamp
* @info: the driver's PTP info structure
* @cts: The memory to fill the cross timestamp info
*
@@ -2244,22 +2328,36 @@ ice_ptp_get_syncdevicetime(ktime_t *device,
* clock. Fill the cross timestamp information and report it back to the
* caller.
*
- * This is only valid for E822 and E823 devices which have support for
- * generating the cross timestamp via PCIe PTM.
- *
* In order to correctly correlate the ART timestamp back to the TSC time, the
* CPU must have X86_FEATURE_TSC_KNOWN_FREQ.
+ *
+ * Return: zero on success, or a negative error code on failure.
*/
-static int
-ice_ptp_getcrosststamp_e82x(struct ptp_clock_info *info,
- struct system_device_crosststamp *cts)
+static int ice_ptp_getcrosststamp(struct ptp_clock_info *info,
+ struct system_device_crosststamp *cts)
{
struct ice_pf *pf = ptp_info_to_pf(info);
+ struct ice_crosststamp_ctx ctx = {
+ .pf = pf,
+ };
+
+ switch (pf->hw.mac_type) {
+ case ICE_MAC_GENERIC:
+ case ICE_MAC_GENERIC_3K_E825:
+ ctx.cfg = &ice_crosststamp_cfg_e82x;
+ break;
+#ifdef CONFIG_ICE_HWTS
+ case ICE_MAC_E830:
+ ctx.cfg = &ice_crosststamp_cfg_e830;
+ break;
+#endif /* CONFIG_ICE_HWTS */
+ default:
+ return -EOPNOTSUPP;
+ }
- return get_device_system_crosststamp(ice_ptp_get_syncdevicetime,
- pf, NULL, cts);
+ return get_device_system_crosststamp(ice_capture_crosststamp, &ctx,
+ &ctx.snapshot, cts);
}
-#endif /* CONFIG_ICE_HWTS */
/**
* ice_ptp_get_ts_config - ioctl interface to read the timestamping config
@@ -2520,13 +2618,9 @@ static int ice_ptp_parse_sdp_entries(struct ice_pf *pf, __le16 *entries,
*/
static void ice_ptp_set_funcs_e82x(struct ice_pf *pf)
{
-#ifdef CONFIG_ICE_HWTS
- if (boot_cpu_has(X86_FEATURE_ART) &&
- boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ))
- pf->ptp.info.getcrosststamp = ice_ptp_getcrosststamp_e82x;
+ pf->ptp.info.getcrosststamp = ice_ptp_getcrosststamp;
-#endif /* CONFIG_ICE_HWTS */
- if (ice_is_e825c(&pf->hw)) {
+ if (pf->hw.mac_type == ICE_MAC_GENERIC_3K_E825) {
pf->ptp.ice_pin_desc = ice_pin_desc_e825c;
pf->ptp.info.n_pins = ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e825c);
} else {
@@ -2593,6 +2687,28 @@ err:
}
/**
+ * ice_ptp_set_funcs_e830 - Set specialized functions for E830 support
+ * @pf: Board private structure
+ *
+ * Assign functions to the PTP capabilities structure for E830 devices.
+ * Functions which operate across all device families should be set directly
+ * in ice_ptp_set_caps. Only add functions here which are distinct for E830
+ * devices.
+ */
+static void ice_ptp_set_funcs_e830(struct ice_pf *pf)
+{
+#ifdef CONFIG_ICE_HWTS
+ if (pcie_ptm_enabled(pf->pdev) && boot_cpu_has(X86_FEATURE_ART))
+ pf->ptp.info.getcrosststamp = ice_ptp_getcrosststamp;
+
+#endif /* CONFIG_ICE_HWTS */
+ /* Rest of the config is the same as base E810 */
+ pf->ptp.ice_pin_desc = ice_pin_desc_e810;
+ pf->ptp.info.n_pins = ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e810);
+ ice_ptp_setup_pin_cfg(pf);
+}
+
+/**
* ice_ptp_set_caps - Set PTP capabilities
* @pf: Board private structure
*/
@@ -2614,10 +2730,25 @@ static void ice_ptp_set_caps(struct ice_pf *pf)
info->enable = ice_ptp_gpio_enable;
info->verify = ice_verify_pin;
- if (ice_is_e810(&pf->hw))
+ info->supported_extts_flags = PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS;
+ info->supported_perout_flags = PTP_PEROUT_PHASE;
+
+ switch (pf->hw.mac_type) {
+ case ICE_MAC_E810:
ice_ptp_set_funcs_e810(pf);
- else
+ return;
+ case ICE_MAC_E830:
+ ice_ptp_set_funcs_e830(pf);
+ return;
+ case ICE_MAC_GENERIC:
+ case ICE_MAC_GENERIC_3K_E825:
ice_ptp_set_funcs_e82x(pf);
+ return;
+ default:
+ return;
+ }
}
/**
@@ -2728,6 +2859,65 @@ enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf)
}
/**
+ * ice_ptp_ts_irq - Process the PTP Tx timestamps in IRQ context
+ * @pf: Board private structure
+ *
+ * Return: IRQ_WAKE_THREAD if Tx timestamp read has to be handled in the bottom
+ * half of the interrupt and IRQ_HANDLED otherwise.
+ */
+irqreturn_t ice_ptp_ts_irq(struct ice_pf *pf)
+{
+ struct ice_hw *hw = &pf->hw;
+
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
+ /* E810 capable of low latency timestamping with interrupt can
+ * request a single timestamp in the top half and wait for
+ * a second LL TS interrupt from the FW when it's ready.
+ */
+ if (hw->dev_caps.ts_dev_info.ts_ll_int_read) {
+ struct ice_ptp_tx *tx = &pf->ptp.port.tx;
+ u8 idx;
+
+ if (!ice_pf_state_is_nominal(pf))
+ return IRQ_HANDLED;
+
+ spin_lock(&tx->lock);
+ idx = find_next_bit_wrap(tx->in_use, tx->len,
+ tx->last_ll_ts_idx_read + 1);
+ if (idx != tx->len)
+ ice_ptp_req_tx_single_tstamp(tx, idx);
+ spin_unlock(&tx->lock);
+
+ return IRQ_HANDLED;
+ }
+ fallthrough; /* non-LL_TS E810 */
+ case ICE_MAC_GENERIC:
+ case ICE_MAC_GENERIC_3K_E825:
+ /* All other devices process timestamps in the bottom half due
+ * to sleeping or polling.
+ */
+ if (!ice_ptp_pf_handles_tx_interrupt(pf))
+ return IRQ_HANDLED;
+
+ set_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread);
+ return IRQ_WAKE_THREAD;
+ case ICE_MAC_E830:
+ /* E830 can read timestamps in the top half using rd32() */
+ if (ice_ptp_process_ts(pf) == ICE_TX_TSTAMP_WORK_PENDING) {
+ /* Process outstanding Tx timestamps. If there
+ * is more work, re-arm the interrupt to trigger again.
+ */
+ wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
+ ice_flush(hw);
+ }
+ return IRQ_HANDLED;
+ default:
+ return IRQ_HANDLED;
+ }
+}
+
+/**
 * ice_ptp_maybe_trigger_tx_interrupt - Trigger Tx timestamp interrupt
* @pf: Board private structure
*
@@ -2747,7 +2937,7 @@ static void ice_ptp_maybe_trigger_tx_interrupt(struct ice_pf *pf)
bool trigger_oicr = false;
unsigned int i;
- if (ice_is_e810(hw))
+ if (!pf->ptp.port.tx.has_ready_bitmap)
return;
if (!ice_pf_src_tmr_owned(pf))
@@ -2794,6 +2984,32 @@ static void ice_ptp_periodic_work(struct kthread_work *work)
}
/**
+ * ice_ptp_prepare_rebuild_sec - Prepare second NAC for PTP reset or rebuild
+ * @pf: Board private structure
+ * @rebuild: rebuild if true, prepare if false
+ * @reset_type: the reset type being performed
+ */
+static void ice_ptp_prepare_rebuild_sec(struct ice_pf *pf, bool rebuild,
+ enum ice_reset_req reset_type)
+{
+ struct list_head *entry;
+
+ list_for_each(entry, &pf->adapter->ports.ports) {
+ struct ice_ptp_port *port = list_entry(entry,
+ struct ice_ptp_port,
+ list_node);
+ struct ice_pf *peer_pf = ptp_port_to_pf(port);
+
+ if (!ice_is_primary(&peer_pf->hw)) {
+ if (rebuild)
+ ice_ptp_rebuild(peer_pf, reset_type);
+ else
+ ice_ptp_prepare_for_reset(peer_pf, reset_type);
+ }
+ }
+}
+
+/**
* ice_ptp_prepare_for_reset - Prepare PTP for reset
* @pf: Board private structure
* @reset_type: the reset type being performed
@@ -2801,6 +3017,7 @@ static void ice_ptp_periodic_work(struct kthread_work *work)
void ice_ptp_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
struct ice_ptp *ptp = &pf->ptp;
+ struct ice_hw *hw = &pf->hw;
u8 src_tmr;
if (ptp->state != ICE_PTP_READY)
@@ -2816,6 +3033,9 @@ void ice_ptp_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
if (reset_type == ICE_RESET_PFR)
return;
+ if (ice_pf_src_tmr_owned(pf) && hw->mac_type == ICE_MAC_GENERIC_3K_E825)
+ ice_ptp_prepare_rebuild_sec(pf, false, reset_type);
+
ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);
/* Disable periodic outputs */
@@ -2882,14 +3102,12 @@ static int ice_ptp_rebuild_owner(struct ice_pf *pf)
*/
ice_ptp_flush_all_tx_tracker(pf);
- if (!ice_is_e810(hw)) {
- /* Enable quad interrupts */
- err = ice_ptp_cfg_phy_interrupt(pf, true, 1);
- if (err)
- return err;
+ /* Enable quad interrupts */
+ err = ice_ptp_cfg_phy_interrupt(pf, true, 1);
+ if (err)
+ return err;
- ice_ptp_restart_all_phy(pf);
- }
+ ice_ptp_restart_all_phy(pf);
/* Re-enable all periodic outputs and external timestamp events */
ice_ptp_enable_all_perout(pf);
@@ -2939,12 +3157,6 @@ err:
dev_err(ice_pf_to_dev(pf), "PTP reset failed %d\n", err);
}
-static bool ice_is_primary(struct ice_hw *hw)
-{
- return ice_is_e825c(hw) && ice_is_dual(hw) ?
- !!(hw->dev_caps.nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M) : true;
-}
-
static int ice_ptp_setup_adapter(struct ice_pf *pf)
{
if (!ice_pf_src_tmr_owned(pf) || !ice_is_primary(&pf->hw))
@@ -2960,7 +3172,7 @@ static int ice_ptp_setup_pf(struct ice_pf *pf)
struct ice_ptp *ctrl_ptp = ice_get_ctrl_ptp(pf);
struct ice_ptp *ptp = &pf->ptp;
- if (WARN_ON(!ctrl_ptp) || ice_get_phy_model(&pf->hw) == ICE_PHY_UNSUP)
+ if (WARN_ON(!ctrl_ptp) || pf->hw.mac_type == ICE_MAC_UNKNOWN)
return -ENODEV;
INIT_LIST_HEAD(&ptp->port.list_node);
@@ -2977,7 +3189,7 @@ static void ice_ptp_cleanup_pf(struct ice_pf *pf)
{
struct ice_ptp *ptp = &pf->ptp;
- if (ice_get_phy_model(&pf->hw) != ICE_PHY_UNSUP) {
+ if (pf->hw.mac_type != ICE_MAC_UNKNOWN) {
mutex_lock(&pf->adapter->ports.lock);
list_del(&ptp->port.list_node);
mutex_unlock(&pf->adapter->ports.lock);
@@ -3080,7 +3292,7 @@ static int ice_ptp_init_work(struct ice_pf *pf, struct ice_ptp *ptp)
/* Allocate a kworker for handling work required for the ports
* connected to the PTP hardware clock.
*/
- kworker = kthread_create_worker(0, "ice-ptp-%s",
+ kworker = kthread_run_worker(0, "ice-ptp-%s",
dev_name(ice_pf_to_dev(pf)));
if (IS_ERR(kworker))
return PTR_ERR(kworker);
@@ -3097,6 +3309,8 @@ static int ice_ptp_init_work(struct ice_pf *pf, struct ice_ptp *ptp)
* ice_ptp_init_port - Initialize PTP port structure
* @pf: Board private structure
* @ptp_port: PTP port structure
+ *
+ * Return: 0 on success, -ENODEV on invalid MAC type, -ENOMEM on failed alloc.
*/
static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port)
{
@@ -3104,16 +3318,14 @@ static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port)
mutex_init(&ptp_port->ps_lock);
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- return ice_ptp_init_tx_eth56g(pf, &ptp_port->tx,
- ptp_port->port_num);
- case ICE_PHY_E810:
- return ice_ptp_init_tx_e810(pf, &ptp_port->tx);
- case ICE_PHY_E82X:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
+ case ICE_MAC_E830:
+ case ICE_MAC_GENERIC_3K_E825:
+ return ice_ptp_init_tx(pf, &ptp_port->tx, ptp_port->port_num);
+ case ICE_MAC_GENERIC:
kthread_init_delayed_work(&ptp_port->ov_work,
ice_ptp_wait_for_offsets);
-
return ice_ptp_init_tx_e82x(pf, &ptp_port->tx,
ptp_port->port_num);
default:
@@ -3132,8 +3344,8 @@ static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port)
*/
static void ice_ptp_init_tx_interrupt_mode(struct ice_pf *pf)
{
- switch (ice_get_phy_model(&pf->hw)) {
- case ICE_PHY_E82X:
+ switch (pf->hw.mac_type) {
+ case ICE_MAC_GENERIC:
/* E822 based PHY has the clock owner process the interrupt
* for all ports.
*/
@@ -3168,6 +3380,12 @@ void ice_ptp_init(struct ice_pf *pf)
ptp->state = ICE_PTP_INITIALIZING;
+ if (hw->lane_num < 0) {
+ err = hw->lane_num;
+ goto err_exit;
+ }
+ ptp->port.port_num = hw->lane_num;
+
ice_ptp_init_hw(hw);
ice_ptp_init_tx_interrupt_mode(pf);
@@ -3188,10 +3406,6 @@ void ice_ptp_init(struct ice_pf *pf)
if (err)
goto err_exit;
- ptp->port.port_num = hw->pf_id;
- if (ice_is_e825c(hw) && hw->ptp.is_2x50g_muxed_topo)
- ptp->port.port_num = hw->pf_id * 2;
-
err = ice_ptp_init_port(pf, &ptp->port);
if (err)
goto err_exit;
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h
index 824e73b677a4..3b769a0cad00 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.h
@@ -128,8 +128,7 @@ struct ice_ptp_tx {
/* Quad and port information for initializing timestamp blocks */
#define INDEX_PER_QUAD 64
#define INDEX_PER_PORT_E82X 16
-#define INDEX_PER_PORT_E810 64
-#define INDEX_PER_PORT_ETH56G 64
+#define INDEX_PER_PORT 64
/**
* struct ice_ptp_port - data used to initialize an external port for PTP
@@ -211,6 +210,7 @@ enum ice_ptp_pin_nvm {
* struct ice_ptp_pin_desc - hardware pin description data
* @name_idx: index of the name of pin in ice_pin_names
* @gpio: the associated GPIO input and output pins
+ * @delay: input and output signal delays in nanoseconds
*
* Structure describing a PTP-capable GPIO pin that extends ptp_pin_desc array
* for the device. Device families have separate sets of available pins with
@@ -219,6 +219,7 @@ enum ice_ptp_pin_nvm {
struct ice_ptp_pin_desc {
int name_idx;
int gpio[2];
+ unsigned int delay[2];
};
/**
@@ -302,6 +303,9 @@ s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb);
void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx);
void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx);
enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf);
+irqreturn_t ice_ptp_ts_irq(struct ice_pf *pf);
+u64 ice_ptp_read_src_clk_reg(struct ice_pf *pf,
+ struct ptp_system_timestamp *sts);
u64 ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc,
const struct ice_pkt_ctx *pkt_ctx);
@@ -310,7 +314,7 @@ void ice_ptp_prepare_for_reset(struct ice_pf *pf,
enum ice_reset_req reset_type);
void ice_ptp_init(struct ice_pf *pf);
void ice_ptp_release(struct ice_pf *pf);
-void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup);
+void ice_ptp_link_change(struct ice_pf *pf, bool linkup);
#else /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
static inline int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr)
{
@@ -340,6 +344,17 @@ static inline bool ice_ptp_process_ts(struct ice_pf *pf)
return true;
}
+static inline irqreturn_t ice_ptp_ts_irq(struct ice_pf *pf)
+{
+ return IRQ_HANDLED;
+}
+
+static inline u64 ice_ptp_read_src_clk_reg(struct ice_pf *pf,
+ struct ptp_system_timestamp *sts)
+{
+ return 0;
+}
+
static inline u64
ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc,
const struct ice_pkt_ctx *pkt_ctx)
@@ -358,7 +373,7 @@ static inline void ice_ptp_prepare_for_reset(struct ice_pf *pf,
}
static inline void ice_ptp_init(struct ice_pf *pf) { }
static inline void ice_ptp_release(struct ice_pf *pf) { }
-static inline void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
+static inline void ice_ptp_link_change(struct ice_pf *pf, bool linkup)
{
}
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
index 585ce200c60f..003cdfada3ca 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
@@ -10,70 +10,25 @@
/* Constants defined for the PTP 1588 clock hardware. */
const struct ice_phy_reg_info_eth56g eth56g_phy_res[NUM_ETH56G_PHY_RES] = {
- /* ETH56G_PHY_REG_PTP */
- {
- /* base_addr */
- {
- 0x092000,
- 0x126000,
- 0x1BA000,
- 0x24E000,
- 0x2E2000,
- },
- /* step */
- 0x98,
+ [ETH56G_PHY_REG_PTP] = {
+ .base_addr = 0x092000,
+ .step = 0x98,
},
- /* ETH56G_PHY_MEM_PTP */
- {
- /* base_addr */
- {
- 0x093000,
- 0x127000,
- 0x1BB000,
- 0x24F000,
- 0x2E3000,
- },
- /* step */
- 0x200,
+ [ETH56G_PHY_MEM_PTP] = {
+ .base_addr = 0x093000,
+ .step = 0x200,
},
- /* ETH56G_PHY_REG_XPCS */
- {
- /* base_addr */
- {
- 0x000000,
- 0x009400,
- 0x128000,
- 0x1BC000,
- 0x250000,
- },
- /* step */
- 0x21000,
+ [ETH56G_PHY_REG_XPCS] = {
+ .base_addr = 0x000000,
+ .step = 0x21000,
},
- /* ETH56G_PHY_REG_MAC */
- {
- /* base_addr */
- {
- 0x085000,
- 0x119000,
- 0x1AD000,
- 0x241000,
- 0x2D5000,
- },
- /* step */
- 0x1000,
+ [ETH56G_PHY_REG_MAC] = {
+ .base_addr = 0x085000,
+ .step = 0x1000,
},
- /* ETH56G_PHY_REG_GPCS */
- {
- /* base_addr */
- {
- 0x084000,
- 0x118000,
- 0x1AC000,
- 0x240000,
- 0x2D4000,
- },
- /* step */
- 0x400,
+ [ETH56G_PHY_REG_GPCS] = {
+ .base_addr = 0x084000,
+ .step = 0x400,
},
};
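+/* Worked example (illustrative, not in the patch): with the flattened
+ * base_addr/step layout above, a per-lane register address is simply
+ * base_addr + lane * step. For ETH56G_PHY_REG_PTP:
+ *
+ *	lane 0: 0x092000 + 0 * 0x98 = 0x092000
+ *	lane 3: 0x092000 + 3 * 0x98 = 0x0921C8
+ *
+ * Lanes 4..7 first fold back to 0..3 (lane %= ports_per_phy) and reach the
+ * second PHY through the sideband destination device rather than through a
+ * separate set of base addresses, as the later hunks show.
+ */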
@@ -131,7 +86,7 @@ struct ice_eth56g_mac_reg_cfg eth56g_mac_cfg[NUM_ICE_ETH56G_LNK_SPD] = {
.rx_offset = {
.serdes = 0xffffeb27, /* -10.42424 */
.no_fec = 0xffffcccd, /* -25.6 */
- .fc = 0xfffe0014, /* -255.96 */
+ .fc = 0xfffc557b, /* -469.26 */
.sfd = 0x4a4, /* 2.32 */
.bs_ds = 0x32 /* 0.0969697 */
}
@@ -341,8 +296,6 @@ const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ] = {
823437500, /* 823.4375 MHz PLL */
/* nominal_incval */
0x136e44fabULL,
- /* pps_delay */
- 11,
},
/* ICE_TIME_REF_FREQ_122_880 -> 122.88 MHz */
@@ -351,8 +304,6 @@ const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ] = {
783360000, /* 783.36 MHz */
/* nominal_incval */
0x146cc2177ULL,
- /* pps_delay */
- 12,
},
/* ICE_TIME_REF_FREQ_125_000 -> 125 MHz */
@@ -361,8 +312,6 @@ const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ] = {
796875000, /* 796.875 MHz */
/* nominal_incval */
0x141414141ULL,
- /* pps_delay */
- 12,
},
/* ICE_TIME_REF_FREQ_153_600 -> 153.6 MHz */
@@ -371,8 +320,6 @@ const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ] = {
816000000, /* 816 MHz */
/* nominal_incval */
0x139b9b9baULL,
- /* pps_delay */
- 12,
},
/* ICE_TIME_REF_FREQ_156_250 -> 156.25 MHz */
@@ -381,8 +328,6 @@ const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ] = {
830078125, /* 830.78125 MHz */
/* nominal_incval */
0x134679aceULL,
- /* pps_delay */
- 11,
},
/* ICE_TIME_REF_FREQ_245_760 -> 245.76 MHz */
@@ -391,8 +336,6 @@ const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ] = {
783360000, /* 783.36 MHz */
/* nominal_incval */
0x146cc2177ULL,
- /* pps_delay */
- 12,
},
};
@@ -761,9 +704,9 @@ const struct ice_vernier_info_e82x e822_vernier[NUM_ICE_PTP_LNK_SPD] = {
/* rx_desk_rsgb_par */
644531250, /* 644.53125 MHz Reed Solomon gearbox */
/* tx_desk_rsgb_pcs */
- 644531250, /* 644.53125 MHz Reed Solomon gearbox */
+ 390625000, /* 390.625 MHz Reed Solomon gearbox */
/* rx_desk_rsgb_pcs */
- 644531250, /* 644.53125 MHz Reed Solomon gearbox */
+ 390625000, /* 390.625 MHz Reed Solomon gearbox */
/* tx_fixed_delay */
1620,
/* pmd_adj_divisor */
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
index dfd49732bd5b..ccac84eb34c9 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
@@ -240,7 +240,7 @@ static int ice_read_cgu_reg_e82x(struct ice_hw *hw, u32 addr, u32 *val)
{
struct ice_sbq_msg_input cgu_msg = {
.opcode = ice_sbq_msg_rd,
- .dest_dev = cgu,
+ .dest_dev = ice_sbq_dev_cgu,
.msg_addr_low = addr
};
int err;
@@ -272,7 +272,7 @@ static int ice_write_cgu_reg_e82x(struct ice_hw *hw, u32 addr, u32 val)
{
struct ice_sbq_msg_input cgu_msg = {
.opcode = ice_sbq_msg_wr,
- .dest_dev = cgu,
+ .dest_dev = ice_sbq_dev_cgu,
.msg_addr_low = addr,
.data = val
};
@@ -391,7 +391,7 @@ static int ice_cfg_cgu_pll_e82x(struct ice_hw *hw,
/* Log the current clock configuration */
ice_debug(hw, ICE_DBG_PTP, "Current CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
- dw24.ts_pll_enable ? "enabled" : "disabled",
+ str_enabled_disabled(dw24.ts_pll_enable),
ice_clk_src_str(dw24.time_ref_sel),
ice_clk_freq_str(dw9.time_ref_freq_sel),
bwm_lf.plllock_true_lock_cri ? "locked" : "unlocked");
@@ -469,7 +469,7 @@ static int ice_cfg_cgu_pll_e82x(struct ice_hw *hw,
/* Log the current clock configuration */
ice_debug(hw, ICE_DBG_PTP, "New CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
- dw24.ts_pll_enable ? "enabled" : "disabled",
+ str_enabled_disabled(dw24.ts_pll_enable),
ice_clk_src_str(dw24.time_ref_sel),
ice_clk_freq_str(dw9.time_ref_freq_sel),
bwm_lf.plllock_true_lock_cri ? "locked" : "unlocked");
@@ -546,7 +546,7 @@ static int ice_cfg_cgu_pll_e825c(struct ice_hw *hw,
/* Log the current clock configuration */
ice_debug(hw, ICE_DBG_PTP, "Current CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
- dw24.ts_pll_enable ? "enabled" : "disabled",
+ str_enabled_disabled(dw24.ts_pll_enable),
ice_clk_src_str(dw23.time_ref_sel),
ice_clk_freq_str(dw9.time_ref_freq_sel),
ro_lock.plllock_true_lock_cri ? "locked" : "unlocked");
@@ -651,7 +651,7 @@ static int ice_cfg_cgu_pll_e825c(struct ice_hw *hw,
/* Log the current clock configuration */
ice_debug(hw, ICE_DBG_PTP, "New CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
- dw24.ts_pll_enable ? "enabled" : "disabled",
+ str_enabled_disabled(dw24.ts_pll_enable),
ice_clk_src_str(dw23.time_ref_sel),
ice_clk_freq_str(dw9.time_ref_freq_sel),
ro_lock.plllock_true_lock_cri ? "locked" : "unlocked");
@@ -746,7 +746,7 @@ static int ice_init_cgu_e82x(struct ice_hw *hw)
int err;
/* Disable sticky lock detection so lock err reported is accurate */
- if (ice_is_e825c(hw))
+ if (hw->mac_type == ICE_MAC_GENERIC_3K_E825)
err = ice_cfg_cgu_pll_dis_sticky_bits_e825c(hw);
else
err = ice_cfg_cgu_pll_dis_sticky_bits_e82x(hw);
@@ -756,7 +756,7 @@ static int ice_init_cgu_e82x(struct ice_hw *hw)
/* Configure the CGU PLL using the parameters from the function
* capabilities.
*/
- if (ice_is_e825c(hw))
+ if (hw->mac_type == ICE_MAC_GENERIC_3K_E825)
err = ice_cfg_cgu_pll_e825c(hw, ts_info->time_ref,
(enum ice_clk_src)ts_info->clk_src);
else
@@ -827,8 +827,9 @@ static u32 ice_ptp_tmr_cmd_to_port_reg(struct ice_hw *hw,
/* Certain hardware families share the same register values for the
* port register and source timer register.
*/
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
+ case ICE_MAC_E830:
return ice_ptp_tmr_cmd_to_src_reg(hw, cmd) & TS_CMD_MASK_E810;
default:
break;
@@ -873,8 +874,12 @@ static u32 ice_ptp_tmr_cmd_to_port_reg(struct ice_hw *hw,
*/
void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
{
+ struct ice_pf *pf = container_of(hw, struct ice_pf, hw);
u32 cmd_val = ice_ptp_tmr_cmd_to_src_reg(hw, cmd);
+ if (!ice_is_primary(hw))
+ hw = ice_get_primary_hw(pf);
+
wr32(hw, GLTSYN_CMD, cmd_val);
}
@@ -890,41 +895,78 @@ static void ice_ptp_exec_tmr_cmd(struct ice_hw *hw)
{
struct ice_pf *pf = container_of(hw, struct ice_pf, hw);
+ if (!ice_is_primary(hw))
+ hw = ice_get_primary_hw(pf);
+
guard(spinlock)(&pf->adapter->ptp_gltsyn_time_lock);
wr32(hw, GLTSYN_CMD_SYNC, SYNC_EXEC_CMD);
ice_flush(hw);
}
+/**
+ * ice_ptp_cfg_sync_delay - Configure PHC to PHY synchronization delay
+ * @hw: pointer to HW struct
+ * @delay: delay between PHC and PHY SYNC command execution in nanoseconds
+ */
+static void ice_ptp_cfg_sync_delay(const struct ice_hw *hw, u32 delay)
+{
+ wr32(hw, GLTSYN_SYNC_DLAY, delay);
+ ice_flush(hw);
+}
+
/* 56G PHY device functions
*
* The following functions operate on devices with the ETH 56G PHY.
*/
/**
+ * ice_ptp_get_dest_dev_e825 - get destination PHY for given port number
+ * @hw: pointer to the HW struct
+ * @port: destination port
+ *
+ * Return: destination sideband queue PHY device.
+ */
+static enum ice_sbq_dev_id ice_ptp_get_dest_dev_e825(struct ice_hw *hw,
+ u8 port)
+{
+ u8 curr_phy, tgt_phy;
+
+ tgt_phy = port >= hw->ptp.ports_per_phy;
+ curr_phy = hw->lane_num >= hw->ptp.ports_per_phy;
+ /* In the driver, lanes 4..7 are in fact 0..3 on a second PHY.
+ * On a single complex E825C, PHY 0 is always destination device phy_0
+ * and PHY 1 is phy_0_peer.
+	 * On dual complex E825C, device phy_0 points to the PHY on the current
+	 * complex and phy_0_peer to the PHY on the other complex.
+ */
+ if ((!ice_is_dual(hw) && tgt_phy == 1) ||
+ (ice_is_dual(hw) && tgt_phy != curr_phy))
+ return ice_sbq_dev_phy_0_peer;
+ else
+ return ice_sbq_dev_phy_0;
+}
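+/* Summary table (illustrative, not in the patch) of the selection above,
+ * assuming ports_per_phy == 4 and hw->lane_num identifying the local lane:
+ *
+ *	single complex, port 0..3                  -> ice_sbq_dev_phy_0
+ *	single complex, port 4..7                  -> ice_sbq_dev_phy_0_peer
+ *	dual complex, port on same PHY as lane_num -> ice_sbq_dev_phy_0
+ *	dual complex, port on the other PHY        -> ice_sbq_dev_phy_0_peer
+ */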
+
+/**
* ice_write_phy_eth56g - Write a PHY port register
* @hw: pointer to the HW struct
- * @phy_idx: PHY index
+ * @port: destination port
* @addr: PHY register address
* @val: Value to write
*
* Return: 0 on success, other error codes when failed to write to PHY
*/
-static int ice_write_phy_eth56g(struct ice_hw *hw, u8 phy_idx, u32 addr,
- u32 val)
+static int ice_write_phy_eth56g(struct ice_hw *hw, u8 port, u32 addr, u32 val)
{
- struct ice_sbq_msg_input phy_msg;
+ struct ice_sbq_msg_input msg = {
+ .dest_dev = ice_ptp_get_dest_dev_e825(hw, port),
+ .opcode = ice_sbq_msg_wr,
+ .msg_addr_low = lower_16_bits(addr),
+ .msg_addr_high = upper_16_bits(addr),
+ .data = val
+ };
int err;
- phy_msg.opcode = ice_sbq_msg_wr;
-
- phy_msg.msg_addr_low = lower_16_bits(addr);
- phy_msg.msg_addr_high = upper_16_bits(addr);
-
- phy_msg.data = val;
- phy_msg.dest_dev = hw->ptp.phy.eth56g.phy_addr[phy_idx];
-
- err = ice_sbq_rw_reg(hw, &phy_msg, ICE_AQ_FLAG_RD);
-
+ err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD);
if (err)
ice_debug(hw, ICE_DBG_PTP, "PTP failed to send msg to phy %d\n",
err);
@@ -935,41 +977,36 @@ static int ice_write_phy_eth56g(struct ice_hw *hw, u8 phy_idx, u32 addr,
/**
* ice_read_phy_eth56g - Read a PHY port register
* @hw: pointer to the HW struct
- * @phy_idx: PHY index
+ * @port: destination port
* @addr: PHY register address
* @val: Value to write
*
* Return: 0 on success, other error codes when failed to read from PHY
*/
-static int ice_read_phy_eth56g(struct ice_hw *hw, u8 phy_idx, u32 addr,
- u32 *val)
+static int ice_read_phy_eth56g(struct ice_hw *hw, u8 port, u32 addr, u32 *val)
{
- struct ice_sbq_msg_input phy_msg;
+ struct ice_sbq_msg_input msg = {
+ .dest_dev = ice_ptp_get_dest_dev_e825(hw, port),
+ .opcode = ice_sbq_msg_rd,
+ .msg_addr_low = lower_16_bits(addr),
+ .msg_addr_high = upper_16_bits(addr)
+ };
int err;
- phy_msg.opcode = ice_sbq_msg_rd;
-
- phy_msg.msg_addr_low = lower_16_bits(addr);
- phy_msg.msg_addr_high = upper_16_bits(addr);
-
- phy_msg.data = 0;
- phy_msg.dest_dev = hw->ptp.phy.eth56g.phy_addr[phy_idx];
-
- err = ice_sbq_rw_reg(hw, &phy_msg, ICE_AQ_FLAG_RD);
- if (err) {
+ err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD);
+ if (err)
ice_debug(hw, ICE_DBG_PTP, "PTP failed to send msg to phy %d\n",
err);
- return err;
- }
-
- *val = phy_msg.data;
+ else
+ *val = msg.data;
- return 0;
+ return err;
}
/**
* ice_phy_res_address_eth56g - Calculate a PHY port register address
- * @port: Port number to be written
+ * @hw: pointer to the HW struct
+ * @lane: Lane number to be written
* @res_type: resource type (register/memory)
* @offset: Offset from PHY port register base
* @addr: The result address
@@ -978,17 +1015,19 @@ static int ice_read_phy_eth56g(struct ice_hw *hw, u8 phy_idx, u32 addr,
* * %0 - success
* * %EINVAL - invalid port number or resource type
*/
-static int ice_phy_res_address_eth56g(u8 port, enum eth56g_res_type res_type,
- u32 offset, u32 *addr)
+static int ice_phy_res_address_eth56g(struct ice_hw *hw, u8 lane,
+ enum eth56g_res_type res_type,
+ u32 offset,
+ u32 *addr)
{
- u8 lane = port % ICE_PORTS_PER_QUAD;
- u8 phy = ICE_GET_QUAD_NUM(port);
-
if (res_type >= NUM_ETH56G_PHY_RES)
return -EINVAL;
- *addr = eth56g_phy_res[res_type].base[phy] +
+ /* Lanes 4..7 are in fact 0..3 on a second PHY */
+ lane %= hw->ptp.ports_per_phy;
+ *addr = eth56g_phy_res[res_type].base_addr +
lane * eth56g_phy_res[res_type].step + offset;
+
return 0;
}
@@ -1008,19 +1047,17 @@ static int ice_phy_res_address_eth56g(u8 port, enum eth56g_res_type res_type,
static int ice_write_port_eth56g(struct ice_hw *hw, u8 port, u32 offset,
u32 val, enum eth56g_res_type res_type)
{
- u8 phy_port = port % hw->ptp.ports_per_phy;
- u8 phy_idx = port / hw->ptp.ports_per_phy;
u32 addr;
int err;
if (port >= hw->ptp.num_lports)
return -EINVAL;
- err = ice_phy_res_address_eth56g(phy_port, res_type, offset, &addr);
+ err = ice_phy_res_address_eth56g(hw, port, res_type, offset, &addr);
if (err)
return err;
- return ice_write_phy_eth56g(hw, phy_idx, addr, val);
+ return ice_write_phy_eth56g(hw, port, addr, val);
}
/**
@@ -1039,19 +1076,17 @@ static int ice_write_port_eth56g(struct ice_hw *hw, u8 port, u32 offset,
static int ice_read_port_eth56g(struct ice_hw *hw, u8 port, u32 offset,
u32 *val, enum eth56g_res_type res_type)
{
- u8 phy_port = port % hw->ptp.ports_per_phy;
- u8 phy_idx = port / hw->ptp.ports_per_phy;
u32 addr;
int err;
if (port >= hw->ptp.num_lports)
return -EINVAL;
- err = ice_phy_res_address_eth56g(phy_port, res_type, offset, &addr);
+ err = ice_phy_res_address_eth56g(hw, port, res_type, offset, &addr);
if (err)
return err;
- return ice_read_phy_eth56g(hw, phy_idx, addr, val);
+ return ice_read_phy_eth56g(hw, port, addr, val);
}
/**
@@ -1201,6 +1236,56 @@ static int ice_write_port_mem_eth56g(struct ice_hw *hw, u8 port, u16 offset,
}
/**
+ * ice_write_quad_ptp_reg_eth56g - Write a PHY quad register
+ * @hw: pointer to the HW struct
+ * @offset: PHY register offset
+ * @port: Port number
+ * @val: Value to write
+ *
+ * Return:
+ * * %0 - success
+ * * %EIO - invalid port number
+ * * %other - failed to write to PHY
+ */
+static int ice_write_quad_ptp_reg_eth56g(struct ice_hw *hw, u8 port,
+ u32 offset, u32 val)
+{
+ u32 addr;
+
+ if (port >= hw->ptp.num_lports)
+ return -EIO;
+
+ addr = eth56g_phy_res[ETH56G_PHY_REG_PTP].base_addr + offset;
+
+ return ice_write_phy_eth56g(hw, port, addr, val);
+}
+
+/**
+ * ice_read_quad_ptp_reg_eth56g - Read a PHY quad register
+ * @hw: pointer to the HW struct
+ * @offset: PHY register offset
+ * @port: Port number
+ * @val: Value to read
+ *
+ * Return:
+ * * %0 - success
+ * * %EIO - invalid port number
+ * * %other - failed to read from PHY
+ */
+static int ice_read_quad_ptp_reg_eth56g(struct ice_hw *hw, u8 port,
+ u32 offset, u32 *val)
+{
+ u32 addr;
+
+ if (port >= hw->ptp.num_lports)
+ return -EIO;
+
+ addr = eth56g_phy_res[ETH56G_PHY_REG_PTP].base_addr + offset;
+
+ return ice_read_phy_eth56g(hw, port, addr, val);
+}
+
+/**
* ice_is_64b_phy_reg_eth56g - Check if this is a 64bit PHY register
* @low_addr: the low address to check
* @high_addr: on return, contains the high address of the 64bit register
@@ -1518,8 +1603,8 @@ static int ice_read_ptp_tstamp_eth56g(struct ice_hw *hw, u8 port, u8 idx,
* lower 8 bits in the low register, and the upper 32 bits in the high
* register.
*/
- *tstamp = ((u64)hi) << TS_PHY_HIGH_S | ((u64)lo & TS_PHY_LOW_M);
-
+ *tstamp = FIELD_PREP(PHY_40B_HIGH_M, hi) |
+ FIELD_PREP(PHY_40B_LOW_M, lo);
return 0;
}
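/* Worked example (illustrative, not in the patch) of the 40-bit timestamp
 * composition above, using the masks added in ice_ptp_hw.h
 * (PHY_40B_LOW_M = GENMASK(7, 0), PHY_40B_HIGH_M = GENMASK_ULL(39, 8)):
 *
 *	hi = 0x12345678, lo = 0xAB
 *	tstamp = FIELD_PREP(PHY_40B_HIGH_M, hi) | FIELD_PREP(PHY_40B_LOW_M, lo)
 *	       = (0x12345678ULL << 8) | 0xAB
 *	       = 0x12345678AB
 */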
@@ -1918,7 +2003,6 @@ ice_phy_get_speed_eth56g(struct ice_link_status *li)
*/
static int ice_phy_cfg_parpcs_eth56g(struct ice_hw *hw, u8 port)
{
- u8 port_blk = port & ~(ICE_PORTS_PER_QUAD - 1);
u32 val;
int err;
@@ -1933,8 +2017,8 @@ static int ice_phy_cfg_parpcs_eth56g(struct ice_hw *hw, u8 port)
switch (ice_phy_get_speed_eth56g(&hw->port_info->phy.link_info)) {
case ICE_ETH56G_LNK_SPD_1G:
case ICE_ETH56G_LNK_SPD_2_5G:
- err = ice_read_ptp_reg_eth56g(hw, port_blk,
- PHY_GPCS_CONFIG_REG0, &val);
+ err = ice_read_quad_ptp_reg_eth56g(hw, port,
+ PHY_GPCS_CONFIG_REG0, &val);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read PHY_GPCS_CONFIG_REG0, status: %d",
err);
@@ -1945,8 +2029,8 @@ static int ice_phy_cfg_parpcs_eth56g(struct ice_hw *hw, u8 port)
val |= FIELD_PREP(PHY_GPCS_CONFIG_REG0_TX_THR_M,
ICE_ETH56G_NOMINAL_TX_THRESH);
- err = ice_write_ptp_reg_eth56g(hw, port_blk,
- PHY_GPCS_CONFIG_REG0, val);
+ err = ice_write_quad_ptp_reg_eth56g(hw, port,
+ PHY_GPCS_CONFIG_REG0, val);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to write PHY_GPCS_CONFIG_REG0, status: %d",
err);
@@ -1987,50 +2071,47 @@ static int ice_phy_cfg_parpcs_eth56g(struct ice_hw *hw, u8 port)
*/
int ice_phy_cfg_ptp_1step_eth56g(struct ice_hw *hw, u8 port)
{
- u8 port_blk = port & ~(ICE_PORTS_PER_QUAD - 1);
- u8 blk_port = port & (ICE_PORTS_PER_QUAD - 1);
+ u8 quad_lane = port % ICE_PORTS_PER_QUAD;
+ u32 addr, val, peer_delay;
bool enable, sfd_ena;
- u32 val, peer_delay;
int err;
enable = hw->ptp.phy.eth56g.onestep_ena;
peer_delay = hw->ptp.phy.eth56g.peer_delay;
sfd_ena = hw->ptp.phy.eth56g.sfd_ena;
- /* PHY_PTP_1STEP_CONFIG */
- err = ice_read_ptp_reg_eth56g(hw, port_blk, PHY_PTP_1STEP_CONFIG, &val);
+ addr = PHY_PTP_1STEP_CONFIG;
+ err = ice_read_quad_ptp_reg_eth56g(hw, port, addr, &val);
if (err)
return err;
if (enable)
- val |= blk_port;
+ val |= BIT(quad_lane);
else
- val &= ~blk_port;
+ val &= ~BIT(quad_lane);
val &= ~(PHY_PTP_1STEP_T1S_UP64_M | PHY_PTP_1STEP_T1S_DELTA_M);
- err = ice_write_ptp_reg_eth56g(hw, port_blk, PHY_PTP_1STEP_CONFIG, val);
+ err = ice_write_quad_ptp_reg_eth56g(hw, port, addr, val);
if (err)
return err;
- /* PHY_PTP_1STEP_PEER_DELAY */
+ addr = PHY_PTP_1STEP_PEER_DELAY(quad_lane);
val = FIELD_PREP(PHY_PTP_1STEP_PD_DELAY_M, peer_delay);
if (peer_delay)
val |= PHY_PTP_1STEP_PD_ADD_PD_M;
val |= PHY_PTP_1STEP_PD_DLY_V_M;
- err = ice_write_ptp_reg_eth56g(hw, port_blk,
- PHY_PTP_1STEP_PEER_DELAY(blk_port), val);
+ err = ice_write_quad_ptp_reg_eth56g(hw, port, addr, val);
if (err)
return err;
val &= ~PHY_PTP_1STEP_PD_DLY_V_M;
- err = ice_write_ptp_reg_eth56g(hw, port_blk,
- PHY_PTP_1STEP_PEER_DELAY(blk_port), val);
+ err = ice_write_quad_ptp_reg_eth56g(hw, port, addr, val);
if (err)
return err;
- /* PHY_MAC_XIF_MODE */
- err = ice_read_mac_reg_eth56g(hw, port, PHY_MAC_XIF_MODE, &val);
+ addr = PHY_MAC_XIF_MODE;
+ err = ice_read_mac_reg_eth56g(hw, port, addr, &val);
if (err)
return err;
@@ -2050,7 +2131,7 @@ int ice_phy_cfg_ptp_1step_eth56g(struct ice_hw *hw, u8 port)
FIELD_PREP(PHY_MAC_XIF_TS_BIN_MODE_M, enable) |
FIELD_PREP(PHY_MAC_XIF_TS_SFD_ENA_M, sfd_ena);
- return ice_write_mac_reg_eth56g(hw, port, PHY_MAC_XIF_MODE, val);
+ return ice_write_mac_reg_eth56g(hw, port, addr, val);
}
/**
@@ -2092,21 +2173,22 @@ static u32 ice_ptp_calc_bitslip_eth56g(struct ice_hw *hw, u8 port, u32 bs,
bool fc, bool rs,
enum ice_eth56g_link_spd spd)
{
- u8 port_offset = port & (ICE_PORTS_PER_QUAD - 1);
- u8 port_blk = port & ~(ICE_PORTS_PER_QUAD - 1);
u32 bitslip;
int err;
if (!bs || rs)
return 0;
- if (spd == ICE_ETH56G_LNK_SPD_1G || spd == ICE_ETH56G_LNK_SPD_2_5G)
+ if (spd == ICE_ETH56G_LNK_SPD_1G || spd == ICE_ETH56G_LNK_SPD_2_5G) {
err = ice_read_gpcs_reg_eth56g(hw, port, PHY_GPCS_BITSLIP,
&bitslip);
- else
- err = ice_read_ptp_reg_eth56g(hw, port_blk,
- PHY_REG_SD_BIT_SLIP(port_offset),
- &bitslip);
+ } else {
+ u8 quad_lane = port % ICE_PORTS_PER_QUAD;
+ u32 addr;
+
+ addr = PHY_REG_SD_BIT_SLIP(quad_lane);
+ err = ice_read_quad_ptp_reg_eth56g(hw, port, addr, &bitslip);
+ }
if (err)
return 0;
@@ -2350,6 +2432,7 @@ int ice_phy_cfg_intr_eth56g(struct ice_hw *hw, u8 port, bool ena, u8 threshold)
static int ice_read_phy_and_phc_time_eth56g(struct ice_hw *hw, u8 port,
u64 *phy_time, u64 *phc_time)
{
+ struct ice_pf *pf = container_of(hw, struct ice_pf, hw);
u64 tx_time, rx_time;
u32 zo, lo;
u8 tmr_idx;
@@ -2369,8 +2452,13 @@ static int ice_read_phy_and_phc_time_eth56g(struct ice_hw *hw, u8 port,
ice_ptp_exec_tmr_cmd(hw);
/* Read the captured PHC time from the shadow time registers */
- zo = rd32(hw, GLTSYN_SHTIME_0(tmr_idx));
- lo = rd32(hw, GLTSYN_SHTIME_L(tmr_idx));
+ if (ice_is_primary(hw)) {
+ zo = rd32(hw, GLTSYN_SHTIME_0(tmr_idx));
+ lo = rd32(hw, GLTSYN_SHTIME_L(tmr_idx));
+ } else {
+ zo = rd32(ice_get_primary_hw(pf), GLTSYN_SHTIME_0(tmr_idx));
+ lo = rd32(ice_get_primary_hw(pf), GLTSYN_SHTIME_L(tmr_idx));
+ }
*phc_time = (u64)lo << 32 | zo;
/* Read the captured PHY time from the PHY shadow registers */
@@ -2507,6 +2595,7 @@ int ice_stop_phy_timer_eth56g(struct ice_hw *hw, u8 port, bool soft_reset)
*/
int ice_start_phy_timer_eth56g(struct ice_hw *hw, u8 port)
{
+ struct ice_pf *pf = container_of(hw, struct ice_pf, hw);
u32 lo, hi;
u64 incval;
u8 tmr_idx;
@@ -2532,8 +2621,13 @@ int ice_start_phy_timer_eth56g(struct ice_hw *hw, u8 port)
if (err)
return err;
- lo = rd32(hw, GLTSYN_INCVAL_L(tmr_idx));
- hi = rd32(hw, GLTSYN_INCVAL_H(tmr_idx));
+ if (ice_is_primary(hw)) {
+ lo = rd32(hw, GLTSYN_INCVAL_L(tmr_idx));
+ hi = rd32(hw, GLTSYN_INCVAL_H(tmr_idx));
+ } else {
+ lo = rd32(ice_get_primary_hw(pf), GLTSYN_INCVAL_L(tmr_idx));
+ hi = rd32(ice_get_primary_hw(pf), GLTSYN_INCVAL_H(tmr_idx));
+ }
incval = (u64)hi << 32 | lo;
err = ice_write_40b_ptp_reg_eth56g(hw, port, PHY_REG_TIMETUS_L, incval);
@@ -2564,37 +2658,15 @@ int ice_start_phy_timer_eth56g(struct ice_hw *hw, u8 port)
}
/**
- * ice_sb_access_ena_eth56g - Enable SB devices (PHY and others) access
+ * ice_ptp_init_phc_e825 - Perform E825 specific PHC initialization
* @hw: pointer to HW struct
- * @enable: Enable or disable access
*
- * Enable sideband devices (PHY and others) access.
- */
-static void ice_sb_access_ena_eth56g(struct ice_hw *hw, bool enable)
-{
- u32 val = rd32(hw, PF_SB_REM_DEV_CTL);
-
- if (enable)
- val |= BIT(eth56g_phy_0) | BIT(cgu) | BIT(eth56g_phy_1);
- else
- val &= ~(BIT(eth56g_phy_0) | BIT(cgu) | BIT(eth56g_phy_1));
-
- wr32(hw, PF_SB_REM_DEV_CTL, val);
-}
-
-/**
- * ice_ptp_init_phc_eth56g - Perform E82X specific PHC initialization
- * @hw: pointer to HW struct
- *
- * Perform PHC initialization steps specific to E82X devices.
+ * Perform E825-specific PTP hardware clock initialization steps.
*
- * Return:
- * * %0 - success
- * * %other - failed to initialize CGU
+ * Return: 0 on success, negative error code otherwise.
*/
-static int ice_ptp_init_phc_eth56g(struct ice_hw *hw)
+static int ice_ptp_init_phc_e825(struct ice_hw *hw)
{
- ice_sb_access_ena_eth56g(hw, true);
/* Initialize the Clock Generation Unit */
return ice_init_cgu_e82x(hw);
}
@@ -2666,59 +2738,21 @@ static int ice_get_phy_tx_tstamp_ready_eth56g(struct ice_hw *hw, u8 port,
}
/**
- * ice_is_muxed_topo - detect breakout 2x50G topology for E825C
- * @hw: pointer to the HW struct
- *
- * Return: true if it's 2x50 breakout topology, false otherwise
- */
-static bool ice_is_muxed_topo(struct ice_hw *hw)
-{
- u8 link_topo;
- bool mux;
- u32 val;
-
- val = rd32(hw, GLGEN_SWITCH_MODE_CONFIG);
- mux = FIELD_GET(GLGEN_SWITCH_MODE_CONFIG_25X4_QUAD_M, val);
- val = rd32(hw, GLGEN_MAC_LINK_TOPO);
- link_topo = FIELD_GET(GLGEN_MAC_LINK_TOPO_LINK_TOPO_M, val);
-
- return (mux && link_topo == ICE_LINK_TOPO_UP_TO_2_LINKS);
-}
-
-/**
- * ice_ptp_init_phy_e825c - initialize PHY parameters
+ * ice_ptp_init_phy_e825 - initialize PHY parameters
* @hw: pointer to the HW struct
*/
-static void ice_ptp_init_phy_e825c(struct ice_hw *hw)
+static void ice_ptp_init_phy_e825(struct ice_hw *hw)
{
struct ice_ptp_hw *ptp = &hw->ptp;
struct ice_eth56g_params *params;
- u8 phy;
- ptp->phy_model = ICE_PHY_ETH56G;
params = &ptp->phy.eth56g;
params->onestep_ena = false;
params->peer_delay = 0;
params->sfd_ena = false;
- params->phy_addr[0] = eth56g_phy_0;
- params->phy_addr[1] = eth56g_phy_1;
params->num_phys = 2;
ptp->ports_per_phy = 4;
ptp->num_lports = params->num_phys * ptp->ports_per_phy;
-
- ice_sb_access_ena_eth56g(hw, true);
- for (phy = 0; phy < params->num_phys; phy++) {
- u32 phy_rev;
- int err;
-
- err = ice_read_phy_eth56g(hw, phy, PHY_REG_REVISION, &phy_rev);
- if (err || phy_rev != PHY_REVISION_ETH56G) {
- ptp->phy_model = ICE_PHY_UNSUP;
- return;
- }
- }
-
- ptp->is_2x50g_muxed_topo = ice_is_muxed_topo(hw);
}
/* E822 family functions
@@ -2737,10 +2771,9 @@ static void ice_fill_phy_msg_e82x(struct ice_hw *hw,
struct ice_sbq_msg_input *msg, u8 port,
u16 offset)
{
- int phy_port, phy, quadtype;
+ int phy_port, quadtype;
phy_port = port % hw->ptp.ports_per_phy;
- phy = port / hw->ptp.ports_per_phy;
quadtype = ICE_GET_QUAD_NUM(port) %
ICE_GET_QUAD_NUM(hw->ptp.ports_per_phy);
@@ -2752,12 +2785,7 @@ static void ice_fill_phy_msg_e82x(struct ice_hw *hw,
msg->msg_addr_high = P_Q1_H(P_4_BASE + offset, phy_port);
}
- if (phy == 0)
- msg->dest_dev = rmn_0;
- else if (phy == 1)
- msg->dest_dev = rmn_1;
- else
- msg->dest_dev = rmn_2;
+ msg->dest_dev = ice_sbq_dev_phy_0;
}
/**
@@ -3080,7 +3108,7 @@ static int ice_fill_quad_msg_e82x(struct ice_hw *hw,
if (quad >= ICE_GET_QUAD_NUM(hw->ptp.num_lports))
return -EINVAL;
- msg->dest_dev = rmn_0;
+ msg->dest_dev = ice_sbq_dev_phy_0;
if (!(quad % ICE_GET_QUAD_NUM(hw->ptp.ports_per_phy)))
addr = Q_0_BASE + offset;
@@ -3199,7 +3227,8 @@ ice_read_phy_tstamp_e82x(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp)
* lower 8 bits in the low register, and the upper 32 bits in the high
* register.
*/
- *tstamp = FIELD_PREP(TS_PHY_HIGH_M, hi) | FIELD_PREP(TS_PHY_LOW_M, lo);
+ *tstamp = FIELD_PREP(PHY_40B_HIGH_M, hi) |
+ FIELD_PREP(PHY_40B_LOW_M, lo);
return 0;
}
@@ -4772,7 +4801,6 @@ int ice_phy_cfg_intr_e82x(struct ice_hw *hw, u8 quad, bool ena, u8 threshold)
*/
static void ice_ptp_init_phy_e82x(struct ice_ptp_hw *ptp)
{
- ptp->phy_model = ICE_PHY_E82X;
ptp->num_lports = 8;
ptp->ports_per_phy = 8;
}
@@ -4799,7 +4827,7 @@ static int ice_read_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 *val)
msg.msg_addr_low = lower_16_bits(addr);
msg.msg_addr_high = upper_16_bits(addr);
msg.opcode = ice_sbq_msg_rd;
- msg.dest_dev = rmn_0;
+ msg.dest_dev = ice_sbq_dev_phy_0;
err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD);
if (err) {
@@ -4829,7 +4857,7 @@ static int ice_write_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 val)
msg.msg_addr_low = lower_16_bits(addr);
msg.msg_addr_high = upper_16_bits(addr);
msg.opcode = ice_sbq_msg_wr;
- msg.dest_dev = rmn_0;
+ msg.dest_dev = ice_sbq_dev_phy_0;
msg.data = val;
err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD);
@@ -4856,33 +4884,46 @@ static int ice_write_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 val)
static int
ice_read_phy_tstamp_ll_e810(struct ice_hw *hw, u8 idx, u8 *hi, u32 *lo)
{
+ struct ice_e810_params *params = &hw->ptp.phy.e810;
+ unsigned long flags;
u32 val;
- u8 i;
+ int err;
+
+ spin_lock_irqsave(&params->atqbal_wq.lock, flags);
+
+ /* Wait for any pending in-progress low latency interrupt */
+ err = wait_event_interruptible_locked_irq(params->atqbal_wq,
+ !(params->atqbal_flags &
+ ATQBAL_FLAGS_INTR_IN_PROGRESS));
+ if (err) {
+ spin_unlock_irqrestore(&params->atqbal_wq.lock, flags);
+ return err;
+ }
/* Write TS index to read to the PF register so the FW can read it */
- val = FIELD_PREP(TS_LL_READ_TS_IDX, idx) | TS_LL_READ_TS;
- wr32(hw, PF_SB_ATQBAL, val);
+ val = FIELD_PREP(REG_LL_PROXY_H_TS_IDX, idx) | REG_LL_PROXY_H_EXEC;
+ wr32(hw, REG_LL_PROXY_H, val);
/* Read the register repeatedly until the FW provides us the TS */
- for (i = TS_LL_READ_RETRIES; i > 0; i--) {
- val = rd32(hw, PF_SB_ATQBAL);
+ err = read_poll_timeout_atomic(rd32, val,
+ !FIELD_GET(REG_LL_PROXY_H_EXEC, val), 10,
+ REG_LL_PROXY_H_TIMEOUT_US, false, hw,
+ REG_LL_PROXY_H);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read PTP timestamp using low latency read\n");
+ spin_unlock_irqrestore(&params->atqbal_wq.lock, flags);
+ return err;
+ }
- /* When the bit is cleared, the TS is ready in the register */
- if (!(FIELD_GET(TS_LL_READ_TS, val))) {
- /* High 8 bit value of the TS is on the bits 16:23 */
- *hi = FIELD_GET(TS_LL_READ_TS_HIGH, val);
+ /* High 8 bit value of the TS is on the bits 16:23 */
+ *hi = FIELD_GET(REG_LL_PROXY_H_TS_HIGH, val);
- /* Read the low 32 bit value and set the TS valid bit */
- *lo = rd32(hw, PF_SB_ATQBAH) | TS_VALID;
- return 0;
- }
+ /* Read the low 32 bit value and set the TS valid bit */
+ *lo = rd32(hw, REG_LL_PROXY_L) | TS_VALID;
- udelay(10);
- }
+ spin_unlock_irqrestore(&params->atqbal_wq.lock, flags);
- /* FW failed to provide the TS in time */
- ice_debug(hw, ICE_DBG_PTP, "Failed to read PTP timestamp using low latency read\n");
- return -EINVAL;
+ return 0;
}
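/* Condensed sketch (illustrative, not in the patch) of the low latency proxy
 * handshake shared by this function and the new _ll_ helpers further down:
 * serialize on the atqbal waitqueue lock, wait out any in-progress interrupt,
 * arm REG_LL_PROXY_H with the EXEC bit, then poll until firmware clears it.
 *
 *	spin_lock_irq(&params->atqbal_wq.lock);
 *	wait_event_interruptible_locked_irq(params->atqbal_wq,
 *					    !(params->atqbal_flags &
 *					      ATQBAL_FLAGS_INTR_IN_PROGRESS));
 *	wr32(hw, REG_LL_PROXY_H, cmd_bits | REG_LL_PROXY_H_EXEC);
 *	read_poll_timeout_atomic(rd32, val,
 *				 !FIELD_GET(REG_LL_PROXY_H_EXEC, val),
 *				 10, REG_LL_PROXY_H_TIMEOUT_US, false,
 *				 hw, REG_LL_PROXY_H);
 *	spin_unlock_irq(&params->atqbal_wq.lock);
 *
 * cmd_bits stands in for whichever command and index fields the caller programs.
 */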
/**
@@ -4953,7 +4994,8 @@ ice_read_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx, u64 *tstamp)
/* For E810 devices, the timestamp is reported with the lower 32 bits
* in the low register, and the upper 8 bits in the high register.
*/
- *tstamp = ((u64)hi) << TS_HIGH_S | ((u64)lo & TS_LOW_M);
+ *tstamp = FIELD_PREP(PHY_EXT_40B_HIGH_M, hi) |
+ FIELD_PREP(PHY_EXT_40B_LOW_M, lo);
return 0;
}
@@ -5016,8 +5058,7 @@ static int ice_ptp_init_phc_e810(struct ice_hw *hw)
u8 tmr_idx;
int err;
- /* Ensure synchronization delay is zero */
- wr32(hw, GLTSYN_SYNC_DLAY, 0);
+ ice_ptp_cfg_sync_delay(hw, ICE_E810_E830_SYNC_DELAY);
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_ENA(tmr_idx),
@@ -5065,6 +5106,55 @@ static int ice_ptp_prep_phy_time_e810(struct ice_hw *hw, u32 time)
}
/**
+ * ice_ptp_prep_phy_adj_ll_e810 - Prep PHY ports for a time adjustment
+ * @hw: pointer to HW struct
+ * @adj: adjustment value to program
+ *
+ * Use the low latency firmware interface to program PHY time adjustment to
+ * all PHY ports.
+ *
+ * Return: 0 on success, negative error code on timeout or interruption
+ */
+static int ice_ptp_prep_phy_adj_ll_e810(struct ice_hw *hw, s32 adj)
+{
+ const u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+ struct ice_e810_params *params = &hw->ptp.phy.e810;
+ u32 val;
+ int err;
+
+ spin_lock_irq(&params->atqbal_wq.lock);
+
+ /* Wait for any pending in-progress low latency interrupt */
+ err = wait_event_interruptible_locked_irq(params->atqbal_wq,
+ !(params->atqbal_flags &
+ ATQBAL_FLAGS_INTR_IN_PROGRESS));
+ if (err) {
+ spin_unlock_irq(&params->atqbal_wq.lock);
+ return err;
+ }
+
+ wr32(hw, REG_LL_PROXY_L, adj);
+ val = FIELD_PREP(REG_LL_PROXY_H_PHY_TMR_CMD_M, REG_LL_PROXY_H_PHY_TMR_CMD_ADJ) |
+ FIELD_PREP(REG_LL_PROXY_H_PHY_TMR_IDX_M, tmr_idx) | REG_LL_PROXY_H_EXEC;
+ wr32(hw, REG_LL_PROXY_H, val);
+
+ /* Read the register repeatedly until the FW indicates completion */
+ err = read_poll_timeout_atomic(rd32, val,
+ !FIELD_GET(REG_LL_PROXY_H_EXEC, val),
+ 10, REG_LL_PROXY_H_TIMEOUT_US, false, hw,
+ REG_LL_PROXY_H);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to prepare PHY timer adjustment using low latency interface\n");
+ spin_unlock_irq(&params->atqbal_wq.lock);
+ return err;
+ }
+
+ spin_unlock_irq(&params->atqbal_wq.lock);
+
+ return 0;
+}
+
+/**
* ice_ptp_prep_phy_adj_e810 - Prep PHY port for a time adjustment
* @hw: pointer to HW struct
* @adj: adjustment value to program
@@ -5082,6 +5172,9 @@ static int ice_ptp_prep_phy_adj_e810(struct ice_hw *hw, s32 adj)
u8 tmr_idx;
int err;
+ if (hw->dev_caps.ts_dev_info.ll_phy_tmr_update)
+ return ice_ptp_prep_phy_adj_ll_e810(hw, adj);
+
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
/* Adjustments are represented as signed 2's complement values in
@@ -5105,6 +5198,56 @@ static int ice_ptp_prep_phy_adj_e810(struct ice_hw *hw, s32 adj)
}
/**
+ * ice_ptp_prep_phy_incval_ll_e810 - Prep PHY ports increment value change
+ * @hw: pointer to HW struct
+ * @incval: The new 40bit increment value to prepare
+ *
+ * Use the low latency firmware interface to program PHY time increment value
+ * for all PHY ports.
+ *
+ * Return: 0 on success, negative error code on timeout or interruption
+ */
+static int ice_ptp_prep_phy_incval_ll_e810(struct ice_hw *hw, u64 incval)
+{
+ const u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+ struct ice_e810_params *params = &hw->ptp.phy.e810;
+ u32 val;
+ int err;
+
+ spin_lock_irq(&params->atqbal_wq.lock);
+
+ /* Wait for any pending in-progress low latency interrupt */
+ err = wait_event_interruptible_locked_irq(params->atqbal_wq,
+ !(params->atqbal_flags &
+ ATQBAL_FLAGS_INTR_IN_PROGRESS));
+ if (err) {
+ spin_unlock_irq(&params->atqbal_wq.lock);
+ return err;
+ }
+
+ wr32(hw, REG_LL_PROXY_L, lower_32_bits(incval));
+ val = FIELD_PREP(REG_LL_PROXY_H_PHY_TMR_CMD_M, REG_LL_PROXY_H_PHY_TMR_CMD_FREQ) |
+ FIELD_PREP(REG_LL_PROXY_H_TS_HIGH, (u8)upper_32_bits(incval)) |
+ FIELD_PREP(REG_LL_PROXY_H_PHY_TMR_IDX_M, tmr_idx) | REG_LL_PROXY_H_EXEC;
+ wr32(hw, REG_LL_PROXY_H, val);
+
+ /* Read the register repeatedly until the FW indicates completion */
+ err = read_poll_timeout_atomic(rd32, val,
+ !FIELD_GET(REG_LL_PROXY_H_EXEC, val),
+ 10, REG_LL_PROXY_H_TIMEOUT_US, false, hw,
+ REG_LL_PROXY_H);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to prepare PHY timer increment using low latency interface\n");
+ spin_unlock_irq(&params->atqbal_wq.lock);
+ return err;
+ }
+
+ spin_unlock_irq(&params->atqbal_wq.lock);
+
+ return 0;
+}
+
+/**
* ice_ptp_prep_phy_incval_e810 - Prep PHY port increment value change
* @hw: pointer to HW struct
* @incval: The new 40bit increment value to prepare
@@ -5119,6 +5262,9 @@ static int ice_ptp_prep_phy_incval_e810(struct ice_hw *hw, u64 incval)
u8 tmr_idx;
int err;
+ if (hw->dev_caps.ts_dev_info.ll_phy_tmr_update)
+ return ice_ptp_prep_phy_incval_ll_e810(hw, incval);
+
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
low = lower_32_bits(incval);
high = upper_32_bits(incval);
@@ -5178,68 +5324,6 @@ ice_get_phy_tx_tstamp_ready_e810(struct ice_hw *hw, u8 port, u64 *tstamp_ready)
*/
/**
- * ice_get_pca9575_handle
- * @hw: pointer to the hw struct
- * @pca9575_handle: GPIO controller's handle
- *
- * Find and return the GPIO controller's handle in the netlist.
- * When found - the value will be cached in the hw structure and following calls
- * will return cached value
- */
-static int
-ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle)
-{
- struct ice_aqc_get_link_topo *cmd;
- struct ice_aq_desc desc;
- int status;
- u8 idx;
-
- /* If handle was read previously return cached value */
- if (hw->io_expander_handle) {
- *pca9575_handle = hw->io_expander_handle;
- return 0;
- }
-
- /* If handle was not detected read it from the netlist */
- cmd = &desc.params.get_link_topo;
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
-
- /* Set node type to GPIO controller */
- cmd->addr.topo_params.node_type_ctx =
- (ICE_AQC_LINK_TOPO_NODE_TYPE_M &
- ICE_AQC_LINK_TOPO_NODE_TYPE_GPIO_CTRL);
-
-#define SW_PCA9575_SFP_TOPO_IDX 2
-#define SW_PCA9575_QSFP_TOPO_IDX 1
-
- /* Check if the SW IO expander controlling SMA exists in the netlist. */
- if (hw->device_id == ICE_DEV_ID_E810C_SFP)
- idx = SW_PCA9575_SFP_TOPO_IDX;
- else if (hw->device_id == ICE_DEV_ID_E810C_QSFP)
- idx = SW_PCA9575_QSFP_TOPO_IDX;
- else
- return -EOPNOTSUPP;
-
- cmd->addr.topo_params.index = idx;
-
- status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
- if (status)
- return -EOPNOTSUPP;
-
- /* Verify if we found the right IO expander type */
- if (desc.params.get_link_topo.node_part_num !=
- ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575)
- return -EOPNOTSUPP;
-
- /* If present save the handle and return it */
- hw->io_expander_handle =
- le16_to_cpu(desc.params.get_link_topo.addr.handle);
- *pca9575_handle = hw->io_expander_handle;
-
- return 0;
-}
-
-/**
* ice_read_sma_ctrl
* @hw: pointer to the hw struct
* @data: pointer to data to be read from the GPIO controller
@@ -5304,37 +5388,6 @@ int ice_write_sma_ctrl(struct ice_hw *hw, u8 data)
}
/**
- * ice_read_pca9575_reg
- * @hw: pointer to the hw struct
- * @offset: GPIO controller register offset
- * @data: pointer to data to be read from the GPIO controller
- *
- * Read the register from the GPIO controller
- */
-int ice_read_pca9575_reg(struct ice_hw *hw, u8 offset, u8 *data)
-{
- struct ice_aqc_link_topo_addr link_topo;
- __le16 addr;
- u16 handle;
- int err;
-
- memset(&link_topo, 0, sizeof(link_topo));
-
- err = ice_get_pca9575_handle(hw, &handle);
- if (err)
- return err;
-
- link_topo.handle = cpu_to_le16(handle);
- link_topo.topo_params.node_type_ctx =
- FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M,
- ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED);
-
- addr = cpu_to_le16((u16)offset);
-
- return ice_aq_read_i2c(hw, link_topo, 0, addr, 1, data, NULL);
-}
-
-/**
* ice_ptp_read_sdp_ac - read SDP available connections section from NVM
* @hw: pointer to the HW struct
* @entries: returns the SDP available connections section from NVM
@@ -5400,16 +5453,138 @@ exit:
*/
static void ice_ptp_init_phy_e810(struct ice_ptp_hw *ptp)
{
- ptp->phy_model = ICE_PHY_E810;
+ ptp->num_lports = 8;
+ ptp->ports_per_phy = 4;
+
+ init_waitqueue_head(&ptp->phy.e810.atqbal_wq);
+}
+
+/* E830 functions
+ *
+ * The following functions operate on the E830 series devices.
+ *
+ */
+
+/**
+ * ice_ptp_init_phc_e830 - Perform E830 specific PHC initialization
+ * @hw: pointer to HW struct
+ *
+ * Perform E830-specific PTP hardware clock initialization steps.
+ */
+static void ice_ptp_init_phc_e830(const struct ice_hw *hw)
+{
+ ice_ptp_cfg_sync_delay(hw, ICE_E810_E830_SYNC_DELAY);
+}
+
+/**
+ * ice_ptp_write_direct_incval_e830 - Prep PHY port increment value change
+ * @hw: pointer to HW struct
+ * @incval: The new 40bit increment value to prepare
+ *
+ * Prepare the PHY port for a new increment value by programming the PHC
+ * GLTSYN_INCVAL_L and GLTSYN_INCVAL_H registers. The actual change is
+ * completed by FW automatically.
+ */
+static void ice_ptp_write_direct_incval_e830(const struct ice_hw *hw,
+ u64 incval)
+{
+ u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+
+ wr32(hw, GLTSYN_INCVAL_L(tmr_idx), lower_32_bits(incval));
+ wr32(hw, GLTSYN_INCVAL_H(tmr_idx), upper_32_bits(incval));
+}
+
+/**
+ * ice_ptp_write_direct_phc_time_e830 - Prepare PHY port with initial time
+ * @hw: Board private structure
+ * @time: Time to initialize the PHY port clock to
+ *
+ * Program the PHC GLTSYN_TIME registers directly with the initial clock time.
+ * On E830 the hardware propagates the new time to the PHY automatically, so
+ * no ICE_PTP_INIT_TIME command needs to be issued afterwards.
+ *
+ * The time value is the full 64-bit PHC time, usually in units of nominal
+ * nanoseconds.
+ */
+static void ice_ptp_write_direct_phc_time_e830(const struct ice_hw *hw,
+ u64 time)
+{
+ u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+
+ wr32(hw, GLTSYN_TIME_0(tmr_idx), 0);
+ wr32(hw, GLTSYN_TIME_L(tmr_idx), lower_32_bits(time));
+ wr32(hw, GLTSYN_TIME_H(tmr_idx), upper_32_bits(time));
+}
+
+/**
+ * ice_ptp_port_cmd_e830 - Prepare all external PHYs for a timer command
+ * @hw: pointer to HW struct
+ * @cmd: Command to be sent to the port
+ *
+ * Prepare the external PHYs connected to this device for a timer sync
+ * command.
+ *
+ * Return: 0 on success, negative error code when PHY write failed
+ */
+static int ice_ptp_port_cmd_e830(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
+{
+ u32 val = ice_ptp_tmr_cmd_to_port_reg(hw, cmd);
+
+ return ice_write_phy_reg_e810(hw, E830_ETH_GLTSYN_CMD, val);
+}
+
+/**
+ * ice_read_phy_tstamp_e830 - Read a PHY timestamp out of the external PHY
+ * @hw: pointer to the HW struct
+ * @idx: the timestamp index to read
+ * @tstamp: on return, the 40bit timestamp value
+ *
+ * Read a 40bit timestamp value out of the timestamp block of the external PHY
+ * on the E830 device.
+ */
+static void ice_read_phy_tstamp_e830(const struct ice_hw *hw, u8 idx,
+ u64 *tstamp)
+{
+ u32 hi, lo;
+
+ hi = rd32(hw, E830_PRTTSYN_TXTIME_H(idx));
+ lo = rd32(hw, E830_PRTTSYN_TXTIME_L(idx));
+
+ /* For E830 devices, the timestamp is reported with the lower 32 bits
+ * in the low register, and the upper 8 bits in the high register.
+ */
+ *tstamp = FIELD_PREP(PHY_EXT_40B_HIGH_M, hi) |
+ FIELD_PREP(PHY_EXT_40B_LOW_M, lo);
+}
+
+/**
+ * ice_get_phy_tx_tstamp_ready_e830 - Read Tx memory status register
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to read
+ * @tstamp_ready: contents of the Tx memory status register
+ */
+static void ice_get_phy_tx_tstamp_ready_e830(const struct ice_hw *hw, u8 port,
+ u64 *tstamp_ready)
+{
+ *tstamp_ready = rd32(hw, E830_PRTMAC_TS_TX_MEM_VALID_H);
+ *tstamp_ready <<= 32;
+ *tstamp_ready |= rd32(hw, E830_PRTMAC_TS_TX_MEM_VALID_L);
+}
+
+/**
+ * ice_ptp_init_phy_e830 - initialize PHY parameters
+ * @ptp: pointer to the PTP HW struct
+ */
+static void ice_ptp_init_phy_e830(struct ice_ptp_hw *ptp)
+{
ptp->num_lports = 8;
ptp->ports_per_phy = 4;
}
/* Device agnostic functions
*
- * The following functions implement shared behavior common to both E822 and
- * E810 devices, possibly calling a device specific implementation where
- * necessary.
+ * The following functions implement shared behavior common to all devices,
+ * possibly calling a device specific implementation where necessary.
*/
/**
@@ -5472,14 +5647,22 @@ void ice_ptp_init_hw(struct ice_hw *hw)
{
struct ice_ptp_hw *ptp = &hw->ptp;
- if (ice_is_e822(hw) || ice_is_e823(hw))
- ice_ptp_init_phy_e82x(ptp);
- else if (ice_is_e810(hw))
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
ice_ptp_init_phy_e810(ptp);
- else if (ice_is_e825c(hw))
- ice_ptp_init_phy_e825c(hw);
- else
- ptp->phy_model = ICE_PHY_UNSUP;
+ break;
+ case ICE_MAC_E830:
+ ice_ptp_init_phy_e830(ptp);
+ break;
+ case ICE_MAC_GENERIC:
+ ice_ptp_init_phy_e82x(ptp);
+ break;
+ case ICE_MAC_GENERIC_3K_E825:
+ ice_ptp_init_phy_e825(hw);
+ break;
+ default:
+ return;
+ }
}
/**
@@ -5500,11 +5683,11 @@ void ice_ptp_init_hw(struct ice_hw *hw)
static int ice_ptp_write_port_cmd(struct ice_hw *hw, u8 port,
enum ice_ptp_tmr_cmd cmd)
{
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- return ice_ptp_write_port_cmd_eth56g(hw, port, cmd);
- case ICE_PHY_E82X:
+ switch (hw->mac_type) {
+ case ICE_MAC_GENERIC:
return ice_ptp_write_port_cmd_e82x(hw, port, cmd);
+ case ICE_MAC_GENERIC_3K_E825:
+ return ice_ptp_write_port_cmd_eth56g(hw, port, cmd);
default:
return -EOPNOTSUPP;
}
@@ -5565,9 +5748,11 @@ static int ice_ptp_port_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
u32 port;
/* PHY models which can program all ports simultaneously */
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
return ice_ptp_port_cmd_e810(hw, cmd);
+ case ICE_MAC_E830:
+ return ice_ptp_port_cmd_e830(hw, cmd);
default:
break;
}
@@ -5638,23 +5823,29 @@ int ice_ptp_init_time(struct ice_hw *hw, u64 time)
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
/* Source timers */
+	/* For E830 we don't need to use shadow registers, it's automatic */
+ if (hw->mac_type == ICE_MAC_E830) {
+ ice_ptp_write_direct_phc_time_e830(hw, time);
+ return 0;
+ }
+
wr32(hw, GLTSYN_SHTIME_L(tmr_idx), lower_32_bits(time));
wr32(hw, GLTSYN_SHTIME_H(tmr_idx), upper_32_bits(time));
wr32(hw, GLTSYN_SHTIME_0(tmr_idx), 0);
/* PHY timers */
/* Fill Rx and Tx ports and send msg to PHY */
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- err = ice_ptp_prep_phy_time_eth56g(hw,
- (u32)(time & 0xFFFFFFFF));
- break;
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
err = ice_ptp_prep_phy_time_e810(hw, time & 0xFFFFFFFF);
break;
- case ICE_PHY_E82X:
+ case ICE_MAC_GENERIC:
err = ice_ptp_prep_phy_time_e82x(hw, time & 0xFFFFFFFF);
break;
+ case ICE_MAC_GENERIC_3K_E825:
+ err = ice_ptp_prep_phy_time_eth56g(hw,
+ (u32)(time & 0xFFFFFFFF));
+ break;
default:
err = -EOPNOTSUPP;
}
@@ -5686,20 +5877,26 @@ int ice_ptp_write_incval(struct ice_hw *hw, u64 incval)
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+	/* For E830 we don't need to use shadow registers, it's automatic */
+ if (hw->mac_type == ICE_MAC_E830) {
+ ice_ptp_write_direct_incval_e830(hw, incval);
+ return 0;
+ }
+
/* Shadow Adjust */
wr32(hw, GLTSYN_SHADJ_L(tmr_idx), lower_32_bits(incval));
wr32(hw, GLTSYN_SHADJ_H(tmr_idx), upper_32_bits(incval));
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- err = ice_ptp_prep_phy_incval_eth56g(hw, incval);
- break;
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
err = ice_ptp_prep_phy_incval_e810(hw, incval);
break;
- case ICE_PHY_E82X:
+ case ICE_MAC_GENERIC:
err = ice_ptp_prep_phy_incval_e82x(hw, incval);
break;
+ case ICE_MAC_GENERIC_3K_E825:
+ err = ice_ptp_prep_phy_incval_eth56g(hw, incval);
+ break;
default:
err = -EOPNOTSUPP;
}
@@ -5759,16 +5956,19 @@ int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj)
wr32(hw, GLTSYN_SHADJ_L(tmr_idx), 0);
wr32(hw, GLTSYN_SHADJ_H(tmr_idx), adj);
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- err = ice_ptp_prep_phy_adj_eth56g(hw, adj);
- break;
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
err = ice_ptp_prep_phy_adj_e810(hw, adj);
break;
- case ICE_PHY_E82X:
+ case ICE_MAC_E830:
+ /* E830 sync PHYs automatically after setting GLTSYN_SHADJ */
+ return 0;
+ case ICE_MAC_GENERIC:
err = ice_ptp_prep_phy_adj_e82x(hw, adj);
break;
+ case ICE_MAC_GENERIC_3K_E825:
+ err = ice_ptp_prep_phy_adj_eth56g(hw, adj);
+ break;
default:
err = -EOPNOTSUPP;
}
@@ -5792,13 +5992,16 @@ int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj)
*/
int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp)
{
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- return ice_read_ptp_tstamp_eth56g(hw, block, idx, tstamp);
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
return ice_read_phy_tstamp_e810(hw, block, idx, tstamp);
- case ICE_PHY_E82X:
+ case ICE_MAC_E830:
+ ice_read_phy_tstamp_e830(hw, idx, tstamp);
+ return 0;
+ case ICE_MAC_GENERIC:
return ice_read_phy_tstamp_e82x(hw, block, idx, tstamp);
+ case ICE_MAC_GENERIC_3K_E825:
+ return ice_read_ptp_tstamp_eth56g(hw, block, idx, tstamp);
default:
return -EOPNOTSUPP;
}
@@ -5822,13 +6025,13 @@ int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp)
*/
int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx)
{
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- return ice_clear_ptp_tstamp_eth56g(hw, block, idx);
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
return ice_clear_phy_tstamp_e810(hw, block, idx);
- case ICE_PHY_E82X:
+ case ICE_MAC_GENERIC:
return ice_clear_phy_tstamp_e82x(hw, block, idx);
+ case ICE_MAC_GENERIC_3K_E825:
+ return ice_clear_ptp_tstamp_eth56g(hw, block, idx);
default:
return -EOPNOTSUPP;
}
@@ -5885,14 +6088,14 @@ static int ice_get_pf_c827_idx(struct ice_hw *hw, u8 *idx)
*/
void ice_ptp_reset_ts_memory(struct ice_hw *hw)
{
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- ice_ptp_reset_ts_memory_eth56g(hw);
- break;
- case ICE_PHY_E82X:
+ switch (hw->mac_type) {
+ case ICE_MAC_GENERIC:
ice_ptp_reset_ts_memory_e82x(hw);
break;
- case ICE_PHY_E810:
+ case ICE_MAC_GENERIC_3K_E825:
+ ice_ptp_reset_ts_memory_eth56g(hw);
+ break;
+ case ICE_MAC_E810:
default:
return;
}
@@ -5914,13 +6117,16 @@ int ice_ptp_init_phc(struct ice_hw *hw)
/* Clear event err indications for auxiliary pins */
(void)rd32(hw, GLTSYN_STAT(src_idx));
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- return ice_ptp_init_phc_eth56g(hw);
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
return ice_ptp_init_phc_e810(hw);
- case ICE_PHY_E82X:
+ case ICE_MAC_E830:
+ ice_ptp_init_phc_e830(hw);
+ return 0;
+ case ICE_MAC_GENERIC:
return ice_ptp_init_phc_e82x(hw);
+ case ICE_MAC_GENERIC_3K_E825:
+ return ice_ptp_init_phc_e825(hw);
default:
return -EOPNOTSUPP;
}
@@ -5939,17 +6145,19 @@ int ice_ptp_init_phc(struct ice_hw *hw)
*/
int ice_get_phy_tx_tstamp_ready(struct ice_hw *hw, u8 block, u64 *tstamp_ready)
{
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- return ice_get_phy_tx_tstamp_ready_eth56g(hw, block,
- tstamp_ready);
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
return ice_get_phy_tx_tstamp_ready_e810(hw, block,
tstamp_ready);
- case ICE_PHY_E82X:
+ case ICE_MAC_E830:
+ ice_get_phy_tx_tstamp_ready_e830(hw, block, tstamp_ready);
+ return 0;
+ case ICE_MAC_GENERIC:
return ice_get_phy_tx_tstamp_ready_e82x(hw, block,
tstamp_ready);
- break;
+ case ICE_MAC_GENERIC_3K_E825:
+ return ice_get_phy_tx_tstamp_ready_eth56g(hw, block,
+ tstamp_ready);
default:
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
index 47af7c5c79b8..83f20fa7ace7 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
@@ -65,14 +65,14 @@ enum ice_eth56g_link_spd {
/**
* struct ice_phy_reg_info_eth56g - ETH56G PHY register parameters
- * @base: base address for each PHY block
+ * @base_addr: base address for each PHY block
* @step: step between PHY lanes
*
* Characteristic information for the various PHY register parameters in the
* ETH56G devices
*/
struct ice_phy_reg_info_eth56g {
- u32 base[NUM_ETH56G_PHY_RES];
+ u32 base_addr;
u32 step;
};
@@ -80,7 +80,6 @@ struct ice_phy_reg_info_eth56g {
* struct ice_time_ref_info_e82x
* @pll_freq: Frequency of PLL that drives timer ticks in Hz
* @nominal_incval: increment to generate nanoseconds in GLTSYN_TIME_L
- * @pps_delay: propagation delay of the PPS output signal
*
* Characteristic information for the various TIME_REF sources possible in the
* E822 devices
@@ -88,7 +87,6 @@ struct ice_phy_reg_info_eth56g {
struct ice_time_ref_info_e82x {
u64 pll_freq;
u64 nominal_incval;
- u8 pps_delay;
};
/**
@@ -326,8 +324,7 @@ extern const struct ice_vernier_info_e82x e822_vernier[NUM_ICE_PTP_LNK_SPD];
*/
#define ICE_E810_PLL_FREQ 812500000
#define ICE_PTP_NOMINAL_INCVAL_E810 0x13b13b13bULL
-#define ICE_E810_OUT_PROP_DELAY_NS 1
-#define ICE_E825C_OUT_PROP_DELAY_NS 11
+#define ICE_E810_E830_SYNC_DELAY 0
/* Device agnostic functions */
u8 ice_get_ptp_src_clock_index(struct ice_hw *hw);
@@ -389,11 +386,6 @@ static inline u64 ice_e82x_nominal_incval(enum ice_time_ref_freq time_ref)
return e82x_time_ref[time_ref].nominal_incval;
}
-static inline u64 ice_e82x_pps_delay(enum ice_time_ref_freq time_ref)
-{
- return e82x_time_ref[time_ref].pps_delay;
-}
-
/* E822 Vernier calibration functions */
int ice_stop_phy_timer_e82x(struct ice_hw *hw, u8 port, bool soft_reset);
int ice_start_phy_timer_e82x(struct ice_hw *hw, u8 port);
@@ -404,7 +396,6 @@ int ice_phy_cfg_intr_e82x(struct ice_hw *hw, u8 quad, bool ena, u8 threshold);
/* E810 family functions */
int ice_read_sma_ctrl(struct ice_hw *hw, u8 *data);
int ice_write_sma_ctrl(struct ice_hw *hw, u8 data);
-int ice_read_pca9575_reg(struct ice_hw *hw, u8 offset, u8 *data);
int ice_ptp_read_sdp_ac(struct ice_hw *hw, __le16 *entries, uint *num_entries);
int ice_cgu_get_num_pins(struct ice_hw *hw, bool input);
enum dpll_pin_type ice_cgu_get_pin_type(struct ice_hw *hw, u8 pin, bool input);
@@ -432,20 +423,6 @@ int ice_phy_cfg_ptp_1step_eth56g(struct ice_hw *hw, u8 port);
#define ICE_ETH56G_NOMINAL_THRESH4 0x7777
#define ICE_ETH56G_NOMINAL_TX_THRESH 0x6
-static inline u64 ice_prop_delay(const struct ice_hw *hw)
-{
- switch (hw->ptp.phy_model) {
- case ICE_PHY_ETH56G:
- return ICE_E825C_OUT_PROP_DELAY_NS;
- case ICE_PHY_E810:
- return ICE_E810_OUT_PROP_DELAY_NS;
- case ICE_PHY_E82X:
- return ice_e82x_pps_delay(ice_e82x_time_ref(hw));
- default:
- return 0;
- }
-}
-
/**
* ice_get_base_incval - Get base clock increment value
* @hw: pointer to the HW struct
@@ -454,23 +431,19 @@ static inline u64 ice_prop_delay(const struct ice_hw *hw)
*/
static inline u64 ice_get_base_incval(struct ice_hw *hw)
{
- switch (hw->ptp.phy_model) {
- case ICE_PHY_ETH56G:
- return ICE_ETH56G_NOMINAL_INCVAL;
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
+ case ICE_MAC_E830:
return ICE_PTP_NOMINAL_INCVAL_E810;
- case ICE_PHY_E82X:
+ case ICE_MAC_GENERIC:
return ice_e82x_nominal_incval(ice_e82x_time_ref(hw));
+ case ICE_MAC_GENERIC_3K_E825:
+ return ICE_ETH56G_NOMINAL_INCVAL;
default:
return 0;
}
}
-static inline bool ice_is_dual(struct ice_hw *hw)
-{
- return !!(hw->dev_caps.nac_topo.mode & ICE_NAC_TOPO_DUAL_M);
-}
-
#define PFTSYN_SEM_BYTES 4
#define ICE_PTP_CLOCK_INDEX_0 0x00
@@ -673,28 +646,41 @@ static inline bool ice_is_dual(struct ice_hw *hw)
/* E810 timer command register */
#define E810_ETH_GLTSYN_CMD 0x03000344
+/* E830 timer command register */
+#define E830_ETH_GLTSYN_CMD 0x00088814
+
+/* E830 PHC time register */
+#define E830_GLTSYN_TIME_L(_tmr_idx) (0x0008A000 + 0x1000 * (_tmr_idx))
+
/* Source timer incval macros */
#define INCVAL_HIGH_M 0xFF
-/* Timestamp block macros */
+/* PHY 40b registers macros */
+#define PHY_EXT_40B_LOW_M GENMASK(31, 0)
+#define PHY_EXT_40B_HIGH_M GENMASK_ULL(39, 32)
+#define PHY_40B_LOW_M GENMASK(7, 0)
+#define PHY_40B_HIGH_M GENMASK_ULL(39, 8)
#define TS_VALID BIT(0)
#define TS_LOW_M 0xFFFFFFFF
#define TS_HIGH_M 0xFF
#define TS_HIGH_S 32
-#define TS_PHY_LOW_M 0xFF
-#define TS_PHY_HIGH_M 0xFFFFFFFF
-#define TS_PHY_HIGH_S 8
-
#define BYTES_PER_IDX_ADDR_L_U 8
#define BYTES_PER_IDX_ADDR_L 4
/* Tx timestamp low latency read definitions */
-#define TS_LL_READ_RETRIES 200
-#define TS_LL_READ_TS_HIGH GENMASK(23, 16)
-#define TS_LL_READ_TS_IDX GENMASK(29, 24)
-#define TS_LL_READ_TS_INTR BIT(30)
-#define TS_LL_READ_TS BIT(31)
+#define REG_LL_PROXY_H_TIMEOUT_US 2000
+#define REG_LL_PROXY_H_PHY_TMR_CMD_M GENMASK(7, 6)
+#define REG_LL_PROXY_H_PHY_TMR_CMD_ADJ 0x1
+#define REG_LL_PROXY_H_PHY_TMR_CMD_FREQ 0x2
+#define REG_LL_PROXY_H_TS_HIGH GENMASK(23, 16)
+#define REG_LL_PROXY_H_PHY_TMR_IDX_M BIT(24)
+#define REG_LL_PROXY_H_TS_IDX GENMASK(29, 24)
+#define REG_LL_PROXY_H_TS_INTR_ENA BIT(30)
+#define REG_LL_PROXY_H_EXEC BIT(31)
+
+#define REG_LL_PROXY_L PF_SB_ATQBAH
+#define REG_LL_PROXY_H PF_SB_ATQBAL
/* Internal PHY timestamp address */
#define TS_L(a, idx) ((a) + ((idx) * BYTES_PER_IDX_ADDR_L_U))
@@ -789,36 +775,19 @@ static inline bool ice_is_dual(struct ice_hw *hw)
#define PHY_MAC_XIF_TS_SFD_ENA_M ICE_M(0x1, 20)
#define PHY_MAC_XIF_GMII_TS_SEL_M ICE_M(0x1, 21)
-/* GPCS config register */
-#define PHY_GPCS_CONFIG_REG0 0x268
-#define PHY_GPCS_CONFIG_REG0_TX_THR_M ICE_M(0xF, 24)
-#define PHY_GPCS_BITSLIP 0x5C
-
#define PHY_TS_INT_CONFIG_THRESHOLD_M ICE_M(0x3F, 0)
#define PHY_TS_INT_CONFIG_ENA_M BIT(6)
-/* 1-step PTP config */
-#define PHY_PTP_1STEP_CONFIG 0x270
-#define PHY_PTP_1STEP_T1S_UP64_M ICE_M(0xF, 4)
-#define PHY_PTP_1STEP_T1S_DELTA_M ICE_M(0xF, 8)
-#define PHY_PTP_1STEP_PEER_DELAY(_port) (0x274 + 4 * (_port))
-#define PHY_PTP_1STEP_PD_ADD_PD_M ICE_M(0x1, 0)
-#define PHY_PTP_1STEP_PD_DELAY_M ICE_M(0x3fffffff, 1)
-#define PHY_PTP_1STEP_PD_DLY_V_M ICE_M(0x1, 31)
-
/* Macros to derive offsets for TimeStampLow and TimeStampHigh */
#define PHY_TSTAMP_L(x) (((x) * 8) + 0)
#define PHY_TSTAMP_U(x) (((x) * 8) + 4)
-#define PHY_REG_REVISION 0x85000
-
#define PHY_REG_DESKEW_0 0x94
#define PHY_REG_DESKEW_0_RLEVEL GENMASK(6, 0)
#define PHY_REG_DESKEW_0_RLEVEL_FRAC GENMASK(9, 7)
#define PHY_REG_DESKEW_0_RLEVEL_FRAC_W 3
#define PHY_REG_DESKEW_0_VALID GENMASK(10, 10)
-#define PHY_REG_GPCS_BITSLIP 0x5C
#define PHY_REG_SD_BIT_SLIP(_port_offset) (0x29C + 4 * (_port_offset))
#define PHY_REVISION_ETH56G 0x10200
#define PHY_VENDOR_TXLANE_THRESH 0x2000C
@@ -838,7 +807,21 @@ static inline bool ice_is_dual(struct ice_hw *hw)
#define PHY_MAC_BLOCKTIME 0x50
#define PHY_MAC_MARKERTIME 0x54
#define PHY_MAC_TX_OFFSET 0x58
+#define PHY_GPCS_BITSLIP 0x5C
#define PHY_PTP_INT_STATUS 0x7FD140
+/* ETH56G registers shared per quad */
+/* GPCS config register */
+#define PHY_GPCS_CONFIG_REG0 0x268
+#define PHY_GPCS_CONFIG_REG0_TX_THR_M GENMASK(27, 24)
+/* 1-step PTP config */
+#define PHY_PTP_1STEP_CONFIG 0x270
+#define PHY_PTP_1STEP_T1S_UP64_M GENMASK(7, 4)
+#define PHY_PTP_1STEP_T1S_DELTA_M GENMASK(11, 8)
+#define PHY_PTP_1STEP_PEER_DELAY(_quad_lane) (0x274 + 4 * (_quad_lane))
+#define PHY_PTP_1STEP_PD_ADD_PD_M BIT(0)
+#define PHY_PTP_1STEP_PD_DELAY_M GENMASK(30, 1)
+#define PHY_PTP_1STEP_PD_DLY_V_M BIT(31)
+
#endif /* _ICE_PTP_HW_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_repr.c b/drivers/net/ethernet/intel/ice/ice_repr.c
index 970a99a52bf1..cb08746556a6 100644
--- a/drivers/net/ethernet/intel/ice/ice_repr.c
+++ b/drivers/net/ethernet/intel/ice/ice_repr.c
@@ -4,7 +4,7 @@
#include "ice.h"
#include "ice_eswitch.h"
#include "devlink/devlink.h"
-#include "devlink/devlink_port.h"
+#include "devlink/port.h"
#include "ice_sriov.h"
#include "ice_tc_lib.h"
#include "ice_dcb_lib.h"
@@ -219,7 +219,8 @@ ice_repr_setup_tc_cls_flower(struct ice_repr *repr,
{
switch (flower->command) {
case FLOW_CLS_REPLACE:
- return ice_add_cls_flower(repr->netdev, repr->src_vsi, flower);
+ return ice_add_cls_flower(repr->netdev, repr->src_vsi, flower,
+ true);
case FLOW_CLS_DESTROY:
return ice_del_cls_flower(repr->src_vsi, flower);
default:
@@ -336,6 +337,7 @@ void ice_repr_destroy(struct ice_repr *repr)
static void ice_repr_rem_vf(struct ice_repr *repr)
{
ice_eswitch_decfg_vsi(repr->src_vsi, repr->parent_mac);
+ ice_pass_vf_tx_lldp(repr->src_vsi, true);
unregister_netdev(repr->netdev);
ice_devlink_destroy_vf_port(repr->vf);
ice_virtchnl_set_dflt_ops(repr->vf);
@@ -417,6 +419,10 @@ static int ice_repr_add_vf(struct ice_repr *repr)
if (err)
goto err_netdev;
+ err = ice_drop_vf_tx_lldp(repr->src_vsi, true);
+ if (err)
+ goto err_drop_lldp;
+
err = ice_eswitch_cfg_vsi(repr->src_vsi, repr->parent_mac);
if (err)
goto err_cfg_vsi;
@@ -429,6 +435,8 @@ static int ice_repr_add_vf(struct ice_repr *repr)
return 0;
err_cfg_vsi:
+ ice_pass_vf_tx_lldp(repr->src_vsi, true);
+err_drop_lldp:
unregister_netdev(repr->netdev);
err_netdev:
ice_devlink_destroy_vf_port(vf);
diff --git a/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h b/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h
index 3b0054faf70c..183dd5457d6a 100644
--- a/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h
@@ -46,13 +46,10 @@ struct ice_sbq_evt_desc {
u8 data[24];
};
-enum ice_sbq_msg_dev {
- eth56g_phy_0 = 0x02,
- rmn_0 = 0x02,
- rmn_1 = 0x03,
- rmn_2 = 0x04,
- cgu = 0x06,
- eth56g_phy_1 = 0x0D,
+enum ice_sbq_dev_id {
+ ice_sbq_dev_phy_0 = 0x02,
+ ice_sbq_dev_cgu = 0x06,
+ ice_sbq_dev_phy_0_peer = 0x0D,
};
enum ice_sbq_msg_opcode {
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index 6ca13c5dcb14..d9d09296d1d4 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -85,6 +85,27 @@ ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid)
}
/**
+ * ice_sched_find_next_vsi_node - find the next node for a given VSI
+ * @vsi_node: VSI support node to start search with
+ *
+ * Return: Next VSI support node, or NULL.
+ *
+ * The function returns a pointer to the next node from the VSI layer
+ * assigned to the given VSI, or NULL if there is no such node.
+ */
+static struct ice_sched_node *
+ice_sched_find_next_vsi_node(struct ice_sched_node *vsi_node)
+{
+ unsigned int vsi_handle = vsi_node->vsi_handle;
+
+ while ((vsi_node = vsi_node->sibling) != NULL)
+ if (vsi_node->vsi_handle == vsi_handle)
+ break;
+
+ return vsi_node;
+}
+
+/**
* ice_aqc_send_sched_elem_cmd - send scheduling elements cmd
* @hw: pointer to the HW struct
* @cmd_opc: cmd opcode
@@ -1084,8 +1105,10 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
if (parent->num_children < max_child_nodes) {
new_num_nodes = max_child_nodes - parent->num_children;
} else {
- /* This parent is full, try the next sibling */
- parent = parent->sibling;
+ /* This parent is full,
+ * try the next available sibling.
+ */
+ parent = ice_sched_find_next_vsi_node(parent);
/* Don't modify the first node TEID memory if the
* first node was added already in the above call.
* Instead send some temp memory for all other
@@ -1528,12 +1551,23 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
/* get the first queue group node from VSI sub-tree */
qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer);
while (qgrp_node) {
+ struct ice_sched_node *next_vsi_node;
+
/* make sure the qgroup node is part of the VSI subtree */
if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node))
if (qgrp_node->num_children < max_children &&
qgrp_node->owner == owner)
break;
qgrp_node = qgrp_node->sibling;
+ if (qgrp_node)
+ continue;
+
+ next_vsi_node = ice_sched_find_next_vsi_node(vsi_node);
+ if (!next_vsi_node)
+ break;
+
+ vsi_node = next_vsi_node;
+ qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer);
}
/* Select the best queue group */
@@ -1604,16 +1638,16 @@ ice_sched_get_agg_node(struct ice_port_info *pi, struct ice_sched_node *tc_node,
/**
* ice_sched_calc_vsi_child_nodes - calculate number of VSI child nodes
* @hw: pointer to the HW struct
- * @num_qs: number of queues
+ * @num_new_qs: number of new queues that will be added to the tree
* @num_nodes: num nodes array
*
* This function calculates the number of VSI child nodes based on the
* number of queues.
*/
static void
-ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes)
+ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_new_qs, u16 *num_nodes)
{
- u16 num = num_qs;
+ u16 num = num_new_qs;
u8 i, qgl, vsil;
qgl = ice_sched_get_qgrp_layer(hw);
@@ -1779,7 +1813,11 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,
if (!parent)
return -EIO;
- if (i == vsil)
+ /* Do not modify the VSI handle of already existing VSI nodes
+ * (i.e. when no new VSI node was added to the tree).
+ * Assign the VSI handle only to newly added VSI nodes.
+ */
+ if (i == vsil && num_added)
parent->vsi_handle = vsi_handle;
}
@@ -1813,6 +1851,41 @@ ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc)
}
/**
+ * ice_sched_recalc_vsi_support_nodes - recalculate VSI support nodes count
+ * @hw: pointer to the HW struct
+ * @vsi_node: pointer to the leftmost VSI node that needs to be extended
+ * @new_numqs: new number of queues that the VSI has to handle
+ * @new_num_nodes: pointer to nodes count table to modify the VSI layer entry
+ *
+ * This function recalculates the number of VSI support nodes that need
+ * to be added after adding more Tx queues to a given VSI.
+ * The number of new VSI support nodes to add is saved in the
+ * @new_num_nodes table entry for the VSI layer.
+ */
+static void
+ice_sched_recalc_vsi_support_nodes(struct ice_hw *hw,
+ struct ice_sched_node *vsi_node,
+ unsigned int new_numqs, u16 *new_num_nodes)
+{
+ u32 vsi_nodes_cnt = 1;
+ u32 max_queue_cnt = 1;
+ u32 qgl, vsil;
+
+ qgl = ice_sched_get_qgrp_layer(hw);
+ vsil = ice_sched_get_vsi_layer(hw);
+
+ for (u32 i = vsil; i <= qgl; i++)
+ max_queue_cnt *= hw->max_children[i];
+
+ while ((vsi_node = ice_sched_find_next_vsi_node(vsi_node)) != NULL)
+ vsi_nodes_cnt++;
+
+ if (new_numqs > (max_queue_cnt * vsi_nodes_cnt))
+ new_num_nodes[vsil] = DIV_ROUND_UP(new_numqs, max_queue_cnt) -
+ vsi_nodes_cnt;
+}
+
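As a quick worked example of the calculation above: max_queue_cnt is the product of max_children[] from the VSI layer down to the queue-group layer, and the extra support nodes are the difference between DIV_ROUND_UP(new_numqs, max_queue_cnt) and the support nodes already linked for that VSI. The sketch below reproduces the arithmetic in plain C with made-up tree parameters.

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	/* assumed tree shape: each VSI node fans out to 8 queue groups,
	 * each queue group holds up to 8 queues -> 64 queues per VSI node
	 */
	unsigned int max_children[] = { 8, 8 };	/* VSI layer .. qgroup layer */
	unsigned int max_queue_cnt = 1, vsi_nodes_cnt = 1, new_numqs = 200;

	for (unsigned int i = 0; i < 2; i++)
		max_queue_cnt *= max_children[i];

	if (new_numqs > max_queue_cnt * vsi_nodes_cnt)
		printf("need %u extra VSI support node(s)\n",
		       DIV_ROUND_UP(new_numqs, max_queue_cnt) - vsi_nodes_cnt);
	return 0;
}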
+/**
* ice_sched_update_vsi_child_nodes - update VSI child nodes
* @pi: port information structure
* @vsi_handle: software VSI handle
@@ -1863,15 +1936,25 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
return status;
}
- if (new_numqs)
- ice_sched_calc_vsi_child_nodes(hw, new_numqs, new_num_nodes);
- /* Keep the max number of queue configuration all the time. Update the
- * tree only if number of queues > previous number of queues. This may
+ ice_sched_recalc_vsi_support_nodes(hw, vsi_node,
+ new_numqs, new_num_nodes);
+ ice_sched_calc_vsi_child_nodes(hw, new_numqs - prev_numqs,
+ new_num_nodes);
+
+ /* Never decrease the number of queues in the tree. Update the tree
+ * only if number of queues > previous number of queues. This may
* leave some extra nodes in the tree if number of queues < previous
* number but that wouldn't harm anything. Removing those extra nodes
* may complicate the code if those nodes are part of SRL or
* individually rate limited.
+ * Also, add the required VSI support nodes if the existing ones cannot
+ * handle the requested new number of queues.
*/
+ status = ice_sched_add_vsi_support_nodes(pi, vsi_handle, tc_node,
+ new_num_nodes);
+ if (status)
+ return status;
+
status = ice_sched_add_vsi_child_nodes(pi, vsi_handle, tc_node,
new_num_nodes, owner);
if (status)
@@ -2013,6 +2096,58 @@ static bool ice_sched_is_leaf_node_present(struct ice_sched_node *node)
}
/**
+ * ice_sched_rm_vsi_subtree - remove all nodes assigned to a given VSI
+ * @pi: port information structure
+ * @vsi_node: pointer to the leftmost node of the VSI to be removed
+ * @owner: LAN or RDMA
+ * @tc: TC number
+ *
+ * Return: Zero in case of success, or -EBUSY if the VSI has leaf nodes in TC.
+ *
+ * This function removes all the VSI support nodes associated with a given VSI
+ * and its LAN or RDMA children nodes from the scheduler tree.
+ */
+static int
+ice_sched_rm_vsi_subtree(struct ice_port_info *pi,
+ struct ice_sched_node *vsi_node, u8 owner, u8 tc)
+{
+ u16 vsi_handle = vsi_node->vsi_handle;
+ bool all_vsi_nodes_removed = true;
+ int j = 0;
+
+ while (vsi_node) {
+ struct ice_sched_node *next_vsi_node;
+
+ if (ice_sched_is_leaf_node_present(vsi_node)) {
+ ice_debug(pi->hw, ICE_DBG_SCHED, "VSI has leaf nodes in TC %d\n", tc);
+ return -EBUSY;
+ }
+ while (j < vsi_node->num_children) {
+ if (vsi_node->children[j]->owner == owner)
+ ice_free_sched_node(pi, vsi_node->children[j]);
+ else
+ j++;
+ }
+
+ next_vsi_node = ice_sched_find_next_vsi_node(vsi_node);
+
+ /* remove the VSI if it has no children */
+ if (!vsi_node->num_children)
+ ice_free_sched_node(pi, vsi_node);
+ else
+ all_vsi_nodes_removed = false;
+
+ vsi_node = next_vsi_node;
+ }
+
+ /* clean up aggregator related VSI info if any */
+ if (all_vsi_nodes_removed)
+ ice_sched_rm_agg_vsi_info(pi, vsi_handle);
+
+ return 0;
+}
+
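The inner child loop above only advances j when the current child is kept, because ice_free_sched_node() compacts the children array and a new element takes the freed slot. A standalone sketch of the same in-place removal pattern, with made-up owner values:

#include <stdio.h>

int main(void)
{
	/* hypothetical children, identified by an "owner" tag */
	int owner[] = { 1, 2, 1, 1, 2 };
	int num_children = 5, remove_owner = 1, j = 0;

	while (j < num_children) {
		if (owner[j] == remove_owner) {
			/* "free" the child: the array is compacted in place,
			 * so do not advance j - a new element now sits here
			 */
			for (int k = j; k < num_children - 1; k++)
				owner[k] = owner[k + 1];
			num_children--;
		} else {
			j++;
		}
	}

	printf("%d children left\n", num_children);	/* prints 2 */
	return 0;
}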
+/**
* ice_sched_rm_vsi_cfg - remove the VSI and its children nodes
* @pi: port information structure
* @vsi_handle: software VSI handle
@@ -2038,7 +2173,6 @@ ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
ice_for_each_traffic_class(i) {
struct ice_sched_node *vsi_node, *tc_node;
- u8 j = 0;
tc_node = ice_sched_get_tc_node(pi, i);
if (!tc_node)
@@ -2048,31 +2182,12 @@ ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
if (!vsi_node)
continue;
- if (ice_sched_is_leaf_node_present(vsi_node)) {
- ice_debug(pi->hw, ICE_DBG_SCHED, "VSI has leaf nodes in TC %d\n", i);
- status = -EBUSY;
+ status = ice_sched_rm_vsi_subtree(pi, vsi_node, owner, i);
+ if (status)
goto exit_sched_rm_vsi_cfg;
- }
- while (j < vsi_node->num_children) {
- if (vsi_node->children[j]->owner == owner) {
- ice_free_sched_node(pi, vsi_node->children[j]);
- /* reset the counter again since the num
- * children will be updated after node removal
- */
- j = 0;
- } else {
- j++;
- }
- }
- /* remove the VSI if it has no children */
- if (!vsi_node->num_children) {
- ice_free_sched_node(pi, vsi_node);
- vsi_ctx->sched.vsi_node[i] = NULL;
+ vsi_ctx->sched.vsi_node[i] = NULL;
- /* clean up aggregator related VSI info if any */
- ice_sched_rm_agg_vsi_info(pi, vsi_handle);
- }
if (owner == ICE_SCHED_NODE_OWNER_LAN)
vsi_ctx->sched.max_lanq[i] = 0;
else
diff --git a/drivers/net/ethernet/intel/ice/ice_sf_eth.c b/drivers/net/ethernet/intel/ice/ice_sf_eth.c
index 75d7147e1c01..1a2c94375ca7 100644
--- a/drivers/net/ethernet/intel/ice/ice_sf_eth.c
+++ b/drivers/net/ethernet/intel/ice/ice_sf_eth.c
@@ -5,8 +5,8 @@
#include "ice_txrx.h"
#include "ice_fltr.h"
#include "ice_sf_eth.h"
-#include "devlink/devlink_port.h"
#include "devlink/devlink.h"
+#include "devlink/port.h"
static const struct net_device_ops ice_sf_netdev_ops = {
.ndo_open = ice_open,
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index b83f99c01d91..0e4dc1a5cff0 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -36,6 +36,7 @@ static void ice_free_vf_entries(struct ice_pf *pf)
hash_for_each_safe(vfs->table, bkt, tmp, vf, entry) {
hash_del_rcu(&vf->entry);
+ ice_deinitialize_vf_entry(vf);
ice_put_vf(vf);
}
}
@@ -62,6 +63,7 @@ static void ice_free_vf_res(struct ice_vf *vf)
if (vf->lan_vsi_idx != ICE_NO_VSI) {
ice_vf_vsi_release(vf);
vf->num_mac = 0;
+ vf->num_mac_lldp = 0;
}
last_vector_idx = vf->first_vector_idx + vf->num_msix - 1;
@@ -123,27 +125,6 @@ static void ice_dis_vf_mappings(struct ice_vf *vf)
}
/**
- * ice_sriov_free_msix_res - Reset/free any used MSIX resources
- * @pf: pointer to the PF structure
- *
- * Since no MSIX entries are taken from the pf->irq_tracker then just clear
- * the pf->sriov_base_vector.
- *
- * Returns 0 on success, and -EINVAL on error.
- */
-static int ice_sriov_free_msix_res(struct ice_pf *pf)
-{
- if (!pf)
- return -EINVAL;
-
- bitmap_free(pf->sriov_irq_bm);
- pf->sriov_irq_size = 0;
- pf->sriov_base_vector = 0;
-
- return 0;
-}
-
-/**
* ice_free_vfs - Free all VFs
* @pf: pointer to the PF structure
*/
@@ -177,6 +158,7 @@ void ice_free_vfs(struct ice_pf *pf)
ice_eswitch_detach_vf(pf, vf);
ice_dis_vf_qs(vf);
+ ice_virt_free_irqs(pf, vf->first_vector_idx, vf->num_msix);
if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
/* disable VF qp mappings and set VF disable state */
@@ -193,16 +175,9 @@ void ice_free_vfs(struct ice_pf *pf)
wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
}
- /* clear malicious info since the VF is getting released */
- if (!ice_is_feature_supported(pf, ICE_F_MBX_LIMIT))
- list_del(&vf->mbx_info.list_entry);
-
mutex_unlock(&vf->cfg_lock);
}
- if (ice_sriov_free_msix_res(pf))
- dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n");
-
vfs->num_qps_per = 0;
ice_free_vf_entries(pf);
@@ -372,40 +347,6 @@ void ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
}
/**
- * ice_sriov_set_msix_res - Set any used MSIX resources
- * @pf: pointer to PF structure
- * @num_msix_needed: number of MSIX vectors needed for all SR-IOV VFs
- *
- * This function allows SR-IOV resources to be taken from the end of the PF's
- * allowed HW MSIX vectors so that the irq_tracker will not be affected. We
- * just set the pf->sriov_base_vector and return success.
- *
- * If there are not enough resources available, return an error. This should
- * always be caught by ice_set_per_vf_res().
- *
- * Return 0 on success, and -EINVAL when there are not enough MSIX vectors
- * in the PF's space available for SR-IOV.
- */
-static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
-{
- u16 total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
- int vectors_used = ice_get_max_used_msix_vector(pf);
- int sriov_base_vector;
-
- sriov_base_vector = total_vectors - num_msix_needed;
-
- /* make sure we only grab irq_tracker entries from the list end and
- * that we have enough available MSIX vectors
- */
- if (sriov_base_vector < vectors_used)
- return -EINVAL;
-
- pf->sriov_base_vector = sriov_base_vector;
-
- return 0;
-}
-
-/**
* ice_set_per_vf_res - check if vectors and queues are available
* @pf: pointer to the PF structure
* @num_vfs: the number of SR-IOV VFs being configured
@@ -429,11 +370,9 @@ static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
*/
static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs)
{
- int vectors_used = ice_get_max_used_msix_vector(pf);
u16 num_msix_per_vf, num_txq, num_rxq, avail_qs;
int msix_avail_per_vf, msix_avail_for_sriov;
struct device *dev = ice_pf_to_dev(pf);
- int err;
lockdep_assert_held(&pf->vfs.table_lock);
@@ -441,8 +380,7 @@ static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs)
return -EINVAL;
/* determine MSI-X resources per VF */
- msix_avail_for_sriov = pf->hw.func_caps.common_cap.num_msix_vectors -
- vectors_used;
+ msix_avail_for_sriov = pf->virt_irq_tracker.num_entries;
msix_avail_per_vf = msix_avail_for_sriov / num_vfs;
if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MED) {
num_msix_per_vf = ICE_NUM_VF_MSIX_MED;
@@ -481,13 +419,6 @@ static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs)
return -ENOSPC;
}
- err = ice_sriov_set_msix_res(pf, num_msix_per_vf * num_vfs);
- if (err) {
- dev_err(dev, "Unable to set MSI-X resources for %d VFs, err %d\n",
- num_vfs, err);
- return err;
- }
-
/* only allow equal Tx/Rx queue count (i.e. queue pairs) */
pf->vfs.num_qps_per = min_t(int, num_txq, num_rxq);
pf->vfs.num_msix_per = num_msix_per_vf;
@@ -498,52 +429,6 @@ static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs)
}
/**
- * ice_sriov_get_irqs - get irqs for SR-IOV usacase
- * @pf: pointer to PF structure
- * @needed: number of irqs to get
- *
- * This returns the first MSI-X vector index in PF space that is used by this
- * VF. This index is used when accessing PF relative registers such as
- * GLINT_VECT2FUNC and GLINT_DYN_CTL.
- * This will always be the OICR index in the AVF driver so any functionality
- * using vf->first_vector_idx for queue configuration_id: id of VF which will
- * use this irqs
- *
- * Only SRIOV specific vectors are tracked in sriov_irq_bm. SRIOV vectors are
- * allocated from the end of global irq index. First bit in sriov_irq_bm means
- * last irq index etc. It simplifies extension of SRIOV vectors.
- * They will be always located from sriov_base_vector to the last irq
- * index. While increasing/decreasing sriov_base_vector can be moved.
- */
-static int ice_sriov_get_irqs(struct ice_pf *pf, u16 needed)
-{
- int res = bitmap_find_next_zero_area(pf->sriov_irq_bm,
- pf->sriov_irq_size, 0, needed, 0);
- /* conversion from number in bitmap to global irq index */
- int index = pf->sriov_irq_size - res - needed;
-
- if (res >= pf->sriov_irq_size || index < pf->sriov_base_vector)
- return -ENOENT;
-
- bitmap_set(pf->sriov_irq_bm, res, needed);
- return index;
-}
-
-/**
- * ice_sriov_free_irqs - free irqs used by the VF
- * @pf: pointer to PF structure
- * @vf: pointer to VF structure
- */
-static void ice_sriov_free_irqs(struct ice_pf *pf, struct ice_vf *vf)
-{
- /* Move back from first vector index to first index in bitmap */
- int bm_i = pf->sriov_irq_size - vf->first_vector_idx - vf->num_msix;
-
- bitmap_clear(pf->sriov_irq_bm, bm_i, vf->num_msix);
- vf->first_vector_idx = 0;
-}
-
-/**
* ice_init_vf_vsi_res - initialize/setup VF VSI resources
* @vf: VF to initialize/setup the VSI for
*
@@ -556,7 +441,7 @@ static int ice_init_vf_vsi_res(struct ice_vf *vf)
struct ice_vsi *vsi;
int err;
- vf->first_vector_idx = ice_sriov_get_irqs(pf, vf->num_msix);
+ vf->first_vector_idx = ice_virt_get_irqs(pf, vf->num_msix);
if (vf->first_vector_idx < 0)
return -ENOMEM;
@@ -856,16 +741,10 @@ err_free_entries:
*/
static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
{
- int total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
int ret;
- pf->sriov_irq_bm = bitmap_zalloc(total_vectors, GFP_KERNEL);
- if (!pf->sriov_irq_bm)
- return -ENOMEM;
- pf->sriov_irq_size = total_vectors;
-
/* Disable global interrupt 0 so we don't try to handle the VFLR. */
wr32(hw, GLINT_DYN_CTL(pf->oicr_irq.index),
ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
@@ -918,7 +797,6 @@ err_unroll_intr:
/* rearm interrupts here */
ice_irq_dynamic_ena(hw, NULL, NULL);
clear_bit(ICE_OICR_INTR_DIS, pf->state);
- bitmap_free(pf->sriov_irq_bm);
return ret;
}
@@ -992,16 +870,7 @@ u32 ice_sriov_get_vf_total_msix(struct pci_dev *pdev)
{
struct ice_pf *pf = pci_get_drvdata(pdev);
- return pf->sriov_irq_size - ice_get_max_used_msix_vector(pf);
-}
-
-static int ice_sriov_move_base_vector(struct ice_pf *pf, int move)
-{
- if (pf->sriov_base_vector - move < ice_get_max_used_msix_vector(pf))
- return -ENOMEM;
-
- pf->sriov_base_vector -= move;
- return 0;
+ return pf->virt_irq_tracker.num_entries;
}
static void ice_sriov_remap_vectors(struct ice_pf *pf, u16 restricted_id)
@@ -1020,7 +889,8 @@ static void ice_sriov_remap_vectors(struct ice_pf *pf, u16 restricted_id)
continue;
ice_dis_vf_mappings(tmp_vf);
- ice_sriov_free_irqs(pf, tmp_vf);
+ ice_virt_free_irqs(pf, tmp_vf->first_vector_idx,
+ tmp_vf->num_msix);
vf_ids[to_remap] = tmp_vf->vf_id;
to_remap += 1;
@@ -1032,7 +902,7 @@ static void ice_sriov_remap_vectors(struct ice_pf *pf, u16 restricted_id)
continue;
tmp_vf->first_vector_idx =
- ice_sriov_get_irqs(pf, tmp_vf->num_msix);
+ ice_virt_get_irqs(pf, tmp_vf->num_msix);
/* there is no need to rebuild VSI as we are only changing the
* vector indexes not amount of MSI-X or queues
*/
@@ -1105,20 +975,15 @@ int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count)
prev_msix = vf->num_msix;
prev_queues = vf->num_vf_qs;
- if (ice_sriov_move_base_vector(pf, msix_vec_count - prev_msix)) {
- ice_put_vf(vf);
- return -ENOSPC;
- }
-
ice_dis_vf_mappings(vf);
- ice_sriov_free_irqs(pf, vf);
+ ice_virt_free_irqs(pf, vf->first_vector_idx, vf->num_msix);
/* Remap all VFs beside the one is now configured */
ice_sriov_remap_vectors(pf, vf->vf_id);
vf->num_msix = msix_vec_count;
vf->num_vf_qs = queues;
- vf->first_vector_idx = ice_sriov_get_irqs(pf, vf->num_msix);
+ vf->first_vector_idx = ice_virt_get_irqs(pf, vf->num_msix);
if (vf->first_vector_idx < 0)
goto unroll;
@@ -1147,7 +1012,8 @@ unroll:
vf->num_msix = prev_msix;
vf->num_vf_qs = prev_queues;
- vf->first_vector_idx = ice_sriov_get_irqs(pf, vf->num_msix);
+
+ vf->first_vector_idx = ice_virt_get_irqs(pf, vf->num_msix);
if (vf->first_vector_idx < 0) {
ice_put_vf(vf);
return -EINVAL;
@@ -1537,6 +1403,9 @@ int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
mutex_lock(&vf->cfg_lock);
+ while (!trusted && vf->num_mac_lldp)
+ ice_vf_update_mac_lldp_num(vf, ice_get_vf_vsi(vf), false);
+
vf->trusted = trusted;
ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n",
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 0e740342e294..9d9a7edd3618 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -3146,7 +3146,7 @@ ice_add_update_vsi_list(struct ice_hw *hw,
u16 vsi_handle_arr[2];
/* A rule already exists with the new VSI being added */
- if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
+ if (cur_fltr->vsi_handle == new_fltr->vsi_handle)
return -EEXIST;
vsi_handle_arr[0] = cur_fltr->vsi_handle;
@@ -4784,7 +4784,8 @@ ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
*/
if (found && recp[i].tun_type == rinfo->tun_type &&
recp[i].need_pass_l2 == rinfo->need_pass_l2 &&
- recp[i].allow_pass_l2 == rinfo->allow_pass_l2)
+ recp[i].allow_pass_l2 == rinfo->allow_pass_l2 &&
+ recp[i].priority == rinfo->priority)
return i; /* Return the recipe ID */
}
}
@@ -5977,7 +5978,7 @@ ice_adv_add_update_vsi_list(struct ice_hw *hw,
/* A rule already exists with the new VSI being added */
if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
- return 0;
+ return -EEXIST;
/* Update the previously created VSI list set with
* the new VSI ID passed in
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
index ea39b999a0d0..fb9ea7f8ef44 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
@@ -12,14 +12,11 @@
/**
* ice_tc_count_lkups - determine lookup count for switch filter
* @flags: TC-flower flags
- * @headers: Pointer to TC flower filter header structure
* @fltr: Pointer to outer TC filter structure
*
- * Determine lookup count based on TC flower input for switch filter.
+ * Return: lookup count based on TC flower input for a switch filter.
*/
-static int
-ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
- struct ice_tc_flower_fltr *fltr)
+static int ice_tc_count_lkups(u32 flags, struct ice_tc_flower_fltr *fltr)
{
int lkups_cnt = 1; /* 0th lookup is metadata */
@@ -684,26 +681,26 @@ static int ice_tc_setup_action(struct net_device *filter_dev,
fltr->action.fltr_act = action;
if (ice_is_port_repr_netdev(filter_dev) &&
- ice_is_port_repr_netdev(target_dev)) {
+ ice_is_port_repr_netdev(target_dev) &&
+ fltr->direction == ICE_ESWITCH_FLTR_EGRESS) {
repr = ice_netdev_to_repr(target_dev);
fltr->dest_vsi = repr->src_vsi;
- fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
} else if (ice_is_port_repr_netdev(filter_dev) &&
- ice_tc_is_dev_uplink(target_dev)) {
+ ice_tc_is_dev_uplink(target_dev) &&
+ fltr->direction == ICE_ESWITCH_FLTR_EGRESS) {
repr = ice_netdev_to_repr(filter_dev);
fltr->dest_vsi = repr->src_vsi->back->eswitch.uplink_vsi;
- fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
} else if (ice_tc_is_dev_uplink(filter_dev) &&
- ice_is_port_repr_netdev(target_dev)) {
+ ice_is_port_repr_netdev(target_dev) &&
+ fltr->direction == ICE_ESWITCH_FLTR_INGRESS) {
repr = ice_netdev_to_repr(target_dev);
fltr->dest_vsi = repr->src_vsi;
- fltr->direction = ICE_ESWITCH_FLTR_INGRESS;
} else {
NL_SET_ERR_MSG_MOD(fltr->extack,
- "Unsupported netdevice in switchdev mode");
+ "The action is not supported for this netdevice");
return -EINVAL;
}
@@ -716,13 +713,11 @@ ice_tc_setup_drop_action(struct net_device *filter_dev,
{
fltr->action.fltr_act = ICE_DROP_PACKET;
- if (ice_is_port_repr_netdev(filter_dev)) {
- fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
- } else if (ice_tc_is_dev_uplink(filter_dev)) {
- fltr->direction = ICE_ESWITCH_FLTR_INGRESS;
- } else {
+ if (!ice_tc_is_dev_uplink(filter_dev) &&
+ !(ice_is_port_repr_netdev(filter_dev) &&
+ fltr->direction == ICE_ESWITCH_FLTR_INGRESS)) {
NL_SET_ERR_MSG_MOD(fltr->extack,
- "Unsupported netdevice in switchdev mode");
+ "The action is not supported for this netdevice");
return -EINVAL;
}
@@ -767,10 +762,157 @@ static int ice_eswitch_tc_parse_action(struct net_device *filter_dev,
return 0;
}
+static bool ice_is_fltr_lldp(struct ice_tc_flower_fltr *fltr)
+{
+ return fltr->outer_headers.l2_key.n_proto == htons(ETH_P_LLDP);
+}
+
+static bool ice_is_fltr_pf_tx_lldp(struct ice_tc_flower_fltr *fltr)
+{
+ struct ice_vsi *vsi = fltr->src_vsi, *uplink;
+
+ if (!ice_is_switchdev_running(vsi->back))
+ return false;
+
+ uplink = vsi->back->eswitch.uplink_vsi;
+ return vsi == uplink && fltr->action.fltr_act == ICE_DROP_PACKET &&
+ ice_is_fltr_lldp(fltr) &&
+ fltr->direction == ICE_ESWITCH_FLTR_EGRESS &&
+ fltr->flags == ICE_TC_FLWR_FIELD_ETH_TYPE_ID;
+}
+
+static bool ice_is_fltr_vf_tx_lldp(struct ice_tc_flower_fltr *fltr)
+{
+ struct ice_vsi *vsi = fltr->src_vsi, *uplink;
+
+ uplink = vsi->back->eswitch.uplink_vsi;
+ return fltr->src_vsi->type == ICE_VSI_VF && ice_is_fltr_lldp(fltr) &&
+ fltr->direction == ICE_ESWITCH_FLTR_EGRESS &&
+ fltr->dest_vsi == uplink;
+}
+
+static struct ice_tc_flower_fltr *
+ice_find_pf_tx_lldp_fltr(struct ice_pf *pf)
+{
+ struct ice_tc_flower_fltr *fltr;
+
+ hlist_for_each_entry(fltr, &pf->tc_flower_fltr_list, tc_flower_node)
+ if (ice_is_fltr_pf_tx_lldp(fltr))
+ return fltr;
+
+ return NULL;
+}
+
+static bool ice_any_vf_lldp_tx_ena(struct ice_pf *pf)
+{
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ ice_for_each_vf(pf, bkt, vf)
+ if (vf->lldp_tx_ena)
+ return true;
+
+ return false;
+}
+
+int ice_pass_vf_tx_lldp(struct ice_vsi *vsi, bool deinit)
+{
+ struct ice_rule_query_data remove_entry = {
+ .rid = vsi->vf->lldp_recipe_id,
+ .rule_id = vsi->vf->lldp_rule_id,
+ .vsi_handle = vsi->idx,
+ };
+ struct ice_pf *pf = vsi->back;
+ int err;
+
+ if (vsi->vf->lldp_tx_ena)
+ return 0;
+
+ if (!deinit && !ice_find_pf_tx_lldp_fltr(vsi->back))
+ return -EINVAL;
+
+ if (!deinit && ice_any_vf_lldp_tx_ena(pf))
+ return -EINVAL;
+
+ err = ice_rem_adv_rule_by_id(&pf->hw, &remove_entry);
+ if (!err)
+ vsi->vf->lldp_tx_ena = true;
+
+ return err;
+}
+
+int ice_drop_vf_tx_lldp(struct ice_vsi *vsi, bool init)
+{
+ struct ice_rule_query_data rule_added;
+ struct ice_adv_rule_info rinfo = {
+ .priority = 7,
+ .src_vsi = vsi->idx,
+ .sw_act = {
+ .src = vsi->idx,
+ .flag = ICE_FLTR_TX,
+ .fltr_act = ICE_DROP_PACKET,
+ .vsi_handle = vsi->idx,
+ },
+ .flags_info.act_valid = true,
+ };
+ struct ice_adv_lkup_elem list[3];
+ struct ice_pf *pf = vsi->back;
+ int err;
+
+ if (!init && !vsi->vf->lldp_tx_ena)
+ return 0;
+
+ memset(list, 0, sizeof(list));
+ ice_rule_add_direction_metadata(&list[0]);
+ ice_rule_add_src_vsi_metadata(&list[1]);
+ list[2].type = ICE_ETYPE_OL;
+ list[2].h_u.ethertype.ethtype_id = htons(ETH_P_LLDP);
+ list[2].m_u.ethertype.ethtype_id = htons(0xFFFF);
+
+ err = ice_add_adv_rule(&pf->hw, list, ARRAY_SIZE(list), &rinfo,
+ &rule_added);
+ if (err) {
+ dev_err(&pf->pdev->dev,
+ "Failed to add an LLDP rule to VSI 0x%X: %d\n",
+ vsi->idx, err);
+ } else {
+ vsi->vf->lldp_recipe_id = rule_added.rid;
+ vsi->vf->lldp_rule_id = rule_added.rule_id;
+ vsi->vf->lldp_tx_ena = false;
+ }
+
+ return err;
+}
+
+static void ice_handle_add_pf_lldp_drop_rule(struct ice_vsi *vsi)
+{
+ struct ice_tc_flower_fltr *fltr;
+ struct ice_pf *pf = vsi->back;
+
+ hlist_for_each_entry(fltr, &pf->tc_flower_fltr_list, tc_flower_node) {
+ if (!ice_is_fltr_vf_tx_lldp(fltr))
+ continue;
+ ice_pass_vf_tx_lldp(fltr->src_vsi, true);
+ break;
+ }
+}
+
+static void ice_handle_del_pf_lldp_drop_rule(struct ice_pf *pf)
+{
+ int i;
+
+ /* Make the VF LLDP fwd to uplink rule dormant */
+ ice_for_each_vsi(pf, i) {
+ struct ice_vsi *vf_vsi = pf->vsi[i];
+
+ if (vf_vsi && vf_vsi->type == ICE_VSI_VF)
+ ice_drop_vf_tx_lldp(vf_vsi, false);
+ }
+}
+
static int
ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
{
- struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers;
struct ice_adv_rule_info rule_info = { 0 };
struct ice_rule_query_data rule_added;
struct ice_hw *hw = &vsi->back->hw;
@@ -785,7 +927,10 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
return -EOPNOTSUPP;
}
- lkups_cnt = ice_tc_count_lkups(flags, headers, fltr);
+ if (ice_is_fltr_vf_tx_lldp(fltr))
+ return ice_pass_vf_tx_lldp(vsi, false);
+
+ lkups_cnt = ice_tc_count_lkups(flags, fltr);
list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
if (!list)
return -ENOMEM;
@@ -814,6 +959,11 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
rule_info.sw_act.src = hw->pf_id;
rule_info.flags_info.act = ICE_SINGLE_ACT_LB_ENABLE;
} else if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS &&
+ !fltr->dest_vsi && vsi == vsi->back->eswitch.uplink_vsi) {
+ /* PF to Uplink */
+ rule_info.sw_act.flag |= ICE_FLTR_TX;
+ rule_info.sw_act.src = vsi->idx;
+ } else if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS &&
fltr->dest_vsi == vsi->back->eswitch.uplink_vsi) {
/* VF to Uplink */
rule_info.sw_act.flag |= ICE_FLTR_TX;
@@ -846,11 +996,17 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because it already exist");
ret = -EINVAL;
goto exit;
+ } else if (ret == -ENOSPC) {
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter: insufficient space available.");
+ goto exit;
} else if (ret) {
NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter due to error");
goto exit;
}
+ if (ice_is_fltr_pf_tx_lldp(fltr))
+ ice_handle_add_pf_lldp_drop_rule(vsi);
+
/* store the output params, which are needed later for removing
* advanced switch filter
*/
@@ -985,7 +1141,6 @@ static int
ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
struct ice_tc_flower_fltr *tc_fltr)
{
- struct ice_tc_flower_lyr_2_4_hdrs *headers = &tc_fltr->outer_headers;
struct ice_adv_rule_info rule_info = {0};
struct ice_rule_query_data rule_added;
struct ice_adv_lkup_elem *list;
@@ -1021,7 +1176,7 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
return PTR_ERR(dest_vsi);
}
- lkups_cnt = ice_tc_count_lkups(flags, headers, tc_fltr);
+ lkups_cnt = ice_tc_count_lkups(flags, tc_fltr);
list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
if (!list)
return -ENOMEM;
@@ -1056,8 +1211,13 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
tc_fltr->action.fwd.q.hw_queue, lkups_cnt);
break;
case ICE_DROP_PACKET:
- rule_info.sw_act.flag |= ICE_FLTR_RX;
- rule_info.sw_act.src = hw->pf_id;
+ if (tc_fltr->direction == ICE_ESWITCH_FLTR_EGRESS) {
+ rule_info.sw_act.flag |= ICE_FLTR_TX;
+ rule_info.sw_act.src = vsi->idx;
+ } else {
+ rule_info.sw_act.flag |= ICE_FLTR_RX;
+ rule_info.sw_act.src = hw->pf_id;
+ }
rule_info.priority = ICE_SWITCH_FLTR_PRIO_VSI;
break;
default:
@@ -1071,6 +1231,10 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
"Unable to add filter because it already exist");
ret = -EINVAL;
goto exit;
+ } else if (ret == -ENOSPC) {
+ NL_SET_ERR_MSG_MOD(tc_fltr->extack,
+ "Unable to add filter: insufficient space available.");
+ goto exit;
} else if (ret) {
NL_SET_ERR_MSG_MOD(tc_fltr->extack,
"Unable to add filter due to error");
@@ -1463,11 +1627,16 @@ ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule,
* @filter_dev: Pointer to device on which filter is being added
* @f: Pointer to struct flow_cls_offload
* @fltr: Pointer to filter structure
+ * @ingress: true if the rule is added to an ingress block
+ *
+ * Return: 0 if the flower was parsed successfully, -EINVAL if the flower
+ * cannot be parsed, -EOPNOTSUPP if such a filter cannot be configured
+ * for the given VSI.
*/
static int
ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
struct flow_cls_offload *f,
- struct ice_tc_flower_fltr *fltr)
+ struct ice_tc_flower_fltr *fltr, bool ingress)
{
struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers;
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
@@ -1551,6 +1720,20 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
fltr->flags |= ICE_TC_FLWR_FIELD_ETH_TYPE_ID;
}
+ if (!ingress) {
+ bool switchdev =
+ ice_is_eswitch_mode_switchdev(vsi->back);
+
+ if (switchdev != (n_proto_key == ETH_P_LLDP)) {
+ NL_SET_ERR_MSG_FMT_MOD(fltr->extack,
+ "%sLLDP filtering is not supported on egress in %s mode",
+ switchdev ? "Non-" : "",
+ switchdev ? "switchdev" :
+ "legacy");
+ return -EOPNOTSUPP;
+ }
+ }
+
headers->l2_key.n_proto = cpu_to_be16(n_proto_key);
headers->l2_mask.n_proto = cpu_to_be16(n_proto_mask);
headers->l3_key.ip_proto = match.key->ip_proto;
@@ -1726,6 +1909,14 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
return -EINVAL;
}
}
+
+ /* Ingress filter on representor results in an egress filter in HW
+ * and vice versa
+ */
+ ingress = ice_is_port_repr_netdev(filter_dev) ? !ingress : ingress;
+ fltr->direction = ingress ? ICE_ESWITCH_FLTR_INGRESS :
+ ICE_ESWITCH_FLTR_EGRESS;
+
return 0;
}
@@ -1939,6 +2130,12 @@ static int ice_del_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
struct ice_pf *pf = vsi->back;
int err;
+ if (ice_is_fltr_pf_tx_lldp(fltr))
+ ice_handle_del_pf_lldp_drop_rule(pf);
+
+ if (ice_is_fltr_vf_tx_lldp(fltr))
+ return ice_drop_vf_tx_lldp(vsi, false);
+
rule_rem.rid = fltr->rid;
rule_rem.rule_id = fltr->rule_id;
rule_rem.vsi_handle = fltr->dest_vsi_handle;
@@ -1975,14 +2172,18 @@ static int ice_del_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
* @vsi: Pointer to VSI
* @f: Pointer to flower offload structure
* @__fltr: Pointer to struct ice_tc_flower_fltr
+ * @ingress: true if the rule is added to an ingress block
*
* This function parses TC-flower input fields, parses action,
* and adds a filter.
+ *
+ * Return: 0 if the filter was successfully added,
+ * negative error code otherwise.
*/
static int
ice_add_tc_fltr(struct net_device *netdev, struct ice_vsi *vsi,
struct flow_cls_offload *f,
- struct ice_tc_flower_fltr **__fltr)
+ struct ice_tc_flower_fltr **__fltr, bool ingress)
{
struct ice_tc_flower_fltr *fltr;
int err;
@@ -1999,7 +2200,7 @@ ice_add_tc_fltr(struct net_device *netdev, struct ice_vsi *vsi,
fltr->src_vsi = vsi;
INIT_HLIST_NODE(&fltr->tc_flower_node);
- err = ice_parse_cls_flower(netdev, vsi, f, fltr);
+ err = ice_parse_cls_flower(netdev, vsi, f, fltr, ingress);
if (err < 0)
goto err;
@@ -2042,10 +2243,13 @@ ice_find_tc_flower_fltr(struct ice_pf *pf, unsigned long cookie)
* @netdev: Pointer to filter device
* @vsi: Pointer to VSI
* @cls_flower: Pointer to flower offload structure
+ * @ingress: true if the rule is added to an ingress block
+ *
+ * Return: 0 if the flower was successfully added,
+ * negative error code otherwise.
*/
-int
-ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi,
- struct flow_cls_offload *cls_flower)
+int ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi,
+ struct flow_cls_offload *cls_flower, bool ingress)
{
struct netlink_ext_ack *extack = cls_flower->common.extack;
struct net_device *vsi_netdev = vsi->netdev;
@@ -2080,7 +2284,7 @@ ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi,
}
/* prep and add TC-flower filter in HW */
- err = ice_add_tc_fltr(netdev, vsi, cls_flower, &fltr);
+ err = ice_add_tc_fltr(netdev, vsi, cls_flower, &fltr, ingress);
if (err)
return err;
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.h b/drivers/net/ethernet/intel/ice/ice_tc_lib.h
index d84f153517ec..8a3ab2f22af9 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.h
@@ -211,13 +211,14 @@ static inline int ice_chnl_dmac_fltr_cnt(struct ice_pf *pf)
}
struct ice_vsi *ice_locate_vsi_using_queue(struct ice_vsi *vsi, int queue);
-int
-ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi,
- struct flow_cls_offload *cls_flower);
-int
-ice_del_cls_flower(struct ice_vsi *vsi, struct flow_cls_offload *cls_flower);
+int ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi,
+ struct flow_cls_offload *cls_flower, bool ingress);
+int ice_del_cls_flower(struct ice_vsi *vsi,
+ struct flow_cls_offload *cls_flower);
void ice_replay_tc_fltrs(struct ice_pf *pf);
bool ice_is_tunnel_supported(struct net_device *dev);
+int ice_drop_vf_tx_lldp(struct ice_vsi *vsi, bool init);
+int ice_pass_vf_tx_lldp(struct ice_vsi *vsi, bool deinit);
static inline bool ice_is_forward_action(enum ice_sw_fwd_act_type fltr_act)
{
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 5d2d7736fd5f..0e5107fe62ad 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -527,15 +527,14 @@ err:
* @xdp: xdp_buff used as input to the XDP program
* @xdp_prog: XDP program to run
* @xdp_ring: ring to be used for XDP_TX action
- * @rx_buf: Rx buffer to store the XDP action
* @eop_desc: Last descriptor in packet to read metadata from
*
* Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
*/
-static void
+static u32
ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring,
- struct ice_rx_buf *rx_buf, union ice_32b_rx_flex_desc *eop_desc)
+ union ice_32b_rx_flex_desc *eop_desc)
{
unsigned int ret = ICE_XDP_PASS;
u32 act;
@@ -574,7 +573,7 @@ out_failure:
ret = ICE_XDP_CONSUMED;
}
exit:
- ice_set_rx_bufs_act(xdp, rx_ring, ret);
+ return ret;
}
/**
@@ -860,10 +859,8 @@ ice_add_xdp_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
xdp_buff_set_frags_flag(xdp);
}
- if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) {
- ice_set_rx_bufs_act(xdp, rx_ring, ICE_XDP_CONSUMED);
+ if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS))
return -ENOMEM;
- }
__skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, rx_buf->page,
rx_buf->page_offset, size);
@@ -924,7 +921,6 @@ ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size,
struct ice_rx_buf *rx_buf;
rx_buf = &rx_ring->rx_buf[ntc];
- rx_buf->pgcnt = page_count(rx_buf->page);
prefetchw(rx_buf->page);
if (!size)
@@ -941,6 +937,31 @@ ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size,
}
/**
+ * ice_get_pgcnts - grab page_count() for gathered fragments
+ * @rx_ring: Rx descriptor ring to store the page counts on
+ *
+ * This function is intended to be called right before running the XDP
+ * program so that the page recycling mechanism can make a correct
+ * decision regarding the underlying pages; this is needed because the
+ * XDP program may change the refcount of a page.
+ */
+static void ice_get_pgcnts(struct ice_rx_ring *rx_ring)
+{
+ u32 nr_frags = rx_ring->nr_frags + 1;
+ u32 idx = rx_ring->first_desc;
+ struct ice_rx_buf *rx_buf;
+ u32 cnt = rx_ring->count;
+
+ for (int i = 0; i < nr_frags; i++) {
+ rx_buf = &rx_ring->rx_buf[idx];
+ rx_buf->pgcnt = page_count(rx_buf->page);
+
+ if (++idx == cnt)
+ idx = 0;
+ }
+}
+
+/**
* ice_build_skb - Build skb around an existing buffer
* @rx_ring: Rx descriptor ring to transact packets on
* @xdp: xdp_buff pointing to the data
@@ -1051,12 +1072,12 @@ ice_construct_skb(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
rx_buf->page_offset + headlen, size,
xdp->frame_sz);
} else {
- /* buffer is unused, change the act that should be taken later
- * on; data was copied onto skb's linear part so there's no
+ /* buffer is unused, restore biased page count in Rx buffer;
+ * data was copied onto skb's linear part so there's no
* need for adjusting page offset and we can reuse this buffer
* as-is
*/
- rx_buf->act = ICE_SKB_CONSUMED;
+ rx_buf->pagecnt_bias++;
}
if (unlikely(xdp_buff_has_frags(xdp))) {
@@ -1104,6 +1125,65 @@ ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf)
}
/**
+ * ice_put_rx_mbuf - ice_put_rx_buf() caller, for all frame frags
+ * @rx_ring: Rx ring with all the auxiliary data
+ * @xdp: XDP buffer carrying linear + frags part
+ * @xdp_xmit: XDP_TX/XDP_REDIRECT verdict storage
+ * @ntc: the current next_to_clean value, to be stored in rx_ring
+ * @verdict: return code from XDP program execution
+ *
+ * Walk through the gathered fragments and satisfy the internal page
+ * recycling mechanism; the action taken on each buffer depends on the
+ * verdict returned by the XDP program.
+ */
+static void ice_put_rx_mbuf(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
+ u32 *xdp_xmit, u32 ntc, u32 verdict)
+{
+ u32 nr_frags = rx_ring->nr_frags + 1;
+ u32 idx = rx_ring->first_desc;
+ u32 cnt = rx_ring->count;
+ u32 post_xdp_frags = 1;
+ struct ice_rx_buf *buf;
+ int i;
+
+ if (unlikely(xdp_buff_has_frags(xdp)))
+ post_xdp_frags += xdp_get_shared_info_from_buff(xdp)->nr_frags;
+
+ for (i = 0; i < post_xdp_frags; i++) {
+ buf = &rx_ring->rx_buf[idx];
+
+ if (verdict & (ICE_XDP_TX | ICE_XDP_REDIR)) {
+ ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
+ *xdp_xmit |= verdict;
+ } else if (verdict & ICE_XDP_CONSUMED) {
+ buf->pagecnt_bias++;
+ } else if (verdict == ICE_XDP_PASS) {
+ ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
+ }
+
+ ice_put_rx_buf(rx_ring, buf);
+
+ if (++idx == cnt)
+ idx = 0;
+ }
+ /* handle buffers that represented frags released by the XDP prog;
+ * for these keep pagecnt_bias as-is, since the refcount of the
+ * struct page was already decremented within the XDP prog and the
+ * biased refcnt does not have to be increased
+ */
+ for (; i < nr_frags; i++) {
+ buf = &rx_ring->rx_buf[idx];
+ ice_put_rx_buf(rx_ring, buf);
+ if (++idx == cnt)
+ idx = 0;
+ }
+
+ xdp->data = NULL;
+ rx_ring->first_desc = ntc;
+ rx_ring->nr_frags = 0;
+}
+
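ice_get_pgcnts() and ice_put_rx_mbuf() both walk the buffers of one frame starting at rx_ring->first_desc and wrap the index when it reaches the ring size. A minimal userspace sketch of that wrap-around walk, with a made-up ring geometry:

#include <stdio.h>

int main(void)
{
	/* made-up ring geometry: 8 descriptors, the frame spans 3 buffers
	 * and starts near the end of the ring
	 */
	unsigned int cnt = 8, first_desc = 6, nr_frags = 3;
	unsigned int idx = first_desc;

	for (unsigned int i = 0; i < nr_frags; i++) {
		printf("visit rx_buf[%u]\n", idx);	/* 6, 7, 0 */

		if (++idx == cnt)	/* wrap at the end of the ring */
			idx = 0;
	}
	return 0;
}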
+/**
* ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
* @rx_ring: Rx descriptor ring to transact packets on
* @budget: Total limit on number of packets to process
@@ -1120,15 +1200,13 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
unsigned int offset = rx_ring->rx_offset;
struct xdp_buff *xdp = &rx_ring->xdp;
- u32 cached_ntc = rx_ring->first_desc;
struct ice_tx_ring *xdp_ring = NULL;
struct bpf_prog *xdp_prog = NULL;
u32 ntc = rx_ring->next_to_clean;
+ u32 cached_ntu, xdp_verdict;
u32 cnt = rx_ring->count;
u32 xdp_xmit = 0;
- u32 cached_ntu;
bool failure;
- u32 first;
xdp_prog = READ_ONCE(rx_ring->xdp_prog);
if (xdp_prog) {
@@ -1190,6 +1268,7 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
xdp_prepare_buff(xdp, hard_start, offset, size, !!offset);
xdp_buff_clear_frags_flag(xdp);
} else if (ice_add_xdp_frag(rx_ring, xdp, rx_buf, size)) {
+ ice_put_rx_mbuf(rx_ring, xdp, NULL, ntc, ICE_XDP_CONSUMED);
break;
}
if (++ntc == cnt)
@@ -1199,15 +1278,15 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
if (ice_is_non_eop(rx_ring, rx_desc))
continue;
- ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_buf, rx_desc);
- if (rx_buf->act == ICE_XDP_PASS)
+ ice_get_pgcnts(rx_ring);
+ xdp_verdict = ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_desc);
+ if (xdp_verdict == ICE_XDP_PASS)
goto construct_skb;
total_rx_bytes += xdp_get_buff_len(xdp);
total_rx_pkts++;
- xdp->data = NULL;
- rx_ring->first_desc = ntc;
- rx_ring->nr_frags = 0;
+ ice_put_rx_mbuf(rx_ring, xdp, &xdp_xmit, ntc, xdp_verdict);
+
continue;
construct_skb:
if (likely(ice_ring_uses_build_skb(rx_ring)))
@@ -1217,18 +1296,12 @@ construct_skb:
/* exit if we failed to retrieve a buffer */
if (!skb) {
rx_ring->ring_stats->rx_stats.alloc_page_failed++;
- rx_buf->act = ICE_XDP_CONSUMED;
- if (unlikely(xdp_buff_has_frags(xdp)))
- ice_set_rx_bufs_act(xdp, rx_ring,
- ICE_XDP_CONSUMED);
- xdp->data = NULL;
- rx_ring->first_desc = ntc;
- rx_ring->nr_frags = 0;
- break;
+ xdp_verdict = ICE_XDP_CONSUMED;
}
- xdp->data = NULL;
- rx_ring->first_desc = ntc;
- rx_ring->nr_frags = 0;
+ ice_put_rx_mbuf(rx_ring, xdp, &xdp_xmit, ntc, xdp_verdict);
+
+ if (!skb)
+ break;
stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
if (unlikely(ice_test_staterr(rx_desc->wb.status_error0,
@@ -1257,23 +1330,6 @@ construct_skb:
total_rx_pkts++;
}
- first = rx_ring->first_desc;
- while (cached_ntc != first) {
- struct ice_rx_buf *buf = &rx_ring->rx_buf[cached_ntc];
-
- if (buf->act & (ICE_XDP_TX | ICE_XDP_REDIR)) {
- ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
- xdp_xmit |= buf->act;
- } else if (buf->act & ICE_XDP_CONSUMED) {
- buf->pagecnt_bias++;
- } else if (buf->act == ICE_XDP_PASS) {
- ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
- }
-
- ice_put_rx_buf(rx_ring, buf);
- if (++cached_ntc >= cnt)
- cached_ntc = 0;
- }
rx_ring->next_to_clean = ntc;
/* return up to cleaned_count buffers to hardware */
failure = ice_alloc_rx_bufs(rx_ring, ICE_RX_DESC_UNUSED(rx_ring));
@@ -1753,6 +1809,7 @@ dma_error:
static
int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
{
+ const struct ice_tx_ring *tx_ring = off->tx_ring;
u32 l4_len = 0, l3_len = 0, l2_len = 0;
struct sk_buff *skb = first->skb;
union {
@@ -1902,6 +1959,30 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
l3_len = l4.hdr - ip.hdr;
offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S;
+ if ((tx_ring->netdev->features & NETIF_F_HW_CSUM) &&
+ !(first->tx_flags & ICE_TX_FLAGS_TSO) &&
+ !skb_csum_is_sctp(skb)) {
+ /* Set GCS */
+ u16 csum_start = (skb->csum_start - skb->mac_header) / 2;
+ u16 csum_offset = skb->csum_offset / 2;
+ u16 gcs_params;
+
+ gcs_params = FIELD_PREP(ICE_TX_GCS_DESC_START_M, csum_start) |
+ FIELD_PREP(ICE_TX_GCS_DESC_OFFSET_M, csum_offset) |
+ FIELD_PREP(ICE_TX_GCS_DESC_TYPE_M,
+ ICE_TX_GCS_DESC_CSUM_PSH);
+
+ /* Unlike legacy HW checksums, GCS requires a context
+ * descriptor.
+ */
+ off->cd_qw1 |= ICE_TX_DESC_DTYPE_CTX;
+ off->cd_gcs_params = gcs_params;
+ /* Fill out CSO info in data descriptors */
+ off->td_offset |= offset;
+ off->td_cmd |= cmd;
+ return 1;
+ }
+
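The GCS path above packs the checksum start and offset, expressed in 16-bit words (hence the division by two), together with a type field into the 16-bit cd_gcs_params value via FIELD_PREP(). The sketch below mirrors that packing in userspace; the GCS_* field positions are assumptions chosen only for illustration and are not the real ICE_TX_GCS_DESC_* layout.

#include <stdio.h>

#define GENMASK(h, l)		(((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(mask, val)	(((val) << __builtin_ctz(mask)) & (mask))

/* assumed field layout, for illustration only */
#define GCS_START_M	GENMASK(7, 0)
#define GCS_OFFSET_M	GENMASK(13, 8)
#define GCS_TYPE_M	GENMASK(15, 14)
#define GCS_CSUM_PSH	1u

int main(void)
{
	/* e.g. UDP over IPv4: checksum coverage starts 34 bytes into the
	 * frame (14 B Ethernet + 20 B IP), checksum field 6 bytes into L4
	 */
	unsigned int csum_start = 34 / 2, csum_offset = 6 / 2;
	unsigned int gcs_params;

	gcs_params = FIELD_PREP(GCS_START_M, csum_start) |
		     FIELD_PREP(GCS_OFFSET_M, csum_offset) |
		     FIELD_PREP(GCS_TYPE_M, GCS_CSUM_PSH);

	printf("cd_gcs_params = 0x%04x\n", gcs_params);
	return 0;
}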
/* Enable L4 checksum offloads */
switch (l4_proto) {
case IPPROTO_TCP:
@@ -2359,17 +2440,20 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
/* allow CONTROL frames egress from main VSI if FW LLDP disabled */
eth = (struct ethhdr *)skb_mac_header(skb);
- if (unlikely((skb->priority == TC_PRIO_CONTROL ||
- eth->h_proto == htons(ETH_P_LLDP)) &&
- vsi->type == ICE_VSI_PF &&
- vsi->port_info->qos_cfg.is_sw_lldp))
+
+ if ((ice_is_switchdev_running(vsi->back) ||
+ ice_lag_is_switchdev_running(vsi->back)) &&
+ vsi->type != ICE_VSI_SF)
+ ice_eswitch_set_target_vsi(skb, &offload);
+ else if (unlikely((skb->priority == TC_PRIO_CONTROL ||
+ eth->h_proto == htons(ETH_P_LLDP)) &&
+ vsi->type == ICE_VSI_PF &&
+ vsi->port_info->qos_cfg.is_sw_lldp))
offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
ICE_TX_CTX_DESC_SWTCH_UPLINK <<
ICE_TXD_CTX_QW1_CMD_S);
ice_tstamp(tx_ring, skb, first, &offload);
- if (ice_is_switchdev_running(vsi->back) && vsi->type != ICE_VSI_SF)
- ice_eswitch_set_target_vsi(skb, &offload);
if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) {
struct ice_tx_ctx_desc *cdesc;
@@ -2383,7 +2467,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
/* setup context descriptor */
cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params);
cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2);
- cdesc->rsvd = cpu_to_le16(0);
+ cdesc->gcs = cpu_to_le16(offload.cd_gcs_params);
cdesc->qw1 = cpu_to_le64(offload.cd_qw1);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index cb347c852ba9..a4b1e9514632 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -193,6 +193,7 @@ struct ice_tx_offload_params {
u32 td_l2tag1;
u32 cd_tunnel_params;
u16 cd_l2tag2;
+ u16 cd_gcs_params;
u8 header_len;
};
@@ -201,7 +202,6 @@ struct ice_rx_buf {
struct page *page;
unsigned int page_offset;
unsigned int pgcnt;
- unsigned int act;
unsigned int pagecnt_bias;
};
@@ -367,6 +367,7 @@ struct ice_rx_ring {
#define ICE_RX_FLAGS_RING_BUILD_SKB BIT(1)
#define ICE_RX_FLAGS_CRC_STRIP_DIS BIT(2)
#define ICE_RX_FLAGS_MULTIDEV BIT(3)
+#define ICE_RX_FLAGS_RING_GCS BIT(4)
u8 flags;
/* CL5 - 5th cacheline starts here */
struct xdp_rxq_info xdp_rxq;
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
index 2719f0e20933..45cfaabc41cb 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
@@ -81,6 +81,23 @@ ice_rx_hash_to_skb(const struct ice_rx_ring *rx_ring,
}
/**
+ * ice_rx_gcs - Set generic checksum in skb
+ * @skb: skb currently being received and modified
+ * @rx_desc: receive descriptor
+ */
+static void ice_rx_gcs(struct sk_buff *skb,
+ const union ice_32b_rx_flex_desc *rx_desc)
+{
+ const struct ice_32b_rx_flex_desc_nic *desc;
+ u16 csum;
+
+ desc = (struct ice_32b_rx_flex_desc_nic *)rx_desc;
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ csum = (__force u16)desc->raw_csum;
+ skb->csum = csum_unfold((__force __sum16)swab16(csum));
+}
+
+/**
* ice_rx_csum - Indicate in skb if checksum is good
* @ring: the ring we care about
* @skb: skb currently being received and modified
@@ -107,6 +124,15 @@ ice_rx_csum(struct ice_rx_ring *ring, struct sk_buff *skb,
rx_status0 = le16_to_cpu(rx_desc->wb.status_error0);
rx_status1 = le16_to_cpu(rx_desc->wb.status_error1);
+ if ((ring->flags & ICE_RX_FLAGS_RING_GCS) &&
+ rx_desc->wb.rxdid == ICE_RXDID_FLEX_NIC &&
+ (decoded.inner_prot == LIBETH_RX_PT_INNER_TCP ||
+ decoded.inner_prot == LIBETH_RX_PT_INNER_UDP ||
+ decoded.inner_prot == LIBETH_RX_PT_INNER_ICMP)) {
+ ice_rx_gcs(skb, rx_desc);
+ return;
+ }
+
/* check if HW has decoded the packet and checksum */
if (!(rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))
return;
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
index 79f960c6680d..6cf32b404127 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
@@ -6,49 +6,6 @@
#include "ice.h"
/**
- * ice_set_rx_bufs_act - propagate Rx buffer action to frags
- * @xdp: XDP buffer representing frame (linear and frags part)
- * @rx_ring: Rx ring struct
- * act: action to store onto Rx buffers related to XDP buffer parts
- *
- * Set action that should be taken before putting Rx buffer from first frag
- * to the last.
- */
-static inline void
-ice_set_rx_bufs_act(struct xdp_buff *xdp, const struct ice_rx_ring *rx_ring,
- const unsigned int act)
-{
- u32 sinfo_frags = xdp_get_shared_info_from_buff(xdp)->nr_frags;
- u32 nr_frags = rx_ring->nr_frags + 1;
- u32 idx = rx_ring->first_desc;
- u32 cnt = rx_ring->count;
- struct ice_rx_buf *buf;
-
- for (int i = 0; i < nr_frags; i++) {
- buf = &rx_ring->rx_buf[idx];
- buf->act = act;
-
- if (++idx == cnt)
- idx = 0;
- }
-
- /* adjust pagecnt_bias on frags freed by XDP prog */
- if (sinfo_frags < rx_ring->nr_frags && act == ICE_XDP_CONSUMED) {
- u32 delta = rx_ring->nr_frags - sinfo_frags;
-
- while (delta) {
- if (idx == 0)
- idx = cnt - 1;
- else
- idx--;
- buf = &rx_ring->rx_buf[idx];
- buf->pagecnt_bias--;
- delta--;
- }
- }
-}
-
-/**
* ice_test_staterr - tests bits in Rx descriptor status and error fields
* @status_err_n: Rx descriptor status_error0 or status_error1 bits
* @stat_err_bits: value to mask
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index adb168860711..3d68f465952d 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -18,6 +18,8 @@
#include "ice_sbq_cmd.h"
#include "ice_vlan_mode.h"
#include "ice_fwlog.h"
+#include <linux/wait.h>
+#include <net/dscp.h>
static inline bool ice_is_tc_ena(unsigned long bitmap, u8 tc)
{
@@ -368,6 +370,7 @@ struct ice_ts_func_info {
#define ICE_TS_TMR1_ENA_M BIT(26)
#define ICE_TS_LL_TX_TS_READ_M BIT(28)
#define ICE_TS_LL_TX_TS_INT_READ_M BIT(29)
+#define ICE_TS_LL_PHY_TMR_UPDATE_M BIT(30)
struct ice_ts_dev_info {
/* Device specific info */
@@ -382,6 +385,7 @@ struct ice_ts_dev_info {
u8 tmr1_ena;
u8 ts_ll_read;
u8 ts_ll_int_read;
+ u8 ll_phy_tmr_update;
};
#define ICE_NAC_TOPO_PRIMARY_M BIT(0)
@@ -692,7 +696,6 @@ struct ice_dcb_app_priority_table {
#define ICE_MAX_USER_PRIORITY 8
#define ICE_DCBX_MAX_APPS 64
-#define ICE_DSCP_NUM_VAL 64
#define ICE_LLDPDU_SIZE 1500
#define ICE_TLV_STATUS_OPER 0x1
#define ICE_TLV_STATUS_SYNC 0x2
@@ -715,9 +718,9 @@ struct ice_dcbx_cfg {
u8 pfc_mode;
struct ice_dcb_app_priority_table app[ICE_DCBX_MAX_APPS];
/* when DSCP mapping defined by user set its bit to 1 */
- DECLARE_BITMAP(dscp_mapped, ICE_DSCP_NUM_VAL);
+ DECLARE_BITMAP(dscp_mapped, DSCP_MAX);
/* array holding DSCP -> UP/TC values for DSCP L3 QoS mode */
- u8 dscp_map[ICE_DSCP_NUM_VAL];
+ u8 dscp_map[DSCP_MAX];
u8 dcbx_mode;
#define ICE_DCBX_MODE_CEE 0x1
#define ICE_DCBX_MODE_IEEE 0x2
@@ -848,26 +851,26 @@ struct ice_mbx_data {
#define ICE_PORTS_PER_QUAD 4
#define ICE_GET_QUAD_NUM(port) ((port) / ICE_PORTS_PER_QUAD)
+#define ATQBAL_FLAGS_INTR_IN_PROGRESS BIT(0)
+
+struct ice_e810_params {
+ /* The wait queue lock also protects the low latency interface */
+ wait_queue_head_t atqbal_wq;
+ unsigned int atqbal_flags;
+};
+
struct ice_eth56g_params {
u8 num_phys;
- u8 phy_addr[2];
bool onestep_ena;
bool sfd_ena;
u32 peer_delay;
};
union ice_phy_params {
+ struct ice_e810_params e810;
struct ice_eth56g_params eth56g;
};
-/* PHY model */
-enum ice_phy_model {
- ICE_PHY_UNSUP = -1,
- ICE_PHY_E810 = 1,
- ICE_PHY_E82X,
- ICE_PHY_ETH56G,
-};
-
/* Global Link Topology */
enum ice_global_link_topo {
ICE_LINK_TOPO_UP_TO_2_LINKS,
@@ -877,11 +880,9 @@ enum ice_global_link_topo {
};
struct ice_ptp_hw {
- enum ice_phy_model phy_model;
union ice_phy_params phy;
u8 num_lports;
u8 ports_per_phy;
- bool is_2x50g_muxed_topo;
};
/* Port hardware description */
@@ -969,6 +970,7 @@ struct ice_hw {
u8 intrl_gran;
struct ice_ptp_hw ptp;
+ s8 lane_num;
/* Active package version (currently active) */
struct ice_pkg_ver active_pkg_ver;
@@ -1216,4 +1218,9 @@ struct ice_aq_get_set_rss_lut_params {
#define ICE_FW_API_REPORT_DFLT_CFG_MIN 7
#define ICE_FW_API_REPORT_DFLT_CFG_PATCH 3
+/* AQ API version for Health Status support */
+#define ICE_FW_API_HEALTH_REPORT_MAJ 1
+#define ICE_FW_API_HEALTH_REPORT_MIN 7
+#define ICE_FW_API_HEALTH_REPORT_PATCH 6
+
#endif /* _ICE_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
index c7c0c2f50c26..48cd533e93b7 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
@@ -226,6 +226,7 @@ static void ice_vf_clear_counters(struct ice_vf *vf)
vsi->num_vlan = 0;
vf->num_mac = 0;
+ vf->num_mac_lldp = 0;
memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
}
@@ -1036,6 +1037,14 @@ void ice_initialize_vf_entry(struct ice_vf *vf)
mutex_init(&vf->cfg_lock);
}
+void ice_deinitialize_vf_entry(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+
+ if (!ice_is_feature_supported(pf, ICE_F_MBX_LIMIT))
+ list_del(&vf->mbx_info.list_entry);
+}
+
/**
* ice_dis_vf_qs - Disable the VF queues
* @vf: pointer to the VF structure
@@ -1393,3 +1402,28 @@ struct ice_vsi *ice_get_vf_ctrl_vsi(struct ice_pf *pf, struct ice_vsi *vsi)
rcu_read_unlock();
return ctrl_vsi;
}
+
+/**
+ * ice_vf_update_mac_lldp_num - update the VF's number of LLDP addresses
+ * @vf: a VF to add the address to
+ * @vsi: the corresponding VSI
+ * @incr: true if an LLDP address was added, false if it was removed
+ */
+void ice_vf_update_mac_lldp_num(struct ice_vf *vf, struct ice_vsi *vsi,
+ bool incr)
+{
+ bool lldp_by_fw = test_bit(ICE_FLAG_FW_LLDP_AGENT, vsi->back->flags);
+ bool was_ena = ice_vf_is_lldp_ena(vf) && !lldp_by_fw;
+ bool is_ena;
+
+ if (WARN_ON(!vsi)) {
+ vf->num_mac_lldp = 0;
+ return;
+ }
+
+ vf->num_mac_lldp += incr ? 1 : -1;
+ is_ena = ice_vf_is_lldp_ena(vf) && !lldp_by_fw;
+
+ if (was_ena != is_ena)
+ ice_vsi_cfg_sw_lldp(vsi, false, is_ena);
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.h b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
index 4261fe1c2bcd..482f4285fd35 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
@@ -124,6 +124,10 @@ struct ice_vf {
u8 spoofchk:1;
u8 link_forced:1;
u8 link_up:1; /* only valid if VF link is forced */
+ u8 lldp_tx_ena:1;
+
+ u32 ptp_caps;
+
unsigned int min_tx_rate; /* Minimum Tx bandwidth limit in Mbps */
unsigned int max_tx_rate; /* Maximum Tx bandwidth limit in Mbps */
DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS); /* VF runtime states */
@@ -131,6 +135,7 @@ struct ice_vf {
unsigned long vf_caps; /* VF's adv. capabilities */
u8 num_req_qs; /* num of queue pairs requested by VF */
u16 num_mac;
+ u16 num_mac_lldp;
u16 num_vf_qs; /* num of queue configured per VF */
u8 vlan_strip_ena; /* Outer and Inner VLAN strip enable */
#define ICE_INNER_VLAN_STRIP_ENA BIT(0)
@@ -146,6 +151,9 @@ struct ice_vf {
/* devlink port data */
struct devlink_port devlink_port;
+ u16 lldp_recipe_id;
+ u16 lldp_rule_id;
+
u16 num_msix; /* num of MSI-X configured on this VF */
struct ice_vf_qs_bw qs_bw[ICE_MAX_RSS_QS_PER_VF];
};
@@ -177,6 +185,11 @@ static inline u16 ice_vf_get_port_vlan_tpid(struct ice_vf *vf)
return vf->port_vlan_info.tpid;
}
+static inline bool ice_vf_is_lldp_ena(struct ice_vf *vf)
+{
+ return vf->num_mac_lldp && vf->trusted;
+}
+
/* VF Hash Table access functions
*
* These functions provide abstraction for interacting with the VF hash table.
@@ -242,6 +255,8 @@ ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m);
int ice_reset_vf(struct ice_vf *vf, u32 flags);
void ice_reset_all_vfs(struct ice_pf *pf);
struct ice_vsi *ice_get_vf_ctrl_vsi(struct ice_pf *pf, struct ice_vsi *vsi);
+void ice_vf_update_mac_lldp_num(struct ice_vf *vf, struct ice_vsi *vsi,
+ bool incr);
#else /* CONFIG_PCI_IOV */
static inline struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u16 vf_id)
{
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h b/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
index 0c7e77c0a09f..5392b0404986 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
@@ -24,6 +24,7 @@
#endif
void ice_initialize_vf_entry(struct ice_vf *vf);
+void ice_deinitialize_vf_entry(struct ice_vf *vf);
void ice_dis_vf_qs(struct ice_vf *vf);
int ice_check_vf_init(struct ice_vf *vf);
enum virtchnl_status_code ice_err_to_virt_err(int err);
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
index f445e33b2028..eeeb9968e477 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@ -498,6 +498,9 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_QOS)
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_QOS;
+ if (vf->driver_caps & VIRTCHNL_VF_CAP_PTP)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_PTP;
+
vfres->num_vsis = 1;
/* Tx and Rx queue are equal for VF */
vfres->num_queue_pairs = vsi->num_txq;
@@ -562,7 +565,7 @@ bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
*
* check for the valid queue ID
*/
-static bool ice_vc_isvalid_q_id(struct ice_vsi *vsi, u8 qid)
+static bool ice_vc_isvalid_q_id(struct ice_vsi *vsi, u16 qid)
{
/* allocated Tx and Rx queues should be always equal for VF VSI */
return qid < vsi->alloc_txq;
@@ -1862,15 +1865,33 @@ static int ice_vc_cfg_q_bw(struct ice_vf *vf, u8 *msg)
for (i = 0; i < qbw->num_queues; i++) {
if (qbw->cfg[i].shaper.peak != 0 && vf->max_tx_rate != 0 &&
- qbw->cfg[i].shaper.peak > vf->max_tx_rate)
+ qbw->cfg[i].shaper.peak > vf->max_tx_rate) {
dev_warn(ice_pf_to_dev(vf->pf), "The maximum queue %d rate limit configuration may not take effect because the maximum TX rate for VF-%d is %d\n",
qbw->cfg[i].queue_id, vf->vf_id,
vf->max_tx_rate);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
if (qbw->cfg[i].shaper.committed != 0 && vf->min_tx_rate != 0 &&
- qbw->cfg[i].shaper.committed < vf->min_tx_rate)
+ qbw->cfg[i].shaper.committed < vf->min_tx_rate) {
dev_warn(ice_pf_to_dev(vf->pf), "The minimum queue %d rate limit configuration may not take effect because the minimum TX rate for VF-%d is %d\n",
qbw->cfg[i].queue_id, vf->vf_id,
- vf->max_tx_rate);
+ vf->min_tx_rate);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+ if (qbw->cfg[i].queue_id > vf->num_vf_qs) {
+ dev_warn(ice_pf_to_dev(vf->pf), "VF-%d trying to configure invalid queue_id\n",
+ vf->vf_id);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+ if (qbw->cfg[i].tc >= ICE_MAX_TRAFFIC_CLASS) {
+ dev_warn(ice_pf_to_dev(vf->pf), "VF-%d trying to configure a traffic class higher than allowed\n",
+ vf->vf_id);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
}
for (i = 0; i < qbw->num_queues; i++) {
@@ -1900,13 +1921,21 @@ err:
*/
static int ice_vc_cfg_q_quanta(struct ice_vf *vf, u8 *msg)
{
+ u16 quanta_prof_id, quanta_size, start_qid, num_queues, end_qid, i;
enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- u16 quanta_prof_id, quanta_size, start_qid, end_qid, i;
struct virtchnl_quanta_cfg *qquanta =
(struct virtchnl_quanta_cfg *)msg;
struct ice_vsi *vsi;
int ret;
+ start_qid = qquanta->queue_select.start_queue_id;
+ num_queues = qquanta->queue_select.num_queues;
+
+ if (check_add_overflow(start_qid, num_queues, &end_qid)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto err;
@@ -1918,8 +1947,6 @@ static int ice_vc_cfg_q_quanta(struct ice_vf *vf, u8 *msg)
goto err;
}
- end_qid = qquanta->queue_select.start_queue_id +
- qquanta->queue_select.num_queues;
if (end_qid > ICE_MAX_RSS_QS_PER_VF ||
end_qid > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
dev_err(ice_pf_to_dev(vf->pf), "VF-%d trying to configure more than allocated number of queues: %d\n",
@@ -1948,7 +1975,6 @@ static int ice_vc_cfg_q_quanta(struct ice_vf *vf, u8 *msg)
goto err;
}
- start_qid = qquanta->queue_select.start_queue_id;
for (i = start_qid; i < end_qid; i++)
vsi->tx_rings[i]->quanta_prof_id = quanta_prof_id;
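
The quanta hunk above replaces the open-coded `start + num` sum with check_add_overflow() so a 16-bit wraparound cannot slip past the later range checks. A hedged, self-contained sketch of the same pattern follows; the function and parameter names are illustrative.

#include <linux/overflow.h>
#include <linux/types.h>
#include <linux/errno.h>

/* Reject a queue range from an untrusted message if start + num wraps
 * around u16 or runs past the number of allocated queues.
 */
static int example_validate_q_range(u16 start_qid, u16 num_queues,
				    u16 alloc_txq)
{
	u16 end_qid;

	if (check_add_overflow(start_qid, num_queues, &end_qid))
		return -EINVAL;	/* e.g. 0xfff0 + 0x20 would wrap to 0x10 */

	if (end_qid > alloc_txq)
		return -EINVAL;	/* past the allocated queues */

	return 0;
}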
@@ -1975,6 +2001,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
struct ice_vsi *vsi;
u8 act_prt, pri_prt;
int i = -1, q_idx;
+ bool ena_ts;
lag = pf->lag;
mutex_lock(&pf->lag_mutex);
@@ -2104,9 +2131,14 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
rxdid = ICE_RXDID_LEGACY_1;
}
+ ena_ts = ((vf->driver_caps &
+ VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) &&
+ (vf->driver_caps & VIRTCHNL_VF_CAP_PTP) &&
+ (qpi->rxq.flags & VIRTCHNL_PTP_RX_TSTAMP));
+
ice_write_qrxflxp_cntxt(&vsi->back->hw,
- vsi->rxq_map[q_idx],
- rxdid, 0x03, false);
+ vsi->rxq_map[q_idx], rxdid,
+ ICE_RXDID_PRIO, ena_ts);
}
}
@@ -2234,6 +2266,51 @@ ice_vfhw_mac_add(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr)
}
/**
+ * ice_is_mc_lldp_eth_addr - check if the given MAC is a multicast LLDP address
+ * @mac: address to check
+ *
+ * Return: true if the address is one of the three possible LLDP multicast
+ * addresses, false otherwise.
+ */
+static bool ice_is_mc_lldp_eth_addr(const u8 *mac)
+{
+ const u8 lldp_mac_base[] = {0x01, 0x80, 0xc2, 0x00, 0x00};
+
+ if (memcmp(mac, lldp_mac_base, sizeof(lldp_mac_base)))
+ return false;
+
+ return (mac[5] == 0x0e || mac[5] == 0x03 || mac[5] == 0x00);
+}
+
+/**
+ * ice_vc_can_add_mac - check if the VF is allowed to add a given MAC
+ * @vf: a VF to add the address to
+ * @mac: address to check
+ *
+ * Return: true if the VF is allowed to add such MAC address, false otherwise.
+ */
+static bool ice_vc_can_add_mac(const struct ice_vf *vf, const u8 *mac)
+{
+ struct device *dev = ice_pf_to_dev(vf->pf);
+
+ if (is_unicast_ether_addr(mac) &&
+ !ice_can_vf_change_mac((struct ice_vf *)vf)) {
+ dev_err(dev,
+ "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
+ return false;
+ }
+
+ if (!vf->trusted && ice_is_mc_lldp_eth_addr(mac)) {
+ dev_warn(dev,
+ "An untrusted VF %u is attempting to configure an LLDP multicast address\n",
+ vf->vf_id);
+ return false;
+ }
+
+ return true;
+}
+
+/**
* ice_vc_add_mac_addr - attempt to add the MAC address passed in
* @vf: pointer to the VF info
* @vsi: pointer to the VF's VSI
@@ -2251,10 +2328,8 @@ ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
if (ether_addr_equal(mac_addr, vf->dev_lan_addr))
return 0;
- if (is_unicast_ether_addr(mac_addr) && !ice_can_vf_change_mac(vf)) {
- dev_err(dev, "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
+ if (!ice_vc_can_add_mac(vf, mac_addr))
return -EPERM;
- }
ret = ice_fltr_add_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
if (ret == -EEXIST) {
@@ -2269,6 +2344,8 @@ ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
return ret;
} else {
vf->num_mac++;
+ if (ice_is_mc_lldp_eth_addr(mac_addr))
+ ice_vf_update_mac_lldp_num(vf, vsi, true);
}
ice_vfhw_mac_add(vf, vc_ether_addr);
@@ -2363,6 +2440,8 @@ ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
ice_vfhw_mac_del(vf, vc_ether_addr);
vf->num_mac--;
+ if (ice_is_mc_lldp_eth_addr(mac_addr))
+ ice_vf_update_mac_lldp_num(vf, vsi, false);
return 0;
}
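
ice_is_mc_lldp_eth_addr() above matches the three reserved LLDP group addresses (01:80:C2:00:00:00, :03 and :0E). A small user-space check of the same comparison, useful for sanity-testing the constant; this is illustrative code, not driver code.

#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <assert.h>

static bool is_mc_lldp_eth_addr(const uint8_t *mac)
{
	static const uint8_t lldp_mac_base[] = { 0x01, 0x80, 0xc2, 0x00, 0x00 };

	if (memcmp(mac, lldp_mac_base, sizeof(lldp_mac_base)))
		return false;

	return mac[5] == 0x0e || mac[5] == 0x03 || mac[5] == 0x00;
}

int main(void)
{
	const uint8_t nearest_bridge[6] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e };
	const uint8_t bcast[6]          = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	assert(is_mc_lldp_eth_addr(nearest_bridge));
	assert(!is_mc_lldp_eth_addr(bcast));
	return 0;
}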
@@ -3031,8 +3110,8 @@ err:
static int ice_vc_query_rxdid(struct ice_vf *vf)
{
enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct virtchnl_supported_rxdids rxdid = {};
struct ice_pf *pf = vf->pf;
+ u64 rxdid;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
@@ -3044,7 +3123,7 @@ static int ice_vc_query_rxdid(struct ice_vf *vf)
goto err;
}
- rxdid.supported_rxdids = pf->supported_rxdids;
+ rxdid = pf->supported_rxdids;
err:
return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_SUPPORTED_RXDIDS,
@@ -4092,6 +4171,59 @@ out:
v_ret, NULL, 0);
}
+static int ice_vc_get_ptp_cap(struct ice_vf *vf,
+ const struct virtchnl_ptp_caps *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ u32 caps = VIRTCHNL_1588_PTP_CAP_RX_TSTAMP |
+ VIRTCHNL_1588_PTP_CAP_READ_PHC;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
+ goto err;
+
+ v_ret = VIRTCHNL_STATUS_SUCCESS;
+
+ if (msg->caps & caps)
+ vf->ptp_caps = caps;
+
+err:
+ /* send the response back to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_1588_PTP_GET_CAPS, v_ret,
+ (u8 *)&vf->ptp_caps,
+ sizeof(struct virtchnl_ptp_caps));
+}
+
+static int ice_vc_get_phc_time(struct ice_vf *vf)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ struct virtchnl_phc_time *phc_time = NULL;
+ struct ice_pf *pf = vf->pf;
+ u32 len = 0;
+ int ret;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
+ goto err;
+
+ v_ret = VIRTCHNL_STATUS_SUCCESS;
+
+ phc_time = kzalloc(sizeof(*phc_time), GFP_KERNEL);
+ if (!phc_time) {
+ v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
+ goto err;
+ }
+
+ len = sizeof(*phc_time);
+
+ phc_time->time = ice_ptp_read_src_clk_reg(pf, NULL);
+
+err:
+ /* send the response back to the VF */
+ ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_1588_PTP_GET_TIME, v_ret,
+ (u8 *)phc_time, len);
+ kfree(phc_time);
+ return ret;
+}
+
static const struct ice_virtchnl_ops ice_virtchnl_dflt_ops = {
.get_ver_msg = ice_vc_get_ver_msg,
.get_vf_res_msg = ice_vc_get_vf_res_msg,
@@ -4128,6 +4260,11 @@ static const struct ice_virtchnl_ops ice_virtchnl_dflt_ops = {
.get_qos_caps = ice_vc_get_qos_caps,
.cfg_q_bw = ice_vc_cfg_q_bw,
.cfg_q_quanta = ice_vc_cfg_q_quanta,
+ .get_ptp_cap = ice_vc_get_ptp_cap,
+ .get_phc_time = ice_vc_get_phc_time,
+ /* If you add a new op here please make sure to add it to
+ * ice_virtchnl_repr_ops as well.
+ */
};
/**
@@ -4185,7 +4322,6 @@ static int ice_vc_repr_add_mac(struct ice_vf *vf, u8 *msg)
}
ice_vfhw_mac_add(vf, &al->list[i]);
- vf->num_mac++;
break;
}
@@ -4258,6 +4394,11 @@ static const struct ice_virtchnl_ops ice_virtchnl_repr_ops = {
.dis_vlan_stripping_v2_msg = ice_vc_dis_vlan_stripping_v2_msg,
.ena_vlan_insertion_v2_msg = ice_vc_ena_vlan_insertion_v2_msg,
.dis_vlan_insertion_v2_msg = ice_vc_dis_vlan_insertion_v2_msg,
+ .get_qos_caps = ice_vc_get_qos_caps,
+ .cfg_q_bw = ice_vc_cfg_q_bw,
+ .cfg_q_quanta = ice_vc_cfg_q_quanta,
+ .get_ptp_cap = ice_vc_get_ptp_cap,
+ .get_phc_time = ice_vc_get_phc_time,
};
/**
@@ -4495,6 +4636,12 @@ error_handler:
case VIRTCHNL_OP_CONFIG_QUANTA:
err = ops->cfg_q_quanta(vf, msg);
break;
+ case VIRTCHNL_OP_1588_PTP_GET_CAPS:
+ err = ops->get_ptp_cap(vf, (const void *)msg);
+ break;
+ case VIRTCHNL_OP_1588_PTP_GET_TIME:
+ err = ops->get_phc_time(vf);
+ break;
case VIRTCHNL_OP_UNKNOWN:
default:
dev_err(dev, "Unsupported opcode %d from VF %d\n", v_opcode,
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.h b/drivers/net/ethernet/intel/ice/ice_virtchnl.h
index 0c629aef9baf..222990f229d5 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.h
@@ -26,6 +26,9 @@
#define ICE_MAX_MACADDR_PER_VF 18
#define ICE_FLEX_DESC_RXDID_MAX_NUM 64
+/* Priority to be compared against previous priority from the pipe */
+#define ICE_RXDID_PRIO 0x03
+
/* VFs only get a single VSI. For ice hardware, the VF does not need to know
* its VSI index. However, the virtchnl interface requires a VSI number,
* mainly due to legacy hardware.
@@ -72,6 +75,9 @@ struct ice_virtchnl_ops {
int (*cfg_q_tc_map)(struct ice_vf *vf, u8 *msg);
int (*cfg_q_bw)(struct ice_vf *vf, u8 *msg);
int (*cfg_q_quanta)(struct ice_vf *vf, u8 *msg);
+ int (*get_ptp_cap)(struct ice_vf *vf,
+ const struct virtchnl_ptp_caps *msg);
+ int (*get_phc_time)(struct ice_vf *vf);
};
#ifdef CONFIG_PCI_IOV
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
index c105a82ee136..a3d1579a619a 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
@@ -84,6 +84,12 @@ static const u32 fdir_pf_allowlist_opcodes[] = {
VIRTCHNL_OP_ADD_FDIR_FILTER, VIRTCHNL_OP_DEL_FDIR_FILTER,
};
+/* VIRTCHNL_VF_CAP_PTP */
+static const u32 ptp_allowlist_opcodes[] = {
+ VIRTCHNL_OP_1588_PTP_GET_CAPS,
+ VIRTCHNL_OP_1588_PTP_GET_TIME,
+};
+
static const u32 tc_allowlist_opcodes[] = {
VIRTCHNL_OP_GET_QOS_CAPS, VIRTCHNL_OP_CONFIG_QUEUE_BW,
VIRTCHNL_OP_CONFIG_QUANTA,
@@ -110,6 +116,7 @@ static const struct allowlist_opcode_info allowlist_opcodes[] = {
ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_FDIR_PF, fdir_pf_allowlist_opcodes),
ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_VLAN_V2, vlan_v2_allowlist_opcodes),
ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_QOS, tc_allowlist_opcodes),
+ ALLOW_ITEM(VIRTCHNL_VF_CAP_PTP, ptp_allowlist_opcodes),
};
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
index 14e3f0f89c78..ae83c3914e29 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
@@ -832,21 +832,27 @@ ice_vc_fdir_parse_raw(struct ice_vf *vf,
struct virtchnl_proto_hdrs *proto,
struct virtchnl_fdir_fltr_conf *conf)
{
- u8 *pkt_buf, *msk_buf __free(kfree);
+ u8 *pkt_buf, *msk_buf __free(kfree) = NULL;
struct ice_parser_result rslt;
struct ice_pf *pf = vf->pf;
+ u16 pkt_len, udp_port = 0;
struct ice_parser *psr;
int status = -ENOMEM;
struct ice_hw *hw;
- u16 udp_port = 0;
- pkt_buf = kzalloc(proto->raw.pkt_len, GFP_KERNEL);
- msk_buf = kzalloc(proto->raw.pkt_len, GFP_KERNEL);
+ pkt_len = proto->raw.pkt_len;
+
+ if (!pkt_len || pkt_len > VIRTCHNL_MAX_SIZE_RAW_PACKET)
+ return -EINVAL;
+
+ pkt_buf = kzalloc(pkt_len, GFP_KERNEL);
+ msk_buf = kzalloc(pkt_len, GFP_KERNEL);
+
if (!pkt_buf || !msk_buf)
goto err_mem_alloc;
- memcpy(pkt_buf, proto->raw.spec, proto->raw.pkt_len);
- memcpy(msk_buf, proto->raw.mask, proto->raw.pkt_len);
+ memcpy(pkt_buf, proto->raw.spec, pkt_len);
+ memcpy(msk_buf, proto->raw.mask, pkt_len);
hw = &pf->hw;
@@ -862,7 +868,7 @@ ice_vc_fdir_parse_raw(struct ice_vf *vf,
if (ice_get_open_tunnel_port(hw, &udp_port, TNL_VXLAN))
ice_parser_vxlan_tunnel_set(psr, udp_port, true);
- status = ice_parser_run(psr, pkt_buf, proto->raw.pkt_len, &rslt);
+ status = ice_parser_run(psr, pkt_buf, pkt_len, &rslt);
if (status)
goto err_parser_destroy;
@@ -876,7 +882,7 @@ ice_vc_fdir_parse_raw(struct ice_vf *vf,
}
status = ice_parser_profile_init(&rslt, pkt_buf, msk_buf,
- proto->raw.pkt_len, ICE_BLK_FD,
+ pkt_len, ICE_BLK_FD,
conf->prof);
if (status)
goto err_parser_profile_init;
@@ -885,7 +891,7 @@ ice_vc_fdir_parse_raw(struct ice_vf *vf,
ice_parser_profile_dump(hw, conf->prof);
/* Store raw flow info into @conf */
- conf->pkt_len = proto->raw.pkt_len;
+ conf->pkt_len = pkt_len;
conf->pkt_buf = pkt_buf;
conf->parser_ena = true;
@@ -1444,7 +1450,8 @@ err_free_pkt:
*/
static void ice_vf_fdir_timer(struct timer_list *t)
{
- struct ice_vf_fdir_ctx *ctx_irq = from_timer(ctx_irq, t, rx_tmr);
+ struct ice_vf_fdir_ctx *ctx_irq = timer_container_of(ctx_irq, t,
+ rx_tmr);
struct ice_vf_fdir_ctx *ctx_done;
struct ice_vf_fdir *fdir;
unsigned long flags;
@@ -1515,7 +1522,7 @@ ice_vc_fdir_irq_handler(struct ice_vsi *ctrl_vsi,
memcpy(&ctx_done->rx_desc, rx_desc, sizeof(*rx_desc));
spin_unlock_irqrestore(&fdir->ctx_lock, flags);
- ret = del_timer(&ctx_irq->rx_tmr);
+ ret = timer_delete(&ctx_irq->rx_tmr);
if (!ret)
dev_err(dev, "VF %d: Unexpected inactive timer!\n", vf->vf_id);
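
The from_timer() to timer_container_of() and del_timer() to timer_delete() changes above are mechanical conversions to the current timer API. A hedged, self-contained sketch of the new spelling; the struct and field names are illustrative.

#include <linux/timer.h>
#include <linux/jiffies.h>

struct example_fdir_ctx {
	struct timer_list rx_tmr;
	unsigned long flags;
};

/* The callback receives the timer_list pointer and recovers the
 * enclosing context with timer_container_of(), the successor of
 * from_timer().
 */
static void example_fdir_timer(struct timer_list *t)
{
	struct example_fdir_ctx *ctx = timer_container_of(ctx, t, rx_tmr);

	ctx->flags = 0;
}

static void example_fdir_arm(struct example_fdir_ctx *ctx)
{
	timer_setup(&ctx->rx_tmr, example_fdir_timer, 0);
	mod_timer(&ctx->rx_tmr, jiffies + msecs_to_jiffies(100));
}

static void example_fdir_disarm(struct example_fdir_ctx *ctx)
{
	timer_delete(&ctx->rx_tmr);	/* replaces del_timer() */
}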
@@ -1910,7 +1917,7 @@ static void ice_vc_fdir_clear_irq_ctx(struct ice_vf *vf)
struct ice_vf_fdir_ctx *ctx = &vf->fdir.ctx_irq;
unsigned long flags;
- del_timer(&ctx->rx_tmr);
+ timer_delete(&ctx->rx_tmr);
spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
@@ -2091,6 +2098,11 @@ int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
pf = vf->pf;
dev = ice_pf_to_dev(pf);
vf_vsi = ice_get_vf_vsi(vf);
+ if (!vf_vsi) {
+ dev_err(dev, "Can not get FDIR vf_vsi for VF %u\n", vf->vf_id);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err_exit;
+ }
#define ICE_VF_MAX_FDIR_FILTERS 128
if (!ice_fdir_num_avail_fltr(&pf->hw, vf_vsi) ||
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 334ae945d640..a3a4eaa17739 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2019, Intel Corporation. */
#include <linux/bpf_trace.h>
+#include <linux/unroll.h>
#include <net/xdp_sock_drv.h>
#include <net/xdp.h>
#include "ice.h"
@@ -801,35 +802,6 @@ out_failure:
return result;
}
-static int
-ice_add_xsk_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *first,
- struct xdp_buff *xdp, const unsigned int size)
-{
- struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(first);
-
- if (!size)
- return 0;
-
- if (!xdp_buff_has_frags(first)) {
- sinfo->nr_frags = 0;
- sinfo->xdp_frags_size = 0;
- xdp_buff_set_frags_flag(first);
- }
-
- if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) {
- xsk_buff_free(first);
- return -ENOMEM;
- }
-
- __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++,
- virt_to_page(xdp->data_hard_start),
- XDP_PACKET_HEADROOM, size);
- sinfo->xdp_frags_size += size;
- xsk_buff_add_frag(xdp);
-
- return 0;
-}
-
/**
* ice_clean_rx_irq_zc - consumes packets from the hardware ring
* @rx_ring: AF_XDP Rx ring
@@ -895,7 +867,8 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring,
if (!first) {
first = xdp;
- } else if (ice_add_xsk_frag(rx_ring, first, xdp, size)) {
+ } else if (likely(size) && !xsk_buff_add_frag(first, xdp)) {
+ xsk_buff_free(first);
break;
}
@@ -1017,7 +990,8 @@ static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring,
struct ice_tx_desc *tx_desc;
u32 i;
- loop_unrolled_for(i = 0; i < PKTS_PER_BATCH; i++) {
+ unrolled_count(PKTS_PER_BATCH)
+ for (i = 0; i < PKTS_PER_BATCH; i++) {
dma_addr_t dma;
dma = xsk_buff_raw_get_dma(xsk_pool, descs[i].addr);
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.h b/drivers/net/ethernet/intel/ice/ice_xsk.h
index 45adeb513253..8dc5d55e26c5 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.h
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.h
@@ -7,14 +7,6 @@
#define PKTS_PER_BATCH 8
-#ifdef __clang__
-#define loop_unrolled_for _Pragma("clang loop unroll_count(8)") for
-#elif __GNUC__ >= 8
-#define loop_unrolled_for _Pragma("GCC unroll 8") for
-#else
-#define loop_unrolled_for for
-#endif
-
struct ice_vsi;
#ifdef CONFIG_XDP_SOCKETS
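
The removal above swaps the driver-local loop_unrolled_for wrapper for the generic unrolled_count() helper from <linux/unroll.h>. A hedged sketch of how the new form reads; the copy loop is purely illustrative.

#include <linux/unroll.h>
#include <linux/types.h>

#define EXAMPLE_BATCH	8

static void example_copy_batch(u64 *dst, const u64 *src)
{
	u32 i;

	/* Ask the compiler to unroll the loop body 8 times, the same
	 * request the old per-compiler _Pragma macro encoded.
	 */
	unrolled_count(EXAMPLE_BATCH)
	for (i = 0; i < EXAMPLE_BATCH; i++)
		dst[i] = src[i];
}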
diff --git a/drivers/net/ethernet/intel/idpf/Kconfig b/drivers/net/ethernet/intel/idpf/Kconfig
index 1addd663acad..2c359a8551c7 100644
--- a/drivers/net/ethernet/intel/idpf/Kconfig
+++ b/drivers/net/ethernet/intel/idpf/Kconfig
@@ -4,6 +4,7 @@
config IDPF
tristate "Intel(R) Infrastructure Data Path Function Support"
depends on PCI_MSI
+ depends on PTP_1588_CLOCK_OPTIONAL
select DIMLIB
select LIBETH
help
diff --git a/drivers/net/ethernet/intel/idpf/Makefile b/drivers/net/ethernet/intel/idpf/Makefile
index 2ce01a0b5898..83ac5e296382 100644
--- a/drivers/net/ethernet/intel/idpf/Makefile
+++ b/drivers/net/ethernet/intel/idpf/Makefile
@@ -17,3 +17,6 @@ idpf-y := \
idpf_vf_dev.o
idpf-$(CONFIG_IDPF_SINGLEQ) += idpf_singleq_txrx.o
+
+idpf-$(CONFIG_PTP_1588_CLOCK) += idpf_ptp.o
+idpf-$(CONFIG_PTP_1588_CLOCK) += idpf_virtchnl_ptp.o
diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h
index 66544faab710..1e812c3f62f9 100644
--- a/drivers/net/ethernet/intel/idpf/idpf.h
+++ b/drivers/net/ethernet/intel/idpf/idpf.h
@@ -143,6 +143,7 @@ enum idpf_vport_state {
* @vport_id: Vport identifier
* @link_speed_mbps: Link speed in mbps
* @vport_idx: Relative vport index
+ * @max_tx_hdr_size: Max header length hardware can support
* @state: See enum idpf_vport_state
* @netstats: Packet and byte stats
* @stats_lock: Lock to protect stats update
@@ -153,6 +154,7 @@ struct idpf_netdev_priv {
u32 vport_id;
u32 link_speed_mbps;
u16 vport_idx;
+ u16 max_tx_hdr_size;
enum idpf_vport_state state;
struct rtnl_link_stats64 netstats;
spinlock_t stats_lock;
@@ -189,6 +191,7 @@ struct idpf_vport_max_q {
* @mb_intr_reg_init: Mailbox interrupt register initialization
* @reset_reg_init: Reset register initialization
* @trigger_reset: Trigger a reset to occur
+ * @ptp_reg_init: PTP register initialization
*/
struct idpf_reg_ops {
void (*ctlq_reg_init)(struct idpf_ctlq_create_info *cq);
@@ -197,6 +200,7 @@ struct idpf_reg_ops {
void (*reset_reg_init)(struct idpf_adapter *adapter);
void (*trigger_reset)(struct idpf_adapter *adapter,
enum idpf_flags trig_cause);
+ void (*ptp_reg_init)(const struct idpf_adapter *adapter);
};
/**
@@ -290,6 +294,9 @@ struct idpf_port_stats {
* @port_stats: per port csum, header split, and other offload stats
* @link_up: True if link is up
* @sw_marker_wq: workqueue for marker packets
+ * @tx_tstamp_caps: Capabilities negotiated for Tx timestamping
+ * @tstamp_config: The Tx tstamp config
+ * @tstamp_task: Tx timestamping task
*/
struct idpf_vport {
u16 num_txq;
@@ -334,6 +341,10 @@ struct idpf_vport {
bool link_up;
wait_queue_head_t sw_marker_wq;
+
+ struct idpf_ptp_vport_tx_tstamp_caps *tx_tstamp_caps;
+ struct kernel_hwtstamp_config tstamp_config;
+ struct work_struct tstamp_task;
};
/**
@@ -478,6 +489,13 @@ struct idpf_vport_config {
struct idpf_vc_xn_manager;
+#define idpf_for_each_vport(adapter, iter) \
+ for (struct idpf_vport **__##iter = &(adapter)->vports[0], \
+ *iter = (adapter)->max_vports ? *__##iter : NULL; \
+ iter; \
+ iter = (++__##iter) < &(adapter)->vports[(adapter)->max_vports] ? \
+ *__##iter : NULL)
+
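
The idpf_for_each_vport() iterator above walks adapter->vports[] and yields only non-NULL pointers; note that it stops at the first empty slot rather than skipping it. A hedged usage sketch follows; the loop body is illustrative.

/* Count vports that currently report link up; 'vport' is declared by
 * the macro itself.
 */
static u32 example_count_link_up(struct idpf_adapter *adapter)
{
	u32 count = 0;

	idpf_for_each_vport(adapter, vport) {
		if (vport->link_up)
			count++;
	}

	return count;
}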
/**
* struct idpf_adapter - Device data struct generated on probe
* @pdev: PCI device struct given on probe
@@ -530,6 +548,7 @@ struct idpf_vc_xn_manager;
* @vector_lock: Lock to protect vector distribution
* @queue_lock: Lock to protect queue distribution
* @vc_buf_lock: Lock to protect virtchnl buffer
+ * @ptp: Storage for PTP-related data
*/
struct idpf_adapter {
struct pci_dev *pdev;
@@ -587,6 +606,8 @@ struct idpf_adapter {
struct mutex vector_lock;
struct mutex queue_lock;
struct mutex vc_buf_lock;
+
+ struct idpf_ptp *ptp;
};
/**
@@ -629,13 +650,13 @@ bool idpf_is_capability_ena(struct idpf_adapter *adapter, bool all,
VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V4 |\
VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V6)
-#define IDPF_CAP_RX_CSUM_L4V4 (\
- VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_TCP |\
- VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_UDP)
+#define IDPF_CAP_TX_CSUM_L4V4 (\
+ VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_TCP |\
+ VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_UDP)
-#define IDPF_CAP_RX_CSUM_L4V6 (\
- VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP |\
- VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP)
+#define IDPF_CAP_TX_CSUM_L4V6 (\
+ VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_TCP |\
+ VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_UDP)
#define IDPF_CAP_RX_CSUM (\
VIRTCHNL2_CAP_RX_CSUM_L3_IPV4 |\
@@ -644,11 +665,9 @@ bool idpf_is_capability_ena(struct idpf_adapter *adapter, bool all,
VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP |\
VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP)
-#define IDPF_CAP_SCTP_CSUM (\
+#define IDPF_CAP_TX_SCTP_CSUM (\
VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_SCTP |\
- VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP |\
- VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_SCTP |\
- VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_SCTP)
+ VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP)
#define IDPF_CAP_TUNNEL_TX_CSUM (\
VIRTCHNL2_CAP_TX_CSUM_L3_SINGLE_TUNNEL |\
diff --git a/drivers/net/ethernet/intel/idpf/idpf_controlq.c b/drivers/net/ethernet/intel/idpf/idpf_controlq.c
index 4849590a5591..48b8e184f3db 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_controlq.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_controlq.c
@@ -96,7 +96,7 @@ static void idpf_ctlq_init_rxq_bufs(struct idpf_ctlq_info *cq)
*/
static void idpf_ctlq_shutdown(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
{
- mutex_lock(&cq->cq_lock);
+ spin_lock(&cq->cq_lock);
/* free ring buffers and the ring itself */
idpf_ctlq_dealloc_ring_res(hw, cq);
@@ -104,8 +104,7 @@ static void idpf_ctlq_shutdown(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
/* Set ring_size to 0 to indicate uninitialized queue */
cq->ring_size = 0;
- mutex_unlock(&cq->cq_lock);
- mutex_destroy(&cq->cq_lock);
+ spin_unlock(&cq->cq_lock);
}
/**
@@ -173,7 +172,7 @@ int idpf_ctlq_add(struct idpf_hw *hw,
idpf_ctlq_init_regs(hw, cq, is_rxq);
- mutex_init(&cq->cq_lock);
+ spin_lock_init(&cq->cq_lock);
list_add(&cq->cq_list, &hw->cq_list_head);
@@ -272,7 +271,7 @@ int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
int err = 0;
int i;
- mutex_lock(&cq->cq_lock);
+ spin_lock(&cq->cq_lock);
/* Ensure there are enough descriptors to send all messages */
num_desc_avail = IDPF_CTLQ_DESC_UNUSED(cq);
@@ -332,7 +331,7 @@ int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
wr32(hw, cq->reg.tail, cq->next_to_use);
err_unlock:
- mutex_unlock(&cq->cq_lock);
+ spin_unlock(&cq->cq_lock);
return err;
}
@@ -364,7 +363,7 @@ int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
if (*clean_count > cq->ring_size)
return -EBADR;
- mutex_lock(&cq->cq_lock);
+ spin_lock(&cq->cq_lock);
ntc = cq->next_to_clean;
@@ -376,6 +375,9 @@ int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
if (!(le16_to_cpu(desc->flags) & IDPF_CTLQ_FLAG_DD))
break;
+ /* Ensure no other fields are read until DD flag is checked */
+ dma_rmb();
+
/* strip off FW internal code */
desc_err = le16_to_cpu(desc->ret_val) & 0xff;
@@ -394,7 +396,7 @@ int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
cq->next_to_clean = ntc;
- mutex_unlock(&cq->cq_lock);
+ spin_unlock(&cq->cq_lock);
/* Return number of descriptors actually cleaned */
*clean_count = i;
@@ -432,7 +434,7 @@ int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
if (*buff_count > 0)
buffs_avail = true;
- mutex_lock(&cq->cq_lock);
+ spin_lock(&cq->cq_lock);
if (tbp >= cq->ring_size)
tbp = 0;
@@ -521,7 +523,7 @@ post_buffs_out:
wr32(hw, cq->reg.tail, cq->next_to_post);
}
- mutex_unlock(&cq->cq_lock);
+ spin_unlock(&cq->cq_lock);
/* return the number of buffers that were not posted */
*buff_count = *buff_count - i;
@@ -549,7 +551,7 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
u16 i;
/* take the lock before we start messing with the ring */
- mutex_lock(&cq->cq_lock);
+ spin_lock(&cq->cq_lock);
ntc = cq->next_to_clean;
@@ -563,6 +565,9 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
if (!(flags & IDPF_CTLQ_FLAG_DD))
break;
+ /* Ensure no other fields are read until DD flag is checked */
+ dma_rmb();
+
q_msg[i].vmvf_type = (flags &
(IDPF_CTLQ_FLAG_FTYPE_VM |
IDPF_CTLQ_FLAG_FTYPE_PF)) >>
@@ -608,7 +613,7 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
cq->next_to_clean = ntc;
- mutex_unlock(&cq->cq_lock);
+ spin_unlock(&cq->cq_lock);
*num_q_msg = i;
if (*num_q_msg == 0)
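
The dma_rmb() additions above order the DD-flag check against later reads of descriptor fields that firmware DMA-writes. A hedged, generic sketch of the pattern; the descriptor layout and names are illustrative, not the idpf control queue format.

#include <linux/types.h>
#include <linux/bits.h>
#include <asm/barrier.h>

#define EXAMPLE_CTLQ_FLAG_DD	BIT(0)

struct example_ctlq_desc {
	__le16 flags;
	__le16 ret_val;
	__le32 cookie_high;
	__le32 cookie_low;
};

/* Returns true and fills @err once the descriptor is done. */
static bool example_desc_done(const struct example_ctlq_desc *desc, u16 *err)
{
	if (!(le16_to_cpu(desc->flags) & EXAMPLE_CTLQ_FLAG_DD))
		return false;

	/* No other descriptor field may be read until DD is seen set. */
	dma_rmb();

	*err = le16_to_cpu(desc->ret_val) & 0xff;
	return true;
}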
diff --git a/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h b/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h
index e8e046ef2f0d..3414c5f9a831 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h
@@ -99,7 +99,7 @@ struct idpf_ctlq_info {
enum idpf_ctlq_type cq_type;
int q_id;
- struct mutex cq_lock; /* control queue lock */
+ spinlock_t cq_lock; /* control queue lock */
/* used for interrupt processing */
u16 next_to_use;
u16 next_to_clean;
@@ -123,9 +123,12 @@ struct idpf_ctlq_info {
/**
* enum idpf_mbx_opc - PF/VF mailbox commands
* @idpf_mbq_opc_send_msg_to_cp: used by PF or VF to send a message to its CP
+ * @idpf_mbq_opc_send_msg_to_peer_drv: used by PF or VF to send a message to
+ * any peer driver
*/
enum idpf_mbx_opc {
idpf_mbq_opc_send_msg_to_cp = 0x0801,
+ idpf_mbq_opc_send_msg_to_peer_drv = 0x0804,
};
/* API supported for control queue management */
diff --git a/drivers/net/ethernet/intel/idpf/idpf_dev.c b/drivers/net/ethernet/intel/idpf/idpf_dev.c
index 6c913a703df6..3fae81f1f988 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_dev.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_dev.c
@@ -4,6 +4,7 @@
#include "idpf.h"
#include "idpf_lan_pf_regs.h"
#include "idpf_virtchnl.h"
+#include "idpf_ptp.h"
#define IDPF_PF_ITR_IDX_SPACING 0x4
@@ -101,6 +102,9 @@ static int idpf_intr_reg_init(struct idpf_vport *vport)
intr->dyn_ctl_itridx_s = PF_GLINT_DYN_CTL_ITR_INDX_S;
intr->dyn_ctl_intrvl_s = PF_GLINT_DYN_CTL_INTERVAL_S;
intr->dyn_ctl_wb_on_itr_m = PF_GLINT_DYN_CTL_WB_ON_ITR_M;
+ intr->dyn_ctl_swint_trig_m = PF_GLINT_DYN_CTL_SWINT_TRIG_M;
+ intr->dyn_ctl_sw_itridx_ena_m =
+ PF_GLINT_DYN_CTL_SW_ITR_INDX_ENA_M;
spacing = IDPF_ITR_IDX_SPACING(reg_vals[vec_id].itrn_index_spacing,
IDPF_PF_ITR_IDX_SPACING);
@@ -146,6 +150,18 @@ static void idpf_trigger_reset(struct idpf_adapter *adapter,
}
/**
+ * idpf_ptp_reg_init - Initialize required registers
+ * @adapter: Driver specific private structure
+ *
+ * Set the bits required for enabling shtime and cmd execution
+ */
+static void idpf_ptp_reg_init(const struct idpf_adapter *adapter)
+{
+ adapter->ptp->cmd.shtime_enable_mask = PF_GLTSYN_CMD_SYNC_SHTIME_EN_M;
+ adapter->ptp->cmd.exec_cmd_mask = PF_GLTSYN_CMD_SYNC_EXEC_CMD_M;
+}
+
+/**
* idpf_reg_ops_init - Initialize register API function pointers
* @adapter: Driver specific private structure
*/
@@ -156,6 +172,7 @@ static void idpf_reg_ops_init(struct idpf_adapter *adapter)
adapter->dev_ops.reg_ops.mb_intr_reg_init = idpf_mb_intr_reg_init;
adapter->dev_ops.reg_ops.reset_reg_init = idpf_reset_reg_init;
adapter->dev_ops.reg_ops.trigger_reset = idpf_trigger_reset;
+ adapter->dev_ops.reg_ops.ptp_reg_init = idpf_ptp_reg_init;
}
/**
diff --git a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
index 59b1a1a09996..eaf7a2606faa 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
@@ -2,6 +2,7 @@
/* Copyright (C) 2023 Intel Corporation */
#include "idpf.h"
+#include "idpf_ptp.h"
/**
* idpf_get_rxnfc - command to get RX flow classification rules
@@ -46,7 +47,7 @@ static u32 idpf_get_rxfh_key_size(struct net_device *netdev)
struct idpf_vport_user_config_data *user_config;
if (!idpf_is_cap_ena_all(np->adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS))
- return -EOPNOTSUPP;
+ return 0;
user_config = &np->adapter->vport_config[np->vport_idx]->user_config;
@@ -65,7 +66,7 @@ static u32 idpf_get_rxfh_indir_size(struct net_device *netdev)
struct idpf_vport_user_config_data *user_config;
if (!idpf_is_cap_ena_all(np->adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS))
- return -EOPNOTSUPP;
+ return 0;
user_config = &np->adapter->vport_config[np->vport_idx]->user_config;
@@ -1312,6 +1313,71 @@ static int idpf_get_link_ksettings(struct net_device *netdev,
return 0;
}
+/**
+ * idpf_get_timestamp_filters - Get the supported timestamping mode
+ * @vport: Virtual port structure
+ * @info: ethtool timestamping info structure
+ *
+ * Get the Tx/Rx timestamp filters.
+ */
+static void idpf_get_timestamp_filters(const struct idpf_vport *vport,
+ struct kernel_ethtool_ts_info *info)
+{
+ info->so_timestamping = SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ info->tx_types = BIT(HWTSTAMP_TX_OFF);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);
+
+ if (!vport->tx_tstamp_caps ||
+ vport->adapter->ptp->tx_tstamp_access == IDPF_PTP_NONE)
+ return;
+
+ info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE;
+
+ info->tx_types |= BIT(HWTSTAMP_TX_ON);
+}
+
+/**
+ * idpf_get_ts_info - Get device PHC association
+ * @netdev: network interface device structure
+ * @info: ethtool timestamping info structure
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int idpf_get_ts_info(struct net_device *netdev,
+ struct kernel_ethtool_ts_info *info)
+{
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+ struct idpf_vport *vport;
+ int err = 0;
+
+ if (!mutex_trylock(&np->adapter->vport_ctrl_lock))
+ return -EBUSY;
+
+ vport = idpf_netdev_to_vport(netdev);
+
+ if (!vport->adapter->ptp) {
+ err = -EOPNOTSUPP;
+ goto unlock;
+ }
+
+ if (idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_PTP) &&
+ vport->adapter->ptp->clock) {
+ info->phc_index = ptp_clock_index(vport->adapter->ptp->clock);
+ idpf_get_timestamp_filters(vport, info);
+ } else {
+ pci_dbg(vport->adapter->pdev, "PTP clock not detected\n");
+ err = ethtool_op_get_ts_info(netdev, info);
+ }
+
+unlock:
+ mutex_unlock(&np->adapter->vport_ctrl_lock);
+
+ return err;
+}
+
static const struct ethtool_ops idpf_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_USE_ADAPTIVE,
@@ -1336,6 +1402,7 @@ static const struct ethtool_ops idpf_ethtool_ops = {
.get_ringparam = idpf_get_ringparam,
.set_ringparam = idpf_set_ringparam,
.get_link_ksettings = idpf_get_link_ksettings,
+ .get_ts_info = idpf_get_ts_info,
};
/**
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lan_pf_regs.h b/drivers/net/ethernet/intel/idpf/idpf_lan_pf_regs.h
index 24edb8a6ec2e..cc9aa2b6a14a 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lan_pf_regs.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_lan_pf_regs.h
@@ -53,6 +53,10 @@
#define PF_FW_ATQH_ATQH_M GENMASK(9, 0)
#define PF_FW_ATQT (PF_FW_BASE + 0x24)
+/* Timesync registers */
+#define PF_GLTSYN_CMD_SYNC_EXEC_CMD_M GENMASK(1, 0)
+#define PF_GLTSYN_CMD_SYNC_SHTIME_EN_M BIT(2)
+
/* Interrupts */
#define PF_GLINT_BASE 0x08900000
#define PF_GLINT_DYN_CTL(_INT) (PF_GLINT_BASE + ((_INT) * 0x1000))
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h
index 8c7f8ef8f1a1..7492d1713243 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h
@@ -282,7 +282,18 @@ struct idpf_flex_tx_tso_ctx_qw {
u8 flex;
};
-struct idpf_flex_tx_ctx_desc {
+union idpf_flex_tx_ctx_desc {
+ /* DTYPE = IDPF_TX_DESC_DTYPE_CTX (0x01) */
+ struct {
+ __le64 qw0;
+#define IDPF_TX_CTX_L2TAG2_M GENMASK_ULL(47, 32)
+ __le64 qw1;
+#define IDPF_TX_CTX_DTYPE_M GENMASK_ULL(3, 0)
+#define IDPF_TX_CTX_CMD_M GENMASK_ULL(15, 4)
+#define IDPF_TX_CTX_TSYN_REG_M GENMASK_ULL(47, 30)
+#define IDPF_TX_CTX_MSS_M	GENMASK_ULL(63, 50)
+ } tsyn;
+
/* DTYPE = IDPF_TX_DESC_DTYPE_FLEX_TSO_CTX (0x05) */
struct {
struct idpf_flex_tx_tso_ctx_qw qw0;
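
The new tsyn view of the context descriptor above is built with the usual GENMASK/FIELD_PREP pairing. A hedged sketch of packing qw1 for a timestamp-request descriptor; the command value and latch-index semantics are assumptions for illustration, not taken from this patch.

#include <linux/bitfield.h>
#include <linux/types.h>

/* Pack the DTYPE, command flags and timestamp-latch index into qw1 of
 * the tsyn context descriptor using the masks defined above.
 */
static __le64 example_pack_tsyn_qw1(u64 dtype, u64 cmd, u64 tsyn_idx)
{
	u64 qw1 = FIELD_PREP(IDPF_TX_CTX_DTYPE_M, dtype) |
		  FIELD_PREP(IDPF_TX_CTX_CMD_M, cmd) |
		  FIELD_PREP(IDPF_TX_CTX_TSYN_REG_M, tsyn_idx);

	return cpu_to_le64(qw1);
}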
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c
index b4fbb99bfad2..80382ff4a5fa 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
@@ -3,6 +3,7 @@
#include "idpf.h"
#include "idpf_virtchnl.h"
+#include "idpf_ptp.h"
static const struct net_device_ops idpf_netdev_ops;
@@ -144,22 +145,6 @@ static int idpf_mb_intr_req_irq(struct idpf_adapter *adapter)
}
/**
- * idpf_set_mb_vec_id - Set vector index for mailbox
- * @adapter: adapter structure to access the vector chunks
- *
- * The first vector id in the requested vector chunks from the CP is for
- * the mailbox
- */
-static void idpf_set_mb_vec_id(struct idpf_adapter *adapter)
-{
- if (adapter->req_vec_chunks)
- adapter->mb_vector.v_idx =
- le16_to_cpu(adapter->caps.mailbox_vector_id);
- else
- adapter->mb_vector.v_idx = 0;
-}
-
-/**
* idpf_mb_intr_init - Initialize the mailbox interrupt
* @adapter: adapter structure to store the mailbox vector
*/
@@ -349,7 +334,7 @@ int idpf_intr_req(struct idpf_adapter *adapter)
goto free_irq;
}
- idpf_set_mb_vec_id(adapter);
+ adapter->mb_vector.v_idx = le16_to_cpu(adapter->caps.mailbox_vector_id);
vecids = kcalloc(total_vecs, sizeof(u16), GFP_KERNEL);
if (!vecids) {
@@ -703,8 +688,10 @@ static int idpf_cfg_netdev(struct idpf_vport *vport)
{
struct idpf_adapter *adapter = vport->adapter;
struct idpf_vport_config *vport_config;
+ netdev_features_t other_offloads = 0;
+ netdev_features_t csum_offloads = 0;
+ netdev_features_t tso_offloads = 0;
netdev_features_t dflt_features;
- netdev_features_t offloads = 0;
struct idpf_netdev_priv *np;
struct net_device *netdev;
u16 idx = vport->idx;
@@ -721,6 +708,7 @@ static int idpf_cfg_netdev(struct idpf_vport *vport)
np->vport = vport;
np->vport_idx = vport->idx;
np->vport_id = vport->vport_id;
+ np->max_tx_hdr_size = idpf_get_max_tx_hdr_size(adapter);
vport->netdev = netdev;
return idpf_init_mac_addr(vport, netdev);
@@ -738,6 +726,7 @@ static int idpf_cfg_netdev(struct idpf_vport *vport)
np->adapter = adapter;
np->vport_idx = vport->idx;
np->vport_id = vport->vport_id;
+ np->max_tx_hdr_size = idpf_get_max_tx_hdr_size(adapter);
spin_lock_init(&np->stats_lock);
@@ -766,54 +755,34 @@ static int idpf_cfg_netdev(struct idpf_vport *vport)
if (idpf_is_cap_ena_all(adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS))
dflt_features |= NETIF_F_RXHASH;
- if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM_L4V4))
- dflt_features |= NETIF_F_IP_CSUM;
- if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM_L4V6))
- dflt_features |= NETIF_F_IPV6_CSUM;
+ if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_TX_CSUM_L4V4))
+ csum_offloads |= NETIF_F_IP_CSUM;
+ if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_TX_CSUM_L4V6))
+ csum_offloads |= NETIF_F_IPV6_CSUM;
if (idpf_is_cap_ena(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM))
- dflt_features |= NETIF_F_RXCSUM;
- if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_SCTP_CSUM))
- dflt_features |= NETIF_F_SCTP_CRC;
+ csum_offloads |= NETIF_F_RXCSUM;
+ if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_TX_SCTP_CSUM))
+ csum_offloads |= NETIF_F_SCTP_CRC;
if (idpf_is_cap_ena(adapter, IDPF_SEG_CAPS, VIRTCHNL2_CAP_SEG_IPV4_TCP))
- dflt_features |= NETIF_F_TSO;
+ tso_offloads |= NETIF_F_TSO;
if (idpf_is_cap_ena(adapter, IDPF_SEG_CAPS, VIRTCHNL2_CAP_SEG_IPV6_TCP))
- dflt_features |= NETIF_F_TSO6;
+ tso_offloads |= NETIF_F_TSO6;
if (idpf_is_cap_ena_all(adapter, IDPF_SEG_CAPS,
VIRTCHNL2_CAP_SEG_IPV4_UDP |
VIRTCHNL2_CAP_SEG_IPV6_UDP))
- dflt_features |= NETIF_F_GSO_UDP_L4;
+ tso_offloads |= NETIF_F_GSO_UDP_L4;
if (idpf_is_cap_ena_all(adapter, IDPF_RSC_CAPS, IDPF_CAP_RSC))
- offloads |= NETIF_F_GRO_HW;
- /* advertise to stack only if offloads for encapsulated packets is
- * supported
- */
- if (idpf_is_cap_ena(vport->adapter, IDPF_SEG_CAPS,
- VIRTCHNL2_CAP_SEG_TX_SINGLE_TUNNEL)) {
- offloads |= NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_GSO_GRE |
- NETIF_F_GSO_GRE_CSUM |
- NETIF_F_GSO_PARTIAL |
- NETIF_F_GSO_UDP_TUNNEL_CSUM |
- NETIF_F_GSO_IPXIP4 |
- NETIF_F_GSO_IPXIP6 |
- 0;
-
- if (!idpf_is_cap_ena_all(vport->adapter, IDPF_CSUM_CAPS,
- IDPF_CAP_TUNNEL_TX_CSUM))
- netdev->gso_partial_features |=
- NETIF_F_GSO_UDP_TUNNEL_CSUM;
-
- netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
- offloads |= NETIF_F_TSO_MANGLEID;
- }
+ other_offloads |= NETIF_F_GRO_HW;
if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_LOOPBACK))
- offloads |= NETIF_F_LOOPBACK;
+ other_offloads |= NETIF_F_LOOPBACK;
- netdev->features |= dflt_features;
- netdev->hw_features |= dflt_features | offloads;
- netdev->hw_enc_features |= dflt_features | offloads;
+ netdev->features |= dflt_features | csum_offloads | tso_offloads;
+ netdev->hw_features |= netdev->features | other_offloads;
+ netdev->vlan_features |= netdev->features | other_offloads;
+ netdev->hw_enc_features |= dflt_features | other_offloads;
idpf_set_ethtool_ops(netdev);
+ netif_set_affinity_auto(netdev);
SET_NETDEV_DEV(netdev, &adapter->pdev->dev);
/* carrier off on init to avoid Tx hangs */
@@ -927,15 +896,19 @@ static int idpf_stop(struct net_device *netdev)
static void idpf_decfg_netdev(struct idpf_vport *vport)
{
struct idpf_adapter *adapter = vport->adapter;
+ u16 idx = vport->idx;
kfree(vport->rx_ptype_lkup);
vport->rx_ptype_lkup = NULL;
- unregister_netdev(vport->netdev);
- free_netdev(vport->netdev);
+ if (test_and_clear_bit(IDPF_VPORT_REG_NETDEV,
+ adapter->vport_config[idx]->flags)) {
+ unregister_netdev(vport->netdev);
+ free_netdev(vport->netdev);
+ }
vport->netdev = NULL;
- adapter->netdevs[vport->idx] = NULL;
+ adapter->netdevs[idx] = NULL;
}
/**
@@ -1127,11 +1100,9 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
num_max_q = max(max_q->max_txq, max_q->max_rxq);
vport->q_vector_idxs = kcalloc(num_max_q, sizeof(u16), GFP_KERNEL);
- if (!vport->q_vector_idxs) {
- kfree(vport);
+ if (!vport->q_vector_idxs)
+ goto free_vport;
- return NULL;
- }
idpf_vport_init(vport, max_q);
/* This alloc is done separate from the LUT because it's not strictly
@@ -1141,11 +1112,9 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
*/
rss_data = &adapter->vport_config[idx]->user_config.rss_data;
rss_data->rss_key = kzalloc(rss_data->rss_key_size, GFP_KERNEL);
- if (!rss_data->rss_key) {
- kfree(vport);
+ if (!rss_data->rss_key)
+ goto free_vector_idxs;
- return NULL;
- }
/* Initialize default rss key */
netdev_rss_key_fill((void *)rss_data->rss_key, rss_data->rss_key_size);
@@ -1158,6 +1127,13 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
adapter->next_vport = idpf_get_free_slot(adapter);
return vport;
+
+free_vector_idxs:
+ kfree(vport->q_vector_idxs);
+free_vport:
+ kfree(vport);
+
+ return NULL;
}
/**
@@ -1536,13 +1512,22 @@ void idpf_init_task(struct work_struct *work)
}
for (index = 0; index < adapter->max_vports; index++) {
- if (adapter->netdevs[index] &&
- !test_bit(IDPF_VPORT_REG_NETDEV,
- adapter->vport_config[index]->flags)) {
- register_netdev(adapter->netdevs[index]);
- set_bit(IDPF_VPORT_REG_NETDEV,
- adapter->vport_config[index]->flags);
+ struct net_device *netdev = adapter->netdevs[index];
+ struct idpf_vport_config *vport_config;
+
+ vport_config = adapter->vport_config[index];
+
+ if (!netdev ||
+ test_bit(IDPF_VPORT_REG_NETDEV, vport_config->flags))
+ continue;
+
+ err = register_netdev(netdev);
+ if (err) {
+ dev_err(&pdev->dev, "failed to register netdev for vport %d: %pe\n",
+ index, ERR_PTR(err));
+ continue;
}
+ set_bit(IDPF_VPORT_REG_NETDEV, vport_config->flags);
}
/* As all the required vports are created, clear the reset flag
@@ -1816,11 +1801,19 @@ void idpf_vc_event_task(struct work_struct *work)
if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags))
return;
- if (test_bit(IDPF_HR_FUNC_RESET, adapter->flags) ||
- test_bit(IDPF_HR_DRV_LOAD, adapter->flags)) {
- set_bit(IDPF_HR_RESET_IN_PROG, adapter->flags);
- idpf_init_hard_reset(adapter);
- }
+ if (test_bit(IDPF_HR_FUNC_RESET, adapter->flags))
+ goto func_reset;
+
+ if (test_bit(IDPF_HR_DRV_LOAD, adapter->flags))
+ goto drv_load;
+
+ return;
+
+func_reset:
+ idpf_vc_xn_shutdown(adapter->vcxn_mngr);
+drv_load:
+ set_bit(IDPF_HR_RESET_IN_PROG, adapter->flags);
+ idpf_init_hard_reset(adapter);
}
/**
@@ -2159,8 +2152,13 @@ static int idpf_open(struct net_device *netdev)
idpf_vport_ctrl_lock(netdev);
vport = idpf_netdev_to_vport(netdev);
+ err = idpf_set_real_num_queues(vport);
+ if (err)
+ goto unlock;
+
err = idpf_vport_open(vport);
+unlock:
idpf_vport_ctrl_unlock(netdev);
return err;
@@ -2200,8 +2198,8 @@ static netdev_features_t idpf_features_check(struct sk_buff *skb,
struct net_device *netdev,
netdev_features_t features)
{
- struct idpf_vport *vport = idpf_netdev_to_vport(netdev);
- struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+ u16 max_tx_hdr_size = np->max_tx_hdr_size;
size_t len;
/* No point in doing any of this if neither checksum nor GSO are
@@ -2224,7 +2222,7 @@ static netdev_features_t idpf_features_check(struct sk_buff *skb,
goto unsupported;
len = skb_network_header_len(skb);
- if (unlikely(len > idpf_get_max_tx_hdr_size(adapter)))
+ if (unlikely(len > max_tx_hdr_size))
goto unsupported;
if (!skb->encapsulation)
@@ -2237,7 +2235,7 @@ static netdev_features_t idpf_features_check(struct sk_buff *skb,
/* IPLEN can support at most 127 dwords */
len = skb_inner_network_header_len(skb);
- if (unlikely(len > idpf_get_max_tx_hdr_size(adapter)))
+ if (unlikely(len > max_tx_hdr_size))
goto unsupported;
/* No need to validate L4LEN as TCP is the only protocol with a
@@ -2316,8 +2314,12 @@ void *idpf_alloc_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem, u64 size)
struct idpf_adapter *adapter = hw->back;
size_t sz = ALIGN(size, 4096);
- mem->va = dma_alloc_coherent(&adapter->pdev->dev, sz,
- &mem->pa, GFP_KERNEL);
+	/* The control queue resources are freed under a spinlock; contiguous
+	 * pages avoid IOMMU remapping and the use of vmap() (and of vunmap()
+	 * in the dma_free_*() path).
+ */
+ mem->va = dma_alloc_attrs(&adapter->pdev->dev, sz, &mem->pa,
+ GFP_KERNEL, DMA_ATTR_FORCE_CONTIGUOUS);
mem->size = sz;
return mem->va;
@@ -2332,13 +2334,67 @@ void idpf_free_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem)
{
struct idpf_adapter *adapter = hw->back;
- dma_free_coherent(&adapter->pdev->dev, mem->size,
- mem->va, mem->pa);
+ dma_free_attrs(&adapter->pdev->dev, mem->size,
+ mem->va, mem->pa, DMA_ATTR_FORCE_CONTIGUOUS);
mem->size = 0;
mem->va = NULL;
mem->pa = 0;
}
+static int idpf_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ struct idpf_vport *vport;
+ int err;
+
+ idpf_vport_ctrl_lock(netdev);
+ vport = idpf_netdev_to_vport(netdev);
+
+ if (!vport->link_up) {
+ idpf_vport_ctrl_unlock(netdev);
+ return -EPERM;
+ }
+
+ if (!idpf_ptp_is_vport_tx_tstamp_ena(vport) &&
+ !idpf_ptp_is_vport_rx_tstamp_ena(vport)) {
+ idpf_vport_ctrl_unlock(netdev);
+ return -EOPNOTSUPP;
+ }
+
+ err = idpf_ptp_set_timestamp_mode(vport, config);
+
+ idpf_vport_ctrl_unlock(netdev);
+
+ return err;
+}
+
+static int idpf_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
+{
+ struct idpf_vport *vport;
+
+ idpf_vport_ctrl_lock(netdev);
+ vport = idpf_netdev_to_vport(netdev);
+
+ if (!vport->link_up) {
+ idpf_vport_ctrl_unlock(netdev);
+ return -EPERM;
+ }
+
+ if (!idpf_ptp_is_vport_tx_tstamp_ena(vport) &&
+ !idpf_ptp_is_vport_rx_tstamp_ena(vport)) {
+ idpf_vport_ctrl_unlock(netdev);
+ return 0;
+ }
+
+ *config = vport->tstamp_config;
+
+ idpf_vport_ctrl_unlock(netdev);
+
+ return 0;
+}
+
static const struct net_device_ops idpf_netdev_ops = {
.ndo_open = idpf_open,
.ndo_stop = idpf_stop,
@@ -2351,4 +2407,6 @@ static const struct net_device_ops idpf_netdev_ops = {
.ndo_get_stats64 = idpf_get_stats64,
.ndo_set_features = idpf_set_features,
.ndo_tx_timeout = idpf_tx_timeout,
+ .ndo_hwtstamp_get = idpf_hwtstamp_get,
+ .ndo_hwtstamp_set = idpf_hwtstamp_set,
};
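
The allocator change above trades dma_alloc_coherent() for dma_alloc_attrs() with DMA_ATTR_FORCE_CONTIGUOUS so that freeing control queue memory under the new cq_lock spinlock never hits a sleeping vunmap(). A hedged sketch of the paired alloc/free calls; the names are illustrative.

#include <linux/dma-mapping.h>

static void *example_ctlq_alloc(struct device *dev, size_t size,
				dma_addr_t *pa)
{
	/* Physically contiguous backing avoids a vmap()'d area that the
	 * free path would otherwise have to vunmap(), which can sleep.
	 */
	return dma_alloc_attrs(dev, size, pa, GFP_KERNEL,
			       DMA_ATTR_FORCE_CONTIGUOUS);
}

static void example_ctlq_free(struct device *dev, size_t size, void *va,
			      dma_addr_t pa)
{
	/* The attrs passed at allocation time must be passed back here. */
	dma_free_attrs(dev, size, va, pa, DMA_ATTR_FORCE_CONTIGUOUS);
}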
diff --git a/drivers/net/ethernet/intel/idpf/idpf_main.c b/drivers/net/ethernet/intel/idpf/idpf_main.c
index db476b3314c8..0efd9c0c7a90 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_main.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_main.c
@@ -8,7 +8,7 @@
#define DRV_SUMMARY "Intel(R) Infrastructure Data Path Function Linux Driver"
MODULE_DESCRIPTION(DRV_SUMMARY);
-MODULE_IMPORT_NS(LIBETH);
+MODULE_IMPORT_NS("LIBETH");
MODULE_LICENSE("GPL");
/**
@@ -87,7 +87,12 @@ destroy_wqs:
*/
static void idpf_shutdown(struct pci_dev *pdev)
{
- idpf_remove(pdev);
+ struct idpf_adapter *adapter = pci_get_drvdata(pdev);
+
+ cancel_delayed_work_sync(&adapter->serv_task);
+ cancel_delayed_work_sync(&adapter->vc_event_task);
+ idpf_vc_core_deinit(adapter);
+ idpf_deinit_dflt_mbx(adapter);
if (system_state == SYSTEM_POWER_OFF)
pci_set_power_state(pdev, PCI_D3hot);
@@ -163,6 +168,10 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_free;
}
+ err = pci_enable_ptm(pdev, NULL);
+ if (err)
+ pci_dbg(pdev, "PCIe PTM is not supported by PCIe bus/controller\n");
+
/* set up for high or low dma */
err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
if (err) {
@@ -174,7 +183,8 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);
pci_set_drvdata(pdev, adapter);
- adapter->init_wq = alloc_workqueue("%s-%s-init", 0, 0,
+ adapter->init_wq = alloc_workqueue("%s-%s-init",
+ WQ_UNBOUND | WQ_MEM_RECLAIM, 0,
dev_driver_string(dev),
dev_name(dev));
if (!adapter->init_wq) {
@@ -183,7 +193,8 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_free;
}
- adapter->serv_wq = alloc_workqueue("%s-%s-service", 0, 0,
+ adapter->serv_wq = alloc_workqueue("%s-%s-service",
+ WQ_UNBOUND | WQ_MEM_RECLAIM, 0,
dev_driver_string(dev),
dev_name(dev));
if (!adapter->serv_wq) {
@@ -192,8 +203,8 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_serv_wq_alloc;
}
- adapter->mbx_wq = alloc_workqueue("%s-%s-mbx", 0, 0,
- dev_driver_string(dev),
+ adapter->mbx_wq = alloc_workqueue("%s-%s-mbx", WQ_UNBOUND | WQ_HIGHPRI,
+ 0, dev_driver_string(dev),
dev_name(dev));
if (!adapter->mbx_wq) {
dev_err(dev, "Failed to allocate mailbox workqueue\n");
@@ -201,7 +212,8 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_mbx_wq_alloc;
}
- adapter->stats_wq = alloc_workqueue("%s-%s-stats", 0, 0,
+ adapter->stats_wq = alloc_workqueue("%s-%s-stats",
+ WQ_UNBOUND | WQ_MEM_RECLAIM, 0,
dev_driver_string(dev),
dev_name(dev));
if (!adapter->stats_wq) {
@@ -210,7 +222,8 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_stats_wq_alloc;
}
- adapter->vc_event_wq = alloc_workqueue("%s-%s-vc_event", 0, 0,
+ adapter->vc_event_wq = alloc_workqueue("%s-%s-vc_event",
+ WQ_UNBOUND | WQ_MEM_RECLAIM, 0,
dev_driver_string(dev),
dev_name(dev));
if (!adapter->vc_event_wq) {
diff --git a/drivers/net/ethernet/intel/idpf/idpf_ptp.c b/drivers/net/ethernet/intel/idpf/idpf_ptp.c
new file mode 100644
index 000000000000..4f8725c85332
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/idpf_ptp.c
@@ -0,0 +1,873 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2024 Intel Corporation */
+
+#include "idpf.h"
+#include "idpf_ptp.h"
+
+/**
+ * idpf_ptp_get_access - Determine the access type of the PTP features
+ * @adapter: Driver specific private structure
+ * @direct: Capability that indicates the direct access
+ * @mailbox: Capability that indicates the mailbox access
+ *
+ * Return: the type of supported access for the PTP feature.
+ */
+static enum idpf_ptp_access
+idpf_ptp_get_access(const struct idpf_adapter *adapter, u32 direct, u32 mailbox)
+{
+ if (adapter->ptp->caps & direct)
+ return IDPF_PTP_DIRECT;
+ else if (adapter->ptp->caps & mailbox)
+ return IDPF_PTP_MAILBOX;
+ else
+ return IDPF_PTP_NONE;
+}
+
+/**
+ * idpf_ptp_get_features_access - Determine the access type of PTP features
+ * @adapter: Driver specific private structure
+ *
+ * Fulfill the adapter structure with type of the supported PTP features
+ * access.
+ */
+void idpf_ptp_get_features_access(const struct idpf_adapter *adapter)
+{
+ struct idpf_ptp *ptp = adapter->ptp;
+ u32 direct, mailbox;
+
+ /* Get the device clock time */
+ direct = VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME;
+ mailbox = VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME_MB;
+ ptp->get_dev_clk_time_access = idpf_ptp_get_access(adapter,
+ direct,
+ mailbox);
+
+ /* Set the device clock time */
+ direct = VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME;
+	mailbox = VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME_MB;
+ ptp->set_dev_clk_time_access = idpf_ptp_get_access(adapter,
+ direct,
+ mailbox);
+
+ /* Adjust the device clock time */
+ direct = VIRTCHNL2_CAP_PTP_ADJ_DEVICE_CLK;
+ mailbox = VIRTCHNL2_CAP_PTP_ADJ_DEVICE_CLK_MB;
+ ptp->adj_dev_clk_time_access = idpf_ptp_get_access(adapter,
+ direct,
+ mailbox);
+
+ /* Tx timestamping */
+ direct = VIRTCHNL2_CAP_PTP_TX_TSTAMPS;
+ mailbox = VIRTCHNL2_CAP_PTP_TX_TSTAMPS_MB;
+ ptp->tx_tstamp_access = idpf_ptp_get_access(adapter,
+ direct,
+ mailbox);
+}
+
+/**
+ * idpf_ptp_enable_shtime - Enable shadow time and execute a command
+ * @adapter: Driver specific private structure
+ */
+static void idpf_ptp_enable_shtime(struct idpf_adapter *adapter)
+{
+ u32 shtime_enable, exec_cmd;
+
+ /* Get offsets */
+ shtime_enable = adapter->ptp->cmd.shtime_enable_mask;
+ exec_cmd = adapter->ptp->cmd.exec_cmd_mask;
+
+ /* Set the shtime en and the sync field */
+ writel(shtime_enable, adapter->ptp->dev_clk_regs.cmd_sync);
+ writel(exec_cmd | shtime_enable, adapter->ptp->dev_clk_regs.cmd_sync);
+}
+
+/**
+ * idpf_ptp_read_src_clk_reg_direct - Read directly the main timer value
+ * @adapter: Driver specific private structure
+ * @sts: Optional parameter for holding a pair of system timestamps from
+ * the system clock. Will be ignored when NULL is given.
+ *
+ * Return: the device clock time.
+ */
+static u64 idpf_ptp_read_src_clk_reg_direct(struct idpf_adapter *adapter,
+ struct ptp_system_timestamp *sts)
+{
+ struct idpf_ptp *ptp = adapter->ptp;
+ u32 hi, lo;
+
+ spin_lock(&ptp->read_dev_clk_lock);
+
+ /* Read the system timestamp pre PHC read */
+ ptp_read_system_prets(sts);
+
+ idpf_ptp_enable_shtime(adapter);
+
+ /* Read the system timestamp post PHC read */
+ ptp_read_system_postts(sts);
+
+ lo = readl(ptp->dev_clk_regs.dev_clk_ns_l);
+ hi = readl(ptp->dev_clk_regs.dev_clk_ns_h);
+
+ spin_unlock(&ptp->read_dev_clk_lock);
+
+ return ((u64)hi << 32) | lo;
+}
+
+/**
+ * idpf_ptp_read_src_clk_reg_mailbox - Read the main timer value through mailbox
+ * @adapter: Driver specific private structure
+ * @sts: Optional parameter for holding a pair of system timestamps from
+ * the system clock. Will be ignored when NULL is given.
+ * @src_clk: Returned main timer value in nanoseconds unit
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int idpf_ptp_read_src_clk_reg_mailbox(struct idpf_adapter *adapter,
+ struct ptp_system_timestamp *sts,
+ u64 *src_clk)
+{
+ struct idpf_ptp_dev_timers clk_time;
+ int err;
+
+ /* Read the system timestamp pre PHC read */
+ ptp_read_system_prets(sts);
+
+ err = idpf_ptp_get_dev_clk_time(adapter, &clk_time);
+ if (err)
+ return err;
+
+ /* Read the system timestamp post PHC read */
+ ptp_read_system_postts(sts);
+
+ *src_clk = clk_time.dev_clk_time_ns;
+
+ return 0;
+}
+
+/**
+ * idpf_ptp_read_src_clk_reg - Read the main timer value
+ * @adapter: Driver specific private structure
+ * @src_clk: Returned main timer value in nanoseconds unit
+ * @sts: Optional parameter for holding a pair of system timestamps from
+ * the system clock. Will be ignored if NULL is given.
+ *
+ * Return: 0 on success, with the device clock time returned via @src_clk;
+ * -errno otherwise.
+ */
+static int idpf_ptp_read_src_clk_reg(struct idpf_adapter *adapter, u64 *src_clk,
+ struct ptp_system_timestamp *sts)
+{
+ switch (adapter->ptp->get_dev_clk_time_access) {
+ case IDPF_PTP_NONE:
+ return -EOPNOTSUPP;
+ case IDPF_PTP_MAILBOX:
+ return idpf_ptp_read_src_clk_reg_mailbox(adapter, sts, src_clk);
+ case IDPF_PTP_DIRECT:
+ *src_clk = idpf_ptp_read_src_clk_reg_direct(adapter, sts);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+/**
+ * idpf_ptp_gettimex64 - Get the time of the clock
+ * @info: the driver's PTP info structure
+ * @ts: timespec64 structure to hold the current time value
+ * @sts: Optional parameter for holding a pair of system timestamps from
+ * the system clock. Will be ignored if NULL is given.
+ *
+ * Return: 0 on success, with the device clock value converted into the
+ * timespec struct @ts; -errno otherwise.
+ */
+static int idpf_ptp_gettimex64(struct ptp_clock_info *info,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct idpf_adapter *adapter = idpf_ptp_info_to_adapter(info);
+ u64 time_ns;
+ int err;
+
+ err = idpf_ptp_read_src_clk_reg(adapter, &time_ns, sts);
+ if (err)
+ return -EACCES;
+
+ *ts = ns_to_timespec64(time_ns);
+
+ return 0;
+}
+
+/**
+ * idpf_ptp_update_phctime_rxq_grp - Update the cached PHC time for a given Rx
+ * queue group.
+ * @grp: receive queue group in which Rx timestamp is enabled
+ * @split: Indicates whether the queue model is split or single queue
+ * @systime: Cached system time
+ */
+static void
+idpf_ptp_update_phctime_rxq_grp(const struct idpf_rxq_group *grp, bool split,
+ u64 systime)
+{
+ struct idpf_rx_queue *rxq;
+ u16 i;
+
+ if (!split) {
+ for (i = 0; i < grp->singleq.num_rxq; i++) {
+ rxq = grp->singleq.rxqs[i];
+ if (rxq)
+ WRITE_ONCE(rxq->cached_phc_time, systime);
+ }
+ } else {
+ for (i = 0; i < grp->splitq.num_rxq_sets; i++) {
+ rxq = &grp->splitq.rxq_sets[i]->rxq;
+ if (rxq)
+ WRITE_ONCE(rxq->cached_phc_time, systime);
+ }
+ }
+}
+
+/**
+ * idpf_ptp_update_cached_phctime - Update the cached PHC time values
+ * @adapter: Driver specific private structure
+ *
+ * This function updates the system time values which are cached in the adapter
+ * structure and the Rx queues.
+ *
+ * This function must be called periodically to ensure that the cached value
+ * is never more than 2 seconds old.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int idpf_ptp_update_cached_phctime(struct idpf_adapter *adapter)
+{
+ u64 systime;
+ int err;
+
+ err = idpf_ptp_read_src_clk_reg(adapter, &systime, NULL);
+ if (err)
+ return -EACCES;
+
+ /* Update the cached PHC time stored in the adapter structure.
+ * These values are used to extend Tx timestamp values to 64 bit
+ * expected by the stack.
+ */
+ WRITE_ONCE(adapter->ptp->cached_phc_time, systime);
+ WRITE_ONCE(adapter->ptp->cached_phc_jiffies, jiffies);
+
+ idpf_for_each_vport(adapter, vport) {
+ bool split;
+
+ if (!vport || !vport->rxq_grps)
+ continue;
+
+ split = idpf_is_queue_model_split(vport->rxq_model);
+
+ for (u16 i = 0; i < vport->num_rxq_grp; i++) {
+ struct idpf_rxq_group *grp = &vport->rxq_grps[i];
+
+ idpf_ptp_update_phctime_rxq_grp(grp, split, systime);
+ }
+ }
+
+ return 0;
+}
+
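The 2 second bound follows from the 32-bit nanosecond timestamps: the low 32 bits wrap roughly every 4.29 s, and the extension in idpf_ptp_tstamp_extend_32b_to_64b() is only unambiguous while the cached PHC time is within half a wrap of the capture. A quick arithmetic check (editorial illustration, not part of the patch):

#include <stdio.h>

int main(void)
{
	unsigned long long wrap_ns = 1ULL << 32;	/* period of a 32-bit ns field */

	/* ~4.295 s per wrap, so a cache < 2 s old keeps |delta| < U32_MAX / 2 */
	printf("wrap %.3f s, unambiguous window +/- %.3f s\n",
	       wrap_ns / 1e9, wrap_ns / 2e9);
	return 0;
}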
+/**
+ * idpf_ptp_settime64 - Set the time of the clock
+ * @info: the driver's PTP info structure
+ * @ts: timespec64 structure that holds the new time value
+ *
+ * Set the device clock to the user input value. The conversion from timespec
+ * to ns happens in the write function.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int idpf_ptp_settime64(struct ptp_clock_info *info,
+ const struct timespec64 *ts)
+{
+ struct idpf_adapter *adapter = idpf_ptp_info_to_adapter(info);
+ enum idpf_ptp_access access;
+ int err;
+ u64 ns;
+
+ access = adapter->ptp->set_dev_clk_time_access;
+ if (access != IDPF_PTP_MAILBOX)
+ return -EOPNOTSUPP;
+
+ ns = timespec64_to_ns(ts);
+
+ err = idpf_ptp_set_dev_clk_time(adapter, ns);
+ if (err) {
+ pci_err(adapter->pdev, "Failed to set the time, err: %pe\n",
+ ERR_PTR(err));
+ return err;
+ }
+
+ err = idpf_ptp_update_cached_phctime(adapter);
+ if (err)
+ pci_warn(adapter->pdev,
+ "Unable to immediately update cached PHC time\n");
+
+ return 0;
+}
+
+/**
+ * idpf_ptp_adjtime_nonatomic - Do a non-atomic clock adjustment
+ * @info: the driver's PTP info structure
+ * @delta: Offset in nanoseconds to adjust the time by
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int idpf_ptp_adjtime_nonatomic(struct ptp_clock_info *info, s64 delta)
+{
+ struct timespec64 now, then;
+ int err;
+
+ err = idpf_ptp_gettimex64(info, &now, NULL);
+ if (err)
+ return err;
+
+ then = ns_to_timespec64(delta);
+ now = timespec64_add(now, then);
+
+ return idpf_ptp_settime64(info, &now);
+}
+
+/**
+ * idpf_ptp_adjtime - Adjust the time of the clock by the indicated delta
+ * @info: the driver's PTP info structure
+ * @delta: Offset in nanoseconds to adjust the time by
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int idpf_ptp_adjtime(struct ptp_clock_info *info, s64 delta)
+{
+ struct idpf_adapter *adapter = idpf_ptp_info_to_adapter(info);
+ enum idpf_ptp_access access;
+ int err;
+
+ access = adapter->ptp->adj_dev_clk_time_access;
+ if (access != IDPF_PTP_MAILBOX)
+ return -EOPNOTSUPP;
+
+ /* Hardware only supports atomic adjustments using signed 32-bit
+ * integers. For any adjustment outside this range, perform
+ * a non-atomic get->adjust->set flow.
+ */
+ if (delta > S32_MAX || delta < S32_MIN)
+ return idpf_ptp_adjtime_nonatomic(info, delta);
+
+ err = idpf_ptp_adj_dev_clk_time(adapter, delta);
+ if (err) {
+ pci_err(adapter->pdev, "Failed to adjust the clock with delta %lld err: %pe\n",
+ delta, ERR_PTR(err));
+ return err;
+ }
+
+ err = idpf_ptp_update_cached_phctime(adapter);
+ if (err)
+ pci_warn(adapter->pdev,
+ "Unable to immediately update cached PHC time\n");
+
+ return 0;
+}
+
+/**
+ * idpf_ptp_adjfine - Adjust clock increment rate
+ * @info: the driver's PTP info structure
+ * @scaled_ppm: Parts per million with 16-bit fractional field
+ *
+ * Adjust the frequency of the clock by the indicated scaled ppm from the
+ * base frequency.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int idpf_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm)
+{
+ struct idpf_adapter *adapter = idpf_ptp_info_to_adapter(info);
+ enum idpf_ptp_access access;
+ u64 incval, diff;
+ int err;
+
+ access = adapter->ptp->adj_dev_clk_time_access;
+ if (access != IDPF_PTP_MAILBOX)
+ return -EOPNOTSUPP;
+
+ incval = adapter->ptp->base_incval;
+
+ diff = adjust_by_scaled_ppm(incval, scaled_ppm);
+ err = idpf_ptp_adj_dev_clk_fine(adapter, diff);
+ if (err)
+ pci_err(adapter->pdev, "Failed to adjust clock increment rate for scaled ppm %ld %pe\n",
+ scaled_ppm, ERR_PTR(err));
+
+ return 0;
+}
+
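For reference, scaled_ppm is parts-per-million with a 16-bit fractional field, so 1 ppm corresponds to 65536, and adjust_by_scaled_ppm() scales the base increment by that fraction. A standalone sketch of the same math (uses the GCC/Clang __int128 extension to avoid overflow; an illustration only, not the kernel helper):

#include <assert.h>
#include <stdlib.h>

static unsigned long long scaled_ppm_adjust(unsigned long long base,
					    long scaled_ppm)
{
	/* base * |scaled_ppm| / (1e6 << 16), i.e. the fractional frequency offset */
	unsigned long long diff = (unsigned __int128)base * labs(scaled_ppm) /
				  (1000000ULL << 16);

	return scaled_ppm < 0 ? base - diff : base + diff;
}

int main(void)
{
	/* +1 ppm (scaled_ppm == 65536) on a base of 2^32 adds 2^32 / 1e6 ~= 4294 */
	assert(scaled_ppm_adjust(1ULL << 32, 65536) == (1ULL << 32) + 4294);
	return 0;
}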
+/**
+ * idpf_ptp_verify_pin - Verify if pin supports requested pin function
+ * @info: the driver's PTP info structure
+ * @pin: Pin index
+ * @func: Assigned function
+ * @chan: Assigned channel
+ *
+ * Return: -EOPNOTSUPP as not supported yet.
+ */
+static int idpf_ptp_verify_pin(struct ptp_clock_info *info, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
+ * idpf_ptp_gpio_enable - Enable/disable ancillary features of PHC
+ * @info: the driver's PTP info structure
+ * @rq: The requested feature to change
+ * @on: Enable/disable flag
+ *
+ * Return: -EOPNOTSUPP as not supported yet.
+ */
+static int idpf_ptp_gpio_enable(struct ptp_clock_info *info,
+ struct ptp_clock_request *rq, int on)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
+ * idpf_ptp_tstamp_extend_32b_to_64b - Convert a 32b nanoseconds Tx or Rx
+ * timestamp value to 64b.
+ * @cached_phc_time: recently cached copy of PHC time
+ * @in_timestamp: Ingress/egress 32b nanoseconds timestamp value
+ *
+ * Hardware captures timestamps which contain only 32 bits of nominal
+ * nanoseconds, as opposed to the 64bit timestamps that the stack expects.
+ *
+ * Return: Tx timestamp value extended to 64 bits based on cached PHC time.
+ */
+u64 idpf_ptp_tstamp_extend_32b_to_64b(u64 cached_phc_time, u32 in_timestamp)
+{
+ u32 delta, phc_time_lo;
+ u64 ns;
+
+ /* Extract the lower 32 bits of the PHC time */
+ phc_time_lo = (u32)cached_phc_time;
+
+ /* Calculate the delta between the lower 32bits of the cached PHC
+ * time and the in_timestamp value.
+ */
+ delta = in_timestamp - phc_time_lo;
+
+ if (delta > U32_MAX / 2) {
+ /* Reverse the delta calculation here */
+ delta = phc_time_lo - in_timestamp;
+ ns = cached_phc_time - delta;
+ } else {
+ ns = cached_phc_time + delta;
+ }
+
+ return ns;
+}
+
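The extension above effectively picks the 64-bit time whose low 32 bits match the hardware timestamp and which lies closest to the cached PHC time, which is what makes it safe across a 32-bit wrap. A standalone check of the same logic (editorial sketch, not driver code):

#include <assert.h>
#include <stdint.h>

static uint64_t extend_32b_to_64b(uint64_t cached_phc_time, uint32_t in_ts)
{
	uint32_t phc_lo = (uint32_t)cached_phc_time;
	uint32_t delta = in_ts - phc_lo;

	if (delta > UINT32_MAX / 2)
		return cached_phc_time - (uint32_t)(phc_lo - in_ts);

	return cached_phc_time + delta;
}

int main(void)
{
	/* Timestamp captured just after the low 32 bits wrapped: extends
	 * forward past the wrap instead of landing ~4.29 s in the past.
	 */
	assert(extend_32b_to_64b(0x2FFFFFFF0ULL, 0x10) == 0x300000010ULL);
	/* Timestamp slightly older than the cached PHC time */
	assert(extend_32b_to_64b(0x300000010ULL, 0xFFFFFFF0) == 0x2FFFFFFF0ULL);
	return 0;
}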
+/**
+ * idpf_ptp_extend_ts - Convert a 40b timestamp to 64b nanoseconds
+ * @vport: Virtual port structure
+ * @in_tstamp: Ingress/egress timestamp value
+ *
+ * It is assumed that the caller verifies the timestamp is valid prior to
+ * calling this function.
+ *
+ * Extract the 32bit nominal nanoseconds and extend them. Use the cached PHC
+ * time stored in the device private PTP structure as the basis for timestamp
+ * extension.
+ *
+ * Return: Tx timestamp value extended to 64 bits.
+ */
+u64 idpf_ptp_extend_ts(struct idpf_vport *vport, u64 in_tstamp)
+{
+ struct idpf_ptp *ptp = vport->adapter->ptp;
+ unsigned long discard_time;
+
+ discard_time = ptp->cached_phc_jiffies + 2 * HZ;
+
+ if (time_is_before_jiffies(discard_time))
+ return 0;
+
+ return idpf_ptp_tstamp_extend_32b_to_64b(ptp->cached_phc_time,
+ lower_32_bits(in_tstamp));
+}
+
+/**
+ * idpf_ptp_request_ts - Request an available Tx timestamp index
+ * @tx_q: Transmit queue on which the Tx timestamp is requested
+ * @skb: The SKB to associate with this timestamp request
+ * @idx: Index of the Tx timestamp latch
+ *
+ * Request a Tx timestamp index negotiated during PTP init; the index will be
+ * set into the Tx descriptor.
+ *
+ * Return: 0 on success, with the latch index to be provided to the Tx
+ * descriptor returned via @idx, -errno otherwise.
+ */
+int idpf_ptp_request_ts(struct idpf_tx_queue *tx_q, struct sk_buff *skb,
+ u32 *idx)
+{
+ struct idpf_ptp_tx_tstamp *ptp_tx_tstamp;
+ struct list_head *head;
+
+ /* Get the index from the free latches list */
+ spin_lock(&tx_q->cached_tstamp_caps->latches_lock);
+
+ head = &tx_q->cached_tstamp_caps->latches_free;
+ if (list_empty(head)) {
+ spin_unlock(&tx_q->cached_tstamp_caps->latches_lock);
+ return -ENOBUFS;
+ }
+
+ ptp_tx_tstamp = list_first_entry(head, struct idpf_ptp_tx_tstamp,
+ list_member);
+ list_del(&ptp_tx_tstamp->list_member);
+
+ ptp_tx_tstamp->skb = skb_get(skb);
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
+ /* Move the element to the used latches list */
+ list_add(&ptp_tx_tstamp->list_member,
+ &tx_q->cached_tstamp_caps->latches_in_use);
+ spin_unlock(&tx_q->cached_tstamp_caps->latches_lock);
+
+ *idx = ptp_tx_tstamp->idx;
+
+ return 0;
+}
+
+/**
+ * idpf_ptp_set_rx_tstamp - Enable or disable Rx timestamping
+ * @vport: Virtual port structure
+ * @rx_filter: Receive timestamp filter
+ */
+static void idpf_ptp_set_rx_tstamp(struct idpf_vport *vport, int rx_filter)
+{
+ bool enable = true, splitq;
+
+ splitq = idpf_is_queue_model_split(vport->rxq_model);
+
+ if (rx_filter == HWTSTAMP_FILTER_NONE) {
+ enable = false;
+ vport->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+ } else {
+ vport->tstamp_config.rx_filter = HWTSTAMP_FILTER_ALL;
+ }
+
+ for (u16 i = 0; i < vport->num_rxq_grp; i++) {
+ struct idpf_rxq_group *grp = &vport->rxq_grps[i];
+ struct idpf_rx_queue *rx_queue;
+ u16 j, num_rxq;
+
+ if (splitq)
+ num_rxq = grp->splitq.num_rxq_sets;
+ else
+ num_rxq = grp->singleq.num_rxq;
+
+ for (j = 0; j < num_rxq; j++) {
+ if (splitq)
+ rx_queue = &grp->splitq.rxq_sets[j]->rxq;
+ else
+ rx_queue = grp->singleq.rxqs[j];
+
+ if (enable)
+ idpf_queue_set(PTP, rx_queue);
+ else
+ idpf_queue_clear(PTP, rx_queue);
+ }
+ }
+}
+
+/**
+ * idpf_ptp_set_timestamp_mode - Setup driver for requested timestamp mode
+ * @vport: Virtual port structure
+ * @config: Hwtstamp settings requested or saved
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int idpf_ptp_set_timestamp_mode(struct idpf_vport *vport,
+ struct kernel_hwtstamp_config *config)
+{
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_OFF:
+ break;
+ case HWTSTAMP_TX_ON:
+ if (!idpf_ptp_is_vport_tx_tstamp_ena(vport))
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ vport->tstamp_config.tx_type = config->tx_type;
+ idpf_ptp_set_rx_tstamp(vport, config->rx_filter);
+ *config = vport->tstamp_config;
+
+ return 0;
+}
+
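This is the handler behind the standard hardware timestamping configuration path; userspace typically requests the mode through the SIOCSHWTSTAMP ioctl. A minimal sketch of such a request (assumes an AF_INET datagram socket and a known interface name; not part of the patch):

#include <linux/net_tstamp.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>

/* Enable Tx timestamping and timestamp all received packets on @ifname.
 * @sock is any AF_INET/SOCK_DGRAM socket used only to carry the ioctl.
 */
static int enable_hw_tstamps(int sock, const char *ifname)
{
	struct hwtstamp_config cfg = {
		.tx_type = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	return ioctl(sock, SIOCSHWTSTAMP, &ifr);
}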
+/**
+ * idpf_tstamp_task - Delayed task to handle Tx tstamps
+ * @work: work_struct handle
+ */
+void idpf_tstamp_task(struct work_struct *work)
+{
+ struct idpf_vport *vport;
+
+ vport = container_of(work, struct idpf_vport, tstamp_task);
+
+ idpf_ptp_get_tx_tstamp(vport);
+}
+
+/**
+ * idpf_ptp_do_aux_work - Do PTP periodic work
+ * @info: Driver's PTP info structure
+ *
+ * Return: Number of jiffies to periodic work.
+ */
+static long idpf_ptp_do_aux_work(struct ptp_clock_info *info)
+{
+ struct idpf_adapter *adapter = idpf_ptp_info_to_adapter(info);
+
+ idpf_ptp_update_cached_phctime(adapter);
+
+ return msecs_to_jiffies(500);
+}
+
+/**
+ * idpf_ptp_set_caps - Set PTP capabilities
+ * @adapter: Driver specific private structure
+ *
+ * Fill in the ptp_clock_info structure with the supported PTP callbacks.
+ */
+static void idpf_ptp_set_caps(const struct idpf_adapter *adapter)
+{
+ struct ptp_clock_info *info = &adapter->ptp->info;
+
+ snprintf(info->name, sizeof(info->name), "%s-%s-clk",
+ KBUILD_MODNAME, pci_name(adapter->pdev));
+
+ info->owner = THIS_MODULE;
+ info->max_adj = adapter->ptp->max_adj;
+ info->gettimex64 = idpf_ptp_gettimex64;
+ info->settime64 = idpf_ptp_settime64;
+ info->adjfine = idpf_ptp_adjfine;
+ info->adjtime = idpf_ptp_adjtime;
+ info->verify = idpf_ptp_verify_pin;
+ info->enable = idpf_ptp_gpio_enable;
+ info->do_aux_work = idpf_ptp_do_aux_work;
+}
+
+/**
+ * idpf_ptp_create_clock - Create PTP clock device for userspace
+ * @adapter: Driver specific private structure
+ *
+ * This function creates a new PTP clock device.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int idpf_ptp_create_clock(const struct idpf_adapter *adapter)
+{
+ struct ptp_clock *clock;
+
+ idpf_ptp_set_caps(adapter);
+
+ /* Attempt to register the clock before enabling the hardware. */
+ clock = ptp_clock_register(&adapter->ptp->info,
+ &adapter->pdev->dev);
+ if (IS_ERR(clock)) {
+ pci_err(adapter->pdev, "PTP clock creation failed: %pe\n",
+ clock);
+ return PTR_ERR(clock);
+ }
+
+ adapter->ptp->clock = clock;
+
+ return 0;
+}
+
+/**
+ * idpf_ptp_release_vport_tstamp - Release the Tx timestamp trackers for a
+ * given vport.
+ * @vport: Virtual port structure
+ *
+ * Remove the queues and delete the lists that track Tx timestamp entries for a
+ * given vport.
+ */
+static void idpf_ptp_release_vport_tstamp(struct idpf_vport *vport)
+{
+ struct idpf_ptp_tx_tstamp *ptp_tx_tstamp, *tmp;
+ struct list_head *head;
+
+ cancel_work_sync(&vport->tstamp_task);
+
+ /* Remove list with free latches */
+ spin_lock_bh(&vport->tx_tstamp_caps->latches_lock);
+
+ head = &vport->tx_tstamp_caps->latches_free;
+ list_for_each_entry_safe(ptp_tx_tstamp, tmp, head, list_member) {
+ list_del(&ptp_tx_tstamp->list_member);
+ kfree(ptp_tx_tstamp);
+ }
+
+ /* Remove list with latches in use */
+ head = &vport->tx_tstamp_caps->latches_in_use;
+ list_for_each_entry_safe(ptp_tx_tstamp, tmp, head, list_member) {
+ list_del(&ptp_tx_tstamp->list_member);
+ kfree(ptp_tx_tstamp);
+ }
+
+ spin_unlock_bh(&vport->tx_tstamp_caps->latches_lock);
+
+ kfree(vport->tx_tstamp_caps);
+ vport->tx_tstamp_caps = NULL;
+}
+
+/**
+ * idpf_ptp_release_tstamp - Release the Tx timestamp trackers
+ * @adapter: Driver specific private structure
+ *
+ * Remove the queues and delete the lists that track Tx timestamp entries.
+ */
+static void idpf_ptp_release_tstamp(struct idpf_adapter *adapter)
+{
+ idpf_for_each_vport(adapter, vport) {
+ if (!idpf_ptp_is_vport_tx_tstamp_ena(vport))
+ continue;
+
+ idpf_ptp_release_vport_tstamp(vport);
+ }
+}
+
+/**
+ * idpf_ptp_get_txq_tstamp_capability - Verify the timestamping capability
+ * for a given Tx queue.
+ * @txq: Transmit queue
+ *
+ * Timestamping requires both the ability to read the device clock value and
+ * support in the Control Plane, so the function checks both factors and
+ * reports the combined capability.
+ *
+ * Return: true if the timestamping is supported, false otherwise.
+ */
+bool idpf_ptp_get_txq_tstamp_capability(struct idpf_tx_queue *txq)
+{
+ if (!txq || !txq->cached_tstamp_caps)
+ return false;
+ else if (txq->cached_tstamp_caps->access)
+ return true;
+ else
+ return false;
+}
+
+/**
+ * idpf_ptp_init - Initialize PTP hardware clock support
+ * @adapter: Driver specific private structure
+ *
+ * Set up the device for interacting with the PTP hardware clock for all
+ * functions. The function allocates and registers a ptp_clock with the
+ * PTP_1588_CLOCK infrastructure.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int idpf_ptp_init(struct idpf_adapter *adapter)
+{
+ struct timespec64 ts;
+ int err;
+
+ if (!idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_PTP)) {
+ pci_dbg(adapter->pdev, "PTP capability is not detected\n");
+ return -EOPNOTSUPP;
+ }
+
+ adapter->ptp = kzalloc(sizeof(*adapter->ptp), GFP_KERNEL);
+ if (!adapter->ptp)
+ return -ENOMEM;
+
+ /* add a back pointer to adapter */
+ adapter->ptp->adapter = adapter;
+
+ if (adapter->dev_ops.reg_ops.ptp_reg_init)
+ adapter->dev_ops.reg_ops.ptp_reg_init(adapter);
+
+ err = idpf_ptp_get_caps(adapter);
+ if (err) {
+ pci_err(adapter->pdev, "Failed to get PTP caps err %d\n", err);
+ goto free_ptp;
+ }
+
+ err = idpf_ptp_create_clock(adapter);
+ if (err)
+ goto free_ptp;
+
+ if (adapter->ptp->get_dev_clk_time_access != IDPF_PTP_NONE)
+ ptp_schedule_worker(adapter->ptp->clock, 0);
+
+ /* Write the default increment time value if the clock adjustments
+ * are enabled.
+ */
+ if (adapter->ptp->adj_dev_clk_time_access != IDPF_PTP_NONE) {
+ err = idpf_ptp_adj_dev_clk_fine(adapter,
+ adapter->ptp->base_incval);
+ if (err)
+ goto remove_clock;
+ }
+
+ /* Write the initial time value if the set time operation is enabled */
+ if (adapter->ptp->set_dev_clk_time_access != IDPF_PTP_NONE) {
+ ts = ktime_to_timespec64(ktime_get_real());
+ err = idpf_ptp_settime64(&adapter->ptp->info, &ts);
+ if (err)
+ goto remove_clock;
+ }
+
+ spin_lock_init(&adapter->ptp->read_dev_clk_lock);
+
+ pci_dbg(adapter->pdev, "PTP init successful\n");
+
+ return 0;
+
+remove_clock:
+ if (adapter->ptp->get_dev_clk_time_access != IDPF_PTP_NONE)
+ ptp_cancel_worker_sync(adapter->ptp->clock);
+
+ ptp_clock_unregister(adapter->ptp->clock);
+ adapter->ptp->clock = NULL;
+
+free_ptp:
+ kfree(adapter->ptp);
+ adapter->ptp = NULL;
+
+ return err;
+}
+
+/**
+ * idpf_ptp_release - Clear PTP hardware clock support
+ * @adapter: Driver specific private structure
+ */
+void idpf_ptp_release(struct idpf_adapter *adapter)
+{
+ struct idpf_ptp *ptp = adapter->ptp;
+
+ if (!ptp)
+ return;
+
+ if (ptp->tx_tstamp_access != IDPF_PTP_NONE &&
+ ptp->get_dev_clk_time_access != IDPF_PTP_NONE)
+ idpf_ptp_release_tstamp(adapter);
+
+ if (ptp->clock) {
+ if (adapter->ptp->get_dev_clk_time_access != IDPF_PTP_NONE)
+ ptp_cancel_worker_sync(adapter->ptp->clock);
+
+ ptp_clock_unregister(ptp->clock);
+ }
+
+ kfree(ptp);
+ adapter->ptp = NULL;
+}
diff --git a/drivers/net/ethernet/intel/idpf/idpf_ptp.h b/drivers/net/ethernet/intel/idpf/idpf_ptp.h
new file mode 100644
index 000000000000..a876749d6116
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/idpf_ptp.h
@@ -0,0 +1,362 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2024 Intel Corporation */
+
+#ifndef _IDPF_PTP_H
+#define _IDPF_PTP_H
+
+#include <linux/ptp_clock_kernel.h>
+
+/**
+ * struct idpf_ptp_cmd - PTP command masks
+ * @exec_cmd_mask: mask to trigger command execution
+ * @shtime_enable_mask: mask to enable shadow time
+ */
+struct idpf_ptp_cmd {
+ u32 exec_cmd_mask;
+ u32 shtime_enable_mask;
+};
+
+/* struct idpf_ptp_dev_clk_regs - PTP device registers
+ * @dev_clk_ns_l: low part of the device clock register
+ * @dev_clk_ns_h: high part of the device clock register
+ * @phy_clk_ns_l: low part of the PHY clock register
+ * @phy_clk_ns_h: high part of the PHY clock register
+ * @incval_l: low part of the increment value register
+ * @incval_h: high part of the increment value register
+ * @shadj_l: low part of the shadow adjust register
+ * @shadj_h: high part of the shadow adjust register
+ * @phy_incval_l: low part of the PHY increment value register
+ * @phy_incval_h: high part of the PHY increment value register
+ * @phy_shadj_l: low part of the PHY shadow adjust register
+ * @phy_shadj_h: high part of the PHY shadow adjust register
+ * @cmd: PTP command register
+ * @phy_cmd: PHY command register
+ * @cmd_sync: PTP command synchronization register
+ */
+struct idpf_ptp_dev_clk_regs {
+ /* Main clock */
+ void __iomem *dev_clk_ns_l;
+ void __iomem *dev_clk_ns_h;
+
+ /* PHY timer */
+ void __iomem *phy_clk_ns_l;
+ void __iomem *phy_clk_ns_h;
+
+ /* Main timer adjustments */
+ void __iomem *incval_l;
+ void __iomem *incval_h;
+ void __iomem *shadj_l;
+ void __iomem *shadj_h;
+
+ /* PHY timer adjustments */
+ void __iomem *phy_incval_l;
+ void __iomem *phy_incval_h;
+ void __iomem *phy_shadj_l;
+ void __iomem *phy_shadj_h;
+
+ /* Command */
+ void __iomem *cmd;
+ void __iomem *phy_cmd;
+ void __iomem *cmd_sync;
+};
+
+/**
+ * enum idpf_ptp_access - the type of access to PTP operations
+ * @IDPF_PTP_NONE: no access
+ * @IDPF_PTP_DIRECT: direct access through BAR registers
+ * @IDPF_PTP_MAILBOX: access through mailbox messages
+ */
+enum idpf_ptp_access {
+ IDPF_PTP_NONE = 0,
+ IDPF_PTP_DIRECT,
+ IDPF_PTP_MAILBOX,
+};
+
+/**
+ * struct idpf_ptp_secondary_mbx - PTP secondary mailbox
+ * @peer_mbx_q_id: PTP mailbox queue ID
+ * @peer_id: Peer ID for PTP Device Control daemon
+ * @valid: indicates whether the secondary mailbox is supported by the Control
+ * Plane
+ */
+struct idpf_ptp_secondary_mbx {
+ u16 peer_mbx_q_id;
+ u16 peer_id;
+ bool valid:1;
+};
+
+/**
+ * enum idpf_ptp_tx_tstamp_state - Tx timestamp states
+ * @IDPF_PTP_FREE: Tx timestamp index free to use
+ * @IDPF_PTP_REQUEST: Tx timestamp index set to the Tx descriptor
+ * @IDPF_PTP_READ_VALUE: Tx timestamp value ready to be read
+ */
+enum idpf_ptp_tx_tstamp_state {
+ IDPF_PTP_FREE,
+ IDPF_PTP_REQUEST,
+ IDPF_PTP_READ_VALUE,
+};
+
+/**
+ * struct idpf_ptp_tx_tstamp_status - Parameters to track Tx timestamp
+ * @skb: the pointer to the SKB that received the completion tag
+ * @state: the state of the Tx timestamp
+ */
+struct idpf_ptp_tx_tstamp_status {
+ struct sk_buff *skb;
+ enum idpf_ptp_tx_tstamp_state state;
+};
+
+/**
+ * struct idpf_ptp_tx_tstamp - Parameters for Tx timestamping
+ * @list_member: the list member structure
+ * @tx_latch_reg_offset_l: Tx tstamp latch low register offset
+ * @tx_latch_reg_offset_h: Tx tstamp latch high register offset
+ * @skb: the pointer to the SKB for this timestamp request
+ * @tstamp: the Tx tstamp value
+ * @idx: the index of the Tx tstamp
+ */
+struct idpf_ptp_tx_tstamp {
+ struct list_head list_member;
+ u32 tx_latch_reg_offset_l;
+ u32 tx_latch_reg_offset_h;
+ struct sk_buff *skb;
+ u64 tstamp;
+ u32 idx;
+};
+
+/**
+ * struct idpf_ptp_vport_tx_tstamp_caps - Tx timestamp capabilities
+ * @vport_id: the vport id
+ * @num_entries: the number of negotiated Tx timestamp entries
+ * @tstamp_ns_lo_bit: first bit for nanosecond part of the timestamp
+ * @latches_lock: the lock to the lists of free/used timestamp indexes
+ * @status_lock: the lock to the status tracker
+ * @access: indicates an access to Tx timestamp
+ * @latches_free: the list of the free Tx timestamps latches
+ * @latches_in_use: the list of the used Tx timestamps latches
+ * @tx_tstamp_status: Tx tstamp status tracker
+ */
+struct idpf_ptp_vport_tx_tstamp_caps {
+ u32 vport_id;
+ u16 num_entries;
+ u16 tstamp_ns_lo_bit;
+ spinlock_t latches_lock;
+ spinlock_t status_lock;
+ bool access:1;
+ struct list_head latches_free;
+ struct list_head latches_in_use;
+ struct idpf_ptp_tx_tstamp_status tx_tstamp_status[];
+};
+
+/**
+ * struct idpf_ptp - PTP parameters
+ * @info: structure defining PTP hardware capabilities
+ * @clock: pointer to registered PTP clock device
+ * @adapter: back pointer to the adapter
+ * @base_incval: base increment value of the PTP clock
+ * @max_adj: maximum adjustment of the PTP clock
+ * @cmd: HW specific command masks
+ * @cached_phc_time: a cached copy of the PHC time for timestamp extension
+ * @cached_phc_jiffies: jiffies when cached_phc_time was last updated
+ * @dev_clk_regs: the set of registers to access the device clock
+ * @caps: PTP capabilities negotiated with the Control Plane
+ * @get_dev_clk_time_access: access type for getting the device clock time
+ * @set_dev_clk_time_access: access type for setting the device clock time
+ * @adj_dev_clk_time_access: access type for adjusting the device clock
+ * @tx_tstamp_access: access type for the Tx timestamp value read
+ * @rsv: reserved bits
+ * @secondary_mbx: parameters for using dedicated PTP mailbox
+ * @read_dev_clk_lock: spinlock protecting access to the device clock read
+ * operation executed by the HW latch
+ */
+struct idpf_ptp {
+ struct ptp_clock_info info;
+ struct ptp_clock *clock;
+ struct idpf_adapter *adapter;
+ u64 base_incval;
+ u64 max_adj;
+ struct idpf_ptp_cmd cmd;
+ u64 cached_phc_time;
+ unsigned long cached_phc_jiffies;
+ struct idpf_ptp_dev_clk_regs dev_clk_regs;
+ u32 caps;
+ enum idpf_ptp_access get_dev_clk_time_access:2;
+ enum idpf_ptp_access set_dev_clk_time_access:2;
+ enum idpf_ptp_access adj_dev_clk_time_access:2;
+ enum idpf_ptp_access tx_tstamp_access:2;
+ u8 rsv;
+ struct idpf_ptp_secondary_mbx secondary_mbx;
+ spinlock_t read_dev_clk_lock;
+};
+
+/**
+ * idpf_ptp_info_to_adapter - get driver adapter struct from ptp_clock_info
+ * @info: pointer to ptp_clock_info struct
+ *
+ * Return: pointer to the corresponding adapter struct
+ */
+static inline struct idpf_adapter *
+idpf_ptp_info_to_adapter(const struct ptp_clock_info *info)
+{
+ const struct idpf_ptp *ptp = container_of_const(info, struct idpf_ptp,
+ info);
+ return ptp->adapter;
+}
+
+/**
+ * struct idpf_ptp_dev_timers - System time and device time values
+ * @sys_time_ns: system time value expressed in nanoseconds
+ * @dev_clk_time_ns: device clock time value expressed in nanoseconds
+ */
+struct idpf_ptp_dev_timers {
+ u64 sys_time_ns;
+ u64 dev_clk_time_ns;
+};
+
+/**
+ * idpf_ptp_is_vport_tx_tstamp_ena - Verify the Tx timestamping enablement for
+ * a given vport.
+ * @vport: Virtual port structure
+ *
+ * Tx timestamp capabilities are negotiated with the Control Plane only if the
+ * device clock value can be read, the Tx timestamp access type is other than
+ * NONE, and the PTP clock for the adapter is created. When all those conditions
+ * are satisfied, the Tx timestamp feature is enabled and tx_tstamp_caps is
+ * allocated and populated.
+ *
+ * Return: true if the Tx timestamping is enabled, false otherwise.
+ */
+static inline bool idpf_ptp_is_vport_tx_tstamp_ena(struct idpf_vport *vport)
+{
+ if (!vport->tx_tstamp_caps)
+ return false;
+ else
+ return true;
+}
+
+/**
+ * idpf_ptp_is_vport_rx_tstamp_ena - Verify the Rx timestamping enablement for
+ * a given vport.
+ * @vport: Virtual port structure
+ *
+ * The Rx timestamp feature is enabled if the PTP clock for the adapter is
+ * created and it is possible to read the value of the device clock. The second
+ * requirement comes from the need to extend the Rx timestamp value to 64 bits
+ * based on the current device clock time.
+ *
+ * Return: true if the Rx timestamping is enabled, false otherwise.
+ */
+static inline bool idpf_ptp_is_vport_rx_tstamp_ena(struct idpf_vport *vport)
+{
+ if (!vport->adapter->ptp ||
+ vport->adapter->ptp->get_dev_clk_time_access == IDPF_PTP_NONE)
+ return false;
+ else
+ return true;
+}
+
+#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
+int idpf_ptp_init(struct idpf_adapter *adapter);
+void idpf_ptp_release(struct idpf_adapter *adapter);
+int idpf_ptp_get_caps(struct idpf_adapter *adapter);
+void idpf_ptp_get_features_access(const struct idpf_adapter *adapter);
+bool idpf_ptp_get_txq_tstamp_capability(struct idpf_tx_queue *txq);
+int idpf_ptp_get_dev_clk_time(struct idpf_adapter *adapter,
+ struct idpf_ptp_dev_timers *dev_clk_time);
+int idpf_ptp_set_dev_clk_time(struct idpf_adapter *adapter, u64 time);
+int idpf_ptp_adj_dev_clk_fine(struct idpf_adapter *adapter, u64 incval);
+int idpf_ptp_adj_dev_clk_time(struct idpf_adapter *adapter, s64 delta);
+int idpf_ptp_get_vport_tstamps_caps(struct idpf_vport *vport);
+int idpf_ptp_get_tx_tstamp(struct idpf_vport *vport);
+int idpf_ptp_set_timestamp_mode(struct idpf_vport *vport,
+ struct kernel_hwtstamp_config *config);
+u64 idpf_ptp_extend_ts(struct idpf_vport *vport, u64 in_tstamp);
+u64 idpf_ptp_tstamp_extend_32b_to_64b(u64 cached_phc_time, u32 in_timestamp);
+int idpf_ptp_request_ts(struct idpf_tx_queue *tx_q, struct sk_buff *skb,
+ u32 *idx);
+void idpf_tstamp_task(struct work_struct *work);
+#else /* CONFIG_PTP_1588_CLOCK */
+static inline int idpf_ptp_init(struct idpf_adapter *adapter)
+{
+ return 0;
+}
+
+static inline void idpf_ptp_release(struct idpf_adapter *adapter) { }
+
+static inline int idpf_ptp_get_caps(struct idpf_adapter *adapter)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void
+idpf_ptp_get_features_access(const struct idpf_adapter *adapter) { }
+
+static inline bool
+idpf_ptp_get_txq_tstamp_capability(struct idpf_tx_queue *txq)
+{
+ return false;
+}
+
+static inline int
+idpf_ptp_get_dev_clk_time(struct idpf_adapter *adapter,
+ struct idpf_ptp_dev_timers *dev_clk_time)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int idpf_ptp_set_dev_clk_time(struct idpf_adapter *adapter,
+ u64 time)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int idpf_ptp_adj_dev_clk_fine(struct idpf_adapter *adapter,
+ u64 incval)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int idpf_ptp_adj_dev_clk_time(struct idpf_adapter *adapter,
+ s64 delta)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int idpf_ptp_get_vport_tstamps_caps(struct idpf_vport *vport)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int idpf_ptp_get_tx_tstamp(struct idpf_vport *vport)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+idpf_ptp_set_timestamp_mode(struct idpf_vport *vport,
+ struct kernel_hwtstamp_config *config)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline u64 idpf_ptp_extend_ts(struct idpf_vport *vport, u64 in_tstamp)
+{
+ return 0;
+}
+
+static inline u64 idpf_ptp_tstamp_extend_32b_to_64b(u64 cached_phc_time,
+ u32 in_timestamp)
+{
+ return 0;
+}
+
+static inline int idpf_ptp_request_ts(struct idpf_tx_queue *tx_q,
+ struct sk_buff *skb, u32 *idx)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void idpf_tstamp_task(struct work_struct *work) { }
+#endif /* CONFIG_PTP_1588_CLOCK */
+#endif /* _IDPF_PTP_H */
diff --git a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
index dfd7cf1d9aa0..993c354aa27a 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
@@ -362,17 +362,18 @@ netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
{
struct idpf_tx_offload_params offload = { };
struct idpf_tx_buf *first;
+ int csum, tso, needed;
unsigned int count;
__be16 protocol;
- int csum, tso;
count = idpf_tx_desc_count_required(tx_q, skb);
if (unlikely(!count))
return idpf_tx_drop_skb(tx_q, skb);
- if (idpf_tx_maybe_stop_common(tx_q,
- count + IDPF_TX_DESCS_PER_CACHE_LINE +
- IDPF_TX_DESCS_FOR_CTX)) {
+ needed = count + IDPF_TX_DESCS_PER_CACHE_LINE + IDPF_TX_DESCS_FOR_CTX;
+ if (!netif_subqueue_maybe_stop(tx_q->netdev, tx_q->idx,
+ IDPF_DESC_UNUSED(tx_q),
+ needed, needed)) {
idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
u64_stats_update_begin(&tx_q->stats_sync);
@@ -595,7 +596,7 @@ static bool idpf_rx_singleq_is_non_eop(const union virtchnl2_rx_desc *rx_desc)
*/
static void idpf_rx_singleq_csum(struct idpf_rx_queue *rxq,
struct sk_buff *skb,
- struct idpf_rx_csum_decoded csum_bits,
+ struct libeth_rx_csum csum_bits,
struct libeth_rx_pt decoded)
{
bool ipv4, ipv6;
@@ -661,10 +662,10 @@ checksum_fail:
*
* Return: parsed checksum status.
**/
-static struct idpf_rx_csum_decoded
+static struct libeth_rx_csum
idpf_rx_singleq_base_csum(const union virtchnl2_rx_desc *rx_desc)
{
- struct idpf_rx_csum_decoded csum_bits = { };
+ struct libeth_rx_csum csum_bits = { };
u32 rx_error, rx_status;
u64 qword;
@@ -696,10 +697,10 @@ idpf_rx_singleq_base_csum(const union virtchnl2_rx_desc *rx_desc)
*
* Return: parsed checksum status.
**/
-static struct idpf_rx_csum_decoded
+static struct libeth_rx_csum
idpf_rx_singleq_flex_csum(const union virtchnl2_rx_desc *rx_desc)
{
- struct idpf_rx_csum_decoded csum_bits = { };
+ struct libeth_rx_csum csum_bits = { };
u16 rx_status0, rx_status1;
rx_status0 = le16_to_cpu(rx_desc->flex_nic_wb.status_error0);
@@ -798,7 +799,7 @@ idpf_rx_singleq_process_skb_fields(struct idpf_rx_queue *rx_q,
u16 ptype)
{
struct libeth_rx_pt decoded = rx_q->rx_ptype_lkup[ptype];
- struct idpf_rx_csum_decoded csum_bits;
+ struct libeth_rx_csum csum_bits;
/* modifies the skb - consumes the enet header */
skb->protocol = eth_type_trans(skb, rx_q->netdev);
@@ -900,14 +901,14 @@ bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_rx_queue *rx_q,
*/
static void
idpf_rx_singleq_extract_base_fields(const union virtchnl2_rx_desc *rx_desc,
- struct idpf_rx_extracted *fields)
+ struct libeth_rqe_info *fields)
{
u64 qword;
qword = le64_to_cpu(rx_desc->base_wb.qword1.status_error_ptype_len);
- fields->size = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_QW1_LEN_PBUF_M, qword);
- fields->rx_ptype = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_QW1_PTYPE_M, qword);
+ fields->len = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_QW1_LEN_PBUF_M, qword);
+ fields->ptype = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_QW1_PTYPE_M, qword);
}
/**
@@ -923,12 +924,12 @@ idpf_rx_singleq_extract_base_fields(const union virtchnl2_rx_desc *rx_desc,
*/
static void
idpf_rx_singleq_extract_flex_fields(const union virtchnl2_rx_desc *rx_desc,
- struct idpf_rx_extracted *fields)
+ struct libeth_rqe_info *fields)
{
- fields->size = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_PKT_LEN_M,
- le16_to_cpu(rx_desc->flex_nic_wb.pkt_len));
- fields->rx_ptype = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_PTYPE_M,
- le16_to_cpu(rx_desc->flex_nic_wb.ptype_flex_flags0));
+ fields->len = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_PKT_LEN_M,
+ le16_to_cpu(rx_desc->flex_nic_wb.pkt_len));
+ fields->ptype = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_PTYPE_M,
+ le16_to_cpu(rx_desc->flex_nic_wb.ptype_flex_flags0));
}
/**
@@ -941,7 +942,7 @@ idpf_rx_singleq_extract_flex_fields(const union virtchnl2_rx_desc *rx_desc,
static void
idpf_rx_singleq_extract_fields(const struct idpf_rx_queue *rx_q,
const union virtchnl2_rx_desc *rx_desc,
- struct idpf_rx_extracted *fields)
+ struct libeth_rqe_info *fields)
{
if (rx_q->rxdids == VIRTCHNL2_RXDID_1_32B_BASE_M)
idpf_rx_singleq_extract_base_fields(rx_desc, fields);
@@ -966,7 +967,7 @@ static int idpf_rx_singleq_clean(struct idpf_rx_queue *rx_q, int budget)
/* Process Rx packets bounded by budget */
while (likely(total_rx_pkts < (unsigned int)budget)) {
- struct idpf_rx_extracted fields = { };
+ struct libeth_rqe_info fields = { };
union virtchnl2_rx_desc *rx_desc;
struct idpf_rx_buf *rx_buf;
@@ -992,13 +993,13 @@ static int idpf_rx_singleq_clean(struct idpf_rx_queue *rx_q, int budget)
idpf_rx_singleq_extract_fields(rx_q, rx_desc, &fields);
rx_buf = &rx_q->rx_buf[ntc];
- if (!libeth_rx_sync_for_cpu(rx_buf, fields.size))
+ if (!libeth_rx_sync_for_cpu(rx_buf, fields.len))
goto skip_data;
if (skb)
- idpf_rx_add_frag(rx_buf, skb, fields.size);
+ idpf_rx_add_frag(rx_buf, skb, fields.len);
else
- skb = idpf_rx_build_skb(rx_buf, fields.size);
+ skb = idpf_rx_build_skb(rx_buf, fields.len);
/* exit if we failed to retrieve a buffer */
if (!skb)
@@ -1033,8 +1034,8 @@ skip_data:
total_rx_bytes += skb->len;
/* protocol */
- idpf_rx_singleq_process_skb_fields(rx_q, skb,
- rx_desc, fields.rx_ptype);
+ idpf_rx_singleq_process_skb_fields(rx_q, skb, rx_desc,
+ fields.ptype);
/* send completed skb up the stack */
napi_gro_receive(rx_q->pp->p.napi, skb);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index da2a5becf62f..5cf440e09d0a 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -5,6 +5,7 @@
#include <net/libeth/tx.h>
#include "idpf.h"
+#include "idpf_ptp.h"
#include "idpf_virtchnl.h"
struct idpf_tx_stash {
@@ -1107,6 +1108,8 @@ void idpf_vport_queues_rel(struct idpf_vport *vport)
*/
static int idpf_vport_init_fast_path_txqs(struct idpf_vport *vport)
{
+ struct idpf_ptp_vport_tx_tstamp_caps *caps = vport->tx_tstamp_caps;
+ struct work_struct *tstamp_task = &vport->tstamp_task;
int i, j, k = 0;
vport->txqs = kcalloc(vport->num_txq, sizeof(*vport->txqs),
@@ -1121,6 +1124,12 @@ static int idpf_vport_init_fast_path_txqs(struct idpf_vport *vport)
for (j = 0; j < tx_grp->num_txq; j++, k++) {
vport->txqs[k] = tx_grp->txqs[j];
vport->txqs[k]->idx = k;
+
+ if (!caps)
+ continue;
+
+ vport->txqs[k]->cached_tstamp_caps = caps;
+ vport->txqs[k]->tstamp_task = tstamp_task;
}
}
@@ -1655,6 +1664,40 @@ static void idpf_tx_handle_sw_marker(struct idpf_tx_queue *tx_q)
}
/**
+ * idpf_tx_read_tstamp - schedule work to read the Tx timestamp value
+ * @txq: queue to read the timestamp from
+ * @skb: socket buffer to provide Tx timestamp value
+ *
+ * Schedule work to read the Tx timestamp value generated once the packet is
+ * transmitted.
+ */
+static void idpf_tx_read_tstamp(struct idpf_tx_queue *txq, struct sk_buff *skb)
+{
+ struct idpf_ptp_vport_tx_tstamp_caps *tx_tstamp_caps;
+ struct idpf_ptp_tx_tstamp_status *tx_tstamp_status;
+
+ tx_tstamp_caps = txq->cached_tstamp_caps;
+ spin_lock_bh(&tx_tstamp_caps->status_lock);
+
+ for (u32 i = 0; i < tx_tstamp_caps->num_entries; i++) {
+ tx_tstamp_status = &tx_tstamp_caps->tx_tstamp_status[i];
+ if (tx_tstamp_status->state != IDPF_PTP_FREE)
+ continue;
+
+ tx_tstamp_status->skb = skb;
+ tx_tstamp_status->state = IDPF_PTP_REQUEST;
+
+ /* Fetch timestamp from completion descriptor through
+ * virtchnl msg to report to stack.
+ */
+ queue_work(system_unbound_wq, txq->tstamp_task);
+ break;
+ }
+
+ spin_unlock_bh(&tx_tstamp_caps->status_lock);
+}
+
+/**
* idpf_tx_clean_stashed_bufs - clean bufs that were stored for
* out of order completions
* @txq: queue to clean
@@ -1682,6 +1725,11 @@ static void idpf_tx_clean_stashed_bufs(struct idpf_tx_queue *txq,
continue;
hash_del(&stash->hlist);
+
+ if (stash->buf.type == LIBETH_SQE_SKB &&
+ (skb_shinfo(stash->buf.skb)->tx_flags & SKBTX_IN_PROGRESS))
+ idpf_tx_read_tstamp(txq, stash->buf.skb);
+
libeth_tx_complete(&stash->buf, &cp);
/* Push shadow buf back onto stack */
@@ -1876,8 +1924,12 @@ static bool idpf_tx_clean_buf_ring(struct idpf_tx_queue *txq, u16 compl_tag,
idpf_tx_buf_compl_tag(tx_buf) != compl_tag))
return false;
- if (tx_buf->type == LIBETH_SQE_SKB)
+ if (tx_buf->type == LIBETH_SQE_SKB) {
+ if (skb_shinfo(tx_buf->skb)->tx_flags & SKBTX_IN_PROGRESS)
+ idpf_tx_read_tstamp(txq, tx_buf->skb);
+
libeth_tx_complete(tx_buf, &cp);
+ }
idpf_tx_clean_buf_ring_bump_ntc(txq, idx, tx_buf);
@@ -2127,11 +2179,24 @@ void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc,
struct idpf_tx_splitq_params *params,
u16 td_cmd, u16 size)
{
- desc->flow.qw1.cmd_dtype = (u16)params->dtype | td_cmd;
+ *(u32 *)&desc->flow.qw1.cmd_dtype = (u8)(params->dtype | td_cmd);
desc->flow.qw1.rxr_bufsize = cpu_to_le16((u16)size);
desc->flow.qw1.compl_tag = cpu_to_le16(params->compl_tag);
}
+/* Global conditions to tell whether the txq (and related resources)
+ * has room to allow the use of "size" descriptors.
+ */
+static int idpf_txq_has_room(struct idpf_tx_queue *tx_q, u32 size)
+{
+ if (IDPF_DESC_UNUSED(tx_q) < size ||
+ IDPF_TX_COMPLQ_PENDING(tx_q->txq_grp) >
+ IDPF_TX_COMPLQ_OVERFLOW_THRESH(tx_q->txq_grp->complq) ||
+ IDPF_TX_BUF_RSV_LOW(tx_q))
+ return 0;
+ return 1;
+}
+
/**
* idpf_tx_maybe_stop_splitq - 1st level check for Tx splitq stop conditions
* @tx_q: the queue to be checked
@@ -2142,29 +2207,11 @@ void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc,
static int idpf_tx_maybe_stop_splitq(struct idpf_tx_queue *tx_q,
unsigned int descs_needed)
{
- if (idpf_tx_maybe_stop_common(tx_q, descs_needed))
- goto out;
-
- /* If there are too many outstanding completions expected on the
- * completion queue, stop the TX queue to give the device some time to
- * catch up
- */
- if (unlikely(IDPF_TX_COMPLQ_PENDING(tx_q->txq_grp) >
- IDPF_TX_COMPLQ_OVERFLOW_THRESH(tx_q->txq_grp->complq)))
- goto splitq_stop;
-
- /* Also check for available book keeping buffers; if we are low, stop
- * the queue to wait for more completions
- */
- if (unlikely(IDPF_TX_BUF_RSV_LOW(tx_q)))
- goto splitq_stop;
-
- return 0;
-
-splitq_stop:
- netif_stop_subqueue(tx_q->netdev, tx_q->idx);
+ if (netif_subqueue_maybe_stop(tx_q->netdev, tx_q->idx,
+ idpf_txq_has_room(tx_q, descs_needed),
+ 1, 1))
+ return 0;
-out:
u64_stats_update_begin(&tx_q->stats_sync);
u64_stats_inc(&tx_q->q_stats.q_busy);
u64_stats_update_end(&tx_q->stats_sync);
@@ -2190,12 +2237,6 @@ void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val,
nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
tx_q->next_to_use = val;
- if (idpf_tx_maybe_stop_common(tx_q, IDPF_TX_DESC_NEEDED)) {
- u64_stats_update_begin(&tx_q->stats_sync);
- u64_stats_inc(&tx_q->q_stats.q_busy);
- u64_stats_update_end(&tx_q->stats_sync);
- }
-
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
@@ -2296,7 +2337,7 @@ void idpf_tx_dma_map_error(struct idpf_tx_queue *txq, struct sk_buff *skb,
* descriptor. Reset that here.
*/
tx_desc = &txq->flex_tx[idx];
- memset(tx_desc, 0, sizeof(struct idpf_flex_tx_ctx_desc));
+ memset(tx_desc, 0, sizeof(*tx_desc));
if (idx == 0)
idx = txq->desc_count;
idx--;
@@ -2448,6 +2489,7 @@ static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q,
* rest of the packet.
*/
tx_buf->type = LIBETH_SQE_EMPTY;
+ idpf_tx_buf_compl_tag(tx_buf) = params->compl_tag;
/* Adjust the DMA offset and the remaining size of the
* fragment. On the first iteration of this loop,
@@ -2698,10 +2740,10 @@ static bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs,
* Since the TX buffer rings mimics the descriptor ring, update the tx buffer
* ring entry to reflect that this index is a context descriptor
*/
-static struct idpf_flex_tx_ctx_desc *
+static union idpf_flex_tx_ctx_desc *
idpf_tx_splitq_get_ctx_desc(struct idpf_tx_queue *txq)
{
- struct idpf_flex_tx_ctx_desc *desc;
+ union idpf_flex_tx_ctx_desc *desc;
int i = txq->next_to_use;
txq->tx_buf[i].type = LIBETH_SQE_CTX;
@@ -2731,6 +2773,73 @@ netdev_tx_t idpf_tx_drop_skb(struct idpf_tx_queue *tx_q, struct sk_buff *skb)
return NETDEV_TX_OK;
}
+#if (IS_ENABLED(CONFIG_PTP_1588_CLOCK))
+/**
+ * idpf_tx_tstamp - set up context descriptor for hardware timestamp
+ * @tx_q: queue to send buffer on
+ * @skb: pointer to the SKB we're sending
+ * @off: pointer to the offload struct
+ *
+ * Return: Index of the Tx timestamp latch on success, negative otherwise.
+ */
+static int idpf_tx_tstamp(struct idpf_tx_queue *tx_q, struct sk_buff *skb,
+ struct idpf_tx_offload_params *off)
+{
+ int err, idx;
+
+ /* only timestamp the outbound packet if the user has requested it */
+ if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
+ return -1;
+
+ if (!idpf_ptp_get_txq_tstamp_capability(tx_q))
+ return -1;
+
+ /* Tx timestamps cannot be sampled when doing TSO */
+ if (off->tx_flags & IDPF_TX_FLAGS_TSO)
+ return -1;
+
+ /* Grab an open timestamp slot */
+ err = idpf_ptp_request_ts(tx_q, skb, &idx);
+ if (err) {
+ u64_stats_update_begin(&tx_q->stats_sync);
+ u64_stats_inc(&tx_q->q_stats.tstamp_skipped);
+ u64_stats_update_end(&tx_q->stats_sync);
+
+ return -1;
+ }
+
+ off->tx_flags |= IDPF_TX_FLAGS_TSYN;
+
+ return idx;
+}
+
+/**
+ * idpf_tx_set_tstamp_desc - Set the Tx descriptor fields needed to generate
+ * PHY Tx timestamp
+ * @ctx_desc: Context descriptor
+ * @idx: Index of the Tx timestamp latch
+ */
+static void idpf_tx_set_tstamp_desc(union idpf_flex_tx_ctx_desc *ctx_desc,
+ u32 idx)
+{
+ ctx_desc->tsyn.qw1 = le64_encode_bits(IDPF_TX_DESC_DTYPE_CTX,
+ IDPF_TX_CTX_DTYPE_M) |
+ le64_encode_bits(IDPF_TX_CTX_DESC_TSYN,
+ IDPF_TX_CTX_CMD_M) |
+ le64_encode_bits(idx, IDPF_TX_CTX_TSYN_REG_M);
+}
+#else /* CONFIG_PTP_1588_CLOCK */
+static int idpf_tx_tstamp(struct idpf_tx_queue *tx_q, struct sk_buff *skb,
+ struct idpf_tx_offload_params *off)
+{
+ return -1;
+}
+
+static void idpf_tx_set_tstamp_desc(union idpf_flex_tx_ctx_desc *ctx_desc,
+ u32 idx)
+{ }
+#endif /* CONFIG_PTP_1588_CLOCK */
+
/**
* idpf_tx_splitq_frame - Sends buffer on Tx ring using flex descriptors
* @skb: send buffer
@@ -2742,9 +2851,10 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
struct idpf_tx_queue *tx_q)
{
struct idpf_tx_splitq_params tx_params = { };
+ union idpf_flex_tx_ctx_desc *ctx_desc;
struct idpf_tx_buf *first;
unsigned int count;
- int tso;
+ int tso, idx;
count = idpf_tx_desc_count_required(tx_q, skb);
if (unlikely(!count))
@@ -2764,8 +2874,7 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
if (tso) {
/* If tso is needed, set up context desc */
- struct idpf_flex_tx_ctx_desc *ctx_desc =
- idpf_tx_splitq_get_ctx_desc(tx_q);
+ ctx_desc = idpf_tx_splitq_get_ctx_desc(tx_q);
ctx_desc->tso.qw1.cmd_dtype =
cpu_to_le16(IDPF_TX_DESC_DTYPE_FLEX_TSO_CTX |
@@ -2783,6 +2892,12 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
u64_stats_update_end(&tx_q->stats_sync);
}
+ idx = idpf_tx_tstamp(tx_q, skb, &tx_params.offload);
+ if (idx != -1) {
+ ctx_desc = idpf_tx_splitq_get_ctx_desc(tx_q);
+ idpf_tx_set_tstamp_desc(ctx_desc, idx);
+ }
+
/* record the location of the first descriptor for this packet */
first = &tx_q->tx_buf[tx_q->next_to_use];
first->skb = skb;
@@ -2894,7 +3009,7 @@ idpf_rx_hash(const struct idpf_rx_queue *rxq, struct sk_buff *skb,
* skb->protocol must be set before this function is called
*/
static void idpf_rx_csum(struct idpf_rx_queue *rxq, struct sk_buff *skb,
- struct idpf_rx_csum_decoded csum_bits,
+ struct libeth_rx_csum csum_bits,
struct libeth_rx_pt decoded)
{
bool ipv4, ipv6;
@@ -2922,7 +3037,7 @@ static void idpf_rx_csum(struct idpf_rx_queue *rxq, struct sk_buff *skb,
if (unlikely(csum_bits.l4e))
goto checksum_fail;
- if (csum_bits.raw_csum_inv ||
+ if (!csum_bits.raw_csum_valid ||
decoded.inner_prot == LIBETH_RX_PT_INNER_SCTP) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
return;
@@ -2945,10 +3060,10 @@ checksum_fail:
*
* Return: parsed checksum status.
**/
-static struct idpf_rx_csum_decoded
+static struct libeth_rx_csum
idpf_rx_splitq_extract_csum_bits(const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)
{
- struct idpf_rx_csum_decoded csum = { };
+ struct libeth_rx_csum csum = { };
u8 qword0, qword1;
qword0 = rx_desc->status_err0_qw0;
@@ -2964,9 +3079,9 @@ idpf_rx_splitq_extract_csum_bits(const struct virtchnl2_rx_flex_desc_adv_nic_3 *
qword1);
csum.ipv6exadd = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_IPV6EXADD_M,
qword0);
- csum.raw_csum_inv =
- le16_get_bits(rx_desc->ptype_err_fflags0,
- VIRTCHNL2_RX_FLEX_DESC_ADV_RAW_CSUM_INV_M);
+ csum.raw_csum_valid =
+ !le16_get_bits(rx_desc->ptype_err_fflags0,
+ VIRTCHNL2_RX_FLEX_DESC_ADV_RAW_CSUM_INV_M);
csum.raw_csum = le16_to_cpu(rx_desc->misc.raw_cs);
return csum;
@@ -3007,14 +3122,11 @@ static int idpf_rx_rsc(struct idpf_rx_queue *rxq, struct sk_buff *skb,
return -EINVAL;
rsc_segments = DIV_ROUND_UP(skb->data_len, rsc_seg_len);
- if (unlikely(rsc_segments == 1))
- return 0;
NAPI_GRO_CB(skb)->count = rsc_segments;
skb_shinfo(skb)->gso_size = rsc_seg_len;
skb_reset_network_header(skb);
- len = skb->len - skb_transport_offset(skb);
if (ipv4) {
struct iphdr *ipv4h = ip_hdr(skb);
@@ -3023,6 +3135,7 @@ static int idpf_rx_rsc(struct idpf_rx_queue *rxq, struct sk_buff *skb,
/* Reset and set transport header offset in skb */
skb_set_transport_header(skb, sizeof(struct iphdr));
+ len = skb->len - skb_transport_offset(skb);
/* Compute the TCP pseudo header checksum*/
tcp_hdr(skb)->check =
@@ -3032,6 +3145,7 @@ static int idpf_rx_rsc(struct idpf_rx_queue *rxq, struct sk_buff *skb,
skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
skb_set_transport_header(skb, sizeof(struct ipv6hdr));
+ len = skb->len - skb_transport_offset(skb);
tcp_hdr(skb)->check =
~tcp_v6_check(len, &ipv6h->saddr, &ipv6h->daddr, 0);
}
@@ -3046,6 +3160,33 @@ static int idpf_rx_rsc(struct idpf_rx_queue *rxq, struct sk_buff *skb,
}
/**
+ * idpf_rx_hwtstamp - check for an RX timestamp and pass up the stack
+ * @rxq: pointer to the rx queue that receives the timestamp
+ * @rx_desc: pointer to rx descriptor containing timestamp
+ * @skb: skb to put timestamp in
+ */
+static void
+idpf_rx_hwtstamp(const struct idpf_rx_queue *rxq,
+ const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc,
+ struct sk_buff *skb)
+{
+ u64 cached_time, ts_ns;
+ u32 ts_high;
+
+ if (!(rx_desc->ts_low & VIRTCHNL2_RX_FLEX_TSTAMP_VALID))
+ return;
+
+ cached_time = READ_ONCE(rxq->cached_phc_time);
+
+ ts_high = le32_to_cpu(rx_desc->ts_high);
+ ts_ns = idpf_ptp_tstamp_extend_32b_to_64b(cached_time, ts_high);
+
+ *skb_hwtstamps(skb) = (struct skb_shared_hwtstamps) {
+ .hwtstamp = ns_to_ktime(ts_ns),
+ };
+}
+
+/**
* idpf_rx_process_skb_fields - Populate skb header fields from Rx descriptor
* @rxq: Rx descriptor ring packet is being transacted on
* @skb: pointer to current skb being populated
@@ -3059,7 +3200,7 @@ static int
idpf_rx_process_skb_fields(struct idpf_rx_queue *rxq, struct sk_buff *skb,
const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)
{
- struct idpf_rx_csum_decoded csum_bits;
+ struct libeth_rx_csum csum_bits;
struct libeth_rx_pt decoded;
u16 rx_ptype;
@@ -3070,7 +3211,11 @@ idpf_rx_process_skb_fields(struct idpf_rx_queue *rxq, struct sk_buff *skb,
/* process RSS/hash */
idpf_rx_hash(rxq, skb, rx_desc, decoded);
+ if (idpf_queue_has(PTP, rxq))
+ idpf_rx_hwtstamp(rxq, rx_desc, skb);
+
skb->protocol = eth_type_trans(skb, rxq->netdev);
+ skb_record_rx_queue(skb, rxq->idx);
if (le16_get_bits(rx_desc->hdrlen_flags,
VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_M))
@@ -3079,8 +3224,6 @@ idpf_rx_process_skb_fields(struct idpf_rx_queue *rxq, struct sk_buff *skb,
csum_bits = idpf_rx_splitq_extract_csum_bits(rx_desc);
idpf_rx_csum(rxq, skb, csum_bits, decoded);
- skb_record_rx_queue(skb, rxq->idx);
-
return 0;
}
@@ -3553,8 +3696,6 @@ void idpf_vport_intr_rel(struct idpf_vport *vport)
q_vector->tx = NULL;
kfree(q_vector->rx);
q_vector->rx = NULL;
-
- free_cpumask_var(q_vector->affinity_mask);
}
kfree(vport->q_vectors);
@@ -3581,8 +3722,6 @@ static void idpf_vport_intr_rel_irq(struct idpf_vport *vport)
vidx = vport->q_vector_idxs[vector];
irq_num = adapter->msix_entries[vidx].vector;
- /* clear the affinity_mask in the IRQ descriptor */
- irq_set_affinity_hint(irq_num, NULL);
kfree(free_irq(irq_num, q_vector));
}
}
@@ -3603,21 +3742,31 @@ static void idpf_vport_intr_dis_irq_all(struct idpf_vport *vport)
/**
* idpf_vport_intr_buildreg_itr - Enable default interrupt generation settings
* @q_vector: pointer to q_vector
- * @type: itr index
- * @itr: itr value
*/
-static u32 idpf_vport_intr_buildreg_itr(struct idpf_q_vector *q_vector,
- const int type, u16 itr)
+static u32 idpf_vport_intr_buildreg_itr(struct idpf_q_vector *q_vector)
{
- u32 itr_val;
+ u32 itr_val = q_vector->intr_reg.dyn_ctl_intena_m;
+ int type = IDPF_NO_ITR_UPDATE_IDX;
+ u16 itr = 0;
+
+ if (q_vector->wb_on_itr) {
+ /*
+ * Trigger a software interrupt when exiting wb_on_itr, to make
+ * sure we catch any pending write backs that might have been
+ * missed due to interrupt state transition.
+ */
+ itr_val |= q_vector->intr_reg.dyn_ctl_swint_trig_m |
+ q_vector->intr_reg.dyn_ctl_sw_itridx_ena_m;
+ type = IDPF_SW_ITR_UPDATE_IDX;
+ itr = IDPF_ITR_20K;
+ }
itr &= IDPF_ITR_MASK;
/* Don't clear PBA because that can cause lost interrupts that
* came in while we were cleaning/polling
*/
- itr_val = q_vector->intr_reg.dyn_ctl_intena_m |
- (type << q_vector->intr_reg.dyn_ctl_itridx_s) |
- (itr << (q_vector->intr_reg.dyn_ctl_intrvl_s - 1));
+ itr_val |= (type << q_vector->intr_reg.dyn_ctl_itridx_s) |
+ (itr << (q_vector->intr_reg.dyn_ctl_intrvl_s - 1));
return itr_val;
}
@@ -3715,9 +3864,8 @@ void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector)
/* net_dim() updates ITR out-of-band using a work item */
idpf_net_dim(q_vector);
+ intval = idpf_vport_intr_buildreg_itr(q_vector);
q_vector->wb_on_itr = false;
- intval = idpf_vport_intr_buildreg_itr(q_vector,
- IDPF_NO_ITR_UPDATE_IDX, 0);
writel(intval, q_vector->intr_reg.dyn_ctl);
}
@@ -3761,8 +3909,6 @@ static int idpf_vport_intr_req_irq(struct idpf_vport *vport)
"Request_irq failed, error: %d\n", err);
goto free_q_irqs;
}
- /* assign the mask for this irq */
- irq_set_affinity_hint(irq_num, q_vector->affinity_mask);
}
return 0;
@@ -4023,6 +4169,14 @@ static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget)
return budget;
}
+ /* Switch to poll mode in the tear-down path after sending disable
+ * queues virtchnl message, as the interrupts will be disabled after
+ * that.
+ */
+ if (unlikely(q_vector->num_txq && idpf_queue_has(POLL_MODE,
+ q_vector->tx[0])))
+ return budget;
+
work_done = min_t(int, work_done, budget - 1);
/* Exit the polling mode, but don't re-enable interrupts if stack might
@@ -4033,15 +4187,7 @@ static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget)
else
idpf_vport_intr_set_wb_on_itr(q_vector);
- /* Switch to poll mode in the tear-down path after sending disable
- * queues virtchnl message, as the interrupts will be disabled after
- * that
- */
- if (unlikely(q_vector->num_txq && idpf_queue_has(POLL_MODE,
- q_vector->tx[0])))
- return budget;
- else
- return work_done;
+ return work_done;
}
/**
@@ -4174,7 +4320,8 @@ static int idpf_vport_intr_init_vec_idx(struct idpf_vport *vport)
static void idpf_vport_intr_napi_add_all(struct idpf_vport *vport)
{
int (*napi_poll)(struct napi_struct *napi, int budget);
- u16 v_idx;
+ u16 v_idx, qv_idx;
+ int irq_num;
if (idpf_is_queue_model_split(vport->txq_model))
napi_poll = idpf_vport_splitq_napi_poll;
@@ -4183,12 +4330,12 @@ static void idpf_vport_intr_napi_add_all(struct idpf_vport *vport)
for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
struct idpf_q_vector *q_vector = &vport->q_vectors[v_idx];
+ qv_idx = vport->q_vector_idxs[v_idx];
+ irq_num = vport->adapter->msix_entries[qv_idx].vector;
- netif_napi_add(vport->netdev, &q_vector->napi, napi_poll);
-
- /* only set affinity_mask if the CPU is online */
- if (cpu_online(v_idx))
- cpumask_set_cpu(v_idx, q_vector->affinity_mask);
+ netif_napi_add_config(vport->netdev, &q_vector->napi,
+ napi_poll, v_idx);
+ netif_napi_set_irq(&q_vector->napi, irq_num);
}
}
@@ -4232,9 +4379,6 @@ int idpf_vport_intr_alloc(struct idpf_vport *vport)
q_vector->rx_intr_mode = IDPF_ITR_DYNAMIC;
q_vector->rx_itr_idx = VIRTCHNL2_ITR_IDX_0;
- if (!zalloc_cpumask_var(&q_vector->affinity_mask, GFP_KERNEL))
- goto error;
-
q_vector->tx = kcalloc(txqs_per_vector, sizeof(*q_vector->tx),
GFP_KERNEL);
if (!q_vector->tx)
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
index 9c1fe84108ed..36a0f828a6f8 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
@@ -142,6 +142,7 @@ do { \
#define IDPF_TX_FLAGS_IPV4 BIT(1)
#define IDPF_TX_FLAGS_IPV6 BIT(2)
#define IDPF_TX_FLAGS_TUNNEL BIT(3)
+#define IDPF_TX_FLAGS_TSYN BIT(4)
union idpf_tx_flex_desc {
struct idpf_flex_tx_desc q; /* queue based scheduling */
@@ -213,25 +214,6 @@ enum idpf_tx_ctx_desc_eipt_offload {
IDPF_TX_CTX_EXT_IP_IPV4 = 0x3
};
-/* Checksum offload bits decoded from the receive descriptor. */
-struct idpf_rx_csum_decoded {
- u32 l3l4p : 1;
- u32 ipe : 1;
- u32 eipe : 1;
- u32 eudpe : 1;
- u32 ipv6exadd : 1;
- u32 l4e : 1;
- u32 pprs : 1;
- u32 nat : 1;
- u32 raw_csum_inv : 1;
- u32 raw_csum : 16;
-};
-
-struct idpf_rx_extracted {
- unsigned int size;
- u16 rx_ptype;
-};
-
#define IDPF_TX_COMPLQ_CLEAN_BUDGET 256
#define IDPF_TX_MIN_PKT_LEN 17
#define IDPF_TX_DESCS_FOR_SKB_DATA_PTR 1
@@ -308,6 +290,8 @@ struct idpf_ptype_state {
* @__IDPF_Q_POLL_MODE: Enable poll mode
* @__IDPF_Q_CRC_EN: enable CRC offload in singleq mode
* @__IDPF_Q_HSPLIT_EN: enable header split on Rx (splitq)
+ * @__IDPF_Q_PTP: indicates whether the Rx timestamping is enabled for the
+ * queue
* @__IDPF_Q_FLAGS_NBITS: Must be last
*/
enum idpf_queue_flags_t {
@@ -318,6 +302,7 @@ enum idpf_queue_flags_t {
__IDPF_Q_POLL_MODE,
__IDPF_Q_CRC_EN,
__IDPF_Q_HSPLIT_EN,
+ __IDPF_Q_PTP,
__IDPF_Q_FLAGS_NBITS,
};
@@ -354,6 +339,8 @@ struct idpf_vec_regs {
* @dyn_ctl_itridx_m: Mask for ITR index
* @dyn_ctl_intrvl_s: Register bit offset for ITR interval
* @dyn_ctl_wb_on_itr_m: Mask for WB on ITR feature
+ * @dyn_ctl_sw_itridx_ena_m: Mask for SW ITR index
+ * @dyn_ctl_swint_trig_m: Mask for dyn_ctl SW triggered interrupt enable
* @rx_itr: RX ITR register
* @tx_itr: TX ITR register
* @icr_ena: Interrupt cause register offset
@@ -367,6 +354,8 @@ struct idpf_intr_reg {
u32 dyn_ctl_itridx_m;
u32 dyn_ctl_intrvl_s;
u32 dyn_ctl_wb_on_itr_m;
+ u32 dyn_ctl_sw_itridx_ena_m;
+ u32 dyn_ctl_swint_trig_m;
void __iomem *rx_itr;
void __iomem *tx_itr;
void __iomem *icr_ena;
@@ -397,7 +386,6 @@ struct idpf_intr_reg {
* @rx_intr_mode: Dynamic ITR or not
* @rx_itr_idx: RX ITR index
* @v_idx: Vector index
- * @affinity_mask: CPU affinity mask
*/
struct idpf_q_vector {
__cacheline_group_begin_aligned(read_mostly);
@@ -434,13 +422,12 @@ struct idpf_q_vector {
__cacheline_group_begin_aligned(cold);
u16 v_idx;
- cpumask_var_t affinity_mask;
__cacheline_group_end_aligned(cold);
};
-libeth_cacheline_set_assert(struct idpf_q_vector, 112,
+libeth_cacheline_set_assert(struct idpf_q_vector, 120,
24 + sizeof(struct napi_struct) +
2 * sizeof(struct dim),
- 8 + sizeof(cpumask_var_t));
+ 8);
struct idpf_rx_queue_stats {
u64_stats_t packets;
@@ -460,6 +447,7 @@ struct idpf_tx_queue_stats {
u64_stats_t q_busy;
u64_stats_t skb_drops;
u64_stats_t dma_map_errs;
+ u64_stats_t tstamp_skipped;
};
#define IDPF_ITR_DYNAMIC 1
@@ -471,6 +459,8 @@ struct idpf_tx_queue_stats {
#define IDPF_ITR_IS_DYNAMIC(itr_mode) (itr_mode)
#define IDPF_ITR_TX_DEF IDPF_ITR_20K
#define IDPF_ITR_RX_DEF IDPF_ITR_20K
+/* Index used for 'SW ITR' update in DYN_CTL register */
+#define IDPF_SW_ITR_UPDATE_IDX 2
/* Index used for 'No ITR' update in DYN_CTL register */
#define IDPF_NO_ITR_UPDATE_IDX 3
#define IDPF_ITR_IDX_SPACING(spacing, dflt) (spacing ? spacing : dflt)
@@ -509,6 +499,7 @@ struct idpf_txq_stash {
* @next_to_alloc: RX buffer to allocate at
* @skb: Pointer to the skb
* @truesize: data buffer truesize in singleq
+ * @cached_phc_time: Cached PHC time for the Rx queue
* @stats_sync: See struct u64_stats_sync
* @q_stats: See union idpf_rx_queue_stats
* @q_id: Queue id
@@ -556,6 +547,7 @@ struct idpf_rx_queue {
struct sk_buff *skb;
u32 truesize;
+ u64 cached_phc_time;
struct u64_stats_sync stats_sync;
struct idpf_rx_queue_stats q_stats;
@@ -575,7 +567,7 @@ struct idpf_rx_queue {
__cacheline_group_end_aligned(cold);
};
libeth_cacheline_set_assert(struct idpf_rx_queue, 64,
- 80 + sizeof(struct u64_stats_sync),
+ 88 + sizeof(struct u64_stats_sync),
32);
/**
@@ -632,6 +624,8 @@ libeth_cacheline_set_assert(struct idpf_rx_queue, 64,
* @compl_tag_bufid_m: Completion tag buffer id mask
* @compl_tag_cur_gen: Used to keep track of current completion tag generation
* @compl_tag_gen_max: To determine when compl_tag_cur_gen should be reset
+ * @cached_tstamp_caps: Tx timestamp capabilities negotiated with the CP
+ * @tstamp_task: Work that handles Tx timestamp read
* @stats_sync: See struct u64_stats_sync
* @q_stats: See union idpf_tx_queue_stats
* @q_id: Queue id
@@ -645,7 +639,7 @@ struct idpf_tx_queue {
struct idpf_base_tx_desc *base_tx;
struct idpf_base_tx_ctx_desc *base_ctx;
union idpf_tx_flex_desc *flex_tx;
- struct idpf_flex_tx_ctx_desc *flex_ctx;
+ union idpf_flex_tx_ctx_desc *flex_ctx;
void *desc_ring;
};
@@ -681,6 +675,9 @@ struct idpf_tx_queue {
u16 compl_tag_cur_gen;
u16 compl_tag_gen_max;
+ struct idpf_ptp_vport_tx_tstamp_caps *cached_tstamp_caps;
+ struct work_struct *tstamp_task;
+
struct u64_stats_sync stats_sync;
struct idpf_tx_queue_stats q_stats;
__cacheline_group_end_aligned(read_write);
@@ -694,7 +691,7 @@ struct idpf_tx_queue {
__cacheline_group_end_aligned(cold);
};
libeth_cacheline_set_assert(struct idpf_tx_queue, 64,
- 88 + sizeof(struct u64_stats_sync),
+ 112 + sizeof(struct u64_stats_sync),
24);
/**
@@ -934,7 +931,7 @@ static inline int idpf_q_vector_to_mem(const struct idpf_q_vector *q_vector)
if (!q_vector)
return NUMA_NO_NODE;
- cpu = cpumask_first(q_vector->affinity_mask);
+ cpu = cpumask_first(&q_vector->napi.config->affinity_mask);
return cpu < nr_cpu_ids ? cpu_to_mem(cpu) : NUMA_NO_NODE;
}
@@ -1052,12 +1049,4 @@ bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_rx_queue *rxq,
u16 cleaned_count);
int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off);
-static inline bool idpf_tx_maybe_stop_common(struct idpf_tx_queue *tx_q,
- u32 needed)
-{
- return !netif_subqueue_maybe_stop(tx_q->netdev, tx_q->idx,
- IDPF_DESC_UNUSED(tx_q),
- needed, needed);
-}
-
#endif /* !_IDPF_TXRX_H_ */
diff --git a/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c b/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
index aad62e270ae4..aba828abcb17 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
@@ -101,6 +101,9 @@ static int idpf_vf_intr_reg_init(struct idpf_vport *vport)
intr->dyn_ctl_itridx_s = VF_INT_DYN_CTLN_ITR_INDX_S;
intr->dyn_ctl_intrvl_s = VF_INT_DYN_CTLN_INTERVAL_S;
intr->dyn_ctl_wb_on_itr_m = VF_INT_DYN_CTLN_WB_ON_ITR_M;
+ intr->dyn_ctl_swint_trig_m = VF_INT_DYN_CTLN_SWINT_TRIG_M;
+ intr->dyn_ctl_sw_itridx_ena_m =
+ VF_INT_DYN_CTLN_SW_ITR_INDX_ENA_M;
spacing = IDPF_ITR_IDX_SPACING(reg_vals[vec_id].itrn_index_spacing,
IDPF_VF_ITR_IDX_SPACING);
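
The two masks added here pair with the IDPF_SW_ITR_UPDATE_IDX definition introduced in idpf_txrx.h above; together they let the driver fire a software-triggered interrupt on a vector. A hedged sketch of that register write, assuming the pre-existing idpf_intr_reg/idpf_q_vector fields intr_reg, dyn_ctl, dyn_ctl_intena_m and dyn_ctl_itridx_s (the helper that actually performs the write lives in hunks not shown here):

	/* Illustrative only: request a software interrupt on a q_vector by
	 * selecting the SW ITR index and setting the trigger/enable bits.
	 */
	static void example_trigger_sw_intr(struct idpf_q_vector *q_vector)
	{
		struct idpf_intr_reg *intr = &q_vector->intr_reg;
		u32 val;

		val = intr->dyn_ctl_intena_m |
		      intr->dyn_ctl_swint_trig_m |
		      intr->dyn_ctl_sw_itridx_ena_m |
		      (IDPF_SW_ITR_UPDATE_IDX << intr->dyn_ctl_itridx_s);

		writel(val, intr->dyn_ctl);
	}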
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
index d46c95f91b0d..24febaaa8fbb 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
@@ -5,88 +5,7 @@
#include "idpf.h"
#include "idpf_virtchnl.h"
-
-#define IDPF_VC_XN_MIN_TIMEOUT_MSEC 2000
-#define IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC (60 * 1000)
-#define IDPF_VC_XN_IDX_M GENMASK(7, 0)
-#define IDPF_VC_XN_SALT_M GENMASK(15, 8)
-#define IDPF_VC_XN_RING_LEN U8_MAX
-
-/**
- * enum idpf_vc_xn_state - Virtchnl transaction status
- * @IDPF_VC_XN_IDLE: not expecting a reply, ready to be used
- * @IDPF_VC_XN_WAITING: expecting a reply, not yet received
- * @IDPF_VC_XN_COMPLETED_SUCCESS: a reply was expected and received,
- * buffer updated
- * @IDPF_VC_XN_COMPLETED_FAILED: a reply was expected and received, but there
- * was an error, buffer not updated
- * @IDPF_VC_XN_SHUTDOWN: transaction object cannot be used, VC torn down
- * @IDPF_VC_XN_ASYNC: transaction sent asynchronously and doesn't have the
- * return context; a callback may be provided to handle
- * return
- */
-enum idpf_vc_xn_state {
- IDPF_VC_XN_IDLE = 1,
- IDPF_VC_XN_WAITING,
- IDPF_VC_XN_COMPLETED_SUCCESS,
- IDPF_VC_XN_COMPLETED_FAILED,
- IDPF_VC_XN_SHUTDOWN,
- IDPF_VC_XN_ASYNC,
-};
-
-struct idpf_vc_xn;
-/* Callback for asynchronous messages */
-typedef int (*async_vc_cb) (struct idpf_adapter *, struct idpf_vc_xn *,
- const struct idpf_ctlq_msg *);
-
-/**
- * struct idpf_vc_xn - Data structure representing virtchnl transactions
- * @completed: virtchnl event loop uses that to signal when a reply is
- * available, uses kernel completion API
- * @state: virtchnl event loop stores the data below, protected by the
- * completion's lock.
- * @reply_sz: Original size of reply, may be > reply_buf.iov_len; it will be
- * truncated on its way to the receiver thread according to
- * reply_buf.iov_len.
- * @reply: Reference to the buffer(s) where the reply data should be written
- * to. May be 0-length (then NULL address permitted) if the reply data
- * should be ignored.
- * @async_handler: if sent asynchronously, a callback can be provided to handle
- * the reply when it's received
- * @vc_op: corresponding opcode sent with this transaction
- * @idx: index used as retrieval on reply receive, used for cookie
- * @salt: changed every message to make unique, used for cookie
- */
-struct idpf_vc_xn {
- struct completion completed;
- enum idpf_vc_xn_state state;
- size_t reply_sz;
- struct kvec reply;
- async_vc_cb async_handler;
- u32 vc_op;
- u8 idx;
- u8 salt;
-};
-
-/**
- * struct idpf_vc_xn_params - Parameters for executing transaction
- * @send_buf: kvec for send buffer
- * @recv_buf: kvec for recv buffer, may be NULL, must then have zero length
- * @timeout_ms: timeout to wait for reply
- * @async: send message asynchronously, will not wait on completion
- * @async_handler: If sent asynchronously, optional callback handler. The user
- * must be careful when using async handlers as the memory for
- * the recv_buf _cannot_ be on stack if this is async.
- * @vc_op: virtchnl op to send
- */
-struct idpf_vc_xn_params {
- struct kvec send_buf;
- struct kvec recv_buf;
- int timeout_ms;
- bool async;
- async_vc_cb async_handler;
- u32 vc_op;
-};
+#include "idpf_ptp.h"
/**
* struct idpf_vc_xn_manager - Manager for tracking transactions
@@ -235,6 +154,55 @@ err_kfree:
return err;
}
+#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
+/**
+ * idpf_ptp_is_mb_msg - Check if the message is PTP-related
+ * @op: virtchnl opcode
+ *
+ * Return: true if msg is PTP-related, false otherwise.
+ */
+static bool idpf_ptp_is_mb_msg(u32 op)
+{
+ switch (op) {
+ case VIRTCHNL2_OP_PTP_GET_DEV_CLK_TIME:
+ case VIRTCHNL2_OP_PTP_GET_CROSS_TIME:
+ case VIRTCHNL2_OP_PTP_SET_DEV_CLK_TIME:
+ case VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_FINE:
+ case VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_TIME:
+ case VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP_CAPS:
+ case VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * idpf_prepare_ptp_mb_msg - Prepare PTP-related message
+ * @adapter: Driver specific private structure
+ * @op: virtchnl opcode
+ * @ctlq_msg: Corresponding control queue message
+ */
+static void idpf_prepare_ptp_mb_msg(struct idpf_adapter *adapter, u32 op,
+ struct idpf_ctlq_msg *ctlq_msg)
+{
+ /* If the message is PTP-related and the secondary mailbox is available,
+ * send the message through the secondary mailbox.
+ */
+ if (!idpf_ptp_is_mb_msg(op) || !adapter->ptp->secondary_mbx.valid)
+ return;
+
+ ctlq_msg->opcode = idpf_mbq_opc_send_msg_to_peer_drv;
+ ctlq_msg->func_id = adapter->ptp->secondary_mbx.peer_mbx_q_id;
+ ctlq_msg->host_id = adapter->ptp->secondary_mbx.peer_id;
+}
+#else /* !CONFIG_PTP_1588_CLOCK */
+static void idpf_prepare_ptp_mb_msg(struct idpf_adapter *adapter, u32 op,
+ struct idpf_ctlq_msg *ctlq_msg)
+{ }
+#endif /* CONFIG_PTP_1588_CLOCK */
+
/**
* idpf_send_mb_msg - Send message over mailbox
* @adapter: Driver specific private structure
@@ -278,6 +246,9 @@ int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
ctlq_msg->opcode = idpf_mbq_opc_send_msg_to_cp;
ctlq_msg->func_id = 0;
+
+ idpf_prepare_ptp_mb_msg(adapter, op, ctlq_msg);
+
ctlq_msg->data_len = msg_size;
ctlq_msg->cookie.mbx.chnl_opcode = op;
ctlq_msg->cookie.mbx.chnl_retval = 0;
@@ -376,7 +347,7 @@ static void idpf_vc_xn_init(struct idpf_vc_xn_manager *vcxn_mngr)
* All waiting threads will be woken-up and their transaction aborted. Further
* operations on that object will fail.
*/
-static void idpf_vc_xn_shutdown(struct idpf_vc_xn_manager *vcxn_mngr)
+void idpf_vc_xn_shutdown(struct idpf_vc_xn_manager *vcxn_mngr)
{
int i;
@@ -449,8 +420,8 @@ static void idpf_vc_xn_push_free(struct idpf_vc_xn_manager *vcxn_mngr,
* >= @recv_buf.iov_len, but we never overflow @recv_buf.iov_base). < 0 for
* error.
*/
-static ssize_t idpf_vc_xn_exec(struct idpf_adapter *adapter,
- const struct idpf_vc_xn_params *params)
+ssize_t idpf_vc_xn_exec(struct idpf_adapter *adapter,
+ const struct idpf_vc_xn_params *params)
{
const struct kvec *send_buf = &params->send_buf;
struct idpf_vc_xn *xn;
@@ -517,8 +488,10 @@ static ssize_t idpf_vc_xn_exec(struct idpf_adapter *adapter,
retval = -ENXIO;
goto only_unlock;
case IDPF_VC_XN_WAITING:
- dev_notice_ratelimited(&adapter->pdev->dev, "Transaction timed-out (op %d, %dms)\n",
- params->vc_op, params->timeout_ms);
+ dev_notice_ratelimited(&adapter->pdev->dev,
+ "Transaction timed-out (op:%d cookie:%04x vc_op:%d salt:%02x timeout:%dms)\n",
+ params->vc_op, cookie, xn->vc_op,
+ xn->salt, params->timeout_ms);
retval = -ETIME;
break;
case IDPF_VC_XN_COMPLETED_SUCCESS:
@@ -612,14 +585,16 @@ idpf_vc_xn_forward_reply(struct idpf_adapter *adapter,
return -EINVAL;
}
xn = &adapter->vcxn_mngr->ring[xn_idx];
+ idpf_vc_xn_lock(xn);
salt = FIELD_GET(IDPF_VC_XN_SALT_M, msg_info);
if (xn->salt != salt) {
- dev_err_ratelimited(&adapter->pdev->dev, "Transaction salt does not match (%02x != %02x)\n",
- xn->salt, salt);
+ dev_err_ratelimited(&adapter->pdev->dev, "Transaction salt does not match (exp:%d@%02x(%d) != got:%d@%02x)\n",
+ xn->vc_op, xn->salt, xn->state,
+ ctlq_msg->cookie.mbx.chnl_opcode, salt);
+ idpf_vc_xn_unlock(xn);
return -EINVAL;
}
- idpf_vc_xn_lock(xn);
switch (xn->state) {
case IDPF_VC_XN_WAITING:
/* success */
@@ -896,7 +871,8 @@ static int idpf_send_get_caps_msg(struct idpf_adapter *adapter)
VIRTCHNL2_CAP_MACFILTER |
VIRTCHNL2_CAP_SPLITQ_QSCHED |
VIRTCHNL2_CAP_PROMISC |
- VIRTCHNL2_CAP_LOOPBACK);
+ VIRTCHNL2_CAP_LOOPBACK |
+ VIRTCHNL2_CAP_PTP);
xn_params.vc_op = VIRTCHNL2_OP_GET_CAPS;
xn_params.send_buf.iov_base = &caps;
@@ -3025,6 +3001,11 @@ restart:
goto err_intr_req;
}
+ err = idpf_ptp_init(adapter);
+ if (err)
+ pci_err(adapter->pdev, "PTP init failed, err=%pe\n",
+ ERR_PTR(err));
+
idpf_init_avail_queues(adapter);
/* Skew the delay for init tasks for each function based on fn number
@@ -3077,12 +3058,22 @@ init_failed:
*/
void idpf_vc_core_deinit(struct idpf_adapter *adapter)
{
+ bool remove_in_prog;
+
if (!test_bit(IDPF_VC_CORE_INIT, adapter->flags))
return;
+ /* Avoid transaction timeouts when called during reset */
+ remove_in_prog = test_bit(IDPF_REMOVE_IN_PROG, adapter->flags);
+ if (!remove_in_prog)
+ idpf_vc_xn_shutdown(adapter->vcxn_mngr);
+
+ idpf_ptp_release(adapter);
idpf_deinit_task(adapter);
idpf_intr_rel(adapter);
- idpf_vc_xn_shutdown(adapter->vcxn_mngr);
+
+ if (remove_in_prog)
+ idpf_vc_xn_shutdown(adapter->vcxn_mngr);
cancel_delayed_work_sync(&adapter->serv_task);
cancel_delayed_work_sync(&adapter->mbx_task);
@@ -3145,6 +3136,7 @@ void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q)
u16 rx_itr[] = {2, 8, 32, 96, 128};
struct idpf_rss_data *rss_data;
u16 idx = vport->idx;
+ int err;
vport_config = adapter->vport_config[idx];
rss_data = &vport_config->user_config.rss_data;
@@ -3179,6 +3171,18 @@ void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q)
idpf_vport_alloc_vec_indexes(vport);
vport->crc_enable = adapter->crc_enable;
+
+ if (!(vport_msg->vport_flags &
+ cpu_to_le16(VIRTCHNL2_VPORT_UPLINK_PORT)))
+ return;
+
+ err = idpf_ptp_get_vport_tstamps_caps(vport);
+ if (err) {
+ pci_dbg(vport->adapter->pdev, "Tx timestamping not supported\n");
+ return;
+ }
+
+ INIT_WORK(&vport->tstamp_task, idpf_tstamp_task);
}
/**
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h
index 83da5d8da56b..77578206bada 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h
@@ -4,6 +4,88 @@
#ifndef _IDPF_VIRTCHNL_H_
#define _IDPF_VIRTCHNL_H_
+#define IDPF_VC_XN_MIN_TIMEOUT_MSEC 2000
+#define IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC (60 * 1000)
+#define IDPF_VC_XN_IDX_M GENMASK(7, 0)
+#define IDPF_VC_XN_SALT_M GENMASK(15, 8)
+#define IDPF_VC_XN_RING_LEN U8_MAX
+
+/**
+ * enum idpf_vc_xn_state - Virtchnl transaction status
+ * @IDPF_VC_XN_IDLE: not expecting a reply, ready to be used
+ * @IDPF_VC_XN_WAITING: expecting a reply, not yet received
+ * @IDPF_VC_XN_COMPLETED_SUCCESS: a reply was expected and received, buffer
+ * updated
+ * @IDPF_VC_XN_COMPLETED_FAILED: a reply was expected and received, but there
+ * was an error, buffer not updated
+ * @IDPF_VC_XN_SHUTDOWN: transaction object cannot be used, VC torn down
+ * @IDPF_VC_XN_ASYNC: transaction sent asynchronously and doesn't have the
+ * return context; a callback may be provided to handle
+ * return
+ */
+enum idpf_vc_xn_state {
+ IDPF_VC_XN_IDLE = 1,
+ IDPF_VC_XN_WAITING,
+ IDPF_VC_XN_COMPLETED_SUCCESS,
+ IDPF_VC_XN_COMPLETED_FAILED,
+ IDPF_VC_XN_SHUTDOWN,
+ IDPF_VC_XN_ASYNC,
+};
+
+struct idpf_vc_xn;
+/* Callback for asynchronous messages */
+typedef int (*async_vc_cb) (struct idpf_adapter *, struct idpf_vc_xn *,
+ const struct idpf_ctlq_msg *);
+
+/**
+ * struct idpf_vc_xn - Data structure representing virtchnl transactions
+ * @completed: virtchnl event loop uses that to signal when a reply is
+ * available, uses kernel completion API
+ * @state: virtchnl event loop stores the data below, protected by the
+ * completion's lock.
+ * @reply_sz: Original size of reply, may be > reply_buf.iov_len; it will be
+ * truncated on its way to the receiver thread according to
+ * reply_buf.iov_len.
+ * @reply: Reference to the buffer(s) where the reply data should be written
+ * to. May be 0-length (then NULL address permitted) if the reply data
+ * should be ignored.
+ * @async_handler: if sent asynchronously, a callback can be provided to handle
+ * the reply when it's received
+ * @vc_op: corresponding opcode sent with this transaction
+ * @idx: index used as retrieval on reply receive, used for cookie
+ * @salt: changed every message to make unique, used for cookie
+ */
+struct idpf_vc_xn {
+ struct completion completed;
+ enum idpf_vc_xn_state state;
+ size_t reply_sz;
+ struct kvec reply;
+ async_vc_cb async_handler;
+ u32 vc_op;
+ u8 idx;
+ u8 salt;
+};
+
+/**
+ * struct idpf_vc_xn_params - Parameters for executing transaction
+ * @send_buf: kvec for send buffer
+ * @recv_buf: kvec for recv buffer, may be NULL, must then have zero length
+ * @timeout_ms: timeout to wait for reply
+ * @async: send message asynchronously, will not wait on completion
+ * @async_handler: If sent asynchronously, optional callback handler. The user
+ * must be careful when using async handlers as the memory for
+ * the recv_buf _cannot_ be on stack if this is async.
+ * @vc_op: virtchnl op to send
+ */
+struct idpf_vc_xn_params {
+ struct kvec send_buf;
+ struct kvec recv_buf;
+ int timeout_ms;
+ bool async;
+ async_vc_cb async_handler;
+ u32 vc_op;
+};
+
struct idpf_adapter;
struct idpf_netdev_priv;
struct idpf_vec_regs;
@@ -11,6 +93,8 @@ struct idpf_vport;
struct idpf_vport_max_q;
struct idpf_vport_user_config_data;
+ssize_t idpf_vc_xn_exec(struct idpf_adapter *adapter,
+ const struct idpf_vc_xn_params *params);
int idpf_init_dflt_mbx(struct idpf_adapter *adapter);
void idpf_deinit_dflt_mbx(struct idpf_adapter *adapter);
int idpf_vc_core_init(struct idpf_adapter *adapter);
@@ -66,5 +150,6 @@ int idpf_send_get_stats_msg(struct idpf_vport *vport);
int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs);
int idpf_send_get_set_rss_key_msg(struct idpf_vport *vport, bool get);
int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get);
+void idpf_vc_xn_shutdown(struct idpf_vc_xn_manager *vcxn_mngr);
#endif /* _IDPF_VIRTCHNL_H_ */
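
With the transaction plumbing now exported from this header, other objects in the driver can issue virtchnl transactions directly. A minimal caller sketch, following the same shape the PTP helpers in the new file below use (the function name is illustrative):

	static int example_get_dev_clk_time(struct idpf_adapter *adapter, u64 *time_ns)
	{
		struct virtchnl2_ptp_get_dev_clk_time msg = { };
		struct idpf_vc_xn_params xn_params = {
			.vc_op = VIRTCHNL2_OP_PTP_GET_DEV_CLK_TIME,
			.send_buf.iov_base = &msg,
			.send_buf.iov_len = sizeof(msg),
			.recv_buf.iov_base = &msg,
			.recv_buf.iov_len = sizeof(msg),
			.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
		};
		ssize_t reply_sz;

		/* Blocks until the CP replies or the timeout expires. */
		reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
		if (reply_sz < 0)
			return reply_sz;
		if (reply_sz != sizeof(msg))
			return -EIO;

		*time_ns = le64_to_cpu(msg.dev_time_ns);
		return 0;
	}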
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c
new file mode 100644
index 000000000000..bdcc54a5fb56
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c
@@ -0,0 +1,615 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2024 Intel Corporation */
+
+#include "idpf.h"
+#include "idpf_ptp.h"
+#include "idpf_virtchnl.h"
+
+/**
+ * idpf_ptp_get_caps - Send virtchnl get ptp capabilities message
+ * @adapter: Driver specific private structure
+ *
+ * Send virtchnl get PTP capabilities message.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+int idpf_ptp_get_caps(struct idpf_adapter *adapter)
+{
+ struct virtchnl2_ptp_get_caps *recv_ptp_caps_msg __free(kfree) = NULL;
+ struct virtchnl2_ptp_get_caps send_ptp_caps_msg = {
+ .caps = cpu_to_le32(VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME |
+ VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME_MB |
+ VIRTCHNL2_CAP_PTP_GET_CROSS_TIME |
+ VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME_MB |
+ VIRTCHNL2_CAP_PTP_ADJ_DEVICE_CLK_MB |
+ VIRTCHNL2_CAP_PTP_TX_TSTAMPS_MB)
+ };
+ struct idpf_vc_xn_params xn_params = {
+ .vc_op = VIRTCHNL2_OP_PTP_GET_CAPS,
+ .send_buf.iov_base = &send_ptp_caps_msg,
+ .send_buf.iov_len = sizeof(send_ptp_caps_msg),
+ .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
+ };
+ struct virtchnl2_ptp_clk_adj_reg_offsets clk_adj_offsets;
+ struct virtchnl2_ptp_clk_reg_offsets clock_offsets;
+ struct idpf_ptp_secondary_mbx *scnd_mbx;
+ struct idpf_ptp *ptp = adapter->ptp;
+ enum idpf_ptp_access access_type;
+ u32 temp_offset;
+ int reply_sz;
+
+ recv_ptp_caps_msg = kzalloc(sizeof(struct virtchnl2_ptp_get_caps),
+ GFP_KERNEL);
+ if (!recv_ptp_caps_msg)
+ return -ENOMEM;
+
+ xn_params.recv_buf.iov_base = recv_ptp_caps_msg;
+ xn_params.recv_buf.iov_len = sizeof(*recv_ptp_caps_msg);
+
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
+ else if (reply_sz != sizeof(*recv_ptp_caps_msg))
+ return -EIO;
+
+ ptp->caps = le32_to_cpu(recv_ptp_caps_msg->caps);
+ ptp->base_incval = le64_to_cpu(recv_ptp_caps_msg->base_incval);
+ ptp->max_adj = le32_to_cpu(recv_ptp_caps_msg->max_adj);
+
+ scnd_mbx = &ptp->secondary_mbx;
+ scnd_mbx->peer_mbx_q_id = le16_to_cpu(recv_ptp_caps_msg->peer_mbx_q_id);
+
+ /* If the peer_mbx_q_id holds an invalid value (0xffff), the secondary
+ * mailbox is not supported.
+ */
+ scnd_mbx->valid = scnd_mbx->peer_mbx_q_id != 0xffff;
+ if (scnd_mbx->valid)
+ scnd_mbx->peer_id = recv_ptp_caps_msg->peer_id;
+
+ /* Determine the access type for the PTP features */
+ idpf_ptp_get_features_access(adapter);
+
+ access_type = ptp->get_dev_clk_time_access;
+ if (access_type != IDPF_PTP_DIRECT)
+ goto discipline_clock;
+
+ clock_offsets = recv_ptp_caps_msg->clk_offsets;
+
+ temp_offset = le32_to_cpu(clock_offsets.dev_clk_ns_l);
+ ptp->dev_clk_regs.dev_clk_ns_l = idpf_get_reg_addr(adapter,
+ temp_offset);
+ temp_offset = le32_to_cpu(clock_offsets.dev_clk_ns_h);
+ ptp->dev_clk_regs.dev_clk_ns_h = idpf_get_reg_addr(adapter,
+ temp_offset);
+ temp_offset = le32_to_cpu(clock_offsets.phy_clk_ns_l);
+ ptp->dev_clk_regs.phy_clk_ns_l = idpf_get_reg_addr(adapter,
+ temp_offset);
+ temp_offset = le32_to_cpu(clock_offsets.phy_clk_ns_h);
+ ptp->dev_clk_regs.phy_clk_ns_h = idpf_get_reg_addr(adapter,
+ temp_offset);
+ temp_offset = le32_to_cpu(clock_offsets.cmd_sync_trigger);
+ ptp->dev_clk_regs.cmd_sync = idpf_get_reg_addr(adapter, temp_offset);
+
+discipline_clock:
+ access_type = ptp->adj_dev_clk_time_access;
+ if (access_type != IDPF_PTP_DIRECT)
+ return 0;
+
+ clk_adj_offsets = recv_ptp_caps_msg->clk_adj_offsets;
+
+ /* Device clock offsets */
+ temp_offset = le32_to_cpu(clk_adj_offsets.dev_clk_cmd_type);
+ ptp->dev_clk_regs.cmd = idpf_get_reg_addr(adapter, temp_offset);
+ temp_offset = le32_to_cpu(clk_adj_offsets.dev_clk_incval_l);
+ ptp->dev_clk_regs.incval_l = idpf_get_reg_addr(adapter, temp_offset);
+ temp_offset = le32_to_cpu(clk_adj_offsets.dev_clk_incval_h);
+ ptp->dev_clk_regs.incval_h = idpf_get_reg_addr(adapter, temp_offset);
+ temp_offset = le32_to_cpu(clk_adj_offsets.dev_clk_shadj_l);
+ ptp->dev_clk_regs.shadj_l = idpf_get_reg_addr(adapter, temp_offset);
+ temp_offset = le32_to_cpu(clk_adj_offsets.dev_clk_shadj_h);
+ ptp->dev_clk_regs.shadj_h = idpf_get_reg_addr(adapter, temp_offset);
+
+ /* PHY clock offsets */
+ temp_offset = le32_to_cpu(clk_adj_offsets.phy_clk_cmd_type);
+ ptp->dev_clk_regs.phy_cmd = idpf_get_reg_addr(adapter, temp_offset);
+ temp_offset = le32_to_cpu(clk_adj_offsets.phy_clk_incval_l);
+ ptp->dev_clk_regs.phy_incval_l = idpf_get_reg_addr(adapter,
+ temp_offset);
+ temp_offset = le32_to_cpu(clk_adj_offsets.phy_clk_incval_h);
+ ptp->dev_clk_regs.phy_incval_h = idpf_get_reg_addr(adapter,
+ temp_offset);
+ temp_offset = le32_to_cpu(clk_adj_offsets.phy_clk_shadj_l);
+ ptp->dev_clk_regs.phy_shadj_l = idpf_get_reg_addr(adapter, temp_offset);
+ temp_offset = le32_to_cpu(clk_adj_offsets.phy_clk_shadj_h);
+ ptp->dev_clk_regs.phy_shadj_h = idpf_get_reg_addr(adapter, temp_offset);
+
+ return 0;
+}
+
+/**
+ * idpf_ptp_get_dev_clk_time - Send virtchnl get device clk time message
+ * @adapter: Driver specific private structure
+ * @dev_clk_time: Pointer to the device clock structure where the value is set
+ *
+ * Send virtchnl get time message to get the time of the clock.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int idpf_ptp_get_dev_clk_time(struct idpf_adapter *adapter,
+ struct idpf_ptp_dev_timers *dev_clk_time)
+{
+ struct virtchnl2_ptp_get_dev_clk_time get_dev_clk_time_msg;
+ struct idpf_vc_xn_params xn_params = {
+ .vc_op = VIRTCHNL2_OP_PTP_GET_DEV_CLK_TIME,
+ .send_buf.iov_base = &get_dev_clk_time_msg,
+ .send_buf.iov_len = sizeof(get_dev_clk_time_msg),
+ .recv_buf.iov_base = &get_dev_clk_time_msg,
+ .recv_buf.iov_len = sizeof(get_dev_clk_time_msg),
+ .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
+ };
+ int reply_sz;
+ u64 dev_time;
+
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
+ if (reply_sz != sizeof(get_dev_clk_time_msg))
+ return -EIO;
+
+ dev_time = le64_to_cpu(get_dev_clk_time_msg.dev_time_ns);
+ dev_clk_time->dev_clk_time_ns = dev_time;
+
+ return 0;
+}
+
+/**
+ * idpf_ptp_set_dev_clk_time - Send virtchnl set device time message
+ * @adapter: Driver specific private structure
+ * @time: New time value
+ *
+ * Send virtchnl set time message to set the time of the clock.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int idpf_ptp_set_dev_clk_time(struct idpf_adapter *adapter, u64 time)
+{
+ struct virtchnl2_ptp_set_dev_clk_time set_dev_clk_time_msg = {
+ .dev_time_ns = cpu_to_le64(time),
+ };
+ struct idpf_vc_xn_params xn_params = {
+ .vc_op = VIRTCHNL2_OP_PTP_SET_DEV_CLK_TIME,
+ .send_buf.iov_base = &set_dev_clk_time_msg,
+ .send_buf.iov_len = sizeof(set_dev_clk_time_msg),
+ .recv_buf.iov_base = &set_dev_clk_time_msg,
+ .recv_buf.iov_len = sizeof(set_dev_clk_time_msg),
+ .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
+ };
+ int reply_sz;
+
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
+ if (reply_sz != sizeof(set_dev_clk_time_msg))
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * idpf_ptp_adj_dev_clk_time - Send virtchnl adj device clock time message
+ * @adapter: Driver specific private structure
+ * @delta: Offset in nanoseconds to adjust the time by
+ *
+ * Send virtchnl adj time message to adjust the clock by the indicated delta.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int idpf_ptp_adj_dev_clk_time(struct idpf_adapter *adapter, s64 delta)
+{
+ struct virtchnl2_ptp_adj_dev_clk_time adj_dev_clk_time_msg = {
+ .delta = cpu_to_le64(delta),
+ };
+ struct idpf_vc_xn_params xn_params = {
+ .vc_op = VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_TIME,
+ .send_buf.iov_base = &adj_dev_clk_time_msg,
+ .send_buf.iov_len = sizeof(adj_dev_clk_time_msg),
+ .recv_buf.iov_base = &adj_dev_clk_time_msg,
+ .recv_buf.iov_len = sizeof(adj_dev_clk_time_msg),
+ .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
+ };
+ int reply_sz;
+
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
+ if (reply_sz != sizeof(adj_dev_clk_time_msg))
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * idpf_ptp_adj_dev_clk_fine - Send virtchnl adj time message
+ * @adapter: Driver specific private structure
+ * @incval: Source timer increment value per clock cycle
+ *
+ * Send virtchnl adj fine message to adjust the frequency of the clock by
+ * incval.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int idpf_ptp_adj_dev_clk_fine(struct idpf_adapter *adapter, u64 incval)
+{
+ struct virtchnl2_ptp_adj_dev_clk_fine adj_dev_clk_fine_msg = {
+ .incval = cpu_to_le64(incval),
+ };
+ struct idpf_vc_xn_params xn_params = {
+ .vc_op = VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_FINE,
+ .send_buf.iov_base = &adj_dev_clk_fine_msg,
+ .send_buf.iov_len = sizeof(adj_dev_clk_fine_msg),
+ .recv_buf.iov_base = &adj_dev_clk_fine_msg,
+ .recv_buf.iov_len = sizeof(adj_dev_clk_fine_msg),
+ .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
+ };
+ int reply_sz;
+
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
+ if (reply_sz != sizeof(adj_dev_clk_fine_msg))
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * idpf_ptp_get_vport_tstamps_caps - Send virtchnl to get tstamps caps for vport
+ * @vport: Virtual port structure
+ *
+ * Send virtchnl get vport tstamps caps message to receive the set of tstamp
+ * capabilities per vport.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int idpf_ptp_get_vport_tstamps_caps(struct idpf_vport *vport)
+{
+ struct virtchnl2_ptp_get_vport_tx_tstamp_caps send_tx_tstamp_caps;
+ struct virtchnl2_ptp_get_vport_tx_tstamp_caps *rcv_tx_tstamp_caps;
+ struct virtchnl2_ptp_tx_tstamp_latch_caps tx_tstamp_latch_caps;
+ struct idpf_ptp_vport_tx_tstamp_caps *tstamp_caps;
+ struct idpf_ptp_tx_tstamp *ptp_tx_tstamp, *tmp;
+ struct idpf_vc_xn_params xn_params = {
+ .vc_op = VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP_CAPS,
+ .send_buf.iov_base = &send_tx_tstamp_caps,
+ .send_buf.iov_len = sizeof(send_tx_tstamp_caps),
+ .recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN,
+ .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
+ };
+ enum idpf_ptp_access tstamp_access, get_dev_clk_access;
+ struct idpf_ptp *ptp = vport->adapter->ptp;
+ struct list_head *head;
+ int err = 0, reply_sz;
+ u16 num_latches;
+ u32 size;
+
+ if (!ptp)
+ return -EOPNOTSUPP;
+
+ tstamp_access = ptp->tx_tstamp_access;
+ get_dev_clk_access = ptp->get_dev_clk_time_access;
+ if (tstamp_access == IDPF_PTP_NONE ||
+ get_dev_clk_access == IDPF_PTP_NONE)
+ return -EOPNOTSUPP;
+
+ rcv_tx_tstamp_caps = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
+ if (!rcv_tx_tstamp_caps)
+ return -ENOMEM;
+
+ send_tx_tstamp_caps.vport_id = cpu_to_le32(vport->vport_id);
+ xn_params.recv_buf.iov_base = rcv_tx_tstamp_caps;
+
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ if (reply_sz < 0) {
+ err = reply_sz;
+ goto get_tstamp_caps_out;
+ }
+
+ num_latches = le16_to_cpu(rcv_tx_tstamp_caps->num_latches);
+ size = struct_size(rcv_tx_tstamp_caps, tstamp_latches, num_latches);
+ if (reply_sz != size) {
+ err = -EIO;
+ goto get_tstamp_caps_out;
+ }
+
+ size = struct_size(tstamp_caps, tx_tstamp_status, num_latches);
+ tstamp_caps = kzalloc(size, GFP_KERNEL);
+ if (!tstamp_caps) {
+ err = -ENOMEM;
+ goto get_tstamp_caps_out;
+ }
+
+ tstamp_caps->access = true;
+ tstamp_caps->num_entries = num_latches;
+
+ INIT_LIST_HEAD(&tstamp_caps->latches_in_use);
+ INIT_LIST_HEAD(&tstamp_caps->latches_free);
+
+ spin_lock_init(&tstamp_caps->latches_lock);
+ spin_lock_init(&tstamp_caps->status_lock);
+
+ tstamp_caps->tstamp_ns_lo_bit = rcv_tx_tstamp_caps->tstamp_ns_lo_bit;
+
+ for (u16 i = 0; i < tstamp_caps->num_entries; i++) {
+ __le32 offset_l, offset_h;
+
+ ptp_tx_tstamp = kzalloc(sizeof(*ptp_tx_tstamp), GFP_KERNEL);
+ if (!ptp_tx_tstamp) {
+ err = -ENOMEM;
+ goto err_free_ptp_tx_stamp_list;
+ }
+
+ tx_tstamp_latch_caps = rcv_tx_tstamp_caps->tstamp_latches[i];
+
+ if (tstamp_access != IDPF_PTP_DIRECT)
+ goto skip_offsets;
+
+ offset_l = tx_tstamp_latch_caps.tx_latch_reg_offset_l;
+ offset_h = tx_tstamp_latch_caps.tx_latch_reg_offset_h;
+ ptp_tx_tstamp->tx_latch_reg_offset_l = le32_to_cpu(offset_l);
+ ptp_tx_tstamp->tx_latch_reg_offset_h = le32_to_cpu(offset_h);
+
+skip_offsets:
+ ptp_tx_tstamp->idx = tx_tstamp_latch_caps.index;
+
+ list_add(&ptp_tx_tstamp->list_member,
+ &tstamp_caps->latches_free);
+
+ tstamp_caps->tx_tstamp_status[i].state = IDPF_PTP_FREE;
+ }
+
+ vport->tx_tstamp_caps = tstamp_caps;
+ kfree(rcv_tx_tstamp_caps);
+
+ return 0;
+
+err_free_ptp_tx_stamp_list:
+ head = &tstamp_caps->latches_free;
+ list_for_each_entry_safe(ptp_tx_tstamp, tmp, head, list_member) {
+ list_del(&ptp_tx_tstamp->list_member);
+ kfree(ptp_tx_tstamp);
+ }
+
+ kfree(tstamp_caps);
+get_tstamp_caps_out:
+ kfree(rcv_tx_tstamp_caps);
+
+ return err;
+}
+
+/**
+ * idpf_ptp_update_tstamp_tracker - Update the Tx timestamp tracker state
+ * for a matching skb.
+ * @caps: Tx timestamp capabilities that monitor the latch status
+ * @skb: skb for which the tstamp value is returned through virtchnl message
+ * @current_state: Current state of the Tx timestamp latch
+ * @expected_state: Expected state of the Tx timestamp latch
+ *
+ * Find a proper skb tracker for which the Tx timestamp is received and change
+ * the state to expected value.
+ *
+ * Return: true if the tracker has been found and updated, false otherwise.
+ */
+static bool
+idpf_ptp_update_tstamp_tracker(struct idpf_ptp_vport_tx_tstamp_caps *caps,
+ struct sk_buff *skb,
+ enum idpf_ptp_tx_tstamp_state current_state,
+ enum idpf_ptp_tx_tstamp_state expected_state)
+{
+ bool updated = false;
+
+ spin_lock(&caps->status_lock);
+ for (u16 i = 0; i < caps->num_entries; i++) {
+ struct idpf_ptp_tx_tstamp_status *status;
+
+ status = &caps->tx_tstamp_status[i];
+
+ if (skb == status->skb && status->state == current_state) {
+ status->state = expected_state;
+ updated = true;
+ break;
+ }
+ }
+ spin_unlock(&caps->status_lock);
+
+ return updated;
+}
+
+/**
+ * idpf_ptp_get_tstamp_value - Get the Tx timestamp value and provide it
+ * back to the skb.
+ * @vport: Virtual port structure
+ * @tstamp_latch: Tx timestamp latch structure fulfilled by the Control Plane
+ * @ptp_tx_tstamp: Tx timestamp latch to add to the free list
+ *
+ * Read the value of the Tx timestamp for a given latch received from the
+ * Control Plane, extend it to 64 bit and provide back to the skb.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int
+idpf_ptp_get_tstamp_value(struct idpf_vport *vport,
+ struct virtchnl2_ptp_tx_tstamp_latch *tstamp_latch,
+ struct idpf_ptp_tx_tstamp *ptp_tx_tstamp)
+{
+ struct idpf_ptp_vport_tx_tstamp_caps *tx_tstamp_caps;
+ struct skb_shared_hwtstamps shhwtstamps;
+ bool state_upd = false;
+ u8 tstamp_ns_lo_bit;
+ u64 tstamp;
+
+ tx_tstamp_caps = vport->tx_tstamp_caps;
+ tstamp_ns_lo_bit = tx_tstamp_caps->tstamp_ns_lo_bit;
+
+ ptp_tx_tstamp->tstamp = le64_to_cpu(tstamp_latch->tstamp);
+ ptp_tx_tstamp->tstamp >>= tstamp_ns_lo_bit;
+
+ state_upd = idpf_ptp_update_tstamp_tracker(tx_tstamp_caps,
+ ptp_tx_tstamp->skb,
+ IDPF_PTP_READ_VALUE,
+ IDPF_PTP_FREE);
+ if (!state_upd)
+ return -EINVAL;
+
+ tstamp = idpf_ptp_extend_ts(vport, ptp_tx_tstamp->tstamp);
+ shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
+ skb_tstamp_tx(ptp_tx_tstamp->skb, &shhwtstamps);
+ consume_skb(ptp_tx_tstamp->skb);
+
+ list_add(&ptp_tx_tstamp->list_member,
+ &tx_tstamp_caps->latches_free);
+
+ return 0;
+}
+
+/**
+ * idpf_ptp_get_tx_tstamp_async_handler - Async callback for getting Tx tstamps
+ * @adapter: Driver specific private structure
+ * @xn: transaction for message
+ * @ctlq_msg: received message
+ *
+ * Read the Tx timestamp values from a received message and attach them
+ * directly to the skb. The number of timestamps to read is specified by
+ * the virtchnl message.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int
+idpf_ptp_get_tx_tstamp_async_handler(struct idpf_adapter *adapter,
+ struct idpf_vc_xn *xn,
+ const struct idpf_ctlq_msg *ctlq_msg)
+{
+ struct virtchnl2_ptp_get_vport_tx_tstamp_latches *recv_tx_tstamp_msg;
+ struct idpf_ptp_vport_tx_tstamp_caps *tx_tstamp_caps;
+ struct virtchnl2_ptp_tx_tstamp_latch tstamp_latch;
+ struct idpf_ptp_tx_tstamp *tx_tstamp, *tmp;
+ struct idpf_vport *tstamp_vport = NULL;
+ struct list_head *head;
+ u16 num_latches;
+ u32 vport_id;
+ int err = 0;
+
+ recv_tx_tstamp_msg = ctlq_msg->ctx.indirect.payload->va;
+ vport_id = le32_to_cpu(recv_tx_tstamp_msg->vport_id);
+
+ idpf_for_each_vport(adapter, vport) {
+ if (!vport)
+ continue;
+
+ if (vport->vport_id == vport_id) {
+ tstamp_vport = vport;
+ break;
+ }
+ }
+
+ if (!tstamp_vport || !tstamp_vport->tx_tstamp_caps)
+ return -EINVAL;
+
+ tx_tstamp_caps = tstamp_vport->tx_tstamp_caps;
+ num_latches = le16_to_cpu(recv_tx_tstamp_msg->num_latches);
+
+ spin_lock_bh(&tx_tstamp_caps->latches_lock);
+ head = &tx_tstamp_caps->latches_in_use;
+
+ for (u16 i = 0; i < num_latches; i++) {
+ tstamp_latch = recv_tx_tstamp_msg->tstamp_latches[i];
+
+ if (!tstamp_latch.valid)
+ continue;
+
+ if (list_empty(head)) {
+ err = -ENOBUFS;
+ goto unlock;
+ }
+
+ list_for_each_entry_safe(tx_tstamp, tmp, head, list_member) {
+ if (tstamp_latch.index == tx_tstamp->idx) {
+ list_del(&tx_tstamp->list_member);
+ err = idpf_ptp_get_tstamp_value(tstamp_vport,
+ &tstamp_latch,
+ tx_tstamp);
+ if (err)
+ goto unlock;
+
+ break;
+ }
+ }
+ }
+
+unlock:
+ spin_unlock_bh(&tx_tstamp_caps->latches_lock);
+
+ return err;
+}
+
+/**
+ * idpf_ptp_get_tx_tstamp - Send virtchnl get Tx timestamp latches message
+ * @vport: Virtual port structure
+ *
+ * Send virtchnl get Tx tstamp message to read the value of the HW timestamp.
+ * The message contains a list of indexes set in the Tx descriptors.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int idpf_ptp_get_tx_tstamp(struct idpf_vport *vport)
+{
+ struct virtchnl2_ptp_get_vport_tx_tstamp_latches *send_tx_tstamp_msg;
+ struct idpf_ptp_vport_tx_tstamp_caps *tx_tstamp_caps;
+ struct idpf_vc_xn_params xn_params = {
+ .vc_op = VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP,
+ .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
+ .async = true,
+ .async_handler = idpf_ptp_get_tx_tstamp_async_handler,
+ };
+ struct idpf_ptp_tx_tstamp *ptp_tx_tstamp;
+ int reply_sz, size, msg_size;
+ struct list_head *head;
+ bool state_upd;
+ u16 id = 0;
+
+ tx_tstamp_caps = vport->tx_tstamp_caps;
+ head = &tx_tstamp_caps->latches_in_use;
+
+ size = struct_size(send_tx_tstamp_msg, tstamp_latches,
+ tx_tstamp_caps->num_entries);
+ send_tx_tstamp_msg = kzalloc(size, GFP_KERNEL);
+ if (!send_tx_tstamp_msg)
+ return -ENOMEM;
+
+ spin_lock_bh(&tx_tstamp_caps->latches_lock);
+ list_for_each_entry(ptp_tx_tstamp, head, list_member) {
+ u8 idx;
+
+ state_upd = idpf_ptp_update_tstamp_tracker(tx_tstamp_caps,
+ ptp_tx_tstamp->skb,
+ IDPF_PTP_REQUEST,
+ IDPF_PTP_READ_VALUE);
+ if (!state_upd)
+ continue;
+
+ idx = ptp_tx_tstamp->idx;
+ send_tx_tstamp_msg->tstamp_latches[id].index = idx;
+ id++;
+ }
+ spin_unlock_bh(&tx_tstamp_caps->latches_lock);
+
+ msg_size = struct_size(send_tx_tstamp_msg, tstamp_latches, id);
+ send_tx_tstamp_msg->vport_id = cpu_to_le32(vport->vport_id);
+ send_tx_tstamp_msg->num_latches = cpu_to_le16(id);
+ xn_params.send_buf.iov_base = send_tx_tstamp_msg;
+ xn_params.send_buf.iov_len = msg_size;
+
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ kfree(send_tx_tstamp_msg);
+
+ return min(reply_sz, 0);
+}
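
idpf_ptp_get_tstamp_value() above shifts the latched value right by tstamp_ns_lo_bit and then calls idpf_ptp_extend_ts(), which lives outside this file. As a rough illustration of the usual cached-PHC-time extension scheme (a generic sketch, not the driver's actual helper; it assumes the cached time is within half of the 32-bit nanosecond range of the event):

	static u64 example_extend_32b_ts(u64 cached_phc_time, u32 in_tstamp)
	{
		u32 phc_lo = lower_32_bits(cached_phc_time);
		u32 delta = in_tstamp - phc_lo;

		/* A huge apparent delta means the event preceded the cached
		 * time; walk backwards instead of forwards.
		 */
		if (delta > (U32_MAX / 2))
			return cached_phc_time - (phc_lo - in_tstamp);

		return cached_phc_time + delta;
	}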
diff --git a/drivers/net/ethernet/intel/idpf/virtchnl2.h b/drivers/net/ethernet/intel/idpf/virtchnl2.h
index 63deb120359c..11b8f6f05799 100644
--- a/drivers/net/ethernet/intel/idpf/virtchnl2.h
+++ b/drivers/net/ethernet/intel/idpf/virtchnl2.h
@@ -68,6 +68,16 @@ enum virtchnl2_op {
VIRTCHNL2_OP_ADD_MAC_ADDR = 535,
VIRTCHNL2_OP_DEL_MAC_ADDR = 536,
VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE = 537,
+
+ /* TimeSync opcodes */
+ VIRTCHNL2_OP_PTP_GET_CAPS = 541,
+ VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP = 542,
+ VIRTCHNL2_OP_PTP_GET_DEV_CLK_TIME = 543,
+ VIRTCHNL2_OP_PTP_GET_CROSS_TIME = 544,
+ VIRTCHNL2_OP_PTP_SET_DEV_CLK_TIME = 545,
+ VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_FINE = 546,
+ VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_TIME = 547,
+ VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP_CAPS = 548,
};
/**
@@ -560,6 +570,14 @@ struct virtchnl2_queue_reg_chunks {
VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_queue_reg_chunks);
/**
+ * enum virtchnl2_vport_flags - Vport flags that indicate vport capabilities.
+ * @VIRTCHNL2_VPORT_UPLINK_PORT: Vport that represents an underlying physical port
+ */
+enum virtchnl2_vport_flags {
+ VIRTCHNL2_VPORT_UPLINK_PORT = BIT(0),
+};
+
+/**
* struct virtchnl2_create_vport - Create vport config info.
* @vport_type: See enum virtchnl2_vport_type.
* @txq_model: See virtchnl2_queue_model.
@@ -577,7 +595,7 @@ VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_queue_reg_chunks);
* @max_mtu: Max MTU. CP populates this field on response.
* @vport_id: Vport id. CP populates this field on response.
* @default_mac_addr: Default MAC address.
- * @pad: Padding.
+ * @vport_flags: See enum virtchnl2_vport_flags.
* @rx_desc_ids: See VIRTCHNL2_RX_DESC_IDS definitions.
* @tx_desc_ids: See VIRTCHNL2_TX_DESC_IDS definitions.
* @pad1: Padding.
@@ -610,7 +628,7 @@ struct virtchnl2_create_vport {
__le16 max_mtu;
__le32 vport_id;
u8 default_mac_addr[ETH_ALEN];
- __le16 pad;
+ __le16 vport_flags;
__le64 rx_desc_ids;
__le64 tx_desc_ids;
u8 pad1[72];
@@ -1270,4 +1288,296 @@ struct virtchnl2_promisc_info {
};
VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_promisc_info);
+/**
+ * enum virtchnl2_ptp_caps - PTP capabilities
+ * @VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME: direct access to get the time of
+ * device clock
+ * @VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME_MB: mailbox access to get the time of
+ * device clock
+ * @VIRTCHNL2_CAP_PTP_GET_CROSS_TIME: direct access to cross timestamp
+ * @VIRTCHNL2_CAP_PTP_GET_CROSS_TIME_MB: mailbox access to cross timestamp
+ * @VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME: direct access to set the time of
+ * device clock
+ * @VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME_MB: mailbox access to set the time of
+ * device clock
+ * @VIRTCHNL2_CAP_PTP_ADJ_DEVICE_CLK: direct access to adjust the time of device
+ * clock
+ * @VIRTCHNL2_CAP_PTP_ADJ_DEVICE_CLK_MB: mailbox access to adjust the time of
+ * device clock
+ * @VIRTCHNL2_CAP_PTP_TX_TSTAMPS: direct access to the Tx timestamping
+ * @VIRTCHNL2_CAP_PTP_TX_TSTAMPS_MB: mailbox access to the Tx timestamping
+ *
+ * PF/VF negotiates a set of supported PTP capabilities with the Control Plane.
+ * There are two access methods - mailbox (_MB) and direct.
+ * The PTP capabilities enable main timer operations (get/set/adjust the main
+ * timer), cross timestamping and Tx timestamping.
+ */
+enum virtchnl2_ptp_caps {
+ VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME = BIT(0),
+ VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME_MB = BIT(1),
+ VIRTCHNL2_CAP_PTP_GET_CROSS_TIME = BIT(2),
+ VIRTCHNL2_CAP_PTP_GET_CROSS_TIME_MB = BIT(3),
+ VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME = BIT(4),
+ VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME_MB = BIT(5),
+ VIRTCHNL2_CAP_PTP_ADJ_DEVICE_CLK = BIT(6),
+ VIRTCHNL2_CAP_PTP_ADJ_DEVICE_CLK_MB = BIT(7),
+ VIRTCHNL2_CAP_PTP_TX_TSTAMPS = BIT(8),
+ VIRTCHNL2_CAP_PTP_TX_TSTAMPS_MB = BIT(9),
+};
+
+/**
+ * struct virtchnl2_ptp_clk_reg_offsets - Offsets of device and PHY clock
+ * registers.
+ * @dev_clk_ns_l: Device clock low register offset
+ * @dev_clk_ns_h: Device clock high register offset
+ * @phy_clk_ns_l: PHY clock low register offset
+ * @phy_clk_ns_h: PHY clock high register offset
+ * @cmd_sync_trigger: The command sync trigger register offset
+ * @pad: Padding for future extensions
+ */
+struct virtchnl2_ptp_clk_reg_offsets {
+ __le32 dev_clk_ns_l;
+ __le32 dev_clk_ns_h;
+ __le32 phy_clk_ns_l;
+ __le32 phy_clk_ns_h;
+ __le32 cmd_sync_trigger;
+ u8 pad[4];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(24, virtchnl2_ptp_clk_reg_offsets);
+
+/**
+ * struct virtchnl2_ptp_cross_time_reg_offsets - Offsets of the device cross
+ * time registers.
+ * @sys_time_ns_l: System time low register offset
+ * @sys_time_ns_h: System time high register offset
+ * @cmd_sync_trigger: The command sync trigger register offset
+ * @pad: Padding for future extensions
+ */
+struct virtchnl2_ptp_cross_time_reg_offsets {
+ __le32 sys_time_ns_l;
+ __le32 sys_time_ns_h;
+ __le32 cmd_sync_trigger;
+ u8 pad[4];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_cross_time_reg_offsets);
+
+/**
+ * struct virtchnl2_ptp_clk_adj_reg_offsets - Offsets of device and PHY clock
+ * adjustment registers.
+ * @dev_clk_cmd_type: Device clock command type register offset
+ * @dev_clk_incval_l: Device clock increment value low register offset
+ * @dev_clk_incval_h: Device clock increment value high registers offset
+ * @dev_clk_shadj_l: Device clock shadow adjust low register offset
+ * @dev_clk_shadj_h: Device clock shadow adjust high register offset
+ * @phy_clk_cmd_type: PHY timer command type register offset
+ * @phy_clk_incval_l: PHY timer increment value low register offset
+ * @phy_clk_incval_h: PHY timer increment value high register offset
+ * @phy_clk_shadj_l: PHY timer shadow adjust low register offset
+ * @phy_clk_shadj_h: PHY timer shadow adjust high register offset
+ */
+struct virtchnl2_ptp_clk_adj_reg_offsets {
+ __le32 dev_clk_cmd_type;
+ __le32 dev_clk_incval_l;
+ __le32 dev_clk_incval_h;
+ __le32 dev_clk_shadj_l;
+ __le32 dev_clk_shadj_h;
+ __le32 phy_clk_cmd_type;
+ __le32 phy_clk_incval_l;
+ __le32 phy_clk_incval_h;
+ __le32 phy_clk_shadj_l;
+ __le32 phy_clk_shadj_h;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(40, virtchnl2_ptp_clk_adj_reg_offsets);
+
+/**
+ * struct virtchnl2_ptp_tx_tstamp_latch_caps - PTP Tx timestamp latch
+ * capabilities.
+ * @tx_latch_reg_offset_l: Tx timestamp latch low register offset
+ * @tx_latch_reg_offset_h: Tx timestamp latch high register offset
+ * @index: Latch index provided to the Tx descriptor
+ * @pad: Padding for future extensions
+ */
+struct virtchnl2_ptp_tx_tstamp_latch_caps {
+ __le32 tx_latch_reg_offset_l;
+ __le32 tx_latch_reg_offset_h;
+ u8 index;
+ u8 pad[7];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_tx_tstamp_latch_caps);
+
+/**
+ * struct virtchnl2_ptp_get_vport_tx_tstamp_caps - Structure that defines Tx
+ * tstamp entries.
+ * @vport_id: Vport number
+ * @num_latches: Total number of latches
+ * @tstamp_ns_lo_bit: First bit for nanosecond part of the timestamp
+ * @tstamp_ns_hi_bit: Last bit for nanosecond part of the timestamp
+ * @pad: Padding for future tstamp granularity extensions
+ * @tstamp_latches: Capabilities of Tx timestamp entries
+ *
+ * PF/VF sends this message to negotiate the Tx timestamp latches for each
+ * Vport.
+ *
+ * Associated with VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP_CAPS.
+ */
+struct virtchnl2_ptp_get_vport_tx_tstamp_caps {
+ __le32 vport_id;
+ __le16 num_latches;
+ u8 tstamp_ns_lo_bit;
+ u8 tstamp_ns_hi_bit;
+ u8 pad[8];
+
+ struct virtchnl2_ptp_tx_tstamp_latch_caps tstamp_latches[]
+ __counted_by_le(num_latches);
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_get_vport_tx_tstamp_caps);
+
+/**
+ * struct virtchnl2_ptp_get_caps - Get PTP capabilities
+ * @caps: PTP capability bitmap. See enum virtchnl2_ptp_caps
+ * @max_adj: The maximum possible frequency adjustment
+ * @base_incval: The default timer increment value
+ * @peer_mbx_q_id: ID of the PTP Device Control daemon queue
+ * @peer_id: Peer ID for PTP Device Control daemon
+ * @secondary_mbx: Indicates to the driver that it should create a secondary
+ * mailbox to interact with the control plane for PTP
+ * @pad: Padding for future extensions
+ * @clk_offsets: Main timer and PHY registers offsets
+ * @cross_time_offsets: Cross time registers offsets
+ * @clk_adj_offsets: Offsets needed to adjust the PHY and the main timer
+ *
+ * PF/VF sends this message to negotiate PTP capabilities. CP updates bitmap
+ * with supported features and fulfills appropriate structures.
+ * If HW uses the primary MBX for PTP: secondary_mbx is set to false.
+ * If HW uses a secondary MBX for PTP: secondary_mbx is set to true. The
+ * control plane then has 2 MBX while the driver has 1 MBX, and send-to-peer-
+ * driver messaging may be used with the valid peer_mbx_q_id and peer_id.
+ * If HW does not use send-to-peer-driver messaging: secondary_mbx is a
+ * don't-care field and peer_mbx_q_id holds the invalid value (0xFFFF).
+ *
+ * Associated with VIRTCHNL2_OP_PTP_GET_CAPS.
+ */
+struct virtchnl2_ptp_get_caps {
+ __le32 caps;
+ __le32 max_adj;
+ __le64 base_incval;
+ __le16 peer_mbx_q_id;
+ u8 peer_id;
+ u8 secondary_mbx;
+ u8 pad[4];
+
+ struct virtchnl2_ptp_clk_reg_offsets clk_offsets;
+ struct virtchnl2_ptp_cross_time_reg_offsets cross_time_offsets;
+ struct virtchnl2_ptp_clk_adj_reg_offsets clk_adj_offsets;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(104, virtchnl2_ptp_get_caps);
+
+/**
+ * struct virtchnl2_ptp_tx_tstamp_latch - Structure that describes a Tx
+ * timestamp value, its index and validity.
+ * @tstamp: Timestamp value
+ * @index: Timestamp index from which the value is read
+ * @valid: Timestamp validity
+ * @pad: Padding for future extensions
+ */
+struct virtchnl2_ptp_tx_tstamp_latch {
+ __le64 tstamp;
+ u8 index;
+ u8 valid;
+ u8 pad[6];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_tx_tstamp_latch);
+
+/**
+ * struct virtchnl2_ptp_get_vport_tx_tstamp_latches - Tx timestamp latches
+ * associated with the vport.
+ * @vport_id: Number of vport that requests the timestamp
+ * @num_latches: Number of latches
+ * @get_devtime_with_txtstmp: Flag to request device time along with Tx timestamp
+ * @pad: Padding for future extensions
+ * @device_time: device time if get_devtime_with_txtstmp was set in request
+ * @tstamp_latches: PTP TX timestamp latch
+ *
+ * PF/VF sends this message to receive a specified number of timestamp
+ * entries.
+ *
+ * Associated with VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP.
+ */
+struct virtchnl2_ptp_get_vport_tx_tstamp_latches {
+ __le32 vport_id;
+ __le16 num_latches;
+ u8 get_devtime_with_txtstmp;
+ u8 pad[1];
+ __le64 device_time;
+
+ struct virtchnl2_ptp_tx_tstamp_latch tstamp_latches[]
+ __counted_by_le(num_latches);
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_get_vport_tx_tstamp_latches);
+
+/**
+ * struct virtchnl2_ptp_get_dev_clk_time - Associated with message
+ * VIRTCHNL2_OP_PTP_GET_DEV_CLK_TIME.
+ * @dev_time_ns: Device clock time value in nanoseconds
+ *
+ * PF/VF sends this message to receive the time from the main timer.
+ */
+struct virtchnl2_ptp_get_dev_clk_time {
+ __le64 dev_time_ns;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_ptp_get_dev_clk_time);
+
+/**
+ * struct virtchnl2_ptp_get_cross_time: Associated with message
+ * VIRTCHNL2_OP_PTP_GET_CROSS_TIME.
+ * @sys_time_ns: System counter value expressed in nanoseconds, read
+ * synchronously with device time
+ * @dev_time_ns: Device clock time value expressed in nanoseconds
+ *
+ * PF/VF sends this message to receive the cross time.
+ */
+struct virtchnl2_ptp_get_cross_time {
+ __le64 sys_time_ns;
+ __le64 dev_time_ns;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_get_cross_time);
+
+/**
+ * struct virtchnl2_ptp_set_dev_clk_time: Associated with message
+ * VIRTCHNL2_OP_PTP_SET_DEV_CLK_TIME.
+ * @dev_time_ns: Device time value expressed in nanoseconds to set
+ *
+ * PF/VF sends this message to set the time of the main timer.
+ */
+struct virtchnl2_ptp_set_dev_clk_time {
+ __le64 dev_time_ns;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_ptp_set_dev_clk_time);
+
+/**
+ * struct virtchnl2_ptp_adj_dev_clk_fine: Associated with message
+ * VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_FINE.
+ * @incval: Source timer increment value per clock cycle
+ *
+ * PF/VF sends this message to adjust the frequency of the main timer by the
+ * indicated increment value.
+ */
+struct virtchnl2_ptp_adj_dev_clk_fine {
+ __le64 incval;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_ptp_adj_dev_clk_fine);
+
+/**
+ * struct virtchnl2_ptp_adj_dev_clk_time: Associated with message
+ * VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_TIME.
+ * @delta: Offset in nanoseconds to adjust the time by
+ *
+ * PF/VF sends this message to adjust the time of the main timer by the delta.
+ */
+struct virtchnl2_ptp_adj_dev_clk_time {
+ __le64 delta;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_ptp_adj_dev_clk_time);
+
#endif /* _VIRTCHNL_2_H_ */
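
The VIRTCHNL2_CHECK_STRUCT_LEN() assertions above make the reply-size check in idpf_ptp_get_vport_tstamps_caps() easy to verify by hand; with an illustrative num_latches of 4:

	struct_size(rcv_tx_tstamp_caps, tstamp_latches, 4)
		= sizeof(struct virtchnl2_ptp_get_vport_tx_tstamp_caps)
		  + 4 * sizeof(struct virtchnl2_ptp_tx_tstamp_latch_caps)
		= 16 + 4 * 16 = 80 bytes

Any reply whose size differs from this is rejected with -EIO before the latch list is built.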
diff --git a/drivers/net/ethernet/intel/igb/Makefile b/drivers/net/ethernet/intel/igb/Makefile
index 463c0d26b9d4..6c1b702fd992 100644
--- a/drivers/net/ethernet/intel/igb/Makefile
+++ b/drivers/net/ethernet/intel/igb/Makefile
@@ -8,4 +8,4 @@ obj-$(CONFIG_IGB) += igb.o
igb-y := igb_main.o igb_ethtool.o e1000_82575.o \
e1000_mac.o e1000_nvm.o e1000_phy.o e1000_mbx.o \
- e1000_i210.o igb_ptp.o igb_hwmon.o
+ e1000_i210.o igb_ptp.o igb_hwmon.o igb_xsk.o
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 3c2dc7bdebb5..f34ead8243e9 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -18,8 +18,10 @@
#include <linux/i2c-algo-bit.h>
#include <linux/pci.h>
#include <linux/mdio.h>
+#include <linux/lockdep.h>
#include <net/xdp.h>
+#include <net/xdp_sock_drv.h>
struct igb_adapter;
@@ -86,6 +88,7 @@ struct igb_adapter;
#define IGB_XDP_CONSUMED BIT(0)
#define IGB_XDP_TX BIT(1)
#define IGB_XDP_REDIR BIT(2)
+#define IGB_XDP_EXIT BIT(3)
struct vf_data_storage {
unsigned char vf_mac_addresses[ETH_ALEN];
@@ -255,6 +258,7 @@ enum igb_tx_flags {
enum igb_tx_buf_type {
IGB_TYPE_SKB = 0,
IGB_TYPE_XDP,
+ IGB_TYPE_XSK
};
/* wrapper around a pointer to a socket buffer,
@@ -320,6 +324,7 @@ struct igb_ring {
union { /* array of buffer info structs */
struct igb_tx_buffer *tx_buffer_info;
struct igb_rx_buffer *rx_buffer_info;
+ struct xdp_buff **rx_buffer_info_zc;
};
void *desc; /* descriptor ring memory */
unsigned long flags; /* ring specific flags */
@@ -357,6 +362,7 @@ struct igb_ring {
};
};
struct xdp_rxq_info xdp_rxq;
+ struct xsk_buff_pool *xsk_pool;
} ____cacheline_internodealigned_in_smp;
struct igb_q_vector {
@@ -384,7 +390,9 @@ enum e1000_ring_flags_t {
IGB_RING_FLAG_RX_SCTP_CSUM,
IGB_RING_FLAG_RX_LB_VLAN_BSWAP,
IGB_RING_FLAG_TX_CTX_IDX,
- IGB_RING_FLAG_TX_DETECT_HANG
+ IGB_RING_FLAG_TX_DETECT_HANG,
+ IGB_RING_FLAG_TX_DISABLED,
+ IGB_RING_FLAG_RX_ALLOC_FAILED,
};
#define ring_uses_large_buffer(ring) \
@@ -715,6 +723,8 @@ enum igb_boards {
extern char igb_driver_name[];
+void igb_set_queue_napi(struct igb_adapter *adapter, int q_idx,
+ struct napi_struct *napi);
int igb_xmit_xdp_ring(struct igb_adapter *adapter,
struct igb_ring *ring,
struct xdp_frame *xdpf);
@@ -731,12 +741,21 @@ int igb_setup_tx_resources(struct igb_ring *);
int igb_setup_rx_resources(struct igb_ring *);
void igb_free_tx_resources(struct igb_ring *);
void igb_free_rx_resources(struct igb_ring *);
+void igb_clean_tx_ring(struct igb_ring *tx_ring);
+void igb_clean_rx_ring(struct igb_ring *rx_ring);
void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *);
void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *);
+void igb_finalize_xdp(struct igb_adapter *adapter, unsigned int status);
+void igb_update_rx_stats(struct igb_q_vector *q_vector, unsigned int packets,
+ unsigned int bytes);
void igb_setup_tctl(struct igb_adapter *);
void igb_setup_rctl(struct igb_adapter *);
void igb_setup_srrctl(struct igb_adapter *, struct igb_ring *);
netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *);
+int igb_xdp_xmit_back(struct igb_adapter *adapter, struct xdp_buff *xdp);
+void igb_process_skb_fields(struct igb_ring *rx_ring,
+ union e1000_adv_rx_desc *rx_desc,
+ struct sk_buff *skb);
void igb_alloc_rx_buffers(struct igb_ring *, u16);
void igb_update_stats(struct igb_adapter *);
bool igb_has_link(struct igb_adapter *adapter);
@@ -797,6 +816,33 @@ static inline struct netdev_queue *txring_txq(const struct igb_ring *tx_ring)
return netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index);
}
+/* This function assumes __netif_tx_lock is held by the caller. */
+static inline void igb_xdp_ring_update_tail(struct igb_ring *ring)
+{
+ lockdep_assert_held(&txring_txq(ring)->_xmit_lock);
+
+ /* Force memory writes to complete before letting h/w know there
+ * are new descriptors to fetch.
+ */
+ wmb();
+ writel(ring->next_to_use, ring->tail);
+}
+
+static inline struct igb_ring *igb_xdp_tx_queue_mapping(struct igb_adapter *adapter)
+{
+ unsigned int r_idx = smp_processor_id();
+
+ if (r_idx >= adapter->num_tx_queues)
+ r_idx = r_idx % adapter->num_tx_queues;
+
+ return adapter->tx_ring[r_idx];
+}
+
+static inline bool igb_xdp_is_enabled(struct igb_adapter *adapter)
+{
+ return !!READ_ONCE(adapter->xdp_prog);
+}
+
int igb_add_filter(struct igb_adapter *adapter,
struct igb_nfc_filter *input);
int igb_erase_filter(struct igb_adapter *adapter,
@@ -807,4 +853,17 @@ int igb_add_mac_steering_filter(struct igb_adapter *adapter,
int igb_del_mac_steering_filter(struct igb_adapter *adapter,
const u8 *addr, u8 queue, u8 flags);
+struct xsk_buff_pool *igb_xsk_pool(struct igb_adapter *adapter,
+ struct igb_ring *ring);
+int igb_xsk_pool_setup(struct igb_adapter *adapter,
+ struct xsk_buff_pool *pool,
+ u16 qid);
+bool igb_alloc_rx_buffers_zc(struct igb_ring *rx_ring,
+ struct xsk_buff_pool *xsk_pool, u16 count);
+void igb_clean_rx_ring_zc(struct igb_ring *rx_ring);
+int igb_clean_rx_irq_zc(struct igb_q_vector *q_vector,
+ struct xsk_buff_pool *xsk_pool, const int budget);
+bool igb_xmit_zc(struct igb_ring *tx_ring, struct xsk_buff_pool *xsk_pool);
+int igb_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags);
+
#endif /* _IGB_H_ */
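
The zero-copy entry points declared above are wired up by the igb_main.c and igb_xsk.c changes that follow; igb_configure() below already shows the dispatch-on-xsk_pool pattern for buffer allocation. A hedged sketch of the equivalent dispatch at the Rx poll site (igb_clean_rx_irq() is the existing page-based cleaner; the actual poll-side wiring is in hunks not shown here):

	/* Illustrative only: choose the zero-copy Rx cleaner when an XSK
	 * pool is attached to the ring, otherwise fall back to pages.
	 */
	static int example_clean_rx(struct igb_q_vector *q_vector, int budget)
	{
		struct igb_ring *rx_ring = q_vector->rx.ring;
		struct xsk_buff_pool *pool = READ_ONCE(rx_ring->xsk_pool);

		return pool ? igb_clean_rx_irq_zc(q_vector, pool, budget) :
			      igb_clean_rx_irq(q_vector, budget);
	}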
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 08578980b651..b76a154e635e 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -33,7 +33,6 @@
#include <linux/bpf_trace.h>
#include <linux/pm_runtime.h>
#include <linux/etherdevice.h>
-#include <linux/lockdep.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
@@ -116,8 +115,6 @@ static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
-static void igb_clean_tx_ring(struct igb_ring *);
-static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_rx_mode(struct net_device *);
static void igb_update_phy_info(struct timer_list *);
static void igb_watchdog(struct timer_list *);
@@ -475,12 +472,17 @@ rx_ring_summary:
for (i = 0; i < rx_ring->count; i++) {
const char *next_desc;
- struct igb_rx_buffer *buffer_info;
- buffer_info = &rx_ring->rx_buffer_info[i];
+ dma_addr_t dma = (dma_addr_t)0;
+ struct igb_rx_buffer *buffer_info = NULL;
rx_desc = IGB_RX_DESC(rx_ring, i);
u0 = (struct my_u0 *)rx_desc;
staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+ if (!rx_ring->xsk_pool) {
+ buffer_info = &rx_ring->rx_buffer_info[i];
+ dma = buffer_info->dma;
+ }
+
if (i == rx_ring->next_to_use)
next_desc = " NTU";
else if (i == rx_ring->next_to_clean)
@@ -500,11 +502,11 @@ rx_ring_summary:
"R ", i,
le64_to_cpu(u0->a),
le64_to_cpu(u0->b),
- (u64)buffer_info->dma,
+ (u64)dma,
next_desc);
if (netif_msg_pktdata(adapter) &&
- buffer_info->dma && buffer_info->page) {
+ buffer_info && dma && buffer_info->page) {
print_hex_dump(KERN_INFO, "",
DUMP_PREFIX_ADDRESS,
16, 1,
@@ -637,6 +639,10 @@ static int __init igb_init_module(void)
dca_register_notify(&dca_notifier);
#endif
ret = pci_register_driver(&igb_driver);
+#ifdef CONFIG_IGB_DCA
+ if (ret)
+ dca_unregister_notify(&dca_notifier);
+#endif
return ret;
}
@@ -941,6 +947,9 @@ static int igb_request_msix(struct igb_adapter *adapter)
q_vector);
if (err)
goto err_free;
+
+ netif_napi_set_irq(&q_vector->napi,
+ adapter->msix_entries[vector].vector);
}
igb_configure_msix(adapter);
@@ -1188,7 +1197,8 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
return -ENOMEM;
/* initialize NAPI */
- netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll);
+ netif_napi_add_config(adapter->netdev, &q_vector->napi, igb_poll,
+ v_idx);
/* tie q_vector and adapter together */
adapter->q_vector[v_idx] = q_vector;
@@ -1986,7 +1996,11 @@ static void igb_configure(struct igb_adapter *adapter)
*/
for (i = 0; i < adapter->num_rx_queues; i++) {
struct igb_ring *ring = adapter->rx_ring[i];
- igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
+ if (ring->xsk_pool)
+ igb_alloc_rx_buffers_zc(ring, ring->xsk_pool,
+ igb_desc_unused(ring));
+ else
+ igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
}
}
@@ -2086,6 +2100,22 @@ static void igb_check_swap_media(struct igb_adapter *adapter)
wr32(E1000_CTRL_EXT, ctrl_ext);
}
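+/* Associate the Rx/Tx rings served by interrupt vector @vector with @napi for
+ * the netdev queue API, or clear the association when @napi is NULL.
+ */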
+void igb_set_queue_napi(struct igb_adapter *adapter, int vector,
+ struct napi_struct *napi)
+{
+ struct igb_q_vector *q_vector = adapter->q_vector[vector];
+
+ if (q_vector->rx.ring)
+ netif_queue_set_napi(adapter->netdev,
+ q_vector->rx.ring->queue_index,
+ NETDEV_QUEUE_TYPE_RX, napi);
+
+ if (q_vector->tx.ring)
+ netif_queue_set_napi(adapter->netdev,
+ q_vector->tx.ring->queue_index,
+ NETDEV_QUEUE_TYPE_TX, napi);
+}
+
/**
* igb_up - Open the interface and prepare it to handle traffic
* @adapter: board private structure
@@ -2093,6 +2123,7 @@ static void igb_check_swap_media(struct igb_adapter *adapter)
int igb_up(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
+ struct napi_struct *napi;
int i;
/* hardware has been reset, we need to reload some things */
@@ -2100,8 +2131,11 @@ int igb_up(struct igb_adapter *adapter)
clear_bit(__IGB_DOWN, &adapter->state);
- for (i = 0; i < adapter->num_q_vectors; i++)
- napi_enable(&(adapter->q_vector[i]->napi));
+ for (i = 0; i < adapter->num_q_vectors; i++) {
+ napi = &adapter->q_vector[i]->napi;
+ napi_enable(napi);
+ igb_set_queue_napi(adapter, i, napi);
+ }
if (adapter->flags & IGB_FLAG_HAS_MSIX)
igb_configure_msix(adapter);
@@ -2171,12 +2205,13 @@ void igb_down(struct igb_adapter *adapter)
for (i = 0; i < adapter->num_q_vectors; i++) {
if (adapter->q_vector[i]) {
napi_synchronize(&adapter->q_vector[i]->napi);
+ igb_set_queue_napi(adapter, i, NULL);
napi_disable(&adapter->q_vector[i]->napi);
}
}
- del_timer_sync(&adapter->watchdog_timer);
- del_timer_sync(&adapter->phy_info_timer);
+ timer_delete_sync(&adapter->watchdog_timer);
+ timer_delete_sync(&adapter->phy_info_timer);
/* record the stats before reset*/
spin_lock(&adapter->stats64_lock);
@@ -2907,37 +2942,20 @@ static int igb_xdp_setup(struct net_device *dev, struct netdev_bpf *bpf)
static int igb_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
+ struct igb_adapter *adapter = netdev_priv(dev);
+
switch (xdp->command) {
case XDP_SETUP_PROG:
return igb_xdp_setup(dev, xdp);
+ case XDP_SETUP_XSK_POOL:
+ return igb_xsk_pool_setup(adapter, xdp->xsk.pool,
+ xdp->xsk.queue_id);
default:
return -EINVAL;
}
}
-/* This function assumes __netif_tx_lock is held by the caller. */
-static void igb_xdp_ring_update_tail(struct igb_ring *ring)
-{
- lockdep_assert_held(&txring_txq(ring)->_xmit_lock);
-
- /* Force memory writes to complete before letting h/w know there
- * are new descriptors to fetch.
- */
- wmb();
- writel(ring->next_to_use, ring->tail);
-}
-
-static struct igb_ring *igb_xdp_tx_queue_mapping(struct igb_adapter *adapter)
-{
- unsigned int r_idx = smp_processor_id();
-
- if (r_idx >= adapter->num_tx_queues)
- r_idx = r_idx % adapter->num_tx_queues;
-
- return adapter->tx_ring[r_idx];
-}
-
-static int igb_xdp_xmit_back(struct igb_adapter *adapter, struct xdp_buff *xdp)
+int igb_xdp_xmit_back(struct igb_adapter *adapter, struct xdp_buff *xdp)
{
struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
int cpu = smp_processor_id();
@@ -2951,7 +2969,8 @@ static int igb_xdp_xmit_back(struct igb_adapter *adapter, struct xdp_buff *xdp)
 /* During program transitions it's possible adapter->xdp_prog is assigned
 * but the ring has not been configured yet. In this case simply abort xmit.
*/
- tx_ring = adapter->xdp_prog ? igb_xdp_tx_queue_mapping(adapter) : NULL;
+ tx_ring = igb_xdp_is_enabled(adapter) ?
+ igb_xdp_tx_queue_mapping(adapter) : NULL;
if (unlikely(!tx_ring))
return IGB_XDP_CONSUMED;
@@ -2984,10 +3003,14 @@ static int igb_xdp_xmit(struct net_device *dev, int n,
 /* During program transitions it's possible adapter->xdp_prog is assigned
 * but the ring has not been configured yet. In this case simply abort xmit.
*/
- tx_ring = adapter->xdp_prog ? igb_xdp_tx_queue_mapping(adapter) : NULL;
+ tx_ring = igb_xdp_is_enabled(adapter) ?
+ igb_xdp_tx_queue_mapping(adapter) : NULL;
if (unlikely(!tx_ring))
return -ENXIO;
+ if (unlikely(test_bit(IGB_RING_FLAG_TX_DISABLED, &tx_ring->flags)))
+ return -ENXIO;
+
nq = txring_txq(tx_ring);
__netif_tx_lock(nq, cpu);
@@ -3038,6 +3061,7 @@ static const struct net_device_ops igb_netdev_ops = {
.ndo_setup_tc = igb_setup_tc,
.ndo_bpf = igb_xdp,
.ndo_xdp_xmit = igb_xdp_xmit,
+ .ndo_xsk_wakeup = igb_xsk_wakeup,
};
/**
@@ -3334,7 +3358,8 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->priv_flags |= IFF_SUPP_NOFCS;
netdev->priv_flags |= IFF_UNICAST_FLT;
- netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_XSK_ZEROCOPY;
/* MTU range: 68 - 9216 */
netdev->min_mtu = ETH_MIN_MTU;
@@ -3860,8 +3885,8 @@ static void igb_remove(struct pci_dev *pdev)
* disable watchdog from being rescheduled.
*/
set_bit(__IGB_DOWN, &adapter->state);
- del_timer_sync(&adapter->watchdog_timer);
- del_timer_sync(&adapter->phy_info_timer);
+ timer_delete_sync(&adapter->watchdog_timer);
+ timer_delete_sync(&adapter->phy_info_timer);
cancel_work_sync(&adapter->reset_task);
cancel_work_sync(&adapter->watchdog_task);
@@ -4113,8 +4138,9 @@ static int igb_sw_init(struct igb_adapter *adapter)
static int __igb_open(struct net_device *netdev, bool resuming)
{
struct igb_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev;
+ struct e1000_hw *hw = &adapter->hw;
+ struct napi_struct *napi;
int err;
int i;
@@ -4166,8 +4192,11 @@ static int __igb_open(struct net_device *netdev, bool resuming)
/* From here on the code is the same as igb_up() */
clear_bit(__IGB_DOWN, &adapter->state);
- for (i = 0; i < adapter->num_q_vectors; i++)
- napi_enable(&(adapter->q_vector[i]->napi));
+ for (i = 0; i < adapter->num_q_vectors; i++) {
+ napi = &adapter->q_vector[i]->napi;
+ napi_enable(napi);
+ igb_set_queue_napi(adapter, i, napi);
+ }
/* Clear any pending interrupts. */
rd32(E1000_TSICR);
@@ -4360,6 +4389,8 @@ void igb_configure_tx_ring(struct igb_adapter *adapter,
u64 tdba = ring->dma;
int reg_idx = ring->reg_idx;
+ WRITE_ONCE(ring->xsk_pool, igb_xsk_pool(adapter, ring));
+
wr32(E1000_TDLEN(reg_idx),
ring->count * sizeof(union e1000_adv_tx_desc));
wr32(E1000_TDBAL(reg_idx),
@@ -4420,7 +4451,8 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
res = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
- rx_ring->queue_index, 0);
+ rx_ring->queue_index,
+ rx_ring->q_vector->napi.napi_id);
if (res < 0) {
dev_err(dev, "Failed to register xdp_rxq index %u\n",
rx_ring->queue_index);
@@ -4716,12 +4748,17 @@ void igb_setup_srrctl(struct igb_adapter *adapter, struct igb_ring *ring)
struct e1000_hw *hw = &adapter->hw;
int reg_idx = ring->reg_idx;
u32 srrctl = 0;
+ u32 buf_size;
- srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
- if (ring_uses_large_buffer(ring))
- srrctl |= IGB_RXBUFFER_3072 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
+ if (ring->xsk_pool)
+ buf_size = xsk_pool_get_rx_frame_size(ring->xsk_pool);
+ else if (ring_uses_large_buffer(ring))
+ buf_size = IGB_RXBUFFER_3072;
else
- srrctl |= IGB_RXBUFFER_2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
+ buf_size = IGB_RXBUFFER_2048;
+
+ srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
+ srrctl |= buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT;
srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
if (hw->mac.type >= e1000_82580)
srrctl |= E1000_SRRCTL_TIMESTAMP;
@@ -4753,8 +4790,17 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
u32 rxdctl = 0;
xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
- WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
- MEM_TYPE_PAGE_SHARED, NULL));
+ WRITE_ONCE(ring->xsk_pool, igb_xsk_pool(adapter, ring));
+ if (ring->xsk_pool) {
+ WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+ MEM_TYPE_XSK_BUFF_POOL,
+ NULL));
+ xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
+ } else {
+ WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+ MEM_TYPE_PAGE_SHARED,
+ NULL));
+ }
/* disable the queue */
wr32(E1000_RXDCTL(reg_idx), 0);
@@ -4781,9 +4827,12 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
rxdctl |= IGB_RX_HTHRESH << 8;
rxdctl |= IGB_RX_WTHRESH << 16;
- /* initialize rx_buffer_info */
- memset(ring->rx_buffer_info, 0,
- sizeof(struct igb_rx_buffer) * ring->count);
+ if (ring->xsk_pool)
+ memset(ring->rx_buffer_info_zc, 0,
+ sizeof(*ring->rx_buffer_info_zc) * ring->count);
+ else
+ memset(ring->rx_buffer_info, 0,
+ sizeof(*ring->rx_buffer_info) * ring->count);
/* initialize Rx descriptor 0 */
rx_desc = IGB_RX_DESC(ring, 0);
@@ -4884,19 +4933,24 @@ static void igb_free_all_tx_resources(struct igb_adapter *adapter)
* igb_clean_tx_ring - Free Tx Buffers
* @tx_ring: ring to be cleaned
**/
-static void igb_clean_tx_ring(struct igb_ring *tx_ring)
+void igb_clean_tx_ring(struct igb_ring *tx_ring)
{
u16 i = tx_ring->next_to_clean;
struct igb_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
+ u32 xsk_frames = 0;
while (i != tx_ring->next_to_use) {
union e1000_adv_tx_desc *eop_desc, *tx_desc;
/* Free all the Tx ring sk_buffs or xdp frames */
- if (tx_buffer->type == IGB_TYPE_SKB)
+ if (tx_buffer->type == IGB_TYPE_SKB) {
dev_kfree_skb_any(tx_buffer->skb);
- else
+ } else if (tx_buffer->type == IGB_TYPE_XDP) {
xdp_return_frame(tx_buffer->xdpf);
+ } else if (tx_buffer->type == IGB_TYPE_XSK) {
+ xsk_frames++;
+ goto skip_for_xsk;
+ }
/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
@@ -4927,6 +4981,7 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring)
DMA_TO_DEVICE);
}
+skip_for_xsk:
tx_buffer->next_to_watch = NULL;
/* move us one more past the eop_desc for start of next pkt */
@@ -4941,6 +4996,9 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring)
/* reset BQL for queue */
netdev_tx_reset_queue(txring_txq(tx_ring));
+ if (tx_ring->xsk_pool && xsk_frames)
+ xsk_tx_completed(tx_ring->xsk_pool, xsk_frames);
+
/* reset next_to_use and next_to_clean */
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
@@ -4971,8 +5029,13 @@ void igb_free_rx_resources(struct igb_ring *rx_ring)
rx_ring->xdp_prog = NULL;
xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
- vfree(rx_ring->rx_buffer_info);
- rx_ring->rx_buffer_info = NULL;
+ if (rx_ring->xsk_pool) {
+ vfree(rx_ring->rx_buffer_info_zc);
+ rx_ring->rx_buffer_info_zc = NULL;
+ } else {
+ vfree(rx_ring->rx_buffer_info);
+ rx_ring->rx_buffer_info = NULL;
+ }
/* if not set, then don't free */
if (!rx_ring->desc)
@@ -5003,13 +5066,18 @@ static void igb_free_all_rx_resources(struct igb_adapter *adapter)
* igb_clean_rx_ring - Free Rx Buffers per Queue
* @rx_ring: ring to free buffers from
**/
-static void igb_clean_rx_ring(struct igb_ring *rx_ring)
+void igb_clean_rx_ring(struct igb_ring *rx_ring)
{
u16 i = rx_ring->next_to_clean;
dev_kfree_skb(rx_ring->skb);
rx_ring->skb = NULL;
+ if (rx_ring->xsk_pool) {
+ igb_clean_rx_ring_zc(rx_ring);
+ goto skip_for_xsk;
+ }
+
/* Free all the Rx ring sk_buffs */
while (i != rx_ring->next_to_alloc) {
struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
@@ -5037,6 +5105,7 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
i = 0;
}
+skip_for_xsk:
rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
@@ -5396,7 +5465,8 @@ static void igb_spoof_check(struct igb_adapter *adapter)
*/
static void igb_update_phy_info(struct timer_list *t)
{
- struct igb_adapter *adapter = from_timer(adapter, t, phy_info_timer);
+ struct igb_adapter *adapter = timer_container_of(adapter, t,
+ phy_info_timer);
igb_get_phy_info(&adapter->hw);
}
@@ -5486,7 +5556,8 @@ static void igb_check_lvmmc(struct igb_adapter *adapter)
**/
static void igb_watchdog(struct timer_list *t)
{
- struct igb_adapter *adapter = from_timer(adapter, t, watchdog_timer);
+ struct igb_adapter *adapter = timer_container_of(adapter, t,
+ watchdog_timer);
/* Do the rest outside of interrupt context */
schedule_work(&adapter->watchdog_task);
}
@@ -5686,11 +5757,29 @@ no_wait:
if (adapter->flags & IGB_FLAG_HAS_MSIX) {
u32 eics = 0;
- for (i = 0; i < adapter->num_q_vectors; i++)
- eics |= adapter->q_vector[i]->eims_value;
- wr32(E1000_EICS, eics);
+ for (i = 0; i < adapter->num_q_vectors; i++) {
+ struct igb_q_vector *q_vector = adapter->q_vector[i];
+ struct igb_ring *rx_ring;
+
+ if (!q_vector->rx.ring)
+ continue;
+
+ rx_ring = adapter->rx_ring[q_vector->rx.ring->queue_index];
+
+ if (test_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags)) {
+ eics |= q_vector->eims_value;
+ clear_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);
+ }
+ }
+ if (eics)
+ wr32(E1000_EICS, eics);
} else {
- wr32(E1000_ICS, E1000_ICS_RXDMT0);
+ struct igb_ring *rx_ring = adapter->rx_ring[0];
+
+ if (test_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags)) {
+ clear_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);
+ wr32(E1000_ICS, E1000_ICS_RXDMT0);
+ }
}
igb_spoof_check(adapter);
@@ -6463,6 +6552,9 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
return NETDEV_TX_BUSY;
}
+ if (unlikely(test_bit(IGB_RING_FLAG_TX_DISABLED, &tx_ring->flags)))
+ return NETDEV_TX_BUSY;
+
/* record the location of the first descriptor for this packet */
first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
first->type = IGB_TYPE_SKB;
@@ -6618,7 +6710,7 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
struct igb_adapter *adapter = netdev_priv(netdev);
int max_frame = new_mtu + IGB_ETH_PKT_HDR_PAD;
- if (adapter->xdp_prog) {
+ if (igb_xdp_is_enabled(adapter)) {
int i;
for (i = 0; i < adapter->num_rx_queues; i++) {
@@ -8191,6 +8283,7 @@ static int igb_poll(struct napi_struct *napi, int budget)
struct igb_q_vector *q_vector = container_of(napi,
struct igb_q_vector,
napi);
+ struct xsk_buff_pool *xsk_pool;
bool clean_complete = true;
int work_done = 0;
@@ -8202,7 +8295,12 @@ static int igb_poll(struct napi_struct *napi, int budget)
clean_complete = igb_clean_tx_irq(q_vector, budget);
if (q_vector->rx.ring) {
- int cleaned = igb_clean_rx_irq(q_vector, budget);
+ int cleaned;
+
+ xsk_pool = READ_ONCE(q_vector->rx.ring->xsk_pool);
+ cleaned = xsk_pool ?
+ igb_clean_rx_irq_zc(q_vector, xsk_pool, budget) :
+ igb_clean_rx_irq(q_vector, budget);
work_done += cleaned;
if (cleaned >= budget)
@@ -8231,13 +8329,18 @@ static int igb_poll(struct napi_struct *napi, int budget)
**/
static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
{
- struct igb_adapter *adapter = q_vector->adapter;
- struct igb_ring *tx_ring = q_vector->tx.ring;
- struct igb_tx_buffer *tx_buffer;
- union e1000_adv_tx_desc *tx_desc;
unsigned int total_bytes = 0, total_packets = 0;
+ struct igb_adapter *adapter = q_vector->adapter;
unsigned int budget = q_vector->tx.work_limit;
+ struct igb_ring *tx_ring = q_vector->tx.ring;
unsigned int i = tx_ring->next_to_clean;
+ union e1000_adv_tx_desc *tx_desc;
+ struct igb_tx_buffer *tx_buffer;
+ struct xsk_buff_pool *xsk_pool;
+ int cpu = smp_processor_id();
+ bool xsk_xmit_done = true;
+ struct netdev_queue *nq;
+ u32 xsk_frames = 0;
if (test_bit(__IGB_DOWN, &adapter->state))
return true;
@@ -8268,10 +8371,14 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
total_packets += tx_buffer->gso_segs;
/* free the skb */
- if (tx_buffer->type == IGB_TYPE_SKB)
+ if (tx_buffer->type == IGB_TYPE_SKB) {
napi_consume_skb(tx_buffer->skb, napi_budget);
- else
+ } else if (tx_buffer->type == IGB_TYPE_XDP) {
xdp_return_frame(tx_buffer->xdpf);
+ } else if (tx_buffer->type == IGB_TYPE_XSK) {
+ xsk_frames++;
+ goto skip_for_xsk;
+ }
/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
@@ -8303,6 +8410,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
}
}
+skip_for_xsk:
/* move us one more past the eop_desc for start of next pkt */
tx_buffer++;
tx_desc++;
@@ -8331,6 +8439,21 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
q_vector->tx.total_bytes += total_bytes;
q_vector->tx.total_packets += total_packets;
+ xsk_pool = READ_ONCE(tx_ring->xsk_pool);
+ if (xsk_pool) {
+ if (xsk_frames)
+ xsk_tx_completed(xsk_pool, xsk_frames);
+ if (xsk_uses_need_wakeup(xsk_pool))
+ xsk_set_tx_need_wakeup(xsk_pool);
+
+ nq = txring_txq(tx_ring);
+ __netif_tx_lock(nq, cpu);
+ /* Avoid transmit queue timeout since we share it with the slow path */
+ txq_trans_cond_update(nq);
+ xsk_xmit_done = igb_xmit_zc(tx_ring, xsk_pool);
+ __netif_tx_unlock(nq);
+ }
+
if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
struct e1000_hw *hw = &adapter->hw;
@@ -8393,7 +8516,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
}
}
- return !!budget;
+ return !!budget && xsk_xmit_done;
}
/**
@@ -8584,9 +8707,8 @@ static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
return skb;
}
-static struct sk_buff *igb_run_xdp(struct igb_adapter *adapter,
- struct igb_ring *rx_ring,
- struct xdp_buff *xdp)
+static int igb_run_xdp(struct igb_adapter *adapter, struct igb_ring *rx_ring,
+ struct xdp_buff *xdp)
{
int err, result = IGB_XDP_PASS;
struct bpf_prog *xdp_prog;
@@ -8626,7 +8748,7 @@ out_failure:
break;
}
xdp_out:
- return ERR_PTR(-result);
+ return result;
}
static unsigned int igb_rx_frame_truesize(struct igb_ring *rx_ring,
@@ -8752,10 +8874,6 @@ static bool igb_cleanup_headers(struct igb_ring *rx_ring,
union e1000_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
- /* XDP packets use error pointer so abort at this point */
- if (IS_ERR(skb))
- return true;
-
if (unlikely((igb_test_staterr(rx_desc,
E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) {
struct net_device *netdev = rx_ring->netdev;
@@ -8782,9 +8900,9 @@ static bool igb_cleanup_headers(struct igb_ring *rx_ring,
* order to populate the hash, checksum, VLAN, timestamp, protocol, and
* other fields within the skb.
**/
-static void igb_process_skb_fields(struct igb_ring *rx_ring,
- union e1000_adv_rx_desc *rx_desc,
- struct sk_buff *skb)
+void igb_process_skb_fields(struct igb_ring *rx_ring,
+ union e1000_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
{
struct net_device *dev = rx_ring->netdev;
@@ -8866,6 +8984,38 @@ static void igb_put_rx_buffer(struct igb_ring *rx_ring,
rx_buffer->page = NULL;
}
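+/* Finish off the XDP verdicts gathered during a poll cycle: flush pending
+ * redirects and, when frames were queued with XDP_TX, bump the Tx ring tail.
+ */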
+void igb_finalize_xdp(struct igb_adapter *adapter, unsigned int status)
+{
+ int cpu = smp_processor_id();
+ struct netdev_queue *nq;
+
+ if (status & IGB_XDP_REDIR)
+ xdp_do_flush();
+
+ if (status & IGB_XDP_TX) {
+ struct igb_ring *tx_ring = igb_xdp_tx_queue_mapping(adapter);
+
+ nq = txring_txq(tx_ring);
+ __netif_tx_lock(nq, cpu);
+ igb_xdp_ring_update_tail(tx_ring);
+ __netif_tx_unlock(nq);
+ }
+}
+
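+/* Fold the packet and byte counts from one poll into the ring and q_vector
+ * Rx statistics under the u64 stats sync point.
+ */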
+void igb_update_rx_stats(struct igb_q_vector *q_vector, unsigned int packets,
+ unsigned int bytes)
+{
+ struct igb_ring *ring = q_vector->rx.ring;
+
+ u64_stats_update_begin(&ring->rx_syncp);
+ ring->rx_stats.packets += packets;
+ ring->rx_stats.bytes += bytes;
+ u64_stats_update_end(&ring->rx_syncp);
+
+ q_vector->rx.total_packets += packets;
+ q_vector->rx.total_bytes += bytes;
+}
+
static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
{
unsigned int total_bytes = 0, total_packets = 0;
@@ -8873,12 +9023,11 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
struct igb_ring *rx_ring = q_vector->rx.ring;
u16 cleaned_count = igb_desc_unused(rx_ring);
struct sk_buff *skb = rx_ring->skb;
- int cpu = smp_processor_id();
unsigned int xdp_xmit = 0;
- struct netdev_queue *nq;
struct xdp_buff xdp;
u32 frame_sz = 0;
int rx_buf_pgcnt;
+ int xdp_res = 0;
 /* Frame size depends on rx_ring setup when PAGE_SIZE=4K */
#if (PAGE_SIZE < 8192)
@@ -8936,12 +9085,10 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
 /* At larger PAGE_SIZE, frame_sz depends on len size */
xdp.frame_sz = igb_rx_frame_truesize(rx_ring, size);
#endif
- skb = igb_run_xdp(adapter, rx_ring, &xdp);
+ xdp_res = igb_run_xdp(adapter, rx_ring, &xdp);
}
- if (IS_ERR(skb)) {
- unsigned int xdp_res = -PTR_ERR(skb);
-
+ if (xdp_res) {
if (xdp_res & (IGB_XDP_TX | IGB_XDP_REDIR)) {
xdp_xmit |= xdp_res;
igb_rx_buffer_flip(rx_ring, rx_buffer, size);
@@ -8960,9 +9107,10 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
&xdp, timestamp);
/* exit if we failed to retrieve a buffer */
- if (!skb) {
+ if (!xdp_res && !skb) {
rx_ring->rx_stats.alloc_failed++;
rx_buffer->pagecnt_bias++;
+ set_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);
break;
}
@@ -8974,7 +9122,7 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
continue;
/* verify the packet layout is correct */
- if (igb_cleanup_headers(rx_ring, rx_desc, skb)) {
+ if (xdp_res || igb_cleanup_headers(rx_ring, rx_desc, skb)) {
skb = NULL;
continue;
}
@@ -8997,24 +9145,10 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
/* place incomplete frames back on ring for completion */
rx_ring->skb = skb;
- if (xdp_xmit & IGB_XDP_REDIR)
- xdp_do_flush();
-
- if (xdp_xmit & IGB_XDP_TX) {
- struct igb_ring *tx_ring = igb_xdp_tx_queue_mapping(adapter);
-
- nq = txring_txq(tx_ring);
- __netif_tx_lock(nq, cpu);
- igb_xdp_ring_update_tail(tx_ring);
- __netif_tx_unlock(nq);
- }
+ if (xdp_xmit)
+ igb_finalize_xdp(adapter, xdp_xmit);
- u64_stats_update_begin(&rx_ring->rx_syncp);
- rx_ring->rx_stats.packets += total_packets;
- rx_ring->rx_stats.bytes += total_bytes;
- u64_stats_update_end(&rx_ring->rx_syncp);
- q_vector->rx.total_packets += total_packets;
- q_vector->rx.total_bytes += total_bytes;
+ igb_update_rx_stats(q_vector, total_packets, total_bytes);
if (cleaned_count)
igb_alloc_rx_buffers(rx_ring, cleaned_count);
@@ -9036,6 +9170,7 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
page = dev_alloc_pages(igb_rx_pg_order(rx_ring));
if (unlikely(!page)) {
rx_ring->rx_stats.alloc_failed++;
+ set_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);
return false;
}
@@ -9052,6 +9187,7 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
__free_pages(page, igb_rx_pg_order(rx_ring));
rx_ring->rx_stats.alloc_failed++;
+ set_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);
return false;
}
@@ -9590,8 +9726,11 @@ static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
if (state == pci_channel_io_perm_failure)
return PCI_ERS_RESULT_DISCONNECT;
+ rtnl_lock();
if (netif_running(netdev))
igb_down(adapter);
+ rtnl_unlock();
+
pci_disable_device(pdev);
/* Request a slot reset. */
@@ -9650,16 +9789,21 @@ static void igb_io_resume(struct pci_dev *pdev)
struct net_device *netdev = pci_get_drvdata(pdev);
struct igb_adapter *adapter = netdev_priv(netdev);
+ rtnl_lock();
if (netif_running(netdev)) {
if (!test_bit(__IGB_DOWN, &adapter->state)) {
dev_dbg(&pdev->dev, "Resuming from non-fatal error, do nothing.\n");
+ rtnl_unlock();
return;
}
+
if (igb_up(adapter)) {
dev_err(&pdev->dev, "igb_up failed after reset\n");
+ rtnl_unlock();
return;
}
}
+ rtnl_unlock();
netif_device_attach(netdev);
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index f94570556120..793c96016288 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -502,11 +502,10 @@ static int igb_ptp_feature_enable_82580(struct ptp_clock_info *ptp,
switch (rq->type) {
case PTP_CLK_REQ_EXTTS:
- /* Reject requests with unsupported flags */
- if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
- PTP_RISING_EDGE |
- PTP_FALLING_EDGE |
- PTP_STRICT_FLAGS))
+ /* Both the rising and falling edge are timestamped */
+ if (rq->extts.flags & PTP_STRICT_FLAGS &&
+ (rq->extts.flags & PTP_ENABLE_FEATURE) &&
+ (rq->extts.flags & PTP_EXTTS_EDGES) != PTP_EXTTS_EDGES)
return -EOPNOTSUPP;
if (on) {
@@ -652,13 +651,6 @@ static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp,
switch (rq->type) {
case PTP_CLK_REQ_EXTTS:
- /* Reject requests with unsupported flags */
- if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
- PTP_RISING_EDGE |
- PTP_FALLING_EDGE |
- PTP_STRICT_FLAGS))
- return -EOPNOTSUPP;
-
/* Reject requests failing to enable both edges. */
if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
(rq->extts.flags & PTP_ENABLE_FEATURE) &&
@@ -1350,6 +1342,9 @@ void igb_ptp_init(struct igb_adapter *adapter)
adapter->ptp_caps.n_per_out = IGB_N_PEROUT;
adapter->ptp_caps.n_pins = IGB_N_SDP;
adapter->ptp_caps.pps = 0;
+ adapter->ptp_caps.supported_extts_flags = PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS;
adapter->ptp_caps.pin_config = adapter->sdp_config;
adapter->ptp_caps.adjfine = igb_ptp_adjfine_82580;
adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576;
@@ -1372,6 +1367,9 @@ void igb_ptp_init(struct igb_adapter *adapter)
adapter->ptp_caps.n_ext_ts = IGB_N_EXTTS;
adapter->ptp_caps.n_per_out = IGB_N_PEROUT;
adapter->ptp_caps.n_pins = IGB_N_SDP;
+ adapter->ptp_caps.supported_extts_flags = PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS;
adapter->ptp_caps.pps = 1;
adapter->ptp_caps.pin_config = adapter->sdp_config;
adapter->ptp_caps.adjfine = igb_ptp_adjfine_82580;
diff --git a/drivers/net/ethernet/intel/igb/igb_xsk.c b/drivers/net/ethernet/intel/igb/igb_xsk.c
new file mode 100644
index 000000000000..5cf67ba29269
--- /dev/null
+++ b/drivers/net/ethernet/intel/igb/igb_xsk.c
@@ -0,0 +1,563 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2018 Intel Corporation. */
+
+#include <linux/bpf_trace.h>
+#include <net/xdp_sock_drv.h>
+#include <net/xdp.h>
+
+#include "e1000_hw.h"
+#include "igb.h"
+
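+/* Switch the ring's Rx bookkeeping between the page-based rx_buffer_info
+ * array and the zero-copy rx_buffer_info_zc array, depending on whether an
+ * XSK buffer pool is being attached (pool_present) or detached.
+ */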
+static int igb_realloc_rx_buffer_info(struct igb_ring *ring, bool pool_present)
+{
+ int size = pool_present ?
+ sizeof(*ring->rx_buffer_info_zc) * ring->count :
+ sizeof(*ring->rx_buffer_info) * ring->count;
+ void *buff_info = vmalloc(size);
+
+ if (!buff_info)
+ return -ENOMEM;
+
+ if (pool_present) {
+ vfree(ring->rx_buffer_info);
+ ring->rx_buffer_info = NULL;
+ ring->rx_buffer_info_zc = buff_info;
+ } else {
+ vfree(ring->rx_buffer_info_zc);
+ ring->rx_buffer_info_zc = NULL;
+ ring->rx_buffer_info = buff_info;
+ }
+
+ return 0;
+}
+
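+/* Quiesce the Tx/Rx ring pair for @qid so an XSK pool can be attached or
+ * detached: stop the hardware queues, disable the shared NAPI context and
+ * drop any buffers still sitting on the rings.
+ */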
+static void igb_txrx_ring_disable(struct igb_adapter *adapter, u16 qid)
+{
+ struct igb_ring *tx_ring = adapter->tx_ring[qid];
+ struct igb_ring *rx_ring = adapter->rx_ring[qid];
+ struct e1000_hw *hw = &adapter->hw;
+
+ set_bit(IGB_RING_FLAG_TX_DISABLED, &tx_ring->flags);
+
+ wr32(E1000_TXDCTL(tx_ring->reg_idx), 0);
+ wr32(E1000_RXDCTL(rx_ring->reg_idx), 0);
+
+ synchronize_net();
+
+ /* Rx/Tx share the same napi context. */
+ napi_disable(&rx_ring->q_vector->napi);
+
+ igb_clean_tx_ring(tx_ring);
+ igb_clean_rx_ring(rx_ring);
+
+ memset(&rx_ring->rx_stats, 0, sizeof(rx_ring->rx_stats));
+ memset(&tx_ring->tx_stats, 0, sizeof(tx_ring->tx_stats));
+}
+
+static void igb_txrx_ring_enable(struct igb_adapter *adapter, u16 qid)
+{
+ struct igb_ring *tx_ring = adapter->tx_ring[qid];
+ struct igb_ring *rx_ring = adapter->rx_ring[qid];
+
+ igb_configure_tx_ring(adapter, tx_ring);
+ igb_configure_rx_ring(adapter, rx_ring);
+
+ synchronize_net();
+
+ clear_bit(IGB_RING_FLAG_TX_DISABLED, &tx_ring->flags);
+
+ /* call igb_desc_unused which always leaves
+ * at least 1 descriptor unused to make sure
+ * next_to_use != next_to_clean
+ */
+ if (rx_ring->xsk_pool)
+ igb_alloc_rx_buffers_zc(rx_ring, rx_ring->xsk_pool,
+ igb_desc_unused(rx_ring));
+ else
+ igb_alloc_rx_buffers(rx_ring, igb_desc_unused(rx_ring));
+
+ /* Rx/Tx share the same napi context. */
+ napi_enable(&rx_ring->q_vector->napi);
+}
+
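+/* Return the XSK buffer pool bound to the ring's queue, or NULL when no pool
+ * is registered for that queue id or no XDP program is loaded.
+ */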
+struct xsk_buff_pool *igb_xsk_pool(struct igb_adapter *adapter,
+ struct igb_ring *ring)
+{
+ int qid = ring->queue_index;
+ struct xsk_buff_pool *pool;
+
+ pool = xsk_get_pool_from_qid(adapter->netdev, qid);
+
+ if (!igb_xdp_is_enabled(adapter))
+ return NULL;
+
+ return (pool && pool->dev) ? pool : NULL;
+}
+
+static int igb_xsk_pool_enable(struct igb_adapter *adapter,
+ struct xsk_buff_pool *pool,
+ u16 qid)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct igb_ring *rx_ring;
+ bool if_running;
+ int err;
+
+ if (qid >= adapter->num_rx_queues)
+ return -EINVAL;
+
+ if (qid >= netdev->real_num_rx_queues ||
+ qid >= netdev->real_num_tx_queues)
+ return -EINVAL;
+
+ err = xsk_pool_dma_map(pool, &adapter->pdev->dev, IGB_RX_DMA_ATTR);
+ if (err)
+ return err;
+
+ rx_ring = adapter->rx_ring[qid];
+ if_running = netif_running(adapter->netdev) && igb_xdp_is_enabled(adapter);
+ if (if_running)
+ igb_txrx_ring_disable(adapter, qid);
+
+ if (if_running) {
+ err = igb_realloc_rx_buffer_info(rx_ring, true);
+ if (!err) {
+ igb_txrx_ring_enable(adapter, qid);
+ /* Kick start the NAPI context so that receiving will start */
+ err = igb_xsk_wakeup(adapter->netdev, qid, XDP_WAKEUP_RX);
+ }
+
+ if (err) {
+ xsk_pool_dma_unmap(pool, IGB_RX_DMA_ATTR);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int igb_xsk_pool_disable(struct igb_adapter *adapter, u16 qid)
+{
+ struct xsk_buff_pool *pool;
+ struct igb_ring *rx_ring;
+ bool if_running;
+ int err;
+
+ pool = xsk_get_pool_from_qid(adapter->netdev, qid);
+ if (!pool)
+ return -EINVAL;
+
+ rx_ring = adapter->rx_ring[qid];
+ if_running = netif_running(adapter->netdev) && igb_xdp_is_enabled(adapter);
+ if (if_running)
+ igb_txrx_ring_disable(adapter, qid);
+
+ xsk_pool_dma_unmap(pool, IGB_RX_DMA_ATTR);
+
+ if (if_running) {
+ err = igb_realloc_rx_buffer_info(rx_ring, false);
+ if (err)
+ return err;
+
+ igb_txrx_ring_enable(adapter, qid);
+ }
+
+ return 0;
+}
+
+int igb_xsk_pool_setup(struct igb_adapter *adapter,
+ struct xsk_buff_pool *pool,
+ u16 qid)
+{
+ return pool ? igb_xsk_pool_enable(adapter, pool, qid) :
+ igb_xsk_pool_disable(adapter, qid);
+}
+
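+/* Allocate up to @count buffers from the pool in one batch and write their
+ * DMA addresses into the Rx descriptors starting at @rx_desc. Returns the
+ * number of descriptors actually filled.
+ */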
+static u16 igb_fill_rx_descs(struct xsk_buff_pool *pool, struct xdp_buff **xdp,
+ union e1000_adv_rx_desc *rx_desc, u16 count)
+{
+ dma_addr_t dma;
+ u16 buffs;
+ int i;
+
+ /* nothing to do */
+ if (!count)
+ return 0;
+
+ buffs = xsk_buff_alloc_batch(pool, xdp, count);
+ for (i = 0; i < buffs; i++) {
+ dma = xsk_buff_xdp_get_dma(*xdp);
+ rx_desc->read.pkt_addr = cpu_to_le64(dma);
+ rx_desc->wb.upper.length = 0;
+
+ rx_desc++;
+ xdp++;
+ }
+
+ return buffs;
+}
+
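+/* Refill the zero-copy Rx ring with up to @count XSK buffers, splitting the
+ * fill at the end of the ring to handle wrap-around. Returns true if all
+ * requested buffers were placed, false if the pool could not supply them.
+ */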
+bool igb_alloc_rx_buffers_zc(struct igb_ring *rx_ring,
+ struct xsk_buff_pool *xsk_pool, u16 count)
+{
+ u32 nb_buffs_extra = 0, nb_buffs = 0;
+ union e1000_adv_rx_desc *rx_desc;
+ u16 ntu = rx_ring->next_to_use;
+ u16 total_count = count;
+ struct xdp_buff **xdp;
+
+ rx_desc = IGB_RX_DESC(rx_ring, ntu);
+ xdp = &rx_ring->rx_buffer_info_zc[ntu];
+
+ if (ntu + count >= rx_ring->count) {
+ nb_buffs_extra = igb_fill_rx_descs(xsk_pool, xdp, rx_desc,
+ rx_ring->count - ntu);
+ if (nb_buffs_extra != rx_ring->count - ntu) {
+ ntu += nb_buffs_extra;
+ goto exit;
+ }
+ rx_desc = IGB_RX_DESC(rx_ring, 0);
+ xdp = rx_ring->rx_buffer_info_zc;
+ ntu = 0;
+ count -= nb_buffs_extra;
+ }
+
+ nb_buffs = igb_fill_rx_descs(xsk_pool, xdp, rx_desc, count);
+ ntu += nb_buffs;
+ if (ntu == rx_ring->count)
+ ntu = 0;
+
+ /* clear the length for the next_to_use descriptor */
+ rx_desc = IGB_RX_DESC(rx_ring, ntu);
+ rx_desc->wb.upper.length = 0;
+
+exit:
+ if (rx_ring->next_to_use != ntu) {
+ rx_ring->next_to_use = ntu;
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ writel(ntu, rx_ring->tail);
+ }
+
+ return total_count == (nb_buffs + nb_buffs_extra);
+}
+
+void igb_clean_rx_ring_zc(struct igb_ring *rx_ring)
+{
+ u16 ntc = rx_ring->next_to_clean;
+ u16 ntu = rx_ring->next_to_use;
+
+ while (ntc != ntu) {
+ struct xdp_buff *xdp = rx_ring->rx_buffer_info_zc[ntc];
+
+ xsk_buff_free(xdp);
+ ntc++;
+ if (ntc >= rx_ring->count)
+ ntc = 0;
+ }
+}
+
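+/* Copy a received frame (and any XDP metadata) out of the XSK buffer into a
+ * newly allocated skb so it can be handed to the regular network stack on
+ * XDP_PASS.
+ */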
+static struct sk_buff *igb_construct_skb_zc(struct igb_ring *rx_ring,
+ struct xdp_buff *xdp,
+ ktime_t timestamp)
+{
+ unsigned int totalsize = xdp->data_end - xdp->data_meta;
+ unsigned int metasize = xdp->data - xdp->data_meta;
+ struct sk_buff *skb;
+
+ net_prefetch(xdp->data_meta);
+
+ /* allocate a skb to store the frags */
+ skb = napi_alloc_skb(&rx_ring->q_vector->napi, totalsize);
+ if (unlikely(!skb))
+ return NULL;
+
+ if (timestamp)
+ skb_hwtstamps(skb)->hwtstamp = timestamp;
+
+ memcpy(__skb_put(skb, totalsize), xdp->data_meta,
+ ALIGN(totalsize, sizeof(long)));
+
+ if (metasize) {
+ skb_metadata_set(skb, metasize);
+ __skb_pull(skb, metasize);
+ }
+
+ return skb;
+}
+
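+/* Run the XDP program on a zero-copy buffer. XDP_REDIRECT is the expected
+ * fast path; -ENOBUFS from xdp_do_redirect() with need_wakeup in use is
+ * returned as IGB_XDP_EXIT so the caller can stop polling early.
+ */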
+static int igb_run_xdp_zc(struct igb_adapter *adapter, struct igb_ring *rx_ring,
+ struct xdp_buff *xdp, struct xsk_buff_pool *xsk_pool,
+ struct bpf_prog *xdp_prog)
+{
+ int err, result = IGB_XDP_PASS;
+ u32 act;
+
+ prefetchw(xdp->data_hard_start); /* xdp_frame write */
+
+ act = bpf_prog_run_xdp(xdp_prog, xdp);
+
+ if (likely(act == XDP_REDIRECT)) {
+ err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
+ if (!err)
+ return IGB_XDP_REDIR;
+
+ if (xsk_uses_need_wakeup(xsk_pool) &&
+ err == -ENOBUFS)
+ result = IGB_XDP_EXIT;
+ else
+ result = IGB_XDP_CONSUMED;
+ goto out_failure;
+ }
+
+ switch (act) {
+ case XDP_PASS:
+ break;
+ case XDP_TX:
+ result = igb_xdp_xmit_back(adapter, xdp);
+ if (result == IGB_XDP_CONSUMED)
+ goto out_failure;
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(adapter->netdev, xdp_prog, act);
+ fallthrough;
+ case XDP_ABORTED:
+out_failure:
+ trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
+ fallthrough;
+ case XDP_DROP:
+ result = IGB_XDP_CONSUMED;
+ break;
+ }
+
+ return result;
+}
+
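+/* Zero-copy Rx poll loop: run XDP on each completed descriptor, copy
+ * XDP_PASS frames into skbs, refill the ring and maintain the pool's
+ * need_wakeup state. Returns the number of packets processed, or the full
+ * budget when a refill failure should keep NAPI polling.
+ */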
+int igb_clean_rx_irq_zc(struct igb_q_vector *q_vector,
+ struct xsk_buff_pool *xsk_pool, const int budget)
+{
+ struct igb_adapter *adapter = q_vector->adapter;
+ unsigned int total_bytes = 0, total_packets = 0;
+ struct igb_ring *rx_ring = q_vector->rx.ring;
+ u32 ntc = rx_ring->next_to_clean;
+ struct bpf_prog *xdp_prog;
+ unsigned int xdp_xmit = 0;
+ bool failure = false;
+ u16 entries_to_alloc;
+ struct sk_buff *skb;
+
+ /* xdp_prog cannot be NULL in the ZC path */
+ xdp_prog = READ_ONCE(rx_ring->xdp_prog);
+
+ while (likely(total_packets < budget)) {
+ union e1000_adv_rx_desc *rx_desc;
+ ktime_t timestamp = 0;
+ struct xdp_buff *xdp;
+ unsigned int size;
+ int xdp_res = 0;
+
+ rx_desc = IGB_RX_DESC(rx_ring, ntc);
+ size = le16_to_cpu(rx_desc->wb.upper.length);
+ if (!size)
+ break;
+
+ /* This memory barrier is needed to keep us from reading
+ * any other fields out of the rx_desc until we know the
+ * descriptor has been written back
+ */
+ dma_rmb();
+
+ xdp = rx_ring->rx_buffer_info_zc[ntc];
+ xsk_buff_set_size(xdp, size);
+ xsk_buff_dma_sync_for_cpu(xdp);
+
+ /* pull rx packet timestamp if available and valid */
+ if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
+ int ts_hdr_len;
+
+ ts_hdr_len = igb_ptp_rx_pktstamp(rx_ring->q_vector,
+ xdp->data,
+ &timestamp);
+
+ xdp->data += ts_hdr_len;
+ xdp->data_meta += ts_hdr_len;
+ size -= ts_hdr_len;
+ }
+
+ xdp_res = igb_run_xdp_zc(adapter, rx_ring, xdp, xsk_pool,
+ xdp_prog);
+
+ if (xdp_res) {
+ if (likely(xdp_res & (IGB_XDP_TX | IGB_XDP_REDIR))) {
+ xdp_xmit |= xdp_res;
+ } else if (xdp_res == IGB_XDP_EXIT) {
+ failure = true;
+ break;
+ } else if (xdp_res == IGB_XDP_CONSUMED) {
+ xsk_buff_free(xdp);
+ }
+
+ total_packets++;
+ total_bytes += size;
+ ntc++;
+ if (ntc == rx_ring->count)
+ ntc = 0;
+ continue;
+ }
+
+ skb = igb_construct_skb_zc(rx_ring, xdp, timestamp);
+
+ /* exit if we failed to retrieve a buffer */
+ if (!skb) {
+ rx_ring->rx_stats.alloc_failed++;
+ set_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);
+ break;
+ }
+
+ xsk_buff_free(xdp);
+ ntc++;
+ if (ntc == rx_ring->count)
+ ntc = 0;
+
+ if (eth_skb_pad(skb))
+ continue;
+
+ /* probably a little skewed due to removing CRC */
+ total_bytes += skb->len;
+
+ /* populate checksum, timestamp, VLAN, and protocol */
+ igb_process_skb_fields(rx_ring, rx_desc, skb);
+
+ napi_gro_receive(&q_vector->napi, skb);
+
+ /* update budget accounting */
+ total_packets++;
+ }
+
+ rx_ring->next_to_clean = ntc;
+
+ if (xdp_xmit)
+ igb_finalize_xdp(adapter, xdp_xmit);
+
+ igb_update_rx_stats(q_vector, total_packets, total_bytes);
+
+ entries_to_alloc = igb_desc_unused(rx_ring);
+ if (entries_to_alloc >= IGB_RX_BUFFER_WRITE)
+ failure |= !igb_alloc_rx_buffers_zc(rx_ring, xsk_pool,
+ entries_to_alloc);
+
+ if (xsk_uses_need_wakeup(xsk_pool)) {
+ if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
+ xsk_set_rx_need_wakeup(xsk_pool);
+ else
+ xsk_clear_rx_need_wakeup(xsk_pool);
+
+ return (int)total_packets;
+ }
+ return failure ? budget : (int)total_packets;
+}
+
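+/* Zero-copy Tx: peek a batch of descriptors from the XSK Tx ring, post them
+ * to the hardware ring and bump the tail once for the whole batch. Returns
+ * false only when the entire ring budget was consumed and more work may
+ * remain.
+ */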
+bool igb_xmit_zc(struct igb_ring *tx_ring, struct xsk_buff_pool *xsk_pool)
+{
+ unsigned int budget = igb_desc_unused(tx_ring);
+ u32 cmd_type, olinfo_status, nb_pkts, i = 0;
+ struct xdp_desc *descs = xsk_pool->tx_descs;
+ union e1000_adv_tx_desc *tx_desc = NULL;
+ struct igb_tx_buffer *tx_buffer_info;
+ unsigned int total_bytes = 0;
+ dma_addr_t dma;
+
+ if (!netif_carrier_ok(tx_ring->netdev))
+ return true;
+
+ if (test_bit(IGB_RING_FLAG_TX_DISABLED, &tx_ring->flags))
+ return true;
+
+ nb_pkts = xsk_tx_peek_release_desc_batch(xsk_pool, budget);
+ if (!nb_pkts)
+ return true;
+
+ while (nb_pkts-- > 0) {
+ dma = xsk_buff_raw_get_dma(xsk_pool, descs[i].addr);
+ xsk_buff_raw_dma_sync_for_device(xsk_pool, dma, descs[i].len);
+
+ tx_buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
+ tx_buffer_info->bytecount = descs[i].len;
+ tx_buffer_info->type = IGB_TYPE_XSK;
+ tx_buffer_info->xdpf = NULL;
+ tx_buffer_info->gso_segs = 1;
+ tx_buffer_info->time_stamp = jiffies;
+
+ tx_desc = IGB_TX_DESC(tx_ring, tx_ring->next_to_use);
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
+
+ /* put descriptor type bits */
+ cmd_type = E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_DEXT |
+ E1000_ADVTXD_DCMD_IFCS;
+ olinfo_status = descs[i].len << E1000_ADVTXD_PAYLEN_SHIFT;
+
+ /* FIXME: This sets the Report Status (RS) bit for every
+ * descriptor. One nice-to-have optimization would be to set it
+ * only for the last descriptor in the whole batch. See the Intel
+ * ice driver for an example of how to do it.
+ */
+ cmd_type |= descs[i].len | IGB_TXD_DCMD;
+ tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+ tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
+
+ total_bytes += descs[i].len;
+
+ i++;
+ tx_ring->next_to_use++;
+ tx_buffer_info->next_to_watch = tx_desc;
+ if (tx_ring->next_to_use == tx_ring->count)
+ tx_ring->next_to_use = 0;
+ }
+
+ netdev_tx_sent_queue(txring_txq(tx_ring), total_bytes);
+ igb_xdp_ring_update_tail(tx_ring);
+
+ return nb_pkts < budget;
+}
+
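+/* ndo_xsk_wakeup callback: validate the queue state and, unless NAPI is
+ * already scheduled, raise a software interrupt so the zero-copy paths get
+ * serviced.
+ */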
+int igb_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
+{
+ struct igb_adapter *adapter = netdev_priv(dev);
+ struct e1000_hw *hw = &adapter->hw;
+ struct igb_ring *ring;
+ u32 eics = 0;
+
+ if (test_bit(__IGB_DOWN, &adapter->state))
+ return -ENETDOWN;
+
+ if (!igb_xdp_is_enabled(adapter))
+ return -EINVAL;
+
+ if (qid >= adapter->num_tx_queues)
+ return -EINVAL;
+
+ ring = adapter->tx_ring[qid];
+
+ if (test_bit(IGB_RING_FLAG_TX_DISABLED, &ring->flags))
+ return -ENETDOWN;
+
+ if (!READ_ONCE(ring->xsk_pool))
+ return -EINVAL;
+
+ if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi)) {
+ /* Cause software interrupt */
+ if (adapter->flags & IGB_FLAG_HAS_MSIX) {
+ eics |= ring->q_vector->eims_value;
+ wr32(E1000_EICS, eics);
+ } else {
+ wr32(E1000_ICS, E1000_ICS_RXDMT0);
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 02044aa2181b..e55dd9345833 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -1592,7 +1592,7 @@ void igbvf_down(struct igbvf_adapter *adapter)
igbvf_irq_disable(adapter);
- del_timer_sync(&adapter->watchdog_timer);
+ timer_delete_sync(&adapter->watchdog_timer);
/* record the stats before reset*/
igbvf_update_stats(adapter);
@@ -1900,7 +1900,8 @@ static bool igbvf_has_link(struct igbvf_adapter *adapter)
**/
static void igbvf_watchdog(struct timer_list *t)
{
- struct igbvf_adapter *adapter = from_timer(adapter, t, watchdog_timer);
+ struct igbvf_adapter *adapter = timer_container_of(adapter, t,
+ watchdog_timer);
/* Do the rest outside of interrupt context */
schedule_work(&adapter->watchdog_task);
@@ -2912,7 +2913,7 @@ static void igbvf_remove(struct pci_dev *pdev)
* disable it from being rescheduled.
*/
set_bit(__IGBVF_DOWN, &adapter->state);
- del_timer_sync(&adapter->watchdog_timer);
+ timer_delete_sync(&adapter->watchdog_timer);
cancel_work_sync(&adapter->reset_task);
cancel_work_sync(&adapter->watchdog_task);
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index eac0f966e0e4..859a15e4ccba 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -40,6 +40,11 @@ void igc_ethtool_set_ops(struct net_device *);
#define IGC_MAX_TX_TSTAMP_REGS 4
+struct igc_fpe_t {
+ struct ethtool_mmsv mmsv;
+ u32 tx_min_frag_size;
+};
+
enum igc_mac_filter_type {
IGC_MAC_FILTER_TYPE_DST = 0,
IGC_MAC_FILTER_TYPE_SRC
@@ -319,6 +324,7 @@ struct igc_adapter {
struct timespec64 prev_ptp_time; /* Pre-reset PTP clock */
ktime_t ptp_reset_start; /* Reset time in clock mono */
struct system_time_snapshot snapshot;
+ struct mutex ptm_lock; /* Only allow one PTM transaction at a time */
char fw_version[32];
@@ -332,6 +338,8 @@ struct igc_adapter {
struct timespec64 period;
} perout[IGC_N_PEROUT];
+ struct igc_fpe_t fpe;
+
/* LEDs */
struct mutex led_mutex;
struct igc_led_classdev *leds;
@@ -386,11 +394,11 @@ extern char igc_driver_name[];
#define IGC_FLAG_RX_LEGACY BIT(16)
#define IGC_FLAG_TSN_QBV_ENABLED BIT(17)
#define IGC_FLAG_TSN_QAV_ENABLED BIT(18)
-#define IGC_FLAG_TSN_LEGACY_ENABLED BIT(19)
+#define IGC_FLAG_TSN_PREEMPT_ENABLED BIT(19)
#define IGC_FLAG_TSN_ANY_ENABLED \
(IGC_FLAG_TSN_QBV_ENABLED | IGC_FLAG_TSN_QAV_ENABLED | \
- IGC_FLAG_TSN_LEGACY_ENABLED)
+ IGC_FLAG_TSN_PREEMPT_ENABLED)
#define IGC_FLAG_RSS_FIELD_IPV4_UDP BIT(6)
#define IGC_FLAG_RSS_FIELD_IPV6_UDP BIT(7)
@@ -577,6 +585,7 @@ struct igc_metadata_request {
struct xsk_tx_metadata *meta;
struct igc_ring *tx_ring;
u32 cmd_type;
+ u16 used_desc;
};
struct igc_q_vector {
@@ -734,7 +743,10 @@ struct igc_nfc_rule *igc_get_nfc_rule(struct igc_adapter *adapter,
u32 location);
int igc_add_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule);
void igc_del_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule);
-
+void igc_disable_empty_addr_recv(struct igc_adapter *adapter);
+int igc_enable_empty_addr_recv(struct igc_adapter *adapter);
+struct igc_ring *igc_get_tx_ring(struct igc_adapter *adapter, int cpu);
+void igc_flush_tx_descriptors(struct igc_ring *ring);
void igc_ptp_init(struct igc_adapter *adapter);
void igc_ptp_reset(struct igc_adapter *adapter);
void igc_ptp_suspend(struct igc_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c
index 9fae8bdec2a7..1613b562d17c 100644
--- a/drivers/net/ethernet/intel/igc/igc_base.c
+++ b/drivers/net/ethernet/intel/igc/igc_base.c
@@ -68,6 +68,10 @@ static s32 igc_init_nvm_params_base(struct igc_hw *hw)
u32 eecd = rd32(IGC_EECD);
u16 size;
+ /* failed to read reg and got all F's */
+ if (!(~eecd))
+ return -ENXIO;
+
size = FIELD_GET(IGC_EECD_SIZE_EX_MASK, eecd);
/* Added to a constant, "size" becomes the left-shift value
@@ -221,6 +225,8 @@ static s32 igc_get_invariants_base(struct igc_hw *hw)
/* NVM initialization */
ret_val = igc_init_nvm_params_base(hw);
+ if (ret_val)
+ goto out;
switch (hw->mac.type) {
case igc_i225:
ret_val = igc_init_nvm_params_i225(hw);
diff --git a/drivers/net/ethernet/intel/igc/igc_base.h b/drivers/net/ethernet/intel/igc/igc_base.h
index bf8cdfbba9ff..6320eabb72fe 100644
--- a/drivers/net/ethernet/intel/igc/igc_base.h
+++ b/drivers/net/ethernet/intel/igc/igc_base.h
@@ -49,6 +49,7 @@ struct igc_adv_tx_context_desc {
#define IGC_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */
#define IGC_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */
#define IGC_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */
+#define IGC_ADVTXD_PAYLEN_MASK 0xFFFFC000 /* Adv desc PAYLEN mask */
#define IGC_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */
#define IGC_RAR_ENTRIES 16
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index 8e449904aa7d..7189dfc389ad 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -308,6 +308,8 @@
#define IGC_TXD_DTYP_C 0x00000000 /* Context Descriptor */
#define IGC_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
#define IGC_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
+#define IGC_TXD_POPTS_SMD_MASK 0x3000 /* Indicates whether it's SMD-V or SMD-R */
+
#define IGC_TXD_CMD_EOP 0x01000000 /* End of Packet */
#define IGC_TXD_CMD_IC 0x04000000 /* Insert Checksum */
#define IGC_TXD_CMD_DEXT 0x20000000 /* Desc extension (0 = legacy) */
@@ -363,6 +365,8 @@
#define IGC_SRRCTL_TIMER0SEL(timer) (((timer) & 0x3) << 17)
/* Receive Descriptor bit definitions */
+#define IGC_RXD_STAT_SMD_TYPE_V 0x01 /* SMD-V Packet */
+#define IGC_RXD_STAT_SMD_TYPE_R 0x02 /* SMD-R Packet */
#define IGC_RXD_STAT_EOP 0x02 /* End of Packet */
#define IGC_RXD_STAT_IXSM 0x04 /* Ignore checksum */
#define IGC_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
@@ -372,7 +376,8 @@
#define IGC_RXDEXT_STATERR_LB 0x00040000
/* Advanced Receive Descriptor bit definitions */
-#define IGC_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */
+#define IGC_RXDADV_STAT_SMD_TYPE_MASK 0x06000
+#define IGC_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */
#define IGC_RXDEXT_STATERR_L4E 0x20000000
#define IGC_RXDEXT_STATERR_IPE 0x40000000
@@ -396,11 +401,47 @@
#define IGC_RCTL_PMCF 0x00800000 /* pass MAC control frames */
#define IGC_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */
-#define I225_RXPBSIZE_DEFAULT 0x000000A2 /* RXPBSIZE default */
-#define I225_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */
-#define IGC_RXPBS_CFG_TS_EN 0x80000000 /* Timestamp in Rx buffer */
-
-#define IGC_TXPBSIZE_TSN 0x04145145 /* 5k bytes buffer for each queue */
+/* Mask for RX packet buffer size */
+#define IGC_RXPBSIZE_EXP_MASK GENMASK(5, 0)
+#define IGC_BMC2OSPBSIZE_MASK GENMASK(11, 6)
+#define IGC_RXPBSIZE_BE_MASK GENMASK(17, 12)
+/* Mask for timestamp in RX buffer */
+#define IGC_RXPBS_CFG_TS_EN_MASK GENMASK(31, 31)
+/* High-priority RX packet buffer size (KB). Used for Express traffic when preemption is enabled */
+#define IGC_RXPBSIZE_EXP(x) FIELD_PREP(IGC_RXPBSIZE_EXP_MASK, (x))
+/* BMC to OS packet buffer size in KB */
+#define IGC_BMC2OSPBSIZE(x) FIELD_PREP(IGC_BMC2OSPBSIZE_MASK, (x))
+/* Low-priority RX packet buffer size (KB). Used for BE traffic when preemption is enabled */
+#define IGC_RXPBSIZE_BE(x) FIELD_PREP(IGC_RXPBSIZE_BE_MASK, (x))
+/* Enable RX packet buffer for timestamp descriptor, saving 16 bytes per packet if set */
+#define IGC_RXPBS_CFG_TS_EN FIELD_PREP(IGC_RXPBS_CFG_TS_EN_MASK, 1)
+/* Default value following I225/I226 SW User Manual Section 8.3.1 */
+#define IGC_RXPBSIZE_EXP_BMC_DEFAULT ( \
+ IGC_RXPBSIZE_EXP(34) | IGC_BMC2OSPBSIZE(2))
+#define IGC_RXPBSIZE_EXP_BMC_BE_TSN ( \
+ IGC_RXPBSIZE_EXP(15) | IGC_BMC2OSPBSIZE(2) | IGC_RXPBSIZE_BE(15))
+
+/* Mask for TX packet buffer size */
+#define IGC_TXPB0SIZE_MASK GENMASK(5, 0)
+#define IGC_TXPB1SIZE_MASK GENMASK(11, 6)
+#define IGC_TXPB2SIZE_MASK GENMASK(17, 12)
+#define IGC_TXPB3SIZE_MASK GENMASK(23, 18)
+/* Mask for OS to BMC packet buffer size */
+#define IGC_OS2BMCPBSIZE_MASK GENMASK(29, 24)
+/* TX Packet buffer size in KB */
+#define IGC_TXPB0SIZE(x) FIELD_PREP(IGC_TXPB0SIZE_MASK, (x))
+#define IGC_TXPB1SIZE(x) FIELD_PREP(IGC_TXPB1SIZE_MASK, (x))
+#define IGC_TXPB2SIZE(x) FIELD_PREP(IGC_TXPB2SIZE_MASK, (x))
+#define IGC_TXPB3SIZE(x) FIELD_PREP(IGC_TXPB3SIZE_MASK, (x))
+/* OS to BMC packet buffer size in KB */
+#define IGC_OS2BMCPBSIZE(x) FIELD_PREP(IGC_OS2BMCPBSIZE_MASK, (x))
+/* Default value following I225/I226 SW User Manual Section 8.3.2 */
+#define IGC_TXPBSIZE_DEFAULT ( \
+ IGC_TXPB0SIZE(20) | IGC_TXPB1SIZE(0) | IGC_TXPB2SIZE(0) | \
+ IGC_TXPB3SIZE(0) | IGC_OS2BMCPBSIZE(4))
+#define IGC_TXPBSIZE_TSN ( \
+ IGC_TXPB0SIZE(7) | IGC_TXPB1SIZE(7) | IGC_TXPB2SIZE(7) | \
+ IGC_TXPB3SIZE(7) | IGC_OS2BMCPBSIZE(4))
#define IGC_DTXMXPKTSZ_TSN 0x19 /* 1600 bytes of max TX DMA packet size */
#define IGC_DTXMXPKTSZ_DEFAULT 0x98 /* 9728-byte Jumbo frames */
@@ -539,8 +580,10 @@
/* Transmit Scheduling */
#define IGC_TQAVCTRL_TRANSMIT_MODE_TSN 0x00000001
+#define IGC_TQAVCTRL_PREEMPT_ENA 0x00000002
#define IGC_TQAVCTRL_ENHANCED_QAV 0x00000008
#define IGC_TQAVCTRL_FUTSCDDIS 0x00000080
+#define IGC_TQAVCTRL_MIN_FRAG_MASK 0x0000C000
#define IGC_TXQCTL_QUEUE_MODE_LAUNCHT 0x00000001
#define IGC_TXQCTL_STRICT_CYCLE 0x00000002
@@ -574,7 +617,10 @@
#define IGC_PTM_CTRL_SHRT_CYC(usec) (((usec) & 0x3f) << 2)
#define IGC_PTM_CTRL_PTM_TO(usec) (((usec) & 0xff) << 8)
-#define IGC_PTM_SHORT_CYC_DEFAULT 1 /* Default short cycle interval */
+/* A short cycle time of 1us theoretically should work, but appears to be too
+ * short in practice.
+ */
+#define IGC_PTM_SHORT_CYC_DEFAULT 4 /* Default short cycle interval */
#define IGC_PTM_CYC_TIME_DEFAULT 5 /* Default PTM cycle time */
#define IGC_PTM_TIMEOUT_DEFAULT 255 /* Default timeout for PTM errors */
@@ -593,6 +639,7 @@
#define IGC_PTM_STAT_T4M1_OVFL BIT(3) /* T4 minus T1 overflow */
#define IGC_PTM_STAT_ADJUST_1ST BIT(4) /* 1588 timer adjusted during 1st PTM cycle */
#define IGC_PTM_STAT_ADJUST_CYC BIT(5) /* 1588 timer adjusted during non-1st PTM cycle */
+#define IGC_PTM_STAT_ALL GENMASK(5, 0) /* Used to clear all status */
/* PCIe PTM Cycle Control */
#define IGC_PTM_CYCLE_CTRL_CYC_TIME(msec) ((msec) & 0x3ff) /* PTM Cycle Time (msec) */
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index 817838677817..3fc1eded9605 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -8,6 +8,7 @@
#include "igc.h"
#include "igc_diag.h"
+#include "igc_tsn.h"
/* forward declaration */
struct igc_stats {
@@ -1781,6 +1782,83 @@ static int igc_ethtool_set_eee(struct net_device *netdev,
return 0;
}
+static int igc_ethtool_get_mm(struct net_device *netdev,
+ struct ethtool_mm_state *cmd)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ struct igc_fpe_t *fpe = &adapter->fpe;
+
+ ethtool_mmsv_get_mm(&fpe->mmsv, cmd);
+ cmd->tx_min_frag_size = fpe->tx_min_frag_size;
+ cmd->rx_min_frag_size = IGC_RX_MIN_FRAG_SIZE;
+
+ return 0;
+}
+
+static int igc_ethtool_set_mm(struct net_device *netdev,
+ struct ethtool_mm_cfg *cmd,
+ struct netlink_ext_ack *extack)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ struct igc_fpe_t *fpe = &adapter->fpe;
+
+ fpe->tx_min_frag_size = igc_fpe_get_supported_frag_size(cmd->tx_min_frag_size);
+ if (fpe->tx_min_frag_size != cmd->tx_min_frag_size)
+ NL_SET_ERR_MSG_MOD(extack,
+ "tx-min-frag-size value set is unsupported. Rounded up to supported value (64, 128, 192, 256)");
+
+ if (fpe->mmsv.pmac_enabled != cmd->pmac_enabled) {
+ if (cmd->pmac_enabled)
+ static_branch_inc(&igc_fpe_enabled);
+ else
+ static_branch_dec(&igc_fpe_enabled);
+ }
+
+ ethtool_mmsv_set_mm(&fpe->mmsv, cmd);
+
+ return igc_tsn_offload_apply(adapter);
+}
+
+/**
+ * igc_ethtool_get_frame_ass_error - Get the frame assembly error count.
+ * @reg_value: Register value for IGC_PRMEXCPRCNT
+ * Return: The count of frame assembly errors.
+ */
+static u64 igc_ethtool_get_frame_ass_error(u32 reg_value)
+{
+ /* Out of order statistics */
+ u32 ooo_frame_cnt, ooo_frag_cnt;
+ u32 miss_frame_frag_cnt;
+
+ ooo_frame_cnt = FIELD_GET(IGC_PRMEXCPRCNT_OOO_FRAME_CNT, reg_value);
+ ooo_frag_cnt = FIELD_GET(IGC_PRMEXCPRCNT_OOO_FRAG_CNT, reg_value);
+ miss_frame_frag_cnt = FIELD_GET(IGC_PRMEXCPRCNT_MISS_FRAME_FRAG_CNT,
+ reg_value);
+
+ return ooo_frame_cnt + ooo_frag_cnt + miss_frame_frag_cnt;
+}
+
+static u64 igc_ethtool_get_frame_smd_error(u32 reg_value)
+{
+ return FIELD_GET(IGC_PRMEXCPRCNT_OOO_SMDC, reg_value);
+}
+
+static void igc_ethtool_get_mm_stats(struct net_device *dev,
+ struct ethtool_mm_stats *stats)
+{
+ struct igc_adapter *adapter = netdev_priv(dev);
+ struct igc_hw *hw = &adapter->hw;
+ u32 reg_value;
+
+ reg_value = rd32(IGC_PRMEXCPRCNT);
+
+ stats->MACMergeFrameAssErrorCount = igc_ethtool_get_frame_ass_error(reg_value);
+ stats->MACMergeFrameSmdErrorCount = igc_ethtool_get_frame_smd_error(reg_value);
+ stats->MACMergeFrameAssOkCount = rd32(IGC_PRMPTDRCNT);
+ stats->MACMergeFragCountRx = rd32(IGC_PRMEVNTRCNT);
+ stats->MACMergeFragCountTx = rd32(IGC_PRMEVNTTCNT);
+}
+
static int igc_ethtool_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
{
@@ -2076,6 +2154,9 @@ static const struct ethtool_ops igc_ethtool_ops = {
.get_link_ksettings = igc_ethtool_get_link_ksettings,
.set_link_ksettings = igc_ethtool_set_link_ksettings,
.self_test = igc_ethtool_diag_test,
+ .get_mm = igc_ethtool_get_mm,
+ .get_mm_stats = igc_ethtool_get_mm_stats,
+ .set_mm = igc_ethtool_set_mm,
};
void igc_ethtool_set_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h
index d9d1a1a11daf..be8a49a86d09 100644
--- a/drivers/net/ethernet/intel/igc/igc_hw.h
+++ b/drivers/net/ethernet/intel/igc/igc_hw.h
@@ -279,9 +279,4 @@ struct net_device *igc_get_hw_dev(struct igc_hw *hw);
#define hw_dbg(format, arg...) \
netdev_dbg(igc_get_hw_dev(hw), format, ##arg)
-s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value);
-s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value);
-void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value);
-void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value);
-
#endif /* _IGC_HW_H_ */
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 27872bdea9bd..031c332f66c4 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -1092,10 +1092,12 @@ static int igc_init_empty_frame(struct igc_ring *ring,
dma = dma_map_single(ring->dev, skb->data, size, DMA_TO_DEVICE);
if (dma_mapping_error(ring->dev, dma)) {
- netdev_err_once(ring->netdev, "Failed to map DMA for TX\n");
+ net_err_ratelimited("%s: DMA mapping error for empty frame\n",
+ netdev_name(ring->netdev));
return -ENOMEM;
}
+ buffer->type = IGC_TX_BUFFER_TYPE_SKB;
buffer->skb = skb;
buffer->protocol = 0;
buffer->bytecount = skb->len;
@@ -1107,20 +1109,12 @@ static int igc_init_empty_frame(struct igc_ring *ring,
return 0;
}
-static int igc_init_tx_empty_descriptor(struct igc_ring *ring,
- struct sk_buff *skb,
- struct igc_tx_buffer *first)
+static void igc_init_tx_empty_descriptor(struct igc_ring *ring,
+ struct sk_buff *skb,
+ struct igc_tx_buffer *first)
{
union igc_adv_tx_desc *desc;
u32 cmd_type, olinfo_status;
- int err;
-
- if (!igc_desc_unused(ring))
- return -EBUSY;
-
- err = igc_init_empty_frame(ring, first, skb);
- if (err)
- return err;
cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD |
@@ -1139,8 +1133,6 @@ static int igc_init_tx_empty_descriptor(struct igc_ring *ring,
ring->next_to_use++;
if (ring->next_to_use == ring->count)
ring->next_to_use = 0;
-
- return 0;
}
#define IGC_EMPTY_FRAME_SIZE 60
@@ -1566,6 +1558,40 @@ static bool igc_request_tx_tstamp(struct igc_adapter *adapter, struct sk_buff *s
return false;
}
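+/* Build and queue a 60-byte all-zero frame, together with its context
+ * descriptor, on @tx_ring. Used when the launch-time logic requires an empty
+ * packet at the start of a new Qbv cycle.
+ */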
+static int igc_insert_empty_frame(struct igc_ring *tx_ring)
+{
+ struct igc_tx_buffer *empty_info;
+ struct sk_buff *empty_skb;
+ void *data;
+ int ret;
+
+ empty_info = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
+ empty_skb = alloc_skb(IGC_EMPTY_FRAME_SIZE, GFP_ATOMIC);
+ if (unlikely(!empty_skb)) {
+ net_err_ratelimited("%s: skb alloc error for empty frame\n",
+ netdev_name(tx_ring->netdev));
+ return -ENOMEM;
+ }
+
+ data = skb_put(empty_skb, IGC_EMPTY_FRAME_SIZE);
+ memset(data, 0, IGC_EMPTY_FRAME_SIZE);
+
+ /* Prepare DMA mapping and Tx buffer information */
+ ret = igc_init_empty_frame(tx_ring, empty_info, empty_skb);
+ if (unlikely(ret)) {
+ dev_kfree_skb_any(empty_skb);
+ return ret;
+ }
+
+ /* Prepare advanced context descriptor for empty packet */
+ igc_tx_ctxtdesc(tx_ring, 0, false, 0, 0, 0);
+
+ /* Prepare advanced data descriptor for empty packet */
+ igc_init_tx_empty_descriptor(tx_ring, empty_skb, empty_info);
+
+ return 0;
+}
+
static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
struct igc_ring *tx_ring)
{
@@ -1585,6 +1611,7 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
* + 1 desc for skb_headlen/IGC_MAX_DATA_PER_TXD,
* + 2 desc gap to keep tail from touching head,
* + 1 desc for context descriptor,
+ * + 2 desc for inserting an empty packet for launch time,
* otherwise try next time
*/
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
@@ -1604,24 +1631,16 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
launch_time = igc_tx_launchtime(tx_ring, txtime, &first_flag, &insert_empty);
if (insert_empty) {
- struct igc_tx_buffer *empty_info;
- struct sk_buff *empty;
- void *data;
-
- empty_info = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
- empty = alloc_skb(IGC_EMPTY_FRAME_SIZE, GFP_ATOMIC);
- if (!empty)
- goto done;
-
- data = skb_put(empty, IGC_EMPTY_FRAME_SIZE);
- memset(data, 0, IGC_EMPTY_FRAME_SIZE);
-
- igc_tx_ctxtdesc(tx_ring, 0, false, 0, 0, 0);
-
- if (igc_init_tx_empty_descriptor(tx_ring,
- empty,
- empty_info) < 0)
- dev_kfree_skb_any(empty);
+ /* Reset the launch time if the required empty frame fails to
+ * be inserted. However, this packet is not dropped, so it
+ * "dirties" the current Qbv cycle. This ensures that the
+ * upcoming packet, which is scheduled in the next Qbv cycle,
+ * does not require an empty frame. This way, the launch time
+ * continues to function correctly despite the current failure
+ * to insert the empty frame.
+ */
+ if (igc_insert_empty_frame(tx_ring))
+ launch_time = 0;
}
done:
@@ -1649,7 +1668,8 @@ done:
if (igc_request_tx_tstamp(adapter, skb, &tstamp_flags)) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
tx_flags |= IGC_TX_FLAGS_TSTAMP | tstamp_flags;
- if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP_USE_CYCLES)
+ if (skb->sk &&
+ READ_ONCE(skb->sk->sk_tsflags) & SOF_TIMESTAMPING_BIND_PHC)
tx_flags |= IGC_TX_FLAGS_TSTAMP_TIMER_1;
} else {
adapter->tx_hwtstamp_skipped++;
@@ -2123,10 +2143,6 @@ static bool igc_cleanup_headers(struct igc_ring *rx_ring,
union igc_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
- /* XDP packets use error pointer so abort at this point */
- if (IS_ERR(skb))
- return true;
-
if (unlikely(igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_RXE))) {
struct net_device *netdev = rx_ring->netdev;
@@ -2448,8 +2464,7 @@ unmap:
return -ENOMEM;
}
-static struct igc_ring *igc_xdp_get_tx_ring(struct igc_adapter *adapter,
- int cpu)
+struct igc_ring *igc_get_tx_ring(struct igc_adapter *adapter, int cpu)
{
int index = cpu;
@@ -2473,7 +2488,7 @@ static int igc_xdp_xmit_back(struct igc_adapter *adapter, struct xdp_buff *xdp)
if (unlikely(!xdpf))
return -EFAULT;
- ring = igc_xdp_get_tx_ring(adapter, cpu);
+ ring = igc_get_tx_ring(adapter, cpu);
nq = txring_txq(ring);
__netif_tx_lock(nq, cpu);
@@ -2515,8 +2530,7 @@ out_failure:
}
}
-static struct sk_buff *igc_xdp_run_prog(struct igc_adapter *adapter,
- struct xdp_buff *xdp)
+static int igc_xdp_run_prog(struct igc_adapter *adapter, struct xdp_buff *xdp)
{
struct bpf_prog *prog;
int res;
@@ -2530,11 +2544,11 @@ static struct sk_buff *igc_xdp_run_prog(struct igc_adapter *adapter,
res = __igc_xdp_run_prog(adapter, prog, xdp);
out:
- return ERR_PTR(-res);
+ return res;
}
/* This function assumes __netif_tx_lock is held by the caller. */
-static void igc_flush_tx_descriptors(struct igc_ring *ring)
+void igc_flush_tx_descriptors(struct igc_ring *ring)
{
/* Once tail pointer is updated, hardware can fetch the descriptors
* any time so we issue a write membar here to ensure all memory
@@ -2551,7 +2565,7 @@ static void igc_finalize_xdp(struct igc_adapter *adapter, int status)
struct igc_ring *ring;
if (status & IGC_XDP_TX) {
- ring = igc_xdp_get_tx_ring(adapter, cpu);
+ ring = igc_get_tx_ring(adapter, cpu);
nq = txring_txq(ring);
__netif_tx_lock(nq, cpu);
@@ -2585,6 +2599,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
struct sk_buff *skb = rx_ring->skb;
u16 cleaned_count = igc_desc_unused(rx_ring);
int xdp_status = 0, rx_buffer_pgcnt;
+ int xdp_res = 0;
while (likely(total_packets < budget)) {
struct igc_xdp_buff ctx = { .rx_ts = NULL };
@@ -2622,6 +2637,14 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
size -= IGC_TS_HDR_LEN;
}
+ if (igc_fpe_is_pmac_enabled(adapter) &&
+ igc_fpe_handle_mpacket(adapter, rx_desc, size, pktbuf)) {
+ /* Advance the ring next-to-clean */
+ igc_is_non_eop(rx_ring, rx_desc);
+ cleaned_count++;
+ continue;
+ }
+
if (!skb) {
xdp_init_buff(&ctx.xdp, truesize, &rx_ring->xdp_rxq);
xdp_prepare_buff(&ctx.xdp, pktbuf - igc_rx_offset(rx_ring),
@@ -2630,12 +2653,10 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
xdp_buff_clear_frags_flag(&ctx.xdp);
ctx.rx_desc = rx_desc;
- skb = igc_xdp_run_prog(adapter, &ctx.xdp);
+ xdp_res = igc_xdp_run_prog(adapter, &ctx.xdp);
}
- if (IS_ERR(skb)) {
- unsigned int xdp_res = -PTR_ERR(skb);
-
+ if (xdp_res) {
switch (xdp_res) {
case IGC_XDP_CONSUMED:
rx_buffer->pagecnt_bias++;
@@ -2657,7 +2678,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
skb = igc_construct_skb(rx_ring, rx_buffer, &ctx);
/* exit if we failed to retrieve a buffer */
- if (!skb) {
+ if (!xdp_res && !skb) {
rx_ring->rx_stats.alloc_failed++;
rx_buffer->pagecnt_bias++;
set_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);
@@ -2672,7 +2693,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
continue;
/* verify the packet layout is correct */
- if (igc_cleanup_headers(rx_ring, rx_desc, skb)) {
+ if (xdp_res || igc_cleanup_headers(rx_ring, rx_desc, skb)) {
skb = NULL;
continue;
}
@@ -2707,8 +2728,9 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
}
static struct sk_buff *igc_construct_skb_zc(struct igc_ring *ring,
- struct xdp_buff *xdp)
+ struct igc_xdp_buff *ctx)
{
+ struct xdp_buff *xdp = &ctx->xdp;
unsigned int totalsize = xdp->data_end - xdp->data_meta;
unsigned int metasize = xdp->data - xdp->data_meta;
struct sk_buff *skb;
@@ -2727,27 +2749,28 @@ static struct sk_buff *igc_construct_skb_zc(struct igc_ring *ring,
__skb_pull(skb, metasize);
}
+ if (ctx->rx_ts) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_HW_TSTAMP_NETDEV;
+ skb_hwtstamps(skb)->netdev_data = ctx->rx_ts;
+ }
+
return skb;
}
static void igc_dispatch_skb_zc(struct igc_q_vector *q_vector,
union igc_adv_rx_desc *desc,
- struct xdp_buff *xdp,
- ktime_t timestamp)
+ struct igc_xdp_buff *ctx)
{
struct igc_ring *ring = q_vector->rx.ring;
struct sk_buff *skb;
- skb = igc_construct_skb_zc(ring, xdp);
+ skb = igc_construct_skb_zc(ring, ctx);
if (!skb) {
ring->rx_stats.alloc_failed++;
set_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &ring->flags);
return;
}
- if (timestamp)
- skb_hwtstamps(skb)->hwtstamp = timestamp;
-
if (igc_cleanup_headers(ring, desc, skb))
return;
@@ -2783,7 +2806,6 @@ static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget)
union igc_adv_rx_desc *desc;
struct igc_rx_buffer *bi;
struct igc_xdp_buff *ctx;
- ktime_t timestamp = 0;
unsigned int size;
int res;
@@ -2813,6 +2835,8 @@ static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget)
*/
bi->xdp->data_meta += IGC_TS_HDR_LEN;
size -= IGC_TS_HDR_LEN;
+ } else {
+ ctx->rx_ts = NULL;
}
bi->xdp->data_end = bi->xdp->data + size;
@@ -2821,7 +2845,7 @@ static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget)
res = __igc_xdp_run_prog(adapter, prog, bi->xdp);
switch (res) {
case IGC_XDP_PASS:
- igc_dispatch_skb_zc(q_vector, desc, bi->xdp, timestamp);
+ igc_dispatch_skb_zc(q_vector, desc, ctx);
fallthrough;
case IGC_XDP_CONSUMED:
xsk_buff_free(bi->xdp);
@@ -2955,9 +2979,48 @@ static u64 igc_xsk_fill_timestamp(void *_priv)
return *(u64 *)_priv;
}
+static void igc_xsk_request_launch_time(u64 launch_time, void *_priv)
+{
+ struct igc_metadata_request *meta_req = _priv;
+ struct igc_ring *tx_ring = meta_req->tx_ring;
+ __le32 launch_time_offset;
+ bool insert_empty = false;
+ bool first_flag = false;
+ u16 used_desc = 0;
+
+ if (!tx_ring->launchtime_enable)
+ return;
+
+ launch_time_offset = igc_tx_launchtime(tx_ring,
+ ns_to_ktime(launch_time),
+ &first_flag, &insert_empty);
+ if (insert_empty) {
+ /* Disregard the launch time request if the required empty frame
+ * fails to be inserted.
+ */
+ if (igc_insert_empty_frame(tx_ring))
+ return;
+
+ meta_req->tx_buffer =
+ &tx_ring->tx_buffer_info[tx_ring->next_to_use];
+ /* Inserting an empty packet requires two descriptors:
+ * one data descriptor and one context descriptor.
+ */
+ used_desc += 2;
+ }
+
+ /* Use one context descriptor to specify launch time and first flag. */
+ igc_tx_ctxtdesc(tx_ring, launch_time_offset, first_flag, 0, 0, 0);
+ used_desc += 1;
+
+ /* Update the number of used descriptors in this request */
+ meta_req->used_desc += used_desc;
+}
+
const struct xsk_tx_metadata_ops igc_xsk_tx_metadata_ops = {
.tmo_request_timestamp = igc_xsk_request_timestamp,
.tmo_fill_timestamp = igc_xsk_fill_timestamp,
+ .tmo_request_launch_time = igc_xsk_request_launch_time,
};
static void igc_xdp_xmit_zc(struct igc_ring *ring)
@@ -2980,7 +3043,13 @@ static void igc_xdp_xmit_zc(struct igc_ring *ring)
ntu = ring->next_to_use;
budget = igc_desc_unused(ring);
- while (xsk_tx_peek_desc(pool, &xdp_desc) && budget--) {
+ /* Packets with launch time require one data descriptor and one context
+ * descriptor. When the launch time falls into the next Qbv cycle, we
+ * may need to insert an empty packet, which requires two more
+ * descriptors. Therefore, to be safe, we always ensure we have at least
+ * 4 descriptors available.
+ */
+ while (budget >= 4 && xsk_tx_peek_desc(pool, &xdp_desc)) {
struct igc_metadata_request meta_req;
struct xsk_tx_metadata *meta = NULL;
struct igc_tx_buffer *bi;
@@ -3001,9 +3070,19 @@ static void igc_xdp_xmit_zc(struct igc_ring *ring)
meta_req.tx_ring = ring;
meta_req.tx_buffer = bi;
meta_req.meta = meta;
+ meta_req.used_desc = 0;
xsk_tx_metadata_request(meta, &igc_xsk_tx_metadata_ops,
&meta_req);
+ /* xsk_tx_metadata_request() may have updated next_to_use */
+ ntu = ring->next_to_use;
+
+ /* xsk_tx_metadata_request() may have updated Tx buffer info */
+ bi = meta_req.tx_buffer;
+
+ /* xsk_tx_metadata_request() may use a few descriptors */
+ budget -= meta_req.used_desc;
+
tx_desc = IGC_TX_DESC(ring, ntu);
tx_desc->read.cmd_type_len = cpu_to_le32(meta_req.cmd_type);
tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
@@ -3021,9 +3100,11 @@ static void igc_xdp_xmit_zc(struct igc_ring *ring)
ntu++;
if (ntu == ring->count)
ntu = 0;
+
+ ring->next_to_use = ntu;
+ budget--;
}
- ring->next_to_use = ntu;
if (tx_desc) {
igc_flush_tx_descriptors(ring);
xsk_tx_release(pool);
@@ -3071,6 +3152,11 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
if (!(eop_desc->wb.status & cpu_to_le32(IGC_TXD_STAT_DD)))
break;
+ if (igc_fpe_is_pmac_enabled(adapter) &&
+ igc_fpe_transmitted_smd_v(tx_desc))
+ ethtool_mmsv_event_handle(&adapter->fpe.mmsv,
+ ETHTOOL_MMSV_LD_SENT_VERIFY_MPACKET);
+
/* Hold the completions while there's a pending tx hardware
* timestamp request from XDP Tx metadata.
*/
@@ -3963,6 +4049,30 @@ static int igc_uc_unsync(struct net_device *netdev, const unsigned char *addr)
}
/**
+ * igc_enable_empty_addr_recv - Enable Rx of packets with all-zeroes MAC address
+ * @adapter: Pointer to the igc_adapter structure.
+ *
+ * Frame preemption verification requires that packets with the all-zeroes
+ * MAC address are allowed to be received by the driver. This function adds the
+ * all-zeroes destination address to the list of acceptable addresses.
+ *
+ * Return: 0 on success, negative value otherwise.
+ */
+int igc_enable_empty_addr_recv(struct igc_adapter *adapter)
+{
+ u8 empty[ETH_ALEN] = {};
+
+ return igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, empty, -1);
+}
+
+void igc_disable_empty_addr_recv(struct igc_adapter *adapter)
+{
+ u8 empty[ETH_ALEN] = {};
+
+ igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, empty);
+}
+
+/**
* igc_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
* @netdev: network interface device structure
*
@@ -4948,6 +5058,22 @@ static int igc_sw_init(struct igc_adapter *adapter)
return 0;
}
+static void igc_set_queue_napi(struct igc_adapter *adapter, int vector,
+ struct napi_struct *napi)
+{
+ struct igc_q_vector *q_vector = adapter->q_vector[vector];
+
+ if (q_vector->rx.ring)
+ netif_queue_set_napi(adapter->netdev,
+ q_vector->rx.ring->queue_index,
+ NETDEV_QUEUE_TYPE_RX, napi);
+
+ if (q_vector->tx.ring)
+ netif_queue_set_napi(adapter->netdev,
+ q_vector->tx.ring->queue_index,
+ NETDEV_QUEUE_TYPE_TX, napi);
+}
+
/**
* igc_up - Open the interface and prepare it to handle traffic
* @adapter: board private structure
@@ -4955,6 +5081,7 @@ static int igc_sw_init(struct igc_adapter *adapter)
void igc_up(struct igc_adapter *adapter)
{
struct igc_hw *hw = &adapter->hw;
+ struct napi_struct *napi;
int i = 0;
/* hardware has been reset, we need to reload some things */
@@ -4962,8 +5089,11 @@ void igc_up(struct igc_adapter *adapter)
clear_bit(__IGC_DOWN, &adapter->state);
- for (i = 0; i < adapter->num_q_vectors; i++)
- napi_enable(&adapter->q_vector[i]->napi);
+ for (i = 0; i < adapter->num_q_vectors; i++) {
+ napi = &adapter->q_vector[i]->napi;
+ napi_enable(napi);
+ igc_set_queue_napi(adapter, i, napi);
+ }
if (adapter->msix_entries)
igc_configure_msix(adapter);
@@ -5192,12 +5322,13 @@ void igc_down(struct igc_adapter *adapter)
for (i = 0; i < adapter->num_q_vectors; i++) {
if (adapter->q_vector[i]) {
napi_synchronize(&adapter->q_vector[i]->napi);
+ igc_set_queue_napi(adapter, i, NULL);
napi_disable(&adapter->q_vector[i]->napi);
}
}
- del_timer_sync(&adapter->watchdog_timer);
- del_timer_sync(&adapter->phy_info_timer);
+ timer_delete_sync(&adapter->watchdog_timer);
+ timer_delete_sync(&adapter->phy_info_timer);
/* record the stats before reset*/
spin_lock(&adapter->stats64_lock);
@@ -5216,6 +5347,9 @@ void igc_down(struct igc_adapter *adapter)
igc_disable_all_tx_rings_hw(adapter);
igc_clean_all_tx_rings(adapter);
igc_clean_all_rx_rings(adapter);
+
+ if (adapter->fpe.mmsv.pmac_enabled)
+ ethtool_mmsv_stop(&adapter->fpe.mmsv);
}
void igc_reinit_locked(struct igc_adapter *adapter)
@@ -5576,6 +5710,9 @@ static int igc_request_msix(struct igc_adapter *adapter)
q_vector);
if (err)
goto err_free;
+
+ netif_napi_set_irq(&q_vector->napi,
+ adapter->msix_entries[vector].vector);
}
igc_configure_msix(adapter);
@@ -5612,7 +5749,8 @@ static void igc_clear_interrupt_scheme(struct igc_adapter *adapter)
*/
static void igc_update_phy_info(struct timer_list *t)
{
- struct igc_adapter *adapter = from_timer(adapter, t, phy_info_timer);
+ struct igc_adapter *adapter = timer_container_of(adapter, t,
+ phy_info_timer);
igc_get_phy_info(&adapter->hw);
}
@@ -5654,7 +5792,8 @@ bool igc_has_link(struct igc_adapter *adapter)
*/
static void igc_watchdog(struct timer_list *t)
{
- struct igc_adapter *adapter = from_timer(adapter, t, watchdog_timer);
+ struct igc_adapter *adapter = timer_container_of(adapter, t,
+ watchdog_timer);
/* Do the rest outside of interrupt context */
schedule_work(&adapter->watchdog_task);
}
@@ -5737,6 +5876,10 @@ static void igc_watchdog_task(struct work_struct *work)
*/
igc_tsn_adjust_txtime_offset(adapter);
+ if (adapter->fpe.mmsv.pmac_enabled)
+ ethtool_mmsv_link_state_handle(&adapter->fpe.mmsv,
+ true);
+
if (adapter->link_speed != SPEED_1000)
goto no_wait;
@@ -5772,6 +5915,10 @@ no_wait:
netdev_info(netdev, "NIC Link is Down\n");
netif_carrier_off(netdev);
+ if (adapter->fpe.mmsv.pmac_enabled)
+ ethtool_mmsv_link_state_handle(&adapter->fpe.mmsv,
+ false);
+
/* link state has changed, schedule phy info update */
if (!test_bit(__IGC_DOWN, &adapter->state))
mod_timer(&adapter->phy_info_timer,
@@ -6018,6 +6165,7 @@ static int __igc_open(struct net_device *netdev, bool resuming)
struct igc_adapter *adapter = netdev_priv(netdev);
struct pci_dev *pdev = adapter->pdev;
struct igc_hw *hw = &adapter->hw;
+ struct napi_struct *napi;
int err = 0;
int i = 0;
@@ -6053,8 +6201,11 @@ static int __igc_open(struct net_device *netdev, bool resuming)
clear_bit(__IGC_DOWN, &adapter->state);
- for (i = 0; i < adapter->num_q_vectors; i++)
- napi_enable(&adapter->q_vector[i]->napi);
+ for (i = 0; i < adapter->num_q_vectors; i++) {
+ napi = &adapter->q_vector[i]->napi;
+ napi_enable(napi);
+ igc_set_queue_napi(adapter, i, napi);
+ }
/* Clear any pending interrupts. */
rd32(IGC_ICR);
@@ -6337,6 +6488,10 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
if (!validate_schedule(adapter, qopt))
return -EINVAL;
+ /* preemptible isn't supported yet */
+ if (qopt->mqprio.preemptible_tcs)
+ return -EOPNOTSUPP;
+
igc_ptp_read(adapter, &now);
if (igc_tsn_is_taprio_activated_by_user(adapter) &&
@@ -6577,13 +6732,14 @@ static int igc_tsn_enable_mqprio(struct igc_adapter *adapter,
struct tc_mqprio_qopt_offload *mqprio)
{
struct igc_hw *hw = &adapter->hw;
- int i;
+ int err, i;
if (hw->mac.type != igc_i225)
return -EOPNOTSUPP;
if (!mqprio->qopt.num_tc) {
adapter->strict_priority_enable = false;
+ netdev_reset_tc(adapter->netdev);
goto apply;
}
@@ -6614,6 +6770,21 @@ static int igc_tsn_enable_mqprio(struct igc_adapter *adapter,
igc_save_mqprio_params(adapter, mqprio->qopt.num_tc,
mqprio->qopt.offset);
+ err = netdev_set_num_tc(adapter->netdev, adapter->num_tc);
+ if (err)
+ return err;
+
+ for (i = 0; i < adapter->num_tc; i++) {
+ err = netdev_set_tc_queue(adapter->netdev, i, 1,
+ adapter->queue_per_tc[i]);
+ if (err)
+ return err;
+ }
+
+ /* In case the card is configured with fewer than four queues. */
+ for (; i < IGC_MAX_TX_QUEUES; i++)
+ adapter->queue_per_tc[i] = i;
+
mqprio->qopt.hw = TC_MQPRIO_HW_OFFLOAD_TCS;
apply:
@@ -6677,7 +6848,7 @@ static int igc_xdp_xmit(struct net_device *dev, int num_frames,
if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
return -EINVAL;
- ring = igc_xdp_get_tx_ring(adapter, cpu);
+ ring = igc_get_tx_ring(adapter, cpu);
nq = txring_txq(ring);
__netif_tx_lock(nq, cpu);
@@ -6779,45 +6950,6 @@ static const struct net_device_ops igc_netdev_ops = {
.ndo_get_tstamp = igc_get_tstamp,
};
-/* PCIe configuration access */
-void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value)
-{
- struct igc_adapter *adapter = hw->back;
-
- pci_read_config_word(adapter->pdev, reg, value);
-}
-
-void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value)
-{
- struct igc_adapter *adapter = hw->back;
-
- pci_write_config_word(adapter->pdev, reg, *value);
-}
-
-s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value)
-{
- struct igc_adapter *adapter = hw->back;
-
- if (!pci_is_pcie(adapter->pdev))
- return -IGC_ERR_CONFIG;
-
- pcie_capability_read_word(adapter->pdev, reg, value);
-
- return IGC_SUCCESS;
-}
-
-s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value)
-{
- struct igc_adapter *adapter = hw->back;
-
- if (!pci_is_pcie(adapter->pdev))
- return -IGC_ERR_CONFIG;
-
- pcie_capability_write_word(adapter->pdev, reg, *value);
-
- return IGC_SUCCESS;
-}
-
u32 igc_rd32(struct igc_hw *hw, u32 reg)
{
struct igc_adapter *igc = container_of(hw, struct igc_adapter, hw);
@@ -6983,6 +7115,10 @@ static int igc_probe(struct pci_dev *pdev,
adapter->port_num = hw->bus.func;
adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
+ /* Disable ASPM L1.2 on I226 devices to avoid packet loss */
+ if (igc_is_device_id_i226(hw))
+ pci_disable_link_state(pdev, PCIE_LINK_STATE_L1_2);
+
err = pci_save_state(pdev);
if (err)
goto err_ioremap;
@@ -7062,6 +7198,9 @@ static int igc_probe(struct pci_dev *pdev,
netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
NETDEV_XDP_ACT_XSK_ZEROCOPY;
+ /* enable HW vlan tag insertion/stripping by default */
+ netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
+
/* MTU range: 68 - 9216 */
netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;
@@ -7094,8 +7233,8 @@ static int igc_probe(struct pci_dev *pdev,
}
/* configure RXPBSIZE and TXPBSIZE */
- wr32(IGC_RXPBS, I225_RXPBSIZE_DEFAULT);
- wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT);
+ wr32(IGC_RXPBS, IGC_RXPBSIZE_EXP_BMC_DEFAULT);
+ wr32(IGC_TXPBS, IGC_TXPBSIZE_DEFAULT);
timer_setup(&adapter->watchdog_timer, igc_watchdog, 0);
timer_setup(&adapter->phy_info_timer, igc_update_phy_info, 0);
@@ -7103,8 +7242,8 @@ static int igc_probe(struct pci_dev *pdev,
INIT_WORK(&adapter->reset_task, igc_reset_task);
INIT_WORK(&adapter->watchdog_task, igc_watchdog_task);
- hrtimer_init(&adapter->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- adapter->hrtimer.function = &igc_qbv_scheduling_timer;
+ hrtimer_setup(&adapter->hrtimer, &igc_qbv_scheduling_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
/* Initialize link properties that are user-changeable */
adapter->fc_autoneg = true;
@@ -7127,6 +7266,8 @@ static int igc_probe(struct pci_dev *pdev,
igc_tsn_clear_schedule(adapter);
+ igc_fpe_init(adapter);
+
/* reset the hardware with the new settings */
igc_reset(adapter);
@@ -7168,6 +7309,7 @@ static int igc_probe(struct pci_dev *pdev,
err_register:
igc_release_hw_control(adapter);
+ igc_ptp_stop(adapter);
err_eeprom:
if (!igc_check_reset_block(hw))
igc_reset_phy(hw);
@@ -7209,8 +7351,8 @@ static void igc_remove(struct pci_dev *pdev)
set_bit(__IGC_DOWN, &adapter->state);
- del_timer_sync(&adapter->watchdog_timer);
- del_timer_sync(&adapter->phy_info_timer);
+ timer_delete_sync(&adapter->watchdog_timer);
+ timer_delete_sync(&adapter->phy_info_timer);
cancel_work_sync(&adapter->reset_task);
cancel_work_sync(&adapter->watchdog_task);
@@ -7338,7 +7480,7 @@ static void igc_deliver_wake_packet(struct net_device *netdev)
netif_rx(skb);
}
-static int igc_resume(struct device *dev)
+static int __igc_resume(struct device *dev, bool rpm)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -7362,6 +7504,9 @@ static int igc_resume(struct device *dev)
pci_enable_wake(pdev, PCI_D3hot, 0);
pci_enable_wake(pdev, PCI_D3cold, 0);
+ if (igc_is_device_id_i226(hw))
+ pci_disable_link_state(pdev, PCIE_LINK_STATE_L1_2);
+
if (igc_init_interrupt_scheme(adapter, true)) {
netdev_err(netdev, "Unable to allocate memory for queues\n");
return -ENOMEM;
@@ -7381,7 +7526,11 @@ static int igc_resume(struct device *dev)
wr32(IGC_WUS, ~0);
if (netif_running(netdev)) {
+ if (!rpm)
+ rtnl_lock();
err = __igc_open(netdev, true);
+ if (!rpm)
+ rtnl_unlock();
if (!err)
netif_device_attach(netdev);
}
@@ -7389,9 +7538,14 @@ static int igc_resume(struct device *dev)
return err;
}
+static int igc_resume(struct device *dev)
+{
+ return __igc_resume(dev, false);
+}
+
static int igc_runtime_resume(struct device *dev)
{
- return igc_resume(dev);
+ return __igc_resume(dev, true);
}
static int igc_suspend(struct device *dev)
@@ -7436,14 +7590,18 @@ static pci_ers_result_t igc_io_error_detected(struct pci_dev *pdev,
struct net_device *netdev = pci_get_drvdata(pdev);
struct igc_adapter *adapter = netdev_priv(netdev);
+ rtnl_lock();
netif_device_detach(netdev);
- if (state == pci_channel_io_perm_failure)
+ if (state == pci_channel_io_perm_failure) {
+ rtnl_unlock();
return PCI_ERS_RESULT_DISCONNECT;
+ }
if (netif_running(netdev))
igc_down(adapter);
pci_disable_device(pdev);
+ rtnl_unlock();
/* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
@@ -7454,7 +7612,7 @@ static pci_ers_result_t igc_io_error_detected(struct pci_dev *pdev,
* @pdev: Pointer to PCI device
*
* Restart the card from scratch, as if from a cold-boot. Implementation
- * resembles the first-half of the igc_resume routine.
+ * resembles the first-half of the __igc_resume routine.
**/
static pci_ers_result_t igc_io_slot_reset(struct pci_dev *pdev)
{
@@ -7474,6 +7632,9 @@ static pci_ers_result_t igc_io_slot_reset(struct pci_dev *pdev)
pci_enable_wake(pdev, PCI_D3hot, 0);
pci_enable_wake(pdev, PCI_D3cold, 0);
+ if (igc_is_device_id_i226(hw))
+ pci_disable_link_state_locked(pdev, PCIE_LINK_STATE_L1_2);
+
/* In case of PCI error, adapter loses its HW address
* so we should re-assign it here.
*/
@@ -7493,7 +7654,7 @@ static pci_ers_result_t igc_io_slot_reset(struct pci_dev *pdev)
*
* This callback is called when the error recovery driver tells us that
* its OK to resume normal operation. Implementation resembles the
- * second-half of the igc_resume routine.
+ * second-half of the __igc_resume routine.
*/
static void igc_io_resume(struct pci_dev *pdev)
{
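
A note on the launch-time hunks above: whenever the requested launch time falls into the next Qbv cycle, the driver now queues a dummy 60-byte frame first (one context plus one data descriptor), and if that dummy frame cannot be queued it clears the launch time so the packet still goes out and simply "dirties" the cycle it lands in. The stand-alone sketch below models that bookkeeping in plain user-space C; the names (model_ring, free_desc, cycle_ns) are illustrative, and the failure case is modelled here as a descriptor shortage, whereas in the driver it is an skb allocation or DMA-mapping failure.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct model_ring {
	unsigned int free_desc;	/* descriptors still unused in the Tx ring */
	uint64_t last_cycle;	/* Qbv cycle that last saw a packet */
};

/* Returns the launch time to program, or 0 when the dummy frame that would
 * have opened the new cycle could not be queued (the packet is still sent,
 * it just "dirties" the cycle it lands in).
 */
static uint64_t queue_launch_time(struct model_ring *r, uint64_t launch_ns,
				  uint64_t cycle_ns)
{
	uint64_t cycle = launch_ns / cycle_ns;
	bool insert_empty = cycle > r->last_cycle;

	if (insert_empty && r->free_desc >= 4)
		r->free_desc -= 2;	/* dummy: one context + one data descriptor */
	else if (insert_empty)
		launch_ns = 0;		/* no room for the dummy */

	r->free_desc -= 2;		/* real packet: one context + one data descriptor */
	r->last_cycle = cycle;
	return launch_ns;
}

int main(void)
{
	struct model_ring ring = { .free_desc = 8, .last_cycle = 0 };

	/* Same cycle: no dummy needed. The second call crosses into cycle 3
	 * and therefore also consumes the two dummy descriptors.
	 */
	printf("%llu\n", (unsigned long long)queue_launch_time(&ring, 500000, 1000000));
	printf("%llu\n", (unsigned long long)queue_launch_time(&ring, 3200000, 1000000));
	return 0;
}

With free_desc reduced below four before the second call, the output changes from 3200000 to 0, which is exactly the fallback the in-driver comment describes; it also explains why igc_xdp_xmit_zc() now insists on four free descriptors per packet.
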
diff --git a/drivers/net/ethernet/intel/igc/igc_nvm.c b/drivers/net/ethernet/intel/igc/igc_nvm.c
index 58f81aba0144..efd121c03967 100644
--- a/drivers/net/ethernet/intel/igc/igc_nvm.c
+++ b/drivers/net/ethernet/intel/igc/igc_nvm.c
@@ -36,56 +36,6 @@ static s32 igc_poll_eerd_eewr_done(struct igc_hw *hw, int ee_reg)
}
/**
- * igc_acquire_nvm - Generic request for access to EEPROM
- * @hw: pointer to the HW structure
- *
- * Set the EEPROM access request bit and wait for EEPROM access grant bit.
- * Return successful if access grant bit set, else clear the request for
- * EEPROM access and return -IGC_ERR_NVM (-1).
- */
-s32 igc_acquire_nvm(struct igc_hw *hw)
-{
- s32 timeout = IGC_NVM_GRANT_ATTEMPTS;
- u32 eecd = rd32(IGC_EECD);
- s32 ret_val = 0;
-
- wr32(IGC_EECD, eecd | IGC_EECD_REQ);
- eecd = rd32(IGC_EECD);
-
- while (timeout) {
- if (eecd & IGC_EECD_GNT)
- break;
- udelay(5);
- eecd = rd32(IGC_EECD);
- timeout--;
- }
-
- if (!timeout) {
- eecd &= ~IGC_EECD_REQ;
- wr32(IGC_EECD, eecd);
- hw_dbg("Could not acquire NVM grant\n");
- ret_val = -IGC_ERR_NVM;
- }
-
- return ret_val;
-}
-
-/**
- * igc_release_nvm - Release exclusive access to EEPROM
- * @hw: pointer to the HW structure
- *
- * Stop any current commands to the EEPROM and clear the EEPROM request bit.
- */
-void igc_release_nvm(struct igc_hw *hw)
-{
- u32 eecd;
-
- eecd = rd32(IGC_EECD);
- eecd &= ~IGC_EECD_REQ;
- wr32(IGC_EECD, eecd);
-}
-
-/**
* igc_read_nvm_eerd - Reads EEPROM using EERD register
* @hw: pointer to the HW structure
* @offset: offset of word in the EEPROM to read
diff --git a/drivers/net/ethernet/intel/igc/igc_nvm.h b/drivers/net/ethernet/intel/igc/igc_nvm.h
index f9fc2e9cfb03..ab78d0c64547 100644
--- a/drivers/net/ethernet/intel/igc/igc_nvm.h
+++ b/drivers/net/ethernet/intel/igc/igc_nvm.h
@@ -4,8 +4,6 @@
#ifndef _IGC_NVM_H_
#define _IGC_NVM_H_
-s32 igc_acquire_nvm(struct igc_hw *hw);
-void igc_release_nvm(struct igc_hw *hw);
s32 igc_read_mac_addr(struct igc_hw *hw);
s32 igc_read_nvm_eerd(struct igc_hw *hw, u16 offset, u16 words, u16 *data);
s32 igc_validate_nvm_checksum(struct igc_hw *hw);
diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
index 946edbad4302..f4f5c28615d3 100644
--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
+++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
@@ -257,13 +257,6 @@ static int igc_ptp_feature_enable_i225(struct ptp_clock_info *ptp,
switch (rq->type) {
case PTP_CLK_REQ_EXTTS:
- /* Reject requests with unsupported flags */
- if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
- PTP_RISING_EDGE |
- PTP_FALLING_EDGE |
- PTP_STRICT_FLAGS))
- return -EOPNOTSUPP;
-
/* Reject requests failing to enable both edges. */
if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
(rq->extts.flags & PTP_ENABLE_FEATURE) &&
@@ -300,10 +293,6 @@ static int igc_ptp_feature_enable_i225(struct ptp_clock_info *ptp,
return 0;
case PTP_CLK_REQ_PEROUT:
- /* Reject requests with unsupported flags */
- if (rq->perout.flags)
- return -EOPNOTSUPP;
-
if (on) {
pin = ptp_find_pin(igc->ptp_clock, PTP_PF_PEROUT,
rq->perout.index);
@@ -974,45 +963,62 @@ static void igc_ptm_log_error(struct igc_adapter *adapter, u32 ptm_stat)
}
}
+/* The PTM lock: adapter->ptm_lock must be held when calling igc_ptm_trigger() */
+static void igc_ptm_trigger(struct igc_hw *hw)
+{
+ u32 ctrl;
+
+ /* To "manually" start the PTM cycle we need to set the
+ * trigger (TRIG) bit
+ */
+ ctrl = rd32(IGC_PTM_CTRL);
+ ctrl |= IGC_PTM_CTRL_TRIG;
+ wr32(IGC_PTM_CTRL, ctrl);
+ /* Perform flush after write to CTRL register otherwise
+ * transaction may not start
+ */
+ wrfl();
+}
+
+/* The PTM lock: adapter->ptm_lock must be held when calling igc_ptm_reset() */
+static void igc_ptm_reset(struct igc_hw *hw)
+{
+ u32 ctrl;
+
+ ctrl = rd32(IGC_PTM_CTRL);
+ ctrl &= ~IGC_PTM_CTRL_TRIG;
+ wr32(IGC_PTM_CTRL, ctrl);
+ /* Write to clear all status */
+ wr32(IGC_PTM_STAT, IGC_PTM_STAT_ALL);
+}
+
static int igc_phc_get_syncdevicetime(ktime_t *device,
struct system_counterval_t *system,
void *ctx)
{
- u32 stat, t2_curr_h, t2_curr_l, ctrl;
struct igc_adapter *adapter = ctx;
struct igc_hw *hw = &adapter->hw;
+ u32 stat, t2_curr_h, t2_curr_l;
int err, count = 100;
ktime_t t1, t2_curr;
- /* Get a snapshot of system clocks to use as historic value. */
- ktime_get_snapshot(&adapter->snapshot);
-
+ /* Doing this in a loop because in the event of a
+ * badly timed (ha!) system clock adjustment, we may
+ * get PTM errors from the PCI root, but these errors
+ * are transitory. Repeating the process returns valid
+ * data eventually.
+ */
do {
- /* Doing this in a loop because in the event of a
- * badly timed (ha!) system clock adjustment, we may
- * get PTM errors from the PCI root, but these errors
- * are transitory. Repeating the process returns valid
- * data eventually.
- */
-
- /* To "manually" start the PTM cycle we need to clear and
- * then set again the TRIG bit.
- */
- ctrl = rd32(IGC_PTM_CTRL);
- ctrl &= ~IGC_PTM_CTRL_TRIG;
- wr32(IGC_PTM_CTRL, ctrl);
- ctrl |= IGC_PTM_CTRL_TRIG;
- wr32(IGC_PTM_CTRL, ctrl);
+ /* Get a snapshot of system clocks to use as historic value. */
+ ktime_get_snapshot(&adapter->snapshot);
- /* The cycle only starts "for real" when software notifies
- * that it has read the registers, this is done by setting
- * VALID bit.
- */
- wr32(IGC_PTM_STAT, IGC_PTM_STAT_VALID);
+ igc_ptm_trigger(hw);
err = readx_poll_timeout(rd32, IGC_PTM_STAT, stat,
stat, IGC_PTM_STAT_SLEEP,
IGC_PTM_STAT_TIMEOUT);
+ igc_ptm_reset(hw);
+
if (err < 0) {
netdev_err(adapter->netdev, "Timeout reading IGC_PTM_STAT register\n");
return err;
@@ -1021,15 +1027,7 @@ static int igc_phc_get_syncdevicetime(ktime_t *device,
if ((stat & IGC_PTM_STAT_VALID) == IGC_PTM_STAT_VALID)
break;
- if (stat & ~IGC_PTM_STAT_VALID) {
- /* An error occurred, log it. */
- igc_ptm_log_error(adapter, stat);
- /* The STAT register is write-1-to-clear (W1C),
- * so write the previous error status to clear it.
- */
- wr32(IGC_PTM_STAT, stat);
- continue;
- }
+ igc_ptm_log_error(adapter, stat);
} while (--count);
if (!count) {
@@ -1061,9 +1059,16 @@ static int igc_ptp_getcrosststamp(struct ptp_clock_info *ptp,
{
struct igc_adapter *adapter = container_of(ptp, struct igc_adapter,
ptp_caps);
+ int ret;
+
+ /* This blocks until any in progress PTM transactions complete */
+ mutex_lock(&adapter->ptm_lock);
- return get_device_system_crosststamp(igc_phc_get_syncdevicetime,
- adapter, &adapter->snapshot, cts);
+ ret = get_device_system_crosststamp(igc_phc_get_syncdevicetime,
+ adapter, &adapter->snapshot, cts);
+ mutex_unlock(&adapter->ptm_lock);
+
+ return ret;
}
static int igc_ptp_getcyclesx64(struct ptp_clock_info *ptp,
@@ -1146,6 +1151,9 @@ void igc_ptp_init(struct igc_adapter *adapter)
adapter->ptp_caps.pin_config = adapter->sdp_config;
adapter->ptp_caps.n_ext_ts = IGC_N_EXTTS;
adapter->ptp_caps.n_per_out = IGC_N_PEROUT;
+ adapter->ptp_caps.supported_extts_flags = PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS;
adapter->ptp_caps.n_pins = IGC_N_SDP;
adapter->ptp_caps.verify = igc_ptp_verify_pin;
@@ -1162,6 +1170,7 @@ void igc_ptp_init(struct igc_adapter *adapter)
spin_lock_init(&adapter->ptp_tx_lock);
spin_lock_init(&adapter->free_timer_lock);
spin_lock_init(&adapter->tmreg_lock);
+ mutex_init(&adapter->ptm_lock);
adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
@@ -1174,6 +1183,7 @@ void igc_ptp_init(struct igc_adapter *adapter)
if (IS_ERR(adapter->ptp_clock)) {
adapter->ptp_clock = NULL;
netdev_err(netdev, "ptp_clock_register failed\n");
+ mutex_destroy(&adapter->ptm_lock);
} else if (adapter->ptp_clock) {
netdev_info(netdev, "PHC added\n");
adapter->ptp_flags |= IGC_PTP_ENABLED;
@@ -1203,10 +1213,12 @@ static void igc_ptm_stop(struct igc_adapter *adapter)
struct igc_hw *hw = &adapter->hw;
u32 ctrl;
+ mutex_lock(&adapter->ptm_lock);
ctrl = rd32(IGC_PTM_CTRL);
ctrl &= ~IGC_PTM_CTRL_EN;
wr32(IGC_PTM_CTRL, ctrl);
+ mutex_unlock(&adapter->ptm_lock);
}
/**
@@ -1237,13 +1249,18 @@ void igc_ptp_suspend(struct igc_adapter *adapter)
**/
void igc_ptp_stop(struct igc_adapter *adapter)
{
+ if (!(adapter->ptp_flags & IGC_PTP_ENABLED))
+ return;
+
igc_ptp_suspend(adapter);
+ adapter->ptp_flags &= ~IGC_PTP_ENABLED;
if (adapter->ptp_clock) {
ptp_clock_unregister(adapter->ptp_clock);
netdev_info(adapter->netdev, "PHC removed\n");
adapter->ptp_flags &= ~IGC_PTP_ENABLED;
}
+ mutex_destroy(&adapter->ptm_lock);
}
/**
@@ -1255,13 +1272,18 @@ void igc_ptp_stop(struct igc_adapter *adapter)
void igc_ptp_reset(struct igc_adapter *adapter)
{
struct igc_hw *hw = &adapter->hw;
- u32 cycle_ctrl, ctrl;
+ u32 cycle_ctrl, ctrl, stat;
unsigned long flags;
u32 timadj;
+ if (!(adapter->ptp_flags & IGC_PTP_ENABLED))
+ return;
+
/* reset the tstamp_config */
igc_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config);
+ mutex_lock(&adapter->ptm_lock);
+
spin_lock_irqsave(&adapter->tmreg_lock, flags);
switch (adapter->hw.mac.type) {
@@ -1290,14 +1312,19 @@ void igc_ptp_reset(struct igc_adapter *adapter)
ctrl = IGC_PTM_CTRL_EN |
IGC_PTM_CTRL_START_NOW |
IGC_PTM_CTRL_SHRT_CYC(IGC_PTM_SHORT_CYC_DEFAULT) |
- IGC_PTM_CTRL_PTM_TO(IGC_PTM_TIMEOUT_DEFAULT) |
- IGC_PTM_CTRL_TRIG;
+ IGC_PTM_CTRL_PTM_TO(IGC_PTM_TIMEOUT_DEFAULT);
wr32(IGC_PTM_CTRL, ctrl);
/* Force the first cycle to run. */
- wr32(IGC_PTM_STAT, IGC_PTM_STAT_VALID);
+ igc_ptm_trigger(hw);
+ if (readx_poll_timeout_atomic(rd32, IGC_PTM_STAT, stat,
+ stat, IGC_PTM_STAT_SLEEP,
+ IGC_PTM_STAT_TIMEOUT))
+ netdev_err(adapter->netdev, "Timeout reading IGC_PTM_STAT register\n");
+
+ igc_ptm_reset(hw);
break;
default:
/* No work to do. */
@@ -1314,5 +1341,7 @@ void igc_ptp_reset(struct igc_adapter *adapter)
out:
spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
+ mutex_unlock(&adapter->ptm_lock);
+
wrfl();
}
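
A note on the igc_ptp.c hunks above: the cross-timestamp path is now serialized by adapter->ptm_lock, and each attempt is a strict trigger / poll-for-VALID / reset sequence that is repeated while the PCIe root reports only transient PTM errors. The stand-alone sketch below compresses that retry loop into user-space C; ptm_cycle() is a made-up stand-in for the hardware transaction, not a driver function.

#include <stdio.h>

#define PTM_STAT_VALID	0x1	/* cycle produced usable timestamps */
#define PTM_MAX_TRIES	100

/* Stand-in for "trigger the PTM cycle, poll STAT, reset the trigger":
 * report a transient error twice, then a valid cycle.
 */
static unsigned int ptm_cycle(void)
{
	static int calls;

	return ++calls < 3 ? 0x2 /* some error bit */ : PTM_STAT_VALID;
}

static int ptm_get_valid_cycle(void)
{
	for (int tries = PTM_MAX_TRIES; tries; tries--) {
		unsigned int stat = ptm_cycle();

		if (stat & PTM_STAT_VALID)
			return 0;	/* T1/T2 registers may now be read */

		/* transient root-port error: log it and go around again */
		fprintf(stderr, "PTM error 0x%x, retrying\n", stat);
	}

	return -1;	/* gave up, matches the driver's timeout path */
}

int main(void)
{
	return ptm_get_valid_cycle() ? 1 : 0;
}

The reset after every poll mirrors igc_ptm_reset() above, which drops the trigger bit and clears all status bits before the next attempt.
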
diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h
index 12ddc5793651..f343c6bfc6be 100644
--- a/drivers/net/ethernet/intel/igc/igc_regs.h
+++ b/drivers/net/ethernet/intel/igc/igc_regs.h
@@ -222,6 +222,22 @@
#define IGC_FTQF(_n) (0x059E0 + (4 * (_n))) /* 5-tuple Queue Fltr */
+/* Time sync registers - preemption statistics */
+#define IGC_PRMPTDRCNT 0x04284 /* Good RX Preempted Packets */
+#define IGC_PRMEVNTTCNT 0x04298 /* TX Preemption event counter */
+#define IGC_PRMEVNTRCNT 0x0429C /* RX Preemption event counter */
+
+ /* Preemption Exception Counter */
+ #define IGC_PRMEXCPRCNT 0x42A0
+/* Received out of order packets with SMD-C */
+#define IGC_PRMEXCPRCNT_OOO_SMDC 0x000000FF
+/* Received out of order packets with SMD-C and wrong Frame CNT */
+#define IGC_PRMEXCPRCNT_OOO_FRAME_CNT 0x0000FF00
+/* Received out of order packets with SMD-C and wrong Frag CNT */
+#define IGC_PRMEXCPRCNT_OOO_FRAG_CNT 0x00FF0000
+/* Received packets with SMD-S and wrong Frag CNT and Frame CNT */
+#define IGC_PRMEXCPRCNT_MISS_FRAME_FRAG_CNT 0xFF000000
+
/* Transmit Scheduling Registers */
#define IGC_TQAVCTRL 0x3570
#define IGC_TXQCTL(_n) (0x3344 + 0x4 * (_n))
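
A note on the new IGC_PRMEXCPRCNT definitions above: the register packs four independent 8-bit exception counters, one per mask. The short stand-alone decode below uses plain shifts; in-kernel code would normally use FIELD_GET() from <linux/bitfield.h> with the masks as defined, and the snapshot value here is invented purely for illustration.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t prmexcprcnt = 0x04030201;	/* example snapshot, not real data */

	uint8_t ooo_smdc        =  prmexcprcnt        & 0xFF;	/* OOO with SMD-C */
	uint8_t ooo_frame_cnt   = (prmexcprcnt >> 8)  & 0xFF;	/* ... and wrong frame count */
	uint8_t ooo_frag_cnt    = (prmexcprcnt >> 16) & 0xFF;	/* ... and wrong frag count */
	uint8_t miss_frame_frag = (prmexcprcnt >> 24) & 0xFF;	/* SMD-S, wrong frame+frag count */

	printf("OOO SMD-C:            %u\n", ooo_smdc);
	printf("OOO wrong frame cnt:  %u\n", ooo_frame_cnt);
	printf("OOO wrong frag cnt:   %u\n", ooo_frag_cnt);
	printf("SMD-S frame/frag cnt: %u\n", miss_frame_frag);
	return 0;
}
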
diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.c b/drivers/net/ethernet/intel/igc/igc_tsn.c
index 1e44374ca1ff..f22cc4d4f459 100644
--- a/drivers/net/ethernet/intel/igc/igc_tsn.c
+++ b/drivers/net/ethernet/intel/igc/igc_tsn.c
@@ -2,9 +2,143 @@
/* Copyright (c) 2019 Intel Corporation */
#include "igc.h"
+#include "igc_base.h"
#include "igc_hw.h"
#include "igc_tsn.h"
+#define MIN_MULTPLIER_TX_MIN_FRAG 0
+#define MAX_MULTPLIER_TX_MIN_FRAG 3
+/* Frag size is based on Section 8.12.2 of the SW User Manual */
+#define TX_MIN_FRAG_SIZE 64
+#define TX_MAX_FRAG_SIZE (TX_MIN_FRAG_SIZE * \
+ (MAX_MULTPLIER_TX_MIN_FRAG + 1))
+
+DEFINE_STATIC_KEY_FALSE(igc_fpe_enabled);
+
+static int igc_fpe_init_smd_frame(struct igc_ring *ring,
+ struct igc_tx_buffer *buffer,
+ struct sk_buff *skb)
+{
+ dma_addr_t dma = dma_map_single(ring->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+
+ if (dma_mapping_error(ring->dev, dma)) {
+ netdev_err_once(ring->netdev, "Failed to map DMA for TX\n");
+ return -ENOMEM;
+ }
+
+ buffer->skb = skb;
+ buffer->protocol = 0;
+ buffer->bytecount = skb->len;
+ buffer->gso_segs = 1;
+ buffer->time_stamp = jiffies;
+ dma_unmap_len_set(buffer, len, skb->len);
+ dma_unmap_addr_set(buffer, dma, dma);
+
+ return 0;
+}
+
+static int igc_fpe_init_tx_descriptor(struct igc_ring *ring,
+ struct sk_buff *skb,
+ enum igc_txd_popts_type type)
+{
+ u32 cmd_type, olinfo_status = 0;
+ struct igc_tx_buffer *buffer;
+ union igc_adv_tx_desc *desc;
+ int err;
+
+ if (!igc_desc_unused(ring))
+ return -EBUSY;
+
+ buffer = &ring->tx_buffer_info[ring->next_to_use];
+ err = igc_fpe_init_smd_frame(ring, buffer, skb);
+ if (err)
+ return err;
+
+ cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
+ IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD |
+ buffer->bytecount;
+
+ olinfo_status |= FIELD_PREP(IGC_ADVTXD_PAYLEN_MASK, buffer->bytecount);
+
+ switch (type) {
+ case SMD_V:
+ case SMD_R:
+ olinfo_status |= FIELD_PREP(IGC_TXD_POPTS_SMD_MASK, type);
+ break;
+ }
+
+ desc = IGC_TX_DESC(ring, ring->next_to_use);
+ desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+ desc->read.olinfo_status = cpu_to_le32(olinfo_status);
+ desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(buffer, dma));
+
+ netdev_tx_sent_queue(txring_txq(ring), skb->len);
+
+ buffer->next_to_watch = desc;
+ ring->next_to_use = (ring->next_to_use + 1) % ring->count;
+
+ return 0;
+}
+
+static int igc_fpe_xmit_smd_frame(struct igc_adapter *adapter,
+ enum igc_txd_popts_type type)
+{
+ int cpu = smp_processor_id();
+ struct netdev_queue *nq;
+ struct igc_ring *ring;
+ struct sk_buff *skb;
+ int err;
+
+ ring = igc_get_tx_ring(adapter, cpu);
+ nq = txring_txq(ring);
+
+ skb = alloc_skb(SMD_FRAME_SIZE, GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put_zero(skb, SMD_FRAME_SIZE);
+
+ __netif_tx_lock(nq, cpu);
+
+ err = igc_fpe_init_tx_descriptor(ring, skb, type);
+ igc_flush_tx_descriptors(ring);
+
+ __netif_tx_unlock(nq);
+
+ return err;
+}
+
+static void igc_fpe_send_mpacket(struct ethtool_mmsv *mmsv,
+ enum ethtool_mpacket type)
+{
+ struct igc_fpe_t *fpe = container_of(mmsv, struct igc_fpe_t, mmsv);
+ struct igc_adapter *adapter;
+ int err;
+
+ adapter = container_of(fpe, struct igc_adapter, fpe);
+
+ if (type == ETHTOOL_MPACKET_VERIFY) {
+ err = igc_fpe_xmit_smd_frame(adapter, SMD_V);
+ if (err && net_ratelimit())
+ netdev_err(adapter->netdev, "Error sending SMD-V\n");
+ } else if (type == ETHTOOL_MPACKET_RESPONSE) {
+ err = igc_fpe_xmit_smd_frame(adapter, SMD_R);
+ if (err && net_ratelimit())
+ netdev_err(adapter->netdev, "Error sending SMD-R frame\n");
+ }
+}
+
+static const struct ethtool_mmsv_ops igc_mmsv_ops = {
+ .send_mpacket = igc_fpe_send_mpacket,
+};
+
+void igc_fpe_init(struct igc_adapter *adapter)
+{
+ adapter->fpe.tx_min_frag_size = TX_MIN_FRAG_SIZE;
+ ethtool_mmsv_init(&adapter->fpe.mmsv, adapter->netdev, &igc_mmsv_ops);
+}
+
static bool is_any_launchtime(struct igc_adapter *adapter)
{
int i;
@@ -37,17 +171,16 @@ static unsigned int igc_tsn_new_flags(struct igc_adapter *adapter)
{
unsigned int new_flags = adapter->flags & ~IGC_FLAG_TSN_ANY_ENABLED;
- if (adapter->taprio_offload_enable)
- new_flags |= IGC_FLAG_TSN_QBV_ENABLED;
- if (is_any_launchtime(adapter))
+ if (adapter->taprio_offload_enable || is_any_launchtime(adapter) ||
+ adapter->strict_priority_enable)
new_flags |= IGC_FLAG_TSN_QBV_ENABLED;
if (is_cbs_enabled(adapter))
new_flags |= IGC_FLAG_TSN_QAV_ENABLED;
- if (adapter->strict_priority_enable)
- new_flags |= IGC_FLAG_TSN_LEGACY_ENABLED;
+ if (adapter->fpe.mmsv.pmac_enabled)
+ new_flags |= IGC_FLAG_TSN_PREEMPT_ENABLED;
return new_flags;
}
@@ -125,6 +258,29 @@ static void igc_tsn_tx_arb(struct igc_adapter *adapter, u16 *queue_per_tc)
wr32(IGC_TXARB, txarb);
}
+/**
+ * igc_tsn_set_rxpbsize - Set the receive packet buffer size
+ * @adapter: Pointer to the igc_adapter structure
+ * @rxpbs_exp_bmc_be: Value to set the receive packet buffer size, including
+ * express buffer, BMC buffer, and Best Effort buffer
+ *
+ * The IGC_RXPBS register value may include allocations for the Express buffer,
+ * BMC buffer, Best Effort buffer, and the timestamp descriptor buffer
+ * (IGC_RXPBS_CFG_TS_EN).
+ */
+static void igc_tsn_set_rxpbsize(struct igc_adapter *adapter,
+ u32 rxpbs_exp_bmc_be)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u32 rxpbs = rd32(IGC_RXPBS);
+
+ rxpbs &= ~(IGC_RXPBSIZE_EXP_MASK | IGC_BMC2OSPBSIZE_MASK |
+ IGC_RXPBSIZE_BE_MASK);
+ rxpbs |= rxpbs_exp_bmc_be;
+
+ wr32(IGC_RXPBS, rxpbs);
+}
+
/* Returns the TSN specific registers to their default values after
* the adapter is reset.
*/
@@ -136,15 +292,18 @@ static int igc_tsn_disable_offload(struct igc_adapter *adapter)
int i;
wr32(IGC_GTXOFFSET, 0);
- wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT);
+ wr32(IGC_TXPBS, IGC_TXPBSIZE_DEFAULT);
wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_DEFAULT);
+ igc_tsn_set_rxpbsize(adapter, IGC_RXPBSIZE_EXP_BMC_DEFAULT);
+
if (igc_is_device_id_i226(hw))
igc_tsn_restore_retx_default(adapter);
tqavctrl = rd32(IGC_TQAVCTRL);
tqavctrl &= ~(IGC_TQAVCTRL_TRANSMIT_MODE_TSN |
- IGC_TQAVCTRL_ENHANCED_QAV | IGC_TQAVCTRL_FUTSCDDIS);
+ IGC_TQAVCTRL_ENHANCED_QAV | IGC_TQAVCTRL_FUTSCDDIS |
+ IGC_TQAVCTRL_PREEMPT_ENA | IGC_TQAVCTRL_MIN_FRAG_MASK);
wr32(IGC_TQAVCTRL, tqavctrl);
@@ -157,16 +316,12 @@ static int igc_tsn_disable_offload(struct igc_adapter *adapter)
wr32(IGC_QBVCYCLET_S, 0);
wr32(IGC_QBVCYCLET, NSEC_PER_SEC);
- /* Reset mqprio TC configuration. */
- netdev_reset_tc(adapter->netdev);
-
/* Restore the default Tx arbitration: Priority 0 has the highest
* priority and is assigned to queue 0 and so on and so forth.
*/
igc_tsn_tx_arb(adapter, queue_per_tc);
adapter->flags &= ~IGC_FLAG_TSN_QBV_ENABLED;
- adapter->flags &= ~IGC_FLAG_TSN_LEGACY_ENABLED;
return 0;
}
@@ -190,53 +345,51 @@ static void igc_tsn_set_retx_qbvfullthreshold(struct igc_adapter *adapter)
wr32(IGC_RETX_CTL, retxctl);
}
+static u8 igc_fpe_get_frag_size_mult(const struct igc_fpe_t *fpe)
+{
+ u8 mult = (fpe->tx_min_frag_size / TX_MIN_FRAG_SIZE) - 1;
+
+ return clamp_t(u8, mult, MIN_MULTPLIER_TX_MIN_FRAG,
+ MAX_MULTPLIER_TX_MIN_FRAG);
+}
+
+u32 igc_fpe_get_supported_frag_size(u32 frag_size)
+{
+ const u32 supported_sizes[] = {64, 128, 192, 256};
+
+ /* Find the smallest supported size that is >= frag_size */
+ for (int i = 0; i < ARRAY_SIZE(supported_sizes); i++) {
+ if (frag_size <= supported_sizes[i])
+ return supported_sizes[i];
+ }
+
+ /* Should not happen */
+ return TX_MAX_FRAG_SIZE;
+}
+
static int igc_tsn_enable_offload(struct igc_adapter *adapter)
{
struct igc_hw *hw = &adapter->hw;
u32 tqavctrl, baset_l, baset_h;
u32 sec, nsec, cycle;
ktime_t base_time, systim;
+ u32 frag_size_mult;
int i;
wr32(IGC_TSAUXC, 0);
wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_TSN);
wr32(IGC_TXPBS, IGC_TXPBSIZE_TSN);
+ igc_tsn_set_rxpbsize(adapter, IGC_RXPBSIZE_EXP_BMC_BE_TSN);
+
if (igc_is_device_id_i226(hw))
igc_tsn_set_retx_qbvfullthreshold(adapter);
if (adapter->strict_priority_enable) {
- int err;
-
- err = netdev_set_num_tc(adapter->netdev, adapter->num_tc);
- if (err)
- return err;
-
- for (i = 0; i < adapter->num_tc; i++) {
- err = netdev_set_tc_queue(adapter->netdev, i, 1,
- adapter->queue_per_tc[i]);
- if (err)
- return err;
- }
-
- /* In case the card is configured with less than four queues. */
- for (; i < IGC_MAX_TX_QUEUES; i++)
- adapter->queue_per_tc[i] = i;
-
/* Configure queue priorities according to the user provided
* mapping.
*/
igc_tsn_tx_arb(adapter, adapter->queue_per_tc);
-
- /* Enable legacy TSN mode which will do strict priority without
- * any other TSN features.
- */
- tqavctrl = rd32(IGC_TQAVCTRL);
- tqavctrl |= IGC_TQAVCTRL_TRANSMIT_MODE_TSN;
- tqavctrl &= ~IGC_TQAVCTRL_ENHANCED_QAV;
- wr32(IGC_TQAVCTRL, tqavctrl);
-
- return 0;
}
for (i = 0; i < adapter->num_tx_queues; i++) {
@@ -361,10 +514,16 @@ skip_cbs:
wr32(IGC_TXQCTL(i), txqctl);
}
- tqavctrl = rd32(IGC_TQAVCTRL) & ~IGC_TQAVCTRL_FUTSCDDIS;
-
+ tqavctrl = rd32(IGC_TQAVCTRL) & ~(IGC_TQAVCTRL_FUTSCDDIS |
+ IGC_TQAVCTRL_PREEMPT_ENA | IGC_TQAVCTRL_MIN_FRAG_MASK);
tqavctrl |= IGC_TQAVCTRL_TRANSMIT_MODE_TSN | IGC_TQAVCTRL_ENHANCED_QAV;
+ if (adapter->fpe.mmsv.pmac_enabled)
+ tqavctrl |= IGC_TQAVCTRL_PREEMPT_ENA;
+
+ frag_size_mult = igc_fpe_get_frag_size_mult(&adapter->fpe);
+ tqavctrl |= FIELD_PREP(IGC_TQAVCTRL_MIN_FRAG_MASK, frag_size_mult);
+
adapter->qbv_count++;
cycle = adapter->cycle_time;
@@ -425,6 +584,14 @@ int igc_tsn_reset(struct igc_adapter *adapter)
unsigned int new_flags;
int err = 0;
+ if (adapter->fpe.mmsv.pmac_enabled) {
+ err = igc_enable_empty_addr_recv(adapter);
+ if (err && net_ratelimit())
+ netdev_err(adapter->netdev, "Error adding empty address to MAC filter\n");
+ } else {
+ igc_disable_empty_addr_recv(adapter);
+ }
+
new_flags = igc_tsn_new_flags(adapter);
if (!(new_flags & IGC_FLAG_TSN_ANY_ENABLED))
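
A note on the fragment-size handling added above: the hardware only supports minimum fragment sizes of 64, 128, 192 and 256 bytes. igc_fpe_get_supported_frag_size() rounds a request up to the next supported value and igc_fpe_get_frag_size_mult() converts that size into the 0-3 multiplier placed in the TQAVCTRL MIN_FRAG field. Below is a stand-alone model of that mapping with a few worked inputs; the constant and function names here are illustrative, not the driver's.

#include <stdio.h>

#define FRAG_STEP	64	/* hardware granularity (TX_MIN_FRAG_SIZE) */

static unsigned int supported_frag_size(unsigned int req)
{
	static const unsigned int sizes[] = { 64, 128, 192, 256 };

	for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		if (req <= sizes[i])
			return sizes[i];

	return 256;	/* mirrors the driver's "should not happen" fallback */
}

static unsigned int frag_size_mult(unsigned int size)
{
	unsigned int mult = size / FRAG_STEP - 1;

	return mult > 3 ? 3 : mult;	/* clamp to the 2-bit TQAVCTRL field */
}

int main(void)
{
	const unsigned int requests[] = { 60, 64, 100, 192, 250, 300 };

	for (unsigned int i = 0; i < sizeof(requests) / sizeof(requests[0]); i++) {
		unsigned int size = supported_frag_size(requests[i]);

		printf("requested %3u -> programmed %3u (mult %u)\n",
		       requests[i], size, frag_size_mult(size));
	}
	return 0;
}

The 300-byte request in the table exercises the same 256-byte fallback the driver marks as "should not happen".
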
diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.h b/drivers/net/ethernet/intel/igc/igc_tsn.h
index 98ec845a86bf..c2a77229207b 100644
--- a/drivers/net/ethernet/intel/igc/igc_tsn.h
+++ b/drivers/net/ethernet/intel/igc/igc_tsn.h
@@ -4,9 +4,61 @@
#ifndef _IGC_TSN_H_
#define _IGC_TSN_H_
+#define IGC_RX_MIN_FRAG_SIZE 60
+#define SMD_FRAME_SIZE 60
+
+enum igc_txd_popts_type {
+ SMD_V = 0x01,
+ SMD_R = 0x02,
+};
+
+DECLARE_STATIC_KEY_FALSE(igc_fpe_enabled);
+
+void igc_fpe_init(struct igc_adapter *adapter);
+u32 igc_fpe_get_supported_frag_size(u32 frag_size);
int igc_tsn_offload_apply(struct igc_adapter *adapter);
int igc_tsn_reset(struct igc_adapter *adapter);
void igc_tsn_adjust_txtime_offset(struct igc_adapter *adapter);
bool igc_tsn_is_taprio_activated_by_user(struct igc_adapter *adapter);
+static inline bool igc_fpe_is_pmac_enabled(struct igc_adapter *adapter)
+{
+ return static_branch_unlikely(&igc_fpe_enabled) &&
+ adapter->fpe.mmsv.pmac_enabled;
+}
+
+static inline bool igc_fpe_handle_mpacket(struct igc_adapter *adapter,
+ union igc_adv_rx_desc *rx_desc,
+ unsigned int size, void *pktbuf)
+{
+ u32 status_error = le32_to_cpu(rx_desc->wb.upper.status_error);
+ int smd;
+
+ smd = FIELD_GET(IGC_RXDADV_STAT_SMD_TYPE_MASK, status_error);
+ if (smd != IGC_RXD_STAT_SMD_TYPE_V && smd != IGC_RXD_STAT_SMD_TYPE_R)
+ return false;
+
+ if (size == SMD_FRAME_SIZE && mem_is_zero(pktbuf, SMD_FRAME_SIZE)) {
+ struct ethtool_mmsv *mmsv = &adapter->fpe.mmsv;
+ enum ethtool_mmsv_event event;
+
+ if (smd == IGC_RXD_STAT_SMD_TYPE_V)
+ event = ETHTOOL_MMSV_LP_SENT_VERIFY_MPACKET;
+ else
+ event = ETHTOOL_MMSV_LP_SENT_RESPONSE_MPACKET;
+
+ ethtool_mmsv_event_handle(mmsv, event);
+ }
+
+ return true;
+}
+
+static inline bool igc_fpe_transmitted_smd_v(union igc_adv_tx_desc *tx_desc)
+{
+ u32 olinfo_status = le32_to_cpu(tx_desc->read.olinfo_status);
+ u8 smd = FIELD_GET(IGC_TXD_POPTS_SMD_MASK, olinfo_status);
+
+ return smd == SMD_V;
+}
+
#endif /* _IGC_BASE_H */
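
A note on igc_fpe_handle_mpacket() above: every received frame whose descriptor carries an SMD-V or SMD-R type is taken out of the normal Rx path, but only a frame that is exactly 60 zero bytes is forwarded to the verification state machine as an event. The stand-alone sketch below models that three-way outcome; the enum and function names are illustrative, not driver API.

#include <stdio.h>
#include <string.h>

#define SMD_FRAME_LEN	60

enum smd_type { SMD_NONE, SMD_TYPE_V, SMD_TYPE_R };
enum mpacket_action {
	MPACKET_PASS,	/* ordinary traffic, keep processing it */
	MPACKET_DROP,	/* SMD-tagged but malformed: consumed silently */
	MPACKET_EVENT,	/* well-formed mPacket: notify the verification FSM */
};

static enum mpacket_action classify(enum smd_type smd,
				    const unsigned char *buf, size_t len)
{
	static const unsigned char zeros[SMD_FRAME_LEN];

	if (smd != SMD_TYPE_V && smd != SMD_TYPE_R)
		return MPACKET_PASS;

	if (len == SMD_FRAME_LEN && !memcmp(buf, zeros, SMD_FRAME_LEN))
		return MPACKET_EVENT;

	return MPACKET_DROP;
}

int main(void)
{
	unsigned char frame[SMD_FRAME_LEN] = { 0 };

	printf("%d %d %d\n",
	       classify(SMD_NONE, frame, sizeof(frame)),	/* 0: pass  */
	       classify(SMD_TYPE_V, frame, sizeof(frame)),	/* 2: event */
	       classify(SMD_TYPE_R, frame, 42));		/* 1: drop  */
	return 0;
}
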
diff --git a/drivers/net/ethernet/intel/igc/igc_xdp.c b/drivers/net/ethernet/intel/igc/igc_xdp.c
index e27af72aada8..9eb47b4beb06 100644
--- a/drivers/net/ethernet/intel/igc/igc_xdp.c
+++ b/drivers/net/ethernet/intel/igc/igc_xdp.c
@@ -13,6 +13,8 @@ int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog,
struct net_device *dev = adapter->netdev;
bool if_running = netif_running(dev);
struct bpf_prog *old_prog;
+ bool need_update;
+ unsigned int i;
if (dev->mtu > ETH_DATA_LEN) {
/* For now, the driver doesn't support XDP functionality with
@@ -22,8 +24,14 @@ int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog,
return -EOPNOTSUPP;
}
- if (if_running)
- igc_close(dev);
+ need_update = !!adapter->xdp_prog != !!prog;
+ if (if_running && need_update) {
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ igc_disable_rx_ring(adapter->rx_ring[i]);
+ igc_disable_tx_ring(adapter->tx_ring[i]);
+ napi_disable(&adapter->rx_ring[i]->q_vector->napi);
+ }
+ }
old_prog = xchg(&adapter->xdp_prog, prog);
if (old_prog)
@@ -34,8 +42,13 @@ int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog,
else
xdp_features_clear_redirect_target(dev);
- if (if_running)
- igc_open(dev);
+ if (if_running && need_update) {
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ napi_enable(&adapter->rx_ring[i]->q_vector->napi);
+ igc_enable_tx_ring(adapter->tx_ring[i]);
+ igc_enable_rx_ring(adapter->rx_ring[i]);
+ }
+ }
return 0;
}
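
A note on the igc_xdp.c change above: instead of a full igc_close()/igc_open() cycle, the rings and their NAPI instances are only quiesced when the XDP attach state actually flips; replacing one loaded program with another leaves traffic flowing. The predicate reduces to the one-liner modelled below (user-space sketch, illustrative names).

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Rings only need to be disabled and re-enabled when XDP goes on or off,
 * not when one program is exchanged for another.
 */
static bool need_ring_restart(const void *old_prog, const void *new_prog)
{
	return (old_prog != NULL) != (new_prog != NULL);
}

int main(void)
{
	int a, b;

	printf("attach: %d, swap: %d, detach: %d\n",
	       need_ring_restart(NULL, &a),
	       need_ring_restart(&a, &b),
	       need_ring_restart(&a, NULL));
	return 0;
}
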
diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile
index 965e5ce1b326..2e7738f41c58 100644
--- a/drivers/net/ethernet/intel/ixgbe/Makefile
+++ b/drivers/net/ethernet/intel/ixgbe/Makefile
@@ -1,15 +1,17 @@
# SPDX-License-Identifier: GPL-2.0
-# Copyright(c) 1999 - 2018 Intel Corporation.
+# Copyright(c) 1999 - 2024 Intel Corporation.
#
# Makefile for the Intel(R) 10GbE PCI Express ethernet driver
#
+subdir-ccflags-y += -I$(src)
obj-$(CONFIG_IXGBE) += ixgbe.o
ixgbe-y := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
ixgbe_mbx.o ixgbe_x540.o ixgbe_x550.o ixgbe_lib.o ixgbe_ptp.o \
- ixgbe_xsk.o
+ ixgbe_xsk.o ixgbe_e610.o devlink/devlink.o ixgbe_fw_update.o \
+ devlink/region.o
ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
ixgbe_dcb_82599.o ixgbe_dcb_nl.o
diff --git a/drivers/net/ethernet/intel/ixgbe/devlink/devlink.c b/drivers/net/ethernet/intel/ixgbe/devlink/devlink.c
new file mode 100644
index 000000000000..54f1b83dfe42
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/devlink/devlink.c
@@ -0,0 +1,557 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025, Intel Corporation. */
+
+#include "ixgbe.h"
+#include "devlink.h"
+#include "ixgbe_fw_update.h"
+
+struct ixgbe_info_ctx {
+ char buf[128];
+ struct ixgbe_orom_info pending_orom;
+ struct ixgbe_nvm_info pending_nvm;
+ struct ixgbe_netlist_info pending_netlist;
+ struct ixgbe_hw_dev_caps dev_caps;
+};
+
+enum ixgbe_devlink_version_type {
+ IXGBE_DL_VERSION_RUNNING,
+ IXGBE_DL_VERSION_STORED
+};
+
+static void ixgbe_info_get_dsn(struct ixgbe_adapter *adapter,
+ struct ixgbe_info_ctx *ctx)
+{
+ u8 dsn[8];
+
+ /* Copy the DSN into an array in Big Endian format */
+ put_unaligned_be64(pci_get_dsn(adapter->pdev), dsn);
+
+ snprintf(ctx->buf, sizeof(ctx->buf), "%8phD", dsn);
+}
+
+static void ixgbe_info_orom_ver(struct ixgbe_adapter *adapter,
+ struct ixgbe_info_ctx *ctx,
+ enum ixgbe_devlink_version_type type)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_nvm_version nvm_ver;
+
+ ctx->buf[0] = '\0';
+
+ if (hw->mac.type == ixgbe_mac_e610) {
+ struct ixgbe_orom_info *orom = &adapter->hw.flash.orom;
+
+ if (type == IXGBE_DL_VERSION_STORED &&
+ ctx->dev_caps.common_cap.nvm_update_pending_orom)
+ orom = &ctx->pending_orom;
+
+ snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u",
+ orom->major, orom->build, orom->patch);
+ return;
+ }
+
+ ixgbe_get_oem_prod_version(hw, &nvm_ver);
+ if (nvm_ver.oem_valid) {
+ snprintf(ctx->buf, sizeof(ctx->buf), "%x.%x.%x",
+ nvm_ver.oem_major, nvm_ver.oem_minor,
+ nvm_ver.oem_release);
+
+ return;
+ }
+
+ ixgbe_get_orom_version(hw, &nvm_ver);
+ if (nvm_ver.or_valid)
+ snprintf(ctx->buf, sizeof(ctx->buf), "%d.%d.%d",
+ nvm_ver.or_major, nvm_ver.or_build, nvm_ver.or_patch);
+}
+
+static void ixgbe_info_eetrack(struct ixgbe_adapter *adapter,
+ struct ixgbe_info_ctx *ctx,
+ enum ixgbe_devlink_version_type type)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_nvm_version nvm_ver;
+
+ if (hw->mac.type == ixgbe_mac_e610) {
+ u32 eetrack = hw->flash.nvm.eetrack;
+
+ if (type == IXGBE_DL_VERSION_STORED &&
+ ctx->dev_caps.common_cap.nvm_update_pending_nvm)
+ eetrack = ctx->pending_nvm.eetrack;
+
+ snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", eetrack);
+ return;
+ }
+
+ ixgbe_get_oem_prod_version(hw, &nvm_ver);
+
+ /* No ETRACK version for OEM */
+ if (nvm_ver.oem_valid) {
+ ctx->buf[0] = '\0';
+ return;
+ }
+
+ ixgbe_get_etk_id(hw, &nvm_ver);
+ snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", nvm_ver.etk_id);
+}
+
+static void ixgbe_info_fw_api(struct ixgbe_adapter *adapter,
+ struct ixgbe_info_ctx *ctx)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u",
+ hw->api_maj_ver, hw->api_min_ver, hw->api_patch);
+}
+
+static void ixgbe_info_fw_build(struct ixgbe_adapter *adapter,
+ struct ixgbe_info_ctx *ctx)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", hw->fw_build);
+}
+
+static void ixgbe_info_fw_srev(struct ixgbe_adapter *adapter,
+ struct ixgbe_info_ctx *ctx,
+ enum ixgbe_devlink_version_type type)
+{
+ struct ixgbe_nvm_info *nvm = &adapter->hw.flash.nvm;
+
+ if (type == IXGBE_DL_VERSION_STORED &&
+ ctx->dev_caps.common_cap.nvm_update_pending_nvm)
+ nvm = &ctx->pending_nvm;
+
+ snprintf(ctx->buf, sizeof(ctx->buf), "%u", nvm->srev);
+}
+
+static void ixgbe_info_orom_srev(struct ixgbe_adapter *adapter,
+ struct ixgbe_info_ctx *ctx,
+ enum ixgbe_devlink_version_type type)
+{
+ struct ixgbe_orom_info *orom = &adapter->hw.flash.orom;
+
+ if (type == IXGBE_DL_VERSION_STORED &&
+ ctx->dev_caps.common_cap.nvm_update_pending_orom)
+ orom = &ctx->pending_orom;
+
+ snprintf(ctx->buf, sizeof(ctx->buf), "%u", orom->srev);
+}
+
+static void ixgbe_info_nvm_ver(struct ixgbe_adapter *adapter,
+ struct ixgbe_info_ctx *ctx,
+ enum ixgbe_devlink_version_type type)
+{
+ struct ixgbe_nvm_info *nvm = &adapter->hw.flash.nvm;
+
+ if (type == IXGBE_DL_VERSION_STORED &&
+ ctx->dev_caps.common_cap.nvm_update_pending_nvm)
+ nvm = &ctx->pending_nvm;
+
+ snprintf(ctx->buf, sizeof(ctx->buf), "%x.%02x", nvm->major, nvm->minor);
+}
+
+static void ixgbe_info_netlist_ver(struct ixgbe_adapter *adapter,
+ struct ixgbe_info_ctx *ctx,
+ enum ixgbe_devlink_version_type type)
+{
+ struct ixgbe_netlist_info *netlist = &adapter->hw.flash.netlist;
+
+ if (type == IXGBE_DL_VERSION_STORED &&
+ ctx->dev_caps.common_cap.nvm_update_pending_netlist)
+ netlist = &ctx->pending_netlist;
+
+ /* The netlist version fields are BCD formatted */
+ snprintf(ctx->buf, sizeof(ctx->buf), "%x.%x.%x-%x.%x.%x",
+ netlist->major, netlist->minor,
+ netlist->type >> 16, netlist->type & 0xFFFF,
+ netlist->rev, netlist->cust_ver);
+}
+
+static void ixgbe_info_netlist_build(struct ixgbe_adapter *adapter,
+ struct ixgbe_info_ctx *ctx,
+ enum ixgbe_devlink_version_type type)
+{
+ struct ixgbe_netlist_info *netlist = &adapter->hw.flash.netlist;
+
+ if (type == IXGBE_DL_VERSION_STORED &&
+ ctx->dev_caps.common_cap.nvm_update_pending_netlist)
+ netlist = &ctx->pending_netlist;
+
+ snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", netlist->hash);
+}
+
+static int ixgbe_set_ctx_dev_caps(struct ixgbe_hw *hw,
+ struct ixgbe_info_ctx *ctx,
+ struct netlink_ext_ack *extack)
+{
+ bool *pending_orom, *pending_nvm, *pending_netlist;
+ int err;
+
+ err = ixgbe_discover_dev_caps(hw, &ctx->dev_caps);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unable to discover device capabilities");
+ return err;
+ }
+
+ pending_orom = &ctx->dev_caps.common_cap.nvm_update_pending_orom;
+ pending_nvm = &ctx->dev_caps.common_cap.nvm_update_pending_nvm;
+ pending_netlist = &ctx->dev_caps.common_cap.nvm_update_pending_netlist;
+
+ if (*pending_orom) {
+ err = ixgbe_get_inactive_orom_ver(hw, &ctx->pending_orom);
+ if (err)
+ *pending_orom = false;
+ }
+
+ if (*pending_nvm) {
+ err = ixgbe_get_inactive_nvm_ver(hw, &ctx->pending_nvm);
+ if (err)
+ *pending_nvm = false;
+ }
+
+ if (*pending_netlist) {
+ err = ixgbe_get_inactive_netlist_ver(hw, &ctx->pending_netlist);
+ if (err)
+ *pending_netlist = false;
+ }
+
+ return 0;
+}
+
+static int ixgbe_devlink_info_get_e610(struct ixgbe_adapter *adapter,
+ struct devlink_info_req *req,
+ struct ixgbe_info_ctx *ctx)
+{
+ int err;
+
+ ixgbe_info_fw_api(adapter, ctx);
+ err = devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW_MGMT_API,
+ ctx->buf);
+ if (err)
+ return err;
+
+ ixgbe_info_fw_build(adapter, ctx);
+ err = devlink_info_version_running_put(req, "fw.mgmt.build", ctx->buf);
+ if (err)
+ return err;
+
+ ixgbe_info_fw_srev(adapter, ctx, IXGBE_DL_VERSION_RUNNING);
+ err = devlink_info_version_running_put(req, "fw.mgmt.srev", ctx->buf);
+ if (err)
+ return err;
+
+ ixgbe_info_orom_srev(adapter, ctx, IXGBE_DL_VERSION_RUNNING);
+ err = devlink_info_version_running_put(req, "fw.undi.srev", ctx->buf);
+ if (err)
+ return err;
+
+ ixgbe_info_nvm_ver(adapter, ctx, IXGBE_DL_VERSION_RUNNING);
+ err = devlink_info_version_running_put(req, "fw.psid.api", ctx->buf);
+ if (err)
+ return err;
+
+ ixgbe_info_netlist_ver(adapter, ctx, IXGBE_DL_VERSION_RUNNING);
+ err = devlink_info_version_running_put(req, "fw.netlist", ctx->buf);
+ if (err)
+ return err;
+
+ ixgbe_info_netlist_build(adapter, ctx, IXGBE_DL_VERSION_RUNNING);
+ return devlink_info_version_running_put(req, "fw.netlist.build",
+ ctx->buf);
+}
+
+static int
+ixgbe_devlink_pending_info_get_e610(struct ixgbe_adapter *adapter,
+ struct devlink_info_req *req,
+ struct ixgbe_info_ctx *ctx)
+{
+ int err;
+
+ ixgbe_info_orom_ver(adapter, ctx, IXGBE_DL_VERSION_STORED);
+ err = devlink_info_version_stored_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW_UNDI,
+ ctx->buf);
+ if (err)
+ return err;
+
+ ixgbe_info_eetrack(adapter, ctx, IXGBE_DL_VERSION_STORED);
+ err = devlink_info_version_stored_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID,
+ ctx->buf);
+ if (err)
+ return err;
+
+ ixgbe_info_fw_srev(adapter, ctx, IXGBE_DL_VERSION_STORED);
+ err = devlink_info_version_stored_put(req, "fw.mgmt.srev", ctx->buf);
+ if (err)
+ return err;
+
+ ixgbe_info_orom_srev(adapter, ctx, IXGBE_DL_VERSION_STORED);
+ err = devlink_info_version_stored_put(req, "fw.undi.srev", ctx->buf);
+ if (err)
+ return err;
+
+ ixgbe_info_nvm_ver(adapter, ctx, IXGBE_DL_VERSION_STORED);
+ err = devlink_info_version_stored_put(req, "fw.psid.api", ctx->buf);
+ if (err)
+ return err;
+
+ ixgbe_info_netlist_ver(adapter, ctx, IXGBE_DL_VERSION_STORED);
+ err = devlink_info_version_stored_put(req, "fw.netlist", ctx->buf);
+ if (err)
+ return err;
+
+ ixgbe_info_netlist_build(adapter, ctx, IXGBE_DL_VERSION_STORED);
+ return devlink_info_version_stored_put(req, "fw.netlist.build",
+ ctx->buf);
+}
+
+static int ixgbe_devlink_info_get(struct devlink *devlink,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct ixgbe_adapter *adapter = devlink_priv(devlink);
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_info_ctx *ctx;
+ int err;
+
+ ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ if (hw->mac.type == ixgbe_mac_e610)
+ ixgbe_refresh_fw_version(adapter);
+
+ ixgbe_info_get_dsn(adapter, ctx);
+ err = devlink_info_serial_number_put(req, ctx->buf);
+ if (err)
+ goto free_ctx;
+
+ err = hw->eeprom.ops.read_pba_string(hw, ctx->buf, sizeof(ctx->buf));
+ if (err)
+ goto free_ctx;
+
+ err = devlink_info_version_fixed_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_BOARD_ID,
+ ctx->buf);
+ if (err)
+ goto free_ctx;
+
+ ixgbe_info_orom_ver(adapter, ctx, IXGBE_DL_VERSION_RUNNING);
+ err = devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW_UNDI,
+ ctx->buf);
+ if (err)
+ goto free_ctx;
+
+ ixgbe_info_eetrack(adapter, ctx, IXGBE_DL_VERSION_RUNNING);
+ err = devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID,
+ ctx->buf);
+ if (err || hw->mac.type != ixgbe_mac_e610)
+ goto free_ctx;
+
+ err = ixgbe_set_ctx_dev_caps(hw, ctx, extack);
+ if (err)
+ goto free_ctx;
+
+ err = ixgbe_devlink_info_get_e610(adapter, req, ctx);
+ if (err)
+ goto free_ctx;
+
+ err = ixgbe_devlink_pending_info_get_e610(adapter, req, ctx);
+free_ctx:
+ kfree(ctx);
+ return err;
+}
+
+/**
+ * ixgbe_devlink_reload_empr_start - Start EMP reset to activate new firmware
+ * @devlink: pointer to the devlink instance to reload
+ * @netns_change: if true, the network namespace is changing
+ * @action: the action to perform. Must be DEVLINK_RELOAD_ACTION_FW_ACTIVATE
+ * @limit: limits on what reload should do, such as not resetting
+ * @extack: netlink extended ACK structure
+ *
+ * Allow user to activate new Embedded Management Processor firmware by
+ * issuing a device-specific EMP reset. Called in response to
+ * a DEVLINK_CMD_RELOAD with the DEVLINK_RELOAD_ACTION_FW_ACTIVATE action.
+ *
+ * Note that teardown and rebuild of the driver state happens automatically as
+ * part of an interrupt and watchdog task. This is because all physical
+ * functions on the device must be able to reset when an EMP reset occurs from
+ * any source.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_devlink_reload_empr_start(struct devlink *devlink,
+ bool netns_change,
+ enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
+ struct netlink_ext_ack *extack)
+{
+ struct ixgbe_adapter *adapter = devlink_priv(devlink);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u8 pending;
+ int err;
+
+ if (hw->mac.type != ixgbe_mac_e610)
+ return -EOPNOTSUPP;
+
+ err = ixgbe_get_pending_updates(adapter, &pending, extack);
+ if (err)
+ return err;
+
+ /* Pending is a bitmask of which flash banks have a pending update,
+ * including the main NVM bank, the Option ROM bank, and the netlist
+ * bank. If any of these bits are set, then there is a pending update
+ * waiting to be activated.
+ */
+ if (!pending) {
+ NL_SET_ERR_MSG_MOD(extack, "No pending firmware update");
+ return -ECANCELED;
+ }
+
+ if (adapter->fw_emp_reset_disabled) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "EMP reset is not available. To activate firmware, a reboot or power cycle is needed");
+ return -ECANCELED;
+ }
+
+ err = ixgbe_aci_nvm_update_empr(hw);
+ if (err)
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to trigger EMP device reset to reload firmware");
+
+ return err;
+}
+
+/* Wait for 10 sec with a 0.5 sec tick. EMPR takes no less than half a sec */
+#define IXGBE_DEVLINK_RELOAD_TIMEOUT_SEC 20
+
+/**
+ * ixgbe_devlink_reload_empr_finish - finishes EMP reset
+ * @devlink: pointer to the devlink instance
+ * @action: the action to perform.
+ * @limit: limits on what reload should do
+ * @actions_performed: actions performed
+ * @extack: netlink extended ACK structure
+ *
+ * Wait for new NVM to be loaded during EMP reset.
+ *
+ * Return: -ETIME when the timeout is exceeded, 0 on success.
+ */
+static int ixgbe_devlink_reload_empr_finish(struct devlink *devlink,
+ enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
+ u32 *actions_performed,
+ struct netlink_ext_ack *extack)
+{
+ struct ixgbe_adapter *adapter = devlink_priv(devlink);
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i = 0;
+ u32 fwsm;
+
+ do {
+ /* Right after triggering the EMP reset the FWSM register may not
+ * be cleared yet, so begin the loop with a delay to avoid reading
+ * a stale register value.
+ */
+ mdelay(500);
+
+ fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
+
+ if (i++ >= IXGBE_DEVLINK_RELOAD_TIMEOUT_SEC)
+ return -ETIME;
+
+ } while (!(fwsm & IXGBE_FWSM_FW_VAL_BIT));
+
+ *actions_performed = BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE);
+
+ adapter->flags2 &= ~(IXGBE_FLAG2_API_MISMATCH |
+ IXGBE_FLAG2_FW_ROLLBACK);
+
+ return 0;
+}
+
+static const struct devlink_ops ixgbe_devlink_ops = {
+ .info_get = ixgbe_devlink_info_get,
+ .supported_flash_update_params =
+ DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK,
+ .flash_update = ixgbe_flash_pldm_image,
+ .reload_actions = BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE),
+ .reload_down = ixgbe_devlink_reload_empr_start,
+ .reload_up = ixgbe_devlink_reload_empr_finish,
+};
+
+/**
+ * ixgbe_allocate_devlink - Allocate devlink instance
+ * @dev: device to allocate devlink for
+ *
+ * Allocate a devlink instance for this physical function.
+ *
+ * Return: pointer to the device adapter structure on success,
+ * ERR_PTR(-ENOMEM) when allocation failed.
+ */
+struct ixgbe_adapter *ixgbe_allocate_devlink(struct device *dev)
+{
+ struct ixgbe_adapter *adapter;
+ struct devlink *devlink;
+
+ devlink = devlink_alloc(&ixgbe_devlink_ops, sizeof(*adapter), dev);
+ if (!devlink)
+ return ERR_PTR(-ENOMEM);
+
+ adapter = devlink_priv(devlink);
+ adapter->devlink = devlink;
+
+ return adapter;
+}
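
devlink_alloc() reserves sizeof(*adapter) bytes of driver-private storage behind the devlink object, and devlink_priv() hands back a pointer into that area. A minimal userspace sketch of the same embed-the-private-area pattern, with hypothetical core_obj/core_alloc/core_priv names standing in for the devlink API:

#include <stdlib.h>

struct core_obj {				/* stands in for struct devlink */
	long refcount;				/* ...core-owned fields... */
	char priv[];				/* caller-private area */
};

static struct core_obj *core_alloc(size_t priv_size)
{
	return calloc(1, sizeof(struct core_obj) + priv_size);
}

static void *core_priv(struct core_obj *obj)
{
	return obj->priv;			/* mirrors devlink_priv() */
}

struct drv_adapter {				/* stands in for the adapter */
	struct core_obj *core;
};

int main(void)
{
	struct core_obj *obj = core_alloc(sizeof(struct drv_adapter));
	struct drv_adapter *adapter;

	if (!obj)
		return 1;

	adapter = core_priv(obj);
	adapter->core = obj;			/* same back-pointer idea as above */
	free(obj);
	return 0;
}
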
+
+/**
+ * ixgbe_devlink_set_switch_id - Set unique switch ID based on PCI DSN
+ * @adapter: pointer to the device adapter structure
+ * @ppid: struct with switch id information
+ */
+static void ixgbe_devlink_set_switch_id(struct ixgbe_adapter *adapter,
+ struct netdev_phys_item_id *ppid)
+{
+ u64 id = pci_get_dsn(adapter->pdev);
+
+ ppid->id_len = sizeof(id);
+ put_unaligned_be64(id, &ppid->id);
+}
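
put_unaligned_be64() stores the 64-bit DSN into ppid->id most significant byte first, independent of host endianness. A small hand-rolled userspace equivalent, where store_be64 is a hypothetical helper and the DSN value is made up:

#include <stdint.h>
#include <stdio.h>

/* Store a 64-bit value big-endian into a (possibly unaligned) byte buffer. */
static void store_be64(uint64_t val, uint8_t *dst)
{
	for (int i = 0; i < 8; i++)
		dst[i] = (uint8_t)(val >> (56 - 8 * i));
}

int main(void)
{
	uint8_t id[8];

	store_be64(0x0011223344556677ULL, id);			/* made-up DSN */
	printf("%02x %02x ... %02x\n", id[0], id[1], id[7]);	/* 00 11 ... 77 */
	return 0;
}
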
+
+/**
+ * ixgbe_devlink_register_port - Register devlink port
+ * @adapter: pointer to the device adapter structure
+ *
+ * Create and register a devlink_port for this physical function.
+ *
+ * Return: 0 on success, error code on failure.
+ */
+int ixgbe_devlink_register_port(struct ixgbe_adapter *adapter)
+{
+ struct devlink_port *devlink_port = &adapter->devlink_port;
+ struct devlink *devlink = adapter->devlink;
+ struct device *dev = &adapter->pdev->dev;
+ struct devlink_port_attrs attrs = {};
+ int err;
+
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
+ attrs.phys.port_number = adapter->hw.bus.func;
+ ixgbe_devlink_set_switch_id(adapter, &attrs.switch_id);
+
+ devlink_port_attrs_set(devlink_port, &attrs);
+
+ err = devl_port_register(devlink, devlink_port, 0);
+ if (err) {
+ dev_err(dev,
+ "devlink port registration failed, err %d\n", err);
+ }
+
+ return err;
+}
diff --git a/drivers/net/ethernet/intel/ixgbe/devlink/devlink.h b/drivers/net/ethernet/intel/ixgbe/devlink/devlink.h
new file mode 100644
index 000000000000..381558058048
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/devlink/devlink.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025, Intel Corporation. */
+
+#ifndef _IXGBE_DEVLINK_H_
+#define _IXGBE_DEVLINK_H_
+
+struct ixgbe_adapter *ixgbe_allocate_devlink(struct device *dev);
+int ixgbe_devlink_register_port(struct ixgbe_adapter *adapter);
+void ixgbe_devlink_init_regions(struct ixgbe_adapter *adapter);
+void ixgbe_devlink_destroy_regions(struct ixgbe_adapter *adapter);
+
+#endif /* _IXGBE_DEVLINK_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/devlink/region.c b/drivers/net/ethernet/intel/ixgbe/devlink/region.c
new file mode 100644
index 000000000000..76f6571c3c34
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/devlink/region.c
@@ -0,0 +1,290 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025, Intel Corporation. */
+
+#include "ixgbe.h"
+#include "devlink.h"
+
+#define IXGBE_DEVLINK_READ_BLK_SIZE (1024 * 1024)
+
+static const struct devlink_region_ops ixgbe_nvm_region_ops;
+static const struct devlink_region_ops ixgbe_sram_region_ops;
+
+static int ixgbe_devlink_parse_region(struct ixgbe_hw *hw,
+ const struct devlink_region_ops *ops,
+ bool *read_shadow_ram, u32 *nvm_size)
+{
+ if (ops == &ixgbe_nvm_region_ops) {
+ *read_shadow_ram = false;
+ *nvm_size = hw->flash.flash_size;
+ } else if (ops == &ixgbe_sram_region_ops) {
+ *read_shadow_ram = true;
+ *nvm_size = hw->flash.sr_words * 2u;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_devlink_nvm_snapshot - Capture a snapshot of the NVM content
+ * @devlink: the devlink instance
+ * @ops: the devlink region being snapshotted
+ * @extack: extended ACK response structure
+ * @data: on exit points to snapshot data buffer
+ *
+ * This function is called in response to the DEVLINK_CMD_REGION_NEW cmd.
+ *
+ * Capture a snapshot of the whole requested NVM region.
+ *
+ * No need to worry about freeing @data, the devlink core takes care of it.
+ *
+ * Return: 0 on success, -EOPNOTSUPP for unsupported regions, -EBUSY when
+ * the NVM cannot be locked, -ENOMEM when memory cannot be allocated, and
+ * -EIO when an error occurs during reading.
+ */
+static int ixgbe_devlink_nvm_snapshot(struct devlink *devlink,
+ const struct devlink_region_ops *ops,
+ struct netlink_ext_ack *extack, u8 **data)
+{
+ struct ixgbe_adapter *adapter = devlink_priv(devlink);
+ struct ixgbe_hw *hw = &adapter->hw;
+ bool read_shadow_ram;
+ u8 *nvm_data, *buf;
+ u32 nvm_size, left;
+ u8 num_blks;
+ int err;
+
+ err = ixgbe_devlink_parse_region(hw, ops, &read_shadow_ram, &nvm_size);
+ if (err)
+ return err;
+
+ nvm_data = kvzalloc(nvm_size, GFP_KERNEL);
+ if (!nvm_data)
+ return -ENOMEM;
+
+ num_blks = DIV_ROUND_UP(nvm_size, IXGBE_DEVLINK_READ_BLK_SIZE);
+ buf = nvm_data;
+ left = nvm_size;
+
+ for (int i = 0; i < num_blks; i++) {
+ u32 read_sz = min_t(u32, IXGBE_DEVLINK_READ_BLK_SIZE, left);
+
+ /* Acquire the NVM lock on each loop iteration because the total
+ * time needed to read the whole NVM exceeds the maximum period the
+ * lock may be held, defined by IXGBE_NVM_TIMEOUT.
+ */
+ err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to acquire NVM semaphore");
+ kvfree(nvm_data);
+ return -EBUSY;
+ }
+
+ err = ixgbe_read_flat_nvm(hw, i * IXGBE_DEVLINK_READ_BLK_SIZE,
+ &read_sz, buf, read_shadow_ram);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to read RAM content");
+ ixgbe_release_nvm(hw);
+ kvfree(nvm_data);
+ return -EIO;
+ }
+
+ ixgbe_release_nvm(hw);
+
+ buf += read_sz;
+ left -= read_sz;
+ }
+
+ *data = nvm_data;
+ return 0;
+}
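
The per-block acquire/release in the loop above keeps every individual hold of the NVM semaphore short. A generic userspace sketch of the same chunked-read-under-a-short-lived-lock pattern, where lock_nvm/read_nvm/unlock_nvm are hypothetical stand-ins for the driver helpers:

#include <stddef.h>
#include <string.h>

#define BLK_SIZE 4096

static int lock_nvm(void) { return 0; }			/* pretend acquire */
static void unlock_nvm(void) { }			/* pretend release */
static int read_nvm(size_t off, void *dst, size_t len)
{
	memset(dst, 0, len);				/* pretend read */
	return 0;
}

/* Read 'total' bytes in BLK_SIZE chunks, holding the lock only per chunk
 * so that no single hold can exceed the lock's timeout.
 */
static int read_all(unsigned char *buf, size_t total)
{
	for (size_t off = 0; off < total; off += BLK_SIZE) {
		size_t len = total - off < BLK_SIZE ? total - off : BLK_SIZE;

		if (lock_nvm())
			return -1;
		if (read_nvm(off, buf + off, len)) {
			unlock_nvm();
			return -1;
		}
		unlock_nvm();
	}
	return 0;
}

int main(void)
{
	unsigned char buf[10000];

	return read_all(buf, sizeof(buf));
}
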
+
+/**
+ * ixgbe_devlink_devcaps_snapshot - Capture a snapshot of device capabilities
+ * @devlink: the devlink instance
+ * @ops: the devlink region being snapshotted
+ * @extack: extended ACK response structure
+ * @data: on exit points to snapshot data buffer
+ *
+ * This function is called in response to the DEVLINK_CMD_REGION_NEW for
+ * the device-caps devlink region.
+ *
+ * Capture a snapshot of the device capabilities reported by firmware.
+ *
+ * No need to worry about freeing @data, the devlink core takes care of it.
+ *
+ * Return: 0 on success, -ENOMEM when memory cannot be allocated, or the
+ * return code of the reading operation.
+ */
+static int ixgbe_devlink_devcaps_snapshot(struct devlink *devlink,
+ const struct devlink_region_ops *ops,
+ struct netlink_ext_ack *extack,
+ u8 **data)
+{
+ struct ixgbe_adapter *adapter = devlink_priv(devlink);
+ struct ixgbe_aci_cmd_list_caps_elem *caps;
+ struct ixgbe_hw *hw = &adapter->hw;
+ int err;
+
+ caps = kvzalloc(IXGBE_ACI_MAX_BUFFER_SIZE, GFP_KERNEL);
+ if (!caps)
+ return -ENOMEM;
+
+ err = ixgbe_aci_list_caps(hw, caps, IXGBE_ACI_MAX_BUFFER_SIZE, NULL,
+ ixgbe_aci_opc_list_dev_caps);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to read device capabilities");
+ kvfree(caps);
+ return err;
+ }
+
+ *data = (u8 *)caps;
+ return 0;
+}
+
+/**
+ * ixgbe_devlink_nvm_read - Read a portion of NVM flash content
+ * @devlink: the devlink instance
+ * @ops: the devlink region to snapshot
+ * @extack: extended ACK response structure
+ * @offset: the offset to start at
+ * @size: the amount to read
+ * @data: the data buffer to read into
+ *
+ * This function is called in response to DEVLINK_CMD_REGION_READ to directly
+ * read a section of the NVM contents.
+ *
+ * Read from either the nvm-flash region or the shadow-ram region.
+ *
+ * Return: 0 on success, -EOPNOTSUPP for unsupported regions, -EBUSY when
+ * the NVM cannot be locked, -ERANGE when the buffer limit is exceeded, and
+ * -EIO when an error occurs during reading.
+ */
+static int ixgbe_devlink_nvm_read(struct devlink *devlink,
+ const struct devlink_region_ops *ops,
+ struct netlink_ext_ack *extack,
+ u64 offset, u32 size, u8 *data)
+{
+ struct ixgbe_adapter *adapter = devlink_priv(devlink);
+ struct ixgbe_hw *hw = &adapter->hw;
+ bool read_shadow_ram;
+ u32 nvm_size;
+ int err;
+
+ err = ixgbe_devlink_parse_region(hw, ops, &read_shadow_ram, &nvm_size);
+ if (err)
+ return err;
+
+ if (offset + size > nvm_size) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot read beyond the region size");
+ return -ERANGE;
+ }
+
+ err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore");
+ return -EBUSY;
+ }
+
+ err = ixgbe_read_flat_nvm(hw, (u32)offset, &size, data, read_shadow_ram);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to read NVM contents");
+ ixgbe_release_nvm(hw);
+ return -EIO;
+ }
+
+ ixgbe_release_nvm(hw);
+ return 0;
+}
+
+static const struct devlink_region_ops ixgbe_nvm_region_ops = {
+ .name = "nvm-flash",
+ .destructor = kvfree,
+ .snapshot = ixgbe_devlink_nvm_snapshot,
+ .read = ixgbe_devlink_nvm_read,
+};
+
+static const struct devlink_region_ops ixgbe_sram_region_ops = {
+ .name = "shadow-ram",
+ .destructor = kvfree,
+ .snapshot = ixgbe_devlink_nvm_snapshot,
+ .read = ixgbe_devlink_nvm_read,
+};
+
+static const struct devlink_region_ops ixgbe_devcaps_region_ops = {
+ .name = "device-caps",
+ .destructor = kvfree,
+ .snapshot = ixgbe_devlink_devcaps_snapshot,
+};
+
+/**
+ * ixgbe_devlink_init_regions - Initialize devlink regions
+ * @adapter: adapter instance
+ *
+ * Create devlink regions used to provide access to dump the contents of the
+ * device's flash memory.
+ */
+void ixgbe_devlink_init_regions(struct ixgbe_adapter *adapter)
+{
+ struct devlink *devlink = adapter->devlink;
+ struct device *dev = &adapter->pdev->dev;
+ u64 nvm_size, sram_size;
+
+ if (adapter->hw.mac.type != ixgbe_mac_e610)
+ return;
+
+ nvm_size = adapter->hw.flash.flash_size;
+ adapter->nvm_region = devl_region_create(devlink, &ixgbe_nvm_region_ops,
+ 1, nvm_size);
+ if (IS_ERR(adapter->nvm_region)) {
+ dev_err(dev,
+ "Failed to create NVM devlink region, err %ld\n",
+ PTR_ERR(adapter->nvm_region));
+ adapter->nvm_region = NULL;
+ }
+
+ sram_size = adapter->hw.flash.sr_words * 2u;
+ adapter->sram_region = devl_region_create(devlink, &ixgbe_sram_region_ops,
+ 1, sram_size);
+ if (IS_ERR(adapter->sram_region)) {
+ dev_err(dev,
+ "Failed to create shadow-ram devlink region, err %ld\n",
+ PTR_ERR(adapter->sram_region));
+ adapter->sram_region = NULL;
+ }
+
+ adapter->devcaps_region = devl_region_create(devlink,
+ &ixgbe_devcaps_region_ops,
+ 10, IXGBE_ACI_MAX_BUFFER_SIZE);
+ if (IS_ERR(adapter->devcaps_region)) {
+ dev_err(dev,
+ "Failed to create device-caps devlink region, err %ld\n",
+ PTR_ERR(adapter->devcaps_region));
+ adapter->devcaps_region = NULL;
+ }
+}
+
+/**
+ * ixgbe_devlink_destroy_regions - Destroy devlink regions
+ * @adapter: adapter instance
+ *
+ * Remove previously created regions for this adapter instance.
+ */
+void ixgbe_devlink_destroy_regions(struct ixgbe_adapter *adapter)
+{
+ if (adapter->hw.mac.type != ixgbe_mac_e610)
+ return;
+
+ if (adapter->nvm_region)
+ devl_region_destroy(adapter->nvm_region);
+
+ if (adapter->sram_region)
+ devl_region_destroy(adapter->sram_region);
+
+ if (adapter->devcaps_region)
+ devl_region_destroy(adapter->devcaps_region);
+}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 559b443c409f..47311b134a7a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#ifndef _IXGBE_H_
#define _IXGBE_H_
@@ -17,9 +17,12 @@
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
+#include <net/devlink.h>
+
#include "ixgbe_type.h"
#include "ixgbe_common.h"
#include "ixgbe_dcb.h"
+#include "ixgbe_e610.h"
#if IS_ENABLED(CONFIG_FCOE)
#define IXGBE_FCOE
#include "ixgbe_fcoe.h"
@@ -173,6 +176,7 @@ enum ixgbe_tx_flags {
#define VMDQ_P(p) ((p) + adapter->ring_feature[RING_F_VMDQ].offset)
#define IXGBE_82599_VF_DEVICE_ID 0x10ED
#define IXGBE_X540_VF_DEVICE_ID 0x1515
+#define IXGBE_E610_VF_DEVICE_ID 0x57AD
#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \
{ \
@@ -610,6 +614,11 @@ struct ixgbe_adapter {
struct bpf_prog *xdp_prog;
struct pci_dev *pdev;
struct mii_bus *mii_bus;
+ struct devlink *devlink;
+ struct devlink_port devlink_port;
+ struct devlink_region *nvm_region;
+ struct devlink_region *sram_region;
+ struct devlink_region *devcaps_region;
unsigned long state;
@@ -654,6 +663,7 @@ struct ixgbe_adapter {
#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP BIT(9)
#define IXGBE_FLAG2_PTP_PPS_ENABLED BIT(10)
#define IXGBE_FLAG2_PHY_INTERRUPT BIT(11)
+#define IXGBE_FLAG2_FW_ASYNC_EVENT BIT(12)
#define IXGBE_FLAG2_VLAN_PROMISC BIT(13)
#define IXGBE_FLAG2_EEE_CAPABLE BIT(14)
#define IXGBE_FLAG2_EEE_ENABLED BIT(15)
@@ -661,6 +671,11 @@ struct ixgbe_adapter {
#define IXGBE_FLAG2_IPSEC_ENABLED BIT(17)
#define IXGBE_FLAG2_VF_IPSEC_ENABLED BIT(18)
#define IXGBE_FLAG2_AUTO_DISABLE_VF BIT(19)
+#define IXGBE_FLAG2_PHY_FW_LOAD_FAILED BIT(20)
+#define IXGBE_FLAG2_NO_MEDIA BIT(21)
+#define IXGBE_FLAG2_MOD_POWER_UNSUPPORTED BIT(22)
+#define IXGBE_FLAG2_API_MISMATCH BIT(23)
+#define IXGBE_FLAG2_FW_ROLLBACK BIT(24)
/* Tx fast path data */
int num_tx_queues;
@@ -749,6 +764,8 @@ struct ixgbe_adapter {
u32 atr_sample_rate;
spinlock_t fdir_perfect_lock;
+ bool fw_emp_reset_disabled;
+
#ifdef IXGBE_FCOE
struct ixgbe_fcoe fcoe;
#endif /* IXGBE_FCOE */
@@ -793,6 +810,7 @@ struct ixgbe_adapter {
u32 vferr_refcount;
struct ixgbe_mac_addr *mac_table;
struct kobject *info_kobj;
+ u16 lse_mask;
#ifdef CONFIG_IXGBE_HWMON
struct hwmon_buff *ixgbe_hwmon_buff;
#endif /* CONFIG_IXGBE_HWMON */
@@ -823,6 +841,17 @@ struct ixgbe_adapter {
spinlock_t vfs_lock;
};
+struct ixgbe_netdevice_priv {
+ struct ixgbe_adapter *adapter;
+};
+
+static inline struct ixgbe_adapter *ixgbe_from_netdev(struct net_device *netdev)
+{
+ struct ixgbe_netdevice_priv *priv = netdev_priv(netdev);
+
+ return priv->adapter;
+}
+
static inline int ixgbe_determine_xdp_q_idx(int cpu)
{
if (static_key_enabled(&ixgbe_xdp_locking_key))
@@ -849,6 +878,7 @@ static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
return IXGBE_MAX_RSS_INDICES_X550;
default:
return 0;
@@ -874,6 +904,7 @@ enum ixgbe_state_t {
__IXGBE_PTP_RUNNING,
__IXGBE_PTP_TX_IN_PROGRESS,
__IXGBE_RESET_REQUESTED,
+ __IXGBE_PHY_INIT_COMPLETE,
};
struct ixgbe_cb {
@@ -896,6 +927,7 @@ enum ixgbe_boards {
board_x550em_x_fw,
board_x550em_a,
board_x550em_a_fw,
+ board_e610,
};
extern const struct ixgbe_info ixgbe_82598_info;
@@ -906,6 +938,7 @@ extern const struct ixgbe_info ixgbe_X550EM_x_info;
extern const struct ixgbe_info ixgbe_x550em_x_fw_info;
extern const struct ixgbe_info ixgbe_x550em_a_info;
extern const struct ixgbe_info ixgbe_x550em_a_fw_info;
+extern const struct ixgbe_info ixgbe_e610_info;
#ifdef CONFIG_IXGBE_DCB
extern const struct dcbnl_rtnl_ops ixgbe_dcbnl_ops;
#endif
@@ -934,6 +967,8 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter);
int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
u16 subdevice_id);
+void ixgbe_set_fw_version_e610(struct ixgbe_adapter *adapter);
+void ixgbe_refresh_fw_version(struct ixgbe_adapter *adapter);
#ifdef CONFIG_PCI_IOV
void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter);
#endif
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index 4aaaea3b5f8f..444da982593f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -1169,6 +1169,7 @@ static const struct ixgbe_eeprom_operations eeprom_ops_82598 = {
.calc_checksum = &ixgbe_calc_eeprom_checksum_generic,
.validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
.update_checksum = &ixgbe_update_eeprom_checksum_generic,
+ .read_pba_string = &ixgbe_read_pba_string_generic,
};
static const struct ixgbe_phy_operations phy_ops_82598 = {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index cdaf087b4e85..d5b1b974b4a3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#include <linux/pci.h>
#include <linux/delay.h>
@@ -1615,6 +1615,7 @@ int ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);
break;
default:
@@ -2229,6 +2230,7 @@ static const struct ixgbe_eeprom_operations eeprom_ops_82599 = {
.calc_checksum = &ixgbe_calc_eeprom_checksum_generic,
.validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
.update_checksum = &ixgbe_update_eeprom_checksum_generic,
+ .read_pba_string = &ixgbe_read_pba_string_generic,
};
static const struct ixgbe_phy_operations phy_ops_82599 = {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 3be1bfb16498..5784d5d1896e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#include <linux/pci.h>
#include <linux/delay.h>
@@ -58,6 +58,7 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
switch (hw->device_id) {
case IXGBE_DEV_ID_X550EM_A_SFP:
case IXGBE_DEV_ID_X550EM_A_SFP_N:
+ case IXGBE_DEV_ID_E610_SFP:
supported = false;
break;
default:
@@ -88,6 +89,8 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_X550EM_A_10G_T:
case IXGBE_DEV_ID_X550EM_A_1G_T:
case IXGBE_DEV_ID_X550EM_A_1G_T_L:
+ case IXGBE_DEV_ID_E610_10G_T:
+ case IXGBE_DEV_ID_E610_2_5G_T:
supported = true;
break;
default:
@@ -329,6 +332,7 @@ int ixgbe_start_hw_generic(struct ixgbe_hw *hw)
* Devices in the second generation:
* 82599
* X540
+ * E610
**/
int ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
{
@@ -469,9 +473,14 @@ int ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
}
}
- if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) {
+ if (hw->mac.type == ixgbe_mac_X550 ||
+ hw->mac.type == ixgbe_mac_X540 ||
+ hw->mac.type == ixgbe_mac_e610) {
if (hw->phy.id == 0)
hw->phy.ops.identify(hw);
+ }
+
+ if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) {
hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL, MDIO_MMD_PCS, &i);
hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH, MDIO_MMD_PCS, &i);
hw->phy.ops.read_reg(hw, IXGBE_LDPCECL, MDIO_MMD_PCS, &i);
@@ -660,7 +669,11 @@ int ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
hw->bus.type = ixgbe_bus_type_pci_express;
/* Get the negotiated link width and speed from PCI config space */
- link_status = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_LINK_STATUS);
+ if (hw->mac.type == ixgbe_mac_e610)
+ link_status = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_LINK_STATUS_E610);
+ else
+ link_status = ixgbe_read_pci_cfg_word(hw,
+ IXGBE_PCI_LINK_STATUS);
hw->bus.width = ixgbe_convert_bus_width(link_status);
hw->bus.speed = ixgbe_convert_bus_speed(link_status);
@@ -2918,6 +2931,10 @@ u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
break;
+ case ixgbe_mac_e610:
+ pcie_offset = IXGBE_PCIE_MSIX_E610_CAPS;
+ max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
+ break;
default:
return 1;
}
@@ -3366,7 +3383,8 @@ int ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
*speed = IXGBE_LINK_SPEED_1GB_FULL;
break;
case IXGBE_LINKS_SPEED_100_82599:
- if ((hw->mac.type >= ixgbe_mac_X550) &&
+ if ((hw->mac.type >= ixgbe_mac_X550 ||
+ hw->mac.type == ixgbe_mac_e610) &&
(links_reg & IXGBE_LINKS_SPEED_NON_STD))
*speed = IXGBE_LINK_SPEED_5GB_FULL;
else
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index 6493abf189de..6639069ad528 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -194,6 +194,8 @@ u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg);
dev_err(&adapter->pdev->dev, format, ## arg)
#define e_dev_notice(format, arg...) \
dev_notice(&adapter->pdev->dev, format, ## arg)
+#define e_dbg(msglvl, format, arg...) \
+ netif_dbg(adapter, msglvl, adapter->netdev, format, ## arg)
#define e_info(msglvl, format, arg...) \
netif_info(adapter, msglvl, adapter->netdev, format, ## arg)
#define e_err(msglvl, format, arg...) \
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index f2709b10c2e5..3dd5a16a14df 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#include "ixgbe.h"
#include <linux/dcbnl.h>
@@ -118,14 +118,14 @@ static int ixgbe_copy_dcb_cfg(struct ixgbe_adapter *adapter, int tc_max)
static u8 ixgbe_dcbnl_get_state(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
return !!(adapter->flags & IXGBE_FLAG_DCB_ENABLED);
}
static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
/* Fail command if not in CEE mode */
if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
@@ -142,7 +142,7 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev,
u8 *perm_addr)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
int i, j;
memset(perm_addr, 0xff, MAX_ADDR_LEN);
@@ -154,6 +154,7 @@ static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev,
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
case ixgbe_mac_X550:
+ case ixgbe_mac_e610:
for (j = 0; j < netdev->addr_len; j++, i++)
perm_addr[i] = adapter->hw.mac.san_addr[j];
break;
@@ -166,7 +167,7 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
u8 prio, u8 bwg_id, u8 bw_pct,
u8 up_map)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
if (prio != DCB_ATTR_VALUE_UNDEFINED)
adapter->temp_dcb_cfg.tc_config[tc].path[0].prio_type = prio;
@@ -183,7 +184,7 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
u8 bw_pct)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] = bw_pct;
}
@@ -192,7 +193,7 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
u8 prio, u8 bwg_id, u8 bw_pct,
u8 up_map)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
if (prio != DCB_ATTR_VALUE_UNDEFINED)
adapter->temp_dcb_cfg.tc_config[tc].path[1].prio_type = prio;
@@ -209,7 +210,7 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
u8 bw_pct)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] = bw_pct;
}
@@ -218,7 +219,7 @@ static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc,
u8 *prio, u8 *bwg_id, u8 *bw_pct,
u8 *up_map)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
*prio = adapter->dcb_cfg.tc_config[tc].path[0].prio_type;
*bwg_id = adapter->dcb_cfg.tc_config[tc].path[0].bwg_id;
@@ -229,7 +230,7 @@ static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc,
static void ixgbe_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
u8 *bw_pct)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
*bw_pct = adapter->dcb_cfg.bw_percentage[0][bwg_id];
}
@@ -238,7 +239,7 @@ static void ixgbe_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int tc,
u8 *prio, u8 *bwg_id, u8 *bw_pct,
u8 *up_map)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
*prio = adapter->dcb_cfg.tc_config[tc].path[1].prio_type;
*bwg_id = adapter->dcb_cfg.tc_config[tc].path[1].bwg_id;
@@ -249,7 +250,7 @@ static void ixgbe_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int tc,
static void ixgbe_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
u8 *bw_pct)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
*bw_pct = adapter->dcb_cfg.bw_percentage[1][bwg_id];
}
@@ -257,7 +258,7 @@ static void ixgbe_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
static void ixgbe_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority,
u8 setting)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc = setting;
if (adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc !=
@@ -268,14 +269,14 @@ static void ixgbe_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority,
static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
u8 *setting)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
*setting = adapter->dcb_cfg.tc_config[priority].dcb_pfc;
}
static void ixgbe_dcbnl_devreset(struct net_device *dev)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
usleep_range(1000, 2000);
@@ -294,7 +295,7 @@ static void ixgbe_dcbnl_devreset(struct net_device *dev)
static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_dcb_config *dcb_cfg = &adapter->dcb_cfg;
struct ixgbe_hw *hw = &adapter->hw;
int ret = DCB_NO_HW_CHG;
@@ -382,7 +383,7 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
static u8 ixgbe_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
switch (capid) {
case DCB_CAP_ATTR_PG:
@@ -419,7 +420,7 @@ static u8 ixgbe_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap)
static int ixgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
switch (tcid) {
@@ -446,14 +447,14 @@ static int ixgbe_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num)
static u8 ixgbe_dcbnl_getpfcstate(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
return adapter->dcb_cfg.pfc_mode_enable;
}
static void ixgbe_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
adapter->temp_dcb_cfg.pfc_mode_enable = state;
}
@@ -470,7 +471,7 @@ static void ixgbe_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
*/
static int ixgbe_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct dcb_app app = {
.selector = idtype,
.protocol = id,
@@ -485,7 +486,7 @@ static int ixgbe_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
static int ixgbe_dcbnl_ieee_getets(struct net_device *dev,
struct ieee_ets *ets)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ieee_ets *my_ets = adapter->ixgbe_ieee_ets;
ets->ets_cap = adapter->dcb_cfg.num_tcs.pg_tcs;
@@ -505,7 +506,7 @@ static int ixgbe_dcbnl_ieee_getets(struct net_device *dev,
static int ixgbe_dcbnl_ieee_setets(struct net_device *dev,
struct ieee_ets *ets)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
int i, err;
__u8 max_tc = 0;
@@ -558,7 +559,7 @@ static int ixgbe_dcbnl_ieee_setets(struct net_device *dev,
static int ixgbe_dcbnl_ieee_getpfc(struct net_device *dev,
struct ieee_pfc *pfc)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ieee_pfc *my_pfc = adapter->ixgbe_ieee_pfc;
int i;
@@ -583,7 +584,7 @@ static int ixgbe_dcbnl_ieee_getpfc(struct net_device *dev,
static int ixgbe_dcbnl_ieee_setpfc(struct net_device *dev,
struct ieee_pfc *pfc)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ixgbe_hw *hw = &adapter->hw;
u8 *prio_tc;
int err;
@@ -615,7 +616,7 @@ static int ixgbe_dcbnl_ieee_setpfc(struct net_device *dev,
static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev,
struct dcb_app *app)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
int err;
if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
@@ -660,7 +661,7 @@ static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev,
static int ixgbe_dcbnl_ieee_delapp(struct net_device *dev,
struct dcb_app *app)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
int err;
if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
@@ -704,13 +705,13 @@ static int ixgbe_dcbnl_ieee_delapp(struct net_device *dev,
static u8 ixgbe_dcbnl_getdcbx(struct net_device *dev)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
return adapter->dcbx_cap;
}
static u8 ixgbe_dcbnl_setdcbx(struct net_device *dev, u8 mode)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ieee_ets ets = {0};
struct ieee_pfc pfc = {0};
int err = 0;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c
new file mode 100644
index 000000000000..71ea25de1bac
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c
@@ -0,0 +1,3995 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2024 Intel Corporation. */
+
+#include "ixgbe_common.h"
+#include "ixgbe_e610.h"
+#include "ixgbe_x550.h"
+#include "ixgbe_type.h"
+#include "ixgbe_x540.h"
+#include "ixgbe_mbx.h"
+#include "ixgbe_phy.h"
+
+/**
+ * ixgbe_should_retry_aci_send_cmd_execute - decide if ACI command should
+ * be resent
+ * @opcode: ACI opcode
+ *
+ * Check if ACI command should be sent again depending on the provided opcode.
+ * It may happen when CSR is busy during link state changes.
+ *
+ * Return: true if the sending command routine should be repeated,
+ * otherwise false.
+ */
+static bool ixgbe_should_retry_aci_send_cmd_execute(u16 opcode)
+{
+ switch (opcode) {
+ case ixgbe_aci_opc_disable_rxen:
+ case ixgbe_aci_opc_get_phy_caps:
+ case ixgbe_aci_opc_get_link_status:
+ case ixgbe_aci_opc_get_link_topo:
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * ixgbe_aci_send_cmd_execute - execute sending FW Admin Command to FW Admin
+ * Command Interface
+ * @hw: pointer to the HW struct
+ * @desc: descriptor describing the command
+ * @buf: buffer to use for indirect commands (NULL for direct commands)
+ * @buf_size: size of buffer for indirect commands (0 for direct commands)
+ *
+ * Admin Command is sent using CSR by setting descriptor and buffer in specific
+ * registers.
+ *
+ * Return: the exit code of the operation.
+ * * - 0 - success.
+ * * - -EIO - CSR mechanism is not enabled.
+ * * - -EBUSY - CSR mechanism is busy.
+ * * - -EINVAL - buf_size is too big or
+ * invalid argument buf or buf_size.
+ * * - -ETIME - Admin Command timeout.
+ * * - -EIO - invalid state of the HICR register,
+ * Admin Command failed because a bad opcode was returned, or
+ * Admin Command failed with an error reported by firmware.
+ */
+static int ixgbe_aci_send_cmd_execute(struct ixgbe_hw *hw,
+ struct ixgbe_aci_desc *desc,
+ void *buf, u16 buf_size)
+{
+ u16 opcode, buf_tail_size = buf_size % 4;
+ u32 *raw_desc = (u32 *)desc;
+ u32 hicr, i, buf_tail = 0;
+ bool valid_buf = false;
+
+ hw->aci.last_status = IXGBE_ACI_RC_OK;
+
+ /* It's necessary to check if mechanism is enabled */
+ hicr = IXGBE_READ_REG(hw, IXGBE_PF_HICR);
+
+ if (!(hicr & IXGBE_PF_HICR_EN))
+ return -EIO;
+
+ if (hicr & IXGBE_PF_HICR_C) {
+ hw->aci.last_status = IXGBE_ACI_RC_EBUSY;
+ return -EBUSY;
+ }
+
+ opcode = le16_to_cpu(desc->opcode);
+
+ if (buf_size > IXGBE_ACI_MAX_BUFFER_SIZE)
+ return -EINVAL;
+
+ if (buf)
+ desc->flags |= cpu_to_le16(IXGBE_ACI_FLAG_BUF);
+
+ if (desc->flags & cpu_to_le16(IXGBE_ACI_FLAG_BUF)) {
+ if ((buf && !buf_size) ||
+ (!buf && buf_size))
+ return -EINVAL;
+ if (buf && buf_size)
+ valid_buf = true;
+ }
+
+ if (valid_buf) {
+ if (buf_tail_size)
+ memcpy(&buf_tail, buf + buf_size - buf_tail_size,
+ buf_tail_size);
+
+ if (((buf_size + 3) & ~0x3) > IXGBE_ACI_LG_BUF)
+ desc->flags |= cpu_to_le16(IXGBE_ACI_FLAG_LB);
+
+ desc->datalen = cpu_to_le16(buf_size);
+
+ if (desc->flags & cpu_to_le16(IXGBE_ACI_FLAG_RD)) {
+ for (i = 0; i < buf_size / 4; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_PF_HIBA(i), ((u32 *)buf)[i]);
+ if (buf_tail_size)
+ IXGBE_WRITE_REG(hw, IXGBE_PF_HIBA(i), buf_tail);
+ }
+ }
+
+ /* Descriptor is written to specific registers */
+ for (i = 0; i < IXGBE_ACI_DESC_SIZE_IN_DWORDS; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_PF_HIDA(i), raw_desc[i]);
+
+ /* SW has to set the PF_HICR.C bit and clear the PF_HICR.SV and
+ * PF_HICR.EV bits.
+ */
+ hicr = (IXGBE_READ_REG(hw, IXGBE_PF_HICR) | IXGBE_PF_HICR_C) &
+ ~(IXGBE_PF_HICR_SV | IXGBE_PF_HICR_EV);
+ IXGBE_WRITE_REG(hw, IXGBE_PF_HICR, hicr);
+
+#define MAX_SLEEP_RESP_US 1000
+#define MAX_TMOUT_RESP_SYNC_US 100000000
+
+ /* Wait for sync Admin Command response */
+ read_poll_timeout(IXGBE_READ_REG, hicr,
+ (hicr & IXGBE_PF_HICR_SV) ||
+ !(hicr & IXGBE_PF_HICR_C),
+ MAX_SLEEP_RESP_US, MAX_TMOUT_RESP_SYNC_US, true, hw,
+ IXGBE_PF_HICR);
+
+#define MAX_TMOUT_RESP_ASYNC_US 150000000
+
+ /* Wait for async Admin Command response */
+ read_poll_timeout(IXGBE_READ_REG, hicr,
+ (hicr & IXGBE_PF_HICR_EV) ||
+ !(hicr & IXGBE_PF_HICR_C),
+ MAX_SLEEP_RESP_US, MAX_TMOUT_RESP_ASYNC_US, true, hw,
+ IXGBE_PF_HICR);
+
+ /* Read sync Admin Command response */
+ if ((hicr & IXGBE_PF_HICR_SV)) {
+ for (i = 0; i < IXGBE_ACI_DESC_SIZE_IN_DWORDS; i++)
+ raw_desc[i] = IXGBE_READ_REG(hw, IXGBE_PF_HIDA(i));
+ }
+
+ /* Read async Admin Command response */
+ if ((hicr & IXGBE_PF_HICR_EV) && !(hicr & IXGBE_PF_HICR_C)) {
+ for (i = 0; i < IXGBE_ACI_DESC_SIZE_IN_DWORDS; i++)
+ raw_desc[i] = IXGBE_READ_REG(hw, IXGBE_PF_HIDA_2(i));
+ }
+
+ /* Handle timeout and invalid state of HICR register */
+ if (hicr & IXGBE_PF_HICR_C)
+ return -ETIME;
+
+ if (!(hicr & IXGBE_PF_HICR_SV) && !(hicr & IXGBE_PF_HICR_EV))
+ return -EIO;
+
+ /* For every command other than 0x0014 treat opcode mismatch
+ * as an error. Response to 0x0014 command read from HIDA_2
+ * is a descriptor of an event which is expected to contain
+ * a different opcode than the command.
+ */
+ if (desc->opcode != cpu_to_le16(opcode) &&
+ opcode != ixgbe_aci_opc_get_fw_event)
+ return -EIO;
+
+ if (desc->retval) {
+ hw->aci.last_status = (enum ixgbe_aci_err)
+ le16_to_cpu(desc->retval);
+ return -EIO;
+ }
+
+ /* Write the response values to the buffer */
+ if (valid_buf) {
+ for (i = 0; i < buf_size / 4; i++)
+ ((u32 *)buf)[i] = IXGBE_READ_REG(hw, IXGBE_PF_HIBA(i));
+ if (buf_tail_size) {
+ buf_tail = IXGBE_READ_REG(hw, IXGBE_PF_HIBA(i));
+ memcpy(buf + buf_size - buf_tail_size, &buf_tail,
+ buf_tail_size);
+ }
+ }
+
+ return 0;
+}
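
The buf_tail handling above exists because the host interface buffer registers are written one dword at a time: when buf_size is not a multiple of four, the trailing 1-3 bytes are first copied into a zeroed u32 so that the final register write neither reads past the caller's buffer nor writes uninitialized padding. A standalone sketch of that packing, with write_reg32 as a hypothetical stand-in for the register write:

#include <stdint.h>
#include <string.h>

static void write_reg32(unsigned int idx, uint32_t val)
{
	(void)idx;
	(void)val;				/* pretend MMIO write */
}

/* Copy 'len' bytes out as 32-bit register writes, padding the last word. */
static void write_buf(const uint8_t *buf, unsigned int len)
{
	unsigned int i, tail_len = len % 4;
	uint32_t tail = 0;

	for (i = 0; i < len / 4; i++) {
		uint32_t word;

		memcpy(&word, buf + 4 * i, 4);	/* avoids unaligned access */
		write_reg32(i, word);
	}

	if (tail_len) {
		memcpy(&tail, buf + len - tail_len, tail_len);
		write_reg32(i, tail);		/* last 1-3 bytes, zero padded */
	}
}

int main(void)
{
	uint8_t msg[7] = { 1, 2, 3, 4, 5, 6, 7 };

	write_buf(msg, sizeof(msg));
	return 0;
}
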
+
+/**
+ * ixgbe_aci_send_cmd - send FW Admin Command to FW Admin Command Interface
+ * @hw: pointer to the HW struct
+ * @desc: descriptor describing the command
+ * @buf: buffer to use for indirect commands (NULL for direct commands)
+ * @buf_size: size of buffer for indirect commands (0 for direct commands)
+ *
+ * Helper function to send FW Admin Commands to the FW Admin Command Interface.
+ *
+ * Retry sending the FW Admin Command multiple times to the FW ACI
+ * if the EBUSY Admin Command error is returned.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_send_cmd(struct ixgbe_hw *hw, struct ixgbe_aci_desc *desc,
+ void *buf, u16 buf_size)
+{
+ u16 opcode = le16_to_cpu(desc->opcode);
+ struct ixgbe_aci_desc desc_cpy;
+ enum ixgbe_aci_err last_status;
+ u8 idx = 0, *buf_cpy = NULL;
+ bool is_cmd_for_retry;
+ unsigned long timeout;
+ int err;
+
+ is_cmd_for_retry = ixgbe_should_retry_aci_send_cmd_execute(opcode);
+ if (is_cmd_for_retry) {
+ if (buf) {
+ buf_cpy = kmalloc(buf_size, GFP_KERNEL);
+ if (!buf_cpy)
+ return -ENOMEM;
+ memcpy(buf_cpy, buf, buf_size);
+ }
+ desc_cpy = *desc;
+ }
+
+ timeout = jiffies + msecs_to_jiffies(IXGBE_ACI_SEND_TIMEOUT_MS);
+ do {
+ mutex_lock(&hw->aci.lock);
+ err = ixgbe_aci_send_cmd_execute(hw, desc, buf, buf_size);
+ last_status = hw->aci.last_status;
+ mutex_unlock(&hw->aci.lock);
+
+ if (!is_cmd_for_retry || !err ||
+ last_status != IXGBE_ACI_RC_EBUSY)
+ break;
+
+ if (buf)
+ memcpy(buf, buf_cpy, buf_size);
+ *desc = desc_cpy;
+
+ msleep(IXGBE_ACI_SEND_DELAY_TIME_MS);
+ } while (++idx < IXGBE_ACI_SEND_MAX_EXECUTE &&
+ time_before(jiffies, timeout));
+
+ kfree(buf_cpy);
+
+ return err;
+}
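
The retry loop above is bounded twice, by IXGBE_ACI_SEND_MAX_EXECUTE attempts and by a wall-clock deadline. A userspace sketch of the same double bound, where send_once and the constants are hypothetical:

#include <time.h>

#define MAX_ATTEMPTS	3
#define TIMEOUT_SEC	2

static int send_once(void)
{
	return -1;				/* pretend the interface is busy */
}

static int send_with_retry(void)
{
	time_t deadline = time(NULL) + TIMEOUT_SEC;
	int attempt = 0, err;

	do {
		err = send_once();
		if (!err)			/* success, stop retrying */
			break;
		/* a real implementation would sleep briefly between attempts */
	} while (++attempt < MAX_ATTEMPTS && time(NULL) < deadline);

	return err;
}

int main(void)
{
	return send_with_retry() ? 1 : 0;
}
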
+
+/**
+ * ixgbe_aci_check_event_pending - check if there are any pending events
+ * @hw: pointer to the HW struct
+ *
+ * Determine if there are any pending events.
+ *
+ * Return: true if there are any currently pending events,
+ * otherwise false.
+ */
+bool ixgbe_aci_check_event_pending(struct ixgbe_hw *hw)
+{
+ u32 ep_bit_mask = hw->bus.func ? GL_FWSTS_EP_PF1 : GL_FWSTS_EP_PF0;
+ u32 fwsts = IXGBE_READ_REG(hw, GL_FWSTS);
+
+ return (fwsts & ep_bit_mask) ? true : false;
+}
+
+/**
+ * ixgbe_aci_get_event - get an event from ACI
+ * @hw: pointer to the HW struct
+ * @e: event information structure
+ * @pending: optional flag signaling that there are more pending events
+ *
+ * Obtain an event from ACI and return its content
+ * through 'e' using ACI command (0x0014).
+ * Provide information if there are more events
+ * to retrieve through 'pending'.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_get_event(struct ixgbe_hw *hw, struct ixgbe_aci_event *e,
+ bool *pending)
+{
+ struct ixgbe_aci_desc desc;
+ int err;
+
+ if (!e || (!e->msg_buf && e->buf_len))
+ return -EINVAL;
+
+ mutex_lock(&hw->aci.lock);
+
+ /* Check if there are any events pending */
+ if (!ixgbe_aci_check_event_pending(hw)) {
+ err = -ENOENT;
+ goto aci_get_event_exit;
+ }
+
+ /* Obtain pending event */
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_fw_event);
+ err = ixgbe_aci_send_cmd_execute(hw, &desc, e->msg_buf, e->buf_len);
+ if (err)
+ goto aci_get_event_exit;
+
+ /* Returned 0x0014 opcode indicates that no event was obtained */
+ if (desc.opcode == cpu_to_le16(ixgbe_aci_opc_get_fw_event)) {
+ err = -ENOENT;
+ goto aci_get_event_exit;
+ }
+
+ /* Determine size of event data */
+ e->msg_len = min_t(u16, le16_to_cpu(desc.datalen), e->buf_len);
+ /* Write event descriptor to event info structure */
+ memcpy(&e->desc, &desc, sizeof(e->desc));
+
+ /* Check if there are any further events pending */
+ if (pending)
+ *pending = ixgbe_aci_check_event_pending(hw);
+
+aci_get_event_exit:
+ mutex_unlock(&hw->aci.lock);
+
+ return err;
+}
+
+/**
+ * ixgbe_fill_dflt_direct_cmd_desc - fill ACI descriptor with default values.
+ * @desc: pointer to the temp descriptor (non DMA mem)
+ * @opcode: the opcode can be used to decide which flags to turn off or on
+ *
+ * Helper function to fill the descriptor desc with default values
+ * and the provided opcode.
+ */
+void ixgbe_fill_dflt_direct_cmd_desc(struct ixgbe_aci_desc *desc, u16 opcode)
+{
+ /* Zero out the desc. */
+ memset(desc, 0, sizeof(*desc));
+ desc->opcode = cpu_to_le16(opcode);
+ desc->flags = cpu_to_le16(IXGBE_ACI_FLAG_SI);
+}
+
+/**
+ * ixgbe_aci_get_fw_ver - Get the firmware version
+ * @hw: pointer to the HW struct
+ *
+ * Get the firmware version using ACI command (0x0001).
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_aci_get_fw_ver(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_ver *resp;
+ struct ixgbe_aci_desc desc;
+ int err;
+
+ resp = &desc.params.get_ver;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_ver);
+
+ err = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+ if (!err) {
+ hw->fw_branch = resp->fw_branch;
+ hw->fw_maj_ver = resp->fw_major;
+ hw->fw_min_ver = resp->fw_minor;
+ hw->fw_patch = resp->fw_patch;
+ hw->fw_build = le32_to_cpu(resp->fw_build);
+ hw->api_branch = resp->api_branch;
+ hw->api_maj_ver = resp->api_major;
+ hw->api_min_ver = resp->api_minor;
+ hw->api_patch = resp->api_patch;
+ }
+
+ return err;
+}
+
+/**
+ * ixgbe_aci_req_res - request a common resource
+ * @hw: pointer to the HW struct
+ * @res: resource ID
+ * @access: access type
+ * @sdp_number: resource number
+ * @timeout: the maximum time in ms that the driver may hold the resource
+ *
+ * Requests a common resource using the ACI command (0x0008).
+ * Specifies the maximum time the driver may hold the resource.
+ * If the requested resource is currently occupied by some other driver,
+ * a busy return value is returned and the timeout field value indicates the
+ * maximum time the current owner has to free it.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_aci_req_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res,
+ enum ixgbe_aci_res_access_type access,
+ u8 sdp_number, u32 *timeout)
+{
+ struct ixgbe_aci_cmd_req_res *cmd_resp;
+ struct ixgbe_aci_desc desc;
+ int err;
+
+ cmd_resp = &desc.params.res_owner;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_req_res);
+
+ cmd_resp->res_id = cpu_to_le16(res);
+ cmd_resp->access_type = cpu_to_le16(access);
+ cmd_resp->res_number = cpu_to_le32(sdp_number);
+ cmd_resp->timeout = cpu_to_le32(*timeout);
+ *timeout = 0;
+
+ err = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+
+ /* If the resource is held by some other driver, the command completes
+ * with a busy return value and the timeout field indicates the maximum
+ * time the current owner of the resource has to free it.
+ */
+ if (!err || hw->aci.last_status == IXGBE_ACI_RC_EBUSY)
+ *timeout = le32_to_cpu(cmd_resp->timeout);
+
+ return err;
+}
+
+/**
+ * ixgbe_aci_release_res - release a common resource using ACI
+ * @hw: pointer to the HW struct
+ * @res: resource ID
+ * @sdp_number: resource number
+ *
+ * Release a common resource using ACI command (0x0009).
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_aci_release_res(struct ixgbe_hw *hw,
+ enum ixgbe_aci_res_ids res, u8 sdp_number)
+{
+ struct ixgbe_aci_cmd_req_res *cmd;
+ struct ixgbe_aci_desc desc;
+
+ cmd = &desc.params.res_owner;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_release_res);
+
+ cmd->res_id = cpu_to_le16(res);
+ cmd->res_number = cpu_to_le32(sdp_number);
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_acquire_res - acquire the ownership of a resource
+ * @hw: pointer to the HW structure
+ * @res: resource ID
+ * @access: access type (read or write)
+ * @timeout: timeout in milliseconds
+ *
+ * Make an attempt to acquire the ownership of a resource using
+ * ixgbe_aci_req_res to utilize the ACI.
+ * If some other driver has previously acquired the resource and performed
+ * any necessary updates, -EALREADY is returned and the caller does not
+ * obtain the resource and has no further work to do.
+ * If needed, the function will poll until the current lock owner times out.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_acquire_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res,
+ enum ixgbe_aci_res_access_type access, u32 timeout)
+{
+#define IXGBE_RES_POLLING_DELAY_MS 10
+ u32 delay = IXGBE_RES_POLLING_DELAY_MS;
+ u32 res_timeout = timeout;
+ u32 retry_timeout;
+ int err;
+
+ err = ixgbe_aci_req_res(hw, res, access, 0, &res_timeout);
+
+ /* A return code of -EALREADY means that another driver has
+ * previously acquired the resource and performed any necessary updates;
+ * in this case the caller does not obtain the resource and has no
+ * further work to do.
+ */
+ if (err == -EALREADY)
+ return err;
+
+ /* If necessary, poll until the current lock owner times out.
+ * Set retry_timeout to the timeout value reported by the FW in the
+ * response to the "Request Resource Ownership" (0x0008) Admin Command
+ * as it indicates the maximum time the current owner of the resource
+ * is allowed to hold it.
+ */
+ retry_timeout = res_timeout;
+ while (err && retry_timeout && res_timeout) {
+ msleep(delay);
+ retry_timeout = (retry_timeout > delay) ?
+ retry_timeout - delay : 0;
+ err = ixgbe_aci_req_res(hw, res, access, 0, &res_timeout);
+
+ /* Success - lock acquired.
+ * -EALREADY - lock free, no work to do.
+ */
+ if (!err || err == -EALREADY)
+ break;
+ }
+
+ return err;
+}
+
+/**
+ * ixgbe_release_res - release a common resource
+ * @hw: pointer to the HW structure
+ * @res: resource ID
+ *
+ * Release a common resource using ixgbe_aci_release_res.
+ */
+void ixgbe_release_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res)
+{
+ u32 total_delay = 0;
+ int err;
+
+ err = ixgbe_aci_release_res(hw, res, 0);
+
+ /* There are some rare cases when trying to release the resource
+ * results in an admin command timeout, so handle them correctly.
+ */
+ while (err == -ETIME &&
+ total_delay < IXGBE_ACI_RELEASE_RES_TIMEOUT) {
+ usleep_range(1000, 1500);
+ err = ixgbe_aci_release_res(hw, res, 0);
+ total_delay++;
+ }
+}
+
+/**
+ * ixgbe_parse_e610_caps - Parse common device/function capabilities
+ * @hw: pointer to the HW struct
+ * @caps: pointer to common capabilities structure
+ * @elem: the capability element to parse
+ * @prefix: message prefix for tracing capabilities
+ *
+ * Given a capability element, extract relevant details into the common
+ * capability structure.
+ *
+ * Return: true if the capability matches one of the common capability ids,
+ * false otherwise.
+ */
+static bool ixgbe_parse_e610_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_caps *caps,
+ struct ixgbe_aci_cmd_list_caps_elem *elem,
+ const char *prefix)
+{
+ u32 logical_id = le32_to_cpu(elem->logical_id);
+ u32 phys_id = le32_to_cpu(elem->phys_id);
+ u32 number = le32_to_cpu(elem->number);
+ u16 cap = le16_to_cpu(elem->cap);
+
+ switch (cap) {
+ case IXGBE_ACI_CAPS_VALID_FUNCTIONS:
+ caps->valid_functions = number;
+ break;
+ case IXGBE_ACI_CAPS_SRIOV:
+ caps->sr_iov_1_1 = (number == 1);
+ break;
+ case IXGBE_ACI_CAPS_VMDQ:
+ caps->vmdq = (number == 1);
+ break;
+ case IXGBE_ACI_CAPS_DCB:
+ caps->dcb = (number == 1);
+ caps->active_tc_bitmap = logical_id;
+ caps->maxtc = phys_id;
+ break;
+ case IXGBE_ACI_CAPS_RSS:
+ caps->rss_table_size = number;
+ caps->rss_table_entry_width = logical_id;
+ break;
+ case IXGBE_ACI_CAPS_RXQS:
+ caps->num_rxq = number;
+ caps->rxq_first_id = phys_id;
+ break;
+ case IXGBE_ACI_CAPS_TXQS:
+ caps->num_txq = number;
+ caps->txq_first_id = phys_id;
+ break;
+ case IXGBE_ACI_CAPS_MSIX:
+ caps->num_msix_vectors = number;
+ caps->msix_vector_first_id = phys_id;
+ break;
+ case IXGBE_ACI_CAPS_NVM_VER:
+ break;
+ case IXGBE_ACI_CAPS_PENDING_NVM_VER:
+ caps->nvm_update_pending_nvm = true;
+ break;
+ case IXGBE_ACI_CAPS_PENDING_OROM_VER:
+ caps->nvm_update_pending_orom = true;
+ break;
+ case IXGBE_ACI_CAPS_PENDING_NET_VER:
+ caps->nvm_update_pending_netlist = true;
+ break;
+ case IXGBE_ACI_CAPS_NVM_MGMT:
+ caps->nvm_unified_update =
+ (number & IXGBE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ?
+ true : false;
+ break;
+ case IXGBE_ACI_CAPS_MAX_MTU:
+ caps->max_mtu = number;
+ break;
+ case IXGBE_ACI_CAPS_PCIE_RESET_AVOIDANCE:
+ caps->pcie_reset_avoidance = (number > 0);
+ break;
+ case IXGBE_ACI_CAPS_POST_UPDATE_RESET_RESTRICT:
+ caps->reset_restrict_support = (number == 1);
+ break;
+ case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG0:
+ case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG1:
+ case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG2:
+ case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG3:
+ {
+ u8 index = cap - IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG0;
+
+ caps->ext_topo_dev_img_ver_high[index] = number;
+ caps->ext_topo_dev_img_ver_low[index] = logical_id;
+ caps->ext_topo_dev_img_part_num[index] =
+ FIELD_GET(IXGBE_EXT_TOPO_DEV_IMG_PART_NUM_M, phys_id);
+ caps->ext_topo_dev_img_load_en[index] =
+ (phys_id & IXGBE_EXT_TOPO_DEV_IMG_LOAD_EN) != 0;
+ caps->ext_topo_dev_img_prog_en[index] =
+ (phys_id & IXGBE_EXT_TOPO_DEV_IMG_PROG_EN) != 0;
+ break;
+ }
+ default:
+ /* Not one of the recognized common capabilities */
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * ixgbe_parse_valid_functions_cap - Parse IXGBE_ACI_CAPS_VALID_FUNCTIONS caps
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse IXGBE_ACI_CAPS_VALID_FUNCTIONS for device capabilities.
+ */
+static void
+ixgbe_parse_valid_functions_cap(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_p,
+ struct ixgbe_aci_cmd_list_caps_elem *cap)
+{
+ dev_p->num_funcs = hweight32(le32_to_cpu(cap->number));
+}
+
+/**
+ * ixgbe_parse_vf_dev_caps - Parse IXGBE_ACI_CAPS_VF device caps
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse IXGBE_ACI_CAPS_VF for device capabilities.
+ */
+static void ixgbe_parse_vf_dev_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_p,
+ struct ixgbe_aci_cmd_list_caps_elem *cap)
+{
+ dev_p->num_vfs_exposed = le32_to_cpu(cap->number);
+}
+
+/**
+ * ixgbe_parse_vsi_dev_caps - Parse IXGBE_ACI_CAPS_VSI device caps
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse IXGBE_ACI_CAPS_VSI for device capabilities.
+ */
+static void ixgbe_parse_vsi_dev_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_p,
+ struct ixgbe_aci_cmd_list_caps_elem *cap)
+{
+ dev_p->num_vsi_allocd_to_host = le32_to_cpu(cap->number);
+}
+
+/**
+ * ixgbe_parse_fdir_dev_caps - Parse IXGBE_ACI_CAPS_FD device caps
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse IXGBE_ACI_CAPS_FD for device capabilities.
+ */
+static void ixgbe_parse_fdir_dev_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_p,
+ struct ixgbe_aci_cmd_list_caps_elem *cap)
+{
+ dev_p->num_flow_director_fltr = le32_to_cpu(cap->number);
+}
+
+/**
+ * ixgbe_parse_dev_caps - Parse device capabilities
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @buf: buffer containing the device capability records
+ * @cap_count: the number of capabilities
+ *
+ * Helper function to parse device (0x000B) capabilities list. For
+ * capabilities shared between device and function, this relies on
+ * ixgbe_parse_e610_caps.
+ *
+ * Loop through the list of provided capabilities and extract the relevant
+ * data into the device capabilities structure.
+ */
+static void ixgbe_parse_dev_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_p,
+ void *buf, u32 cap_count)
+{
+ struct ixgbe_aci_cmd_list_caps_elem *cap_resp;
+ u32 i;
+
+ cap_resp = (struct ixgbe_aci_cmd_list_caps_elem *)buf;
+
+ memset(dev_p, 0, sizeof(*dev_p));
+
+ for (i = 0; i < cap_count; i++) {
+ u16 cap = le16_to_cpu(cap_resp[i].cap);
+
+ ixgbe_parse_e610_caps(hw, &dev_p->common_cap, &cap_resp[i],
+ "dev caps");
+
+ switch (cap) {
+ case IXGBE_ACI_CAPS_VALID_FUNCTIONS:
+ ixgbe_parse_valid_functions_cap(hw, dev_p,
+ &cap_resp[i]);
+ break;
+ case IXGBE_ACI_CAPS_VF:
+ ixgbe_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]);
+ break;
+ case IXGBE_ACI_CAPS_VSI:
+ ixgbe_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
+ break;
+ case IXGBE_ACI_CAPS_FD:
+ ixgbe_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]);
+ break;
+ default:
+ /* Don't list common capabilities as unknown */
+ break;
+ }
+ }
+}
+
+/**
+ * ixgbe_parse_vf_func_caps - Parse IXGBE_ACI_CAPS_VF function caps
+ * @hw: pointer to the HW struct
+ * @func_p: pointer to function capabilities structure
+ * @cap: pointer to the capability element to parse
+ *
+ * Extract function capabilities for IXGBE_ACI_CAPS_VF.
+ */
+static void ixgbe_parse_vf_func_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_func_caps *func_p,
+ struct ixgbe_aci_cmd_list_caps_elem *cap)
+{
+ func_p->num_allocd_vfs = le32_to_cpu(cap->number);
+ func_p->vf_base_id = le32_to_cpu(cap->logical_id);
+}
+
+/**
+ * ixgbe_get_num_per_func - determine number of resources per PF
+ * @hw: pointer to the HW structure
+ * @max: value to be evenly split between each PF
+ *
+ * Determine the number of valid functions by going through the bitmap returned
+ * from parsing capabilities and use this to calculate the number of resources
+ * per PF based on the max value passed in.
+ *
+ * Return: the number of resources per PF, or 0 if no PFs are available.
+ */
+static u32 ixgbe_get_num_per_func(struct ixgbe_hw *hw, u32 max)
+{
+#define IXGBE_CAPS_VALID_FUNCS_M GENMASK(7, 0)
+ u8 funcs = hweight8(hw->dev_caps.common_cap.valid_functions &
+ IXGBE_CAPS_VALID_FUNCS_M);
+
+ return funcs ? (max / funcs) : 0;
+}
+
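+/* Worked example (illustrative values only): if the valid-functions bitmap
+ * reported by firmware is 0x0F (four PFs) and the max value passed in is
+ * 1024, each PF is granted 1024 / 4 = 256 resources. If no bits are set,
+ * 0 is returned.
+ */
+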
+/**
+ * ixgbe_parse_vsi_func_caps - Parse IXGBE_ACI_CAPS_VSI function caps
+ * @hw: pointer to the HW struct
+ * @func_p: pointer to function capabilities structure
+ * @cap: pointer to the capability element to parse
+ *
+ * Extract function capabilities for IXGBE_ACI_CAPS_VSI.
+ */
+static void ixgbe_parse_vsi_func_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_func_caps *func_p,
+ struct ixgbe_aci_cmd_list_caps_elem *cap)
+{
+ func_p->guar_num_vsi = ixgbe_get_num_per_func(hw, IXGBE_MAX_VSI);
+}
+
+/**
+ * ixgbe_parse_func_caps - Parse function capabilities
+ * @hw: pointer to the HW struct
+ * @func_p: pointer to function capabilities structure
+ * @buf: buffer containing the function capability records
+ * @cap_count: the number of capabilities
+ *
+ * Helper function to parse function (0x000A) capabilities list. For
+ * capabilities shared between device and function, this relies on
+ * ixgbe_parse_e610_caps.
+ *
+ * Loop through the list of provided capabilities and extract the relevant
+ * data into the function capabilities structure.
+ */
+static void ixgbe_parse_func_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_func_caps *func_p,
+ void *buf, u32 cap_count)
+{
+ struct ixgbe_aci_cmd_list_caps_elem *cap_resp;
+ u32 i;
+
+ cap_resp = (struct ixgbe_aci_cmd_list_caps_elem *)buf;
+
+ memset(func_p, 0, sizeof(*func_p));
+
+ for (i = 0; i < cap_count; i++) {
+ u16 cap = le16_to_cpu(cap_resp[i].cap);
+
+ ixgbe_parse_e610_caps(hw, &func_p->common_cap,
+ &cap_resp[i], "func caps");
+
+ switch (cap) {
+ case IXGBE_ACI_CAPS_VF:
+ ixgbe_parse_vf_func_caps(hw, func_p, &cap_resp[i]);
+ break;
+ case IXGBE_ACI_CAPS_VSI:
+ ixgbe_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
+ break;
+ default:
+ /* Don't list common capabilities as unknown */
+ break;
+ }
+ }
+}
+
+/**
+ * ixgbe_aci_list_caps - query function/device capabilities
+ * @hw: pointer to the HW struct
+ * @buf: a buffer to hold the capabilities
+ * @buf_size: size of the buffer
+ * @cap_count: if not NULL, set to the number of capabilities reported
+ * @opc: capabilities type to discover, device or function
+ *
+ * Get the function (0x000A) or device (0x000B) capabilities description from
+ * firmware and store it in the buffer.
+ *
+ * If the cap_count pointer is not NULL, then it is set to the number of
+ * capabilities firmware will report. Note that if the buffer size is too
+ * small, it is possible the command will return -ENOMEM. The
+ * cap_count will still be updated in this case. It is recommended that the
+ * buffer size be set to IXGBE_ACI_MAX_BUFFER_SIZE (the largest possible
+ * buffer that firmware could return) to avoid this.
+ *
+ * Return: the exit code of the operation.
+ * Exit code of -ENOMEM means the buffer size is too small.
+ */
+int ixgbe_aci_list_caps(struct ixgbe_hw *hw, void *buf, u16 buf_size,
+ u32 *cap_count, enum ixgbe_aci_opc opc)
+{
+ struct ixgbe_aci_cmd_list_caps *cmd;
+ struct ixgbe_aci_desc desc;
+ int err;
+
+ cmd = &desc.params.get_cap;
+
+ if (opc != ixgbe_aci_opc_list_func_caps &&
+ opc != ixgbe_aci_opc_list_dev_caps)
+ return -EINVAL;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, opc);
+ err = ixgbe_aci_send_cmd(hw, &desc, buf, buf_size);
+
+ if (cap_count)
+ *cap_count = le32_to_cpu(cmd->count);
+
+ return err;
+}
+
+/**
+ * ixgbe_discover_dev_caps - Read and extract device capabilities
+ * @hw: pointer to the hardware structure
+ * @dev_caps: pointer to device capabilities structure
+ *
+ * Read the device capabilities and extract them into the dev_caps structure
+ * for later use.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_discover_dev_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_caps)
+{
+ u32 cap_count;
+ u8 *cbuf;
+ int err;
+
+ cbuf = kzalloc(IXGBE_ACI_MAX_BUFFER_SIZE, GFP_KERNEL);
+ if (!cbuf)
+ return -ENOMEM;
+
+ /* Although the driver doesn't know the number of capabilities the
+ * device will return, we can simply send a 4KB buffer, the maximum
+ * possible size that firmware can return.
+ */
+ cap_count = IXGBE_ACI_MAX_BUFFER_SIZE /
+ sizeof(struct ixgbe_aci_cmd_list_caps_elem);
+
+ err = ixgbe_aci_list_caps(hw, cbuf, IXGBE_ACI_MAX_BUFFER_SIZE,
+ &cap_count,
+ ixgbe_aci_opc_list_dev_caps);
+ if (!err)
+ ixgbe_parse_dev_caps(hw, dev_caps, cbuf, cap_count);
+
+ kfree(cbuf);
+
+ return err;
+}
+
+/**
+ * ixgbe_discover_func_caps - Read and extract function capabilities
+ * @hw: pointer to the hardware structure
+ * @func_caps: pointer to function capabilities structure
+ *
+ * Read the function capabilities and extract them into the func_caps structure
+ * for later use.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_discover_func_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_func_caps *func_caps)
+{
+ u32 cap_count;
+ u8 *cbuf;
+ int err;
+
+ cbuf = kzalloc(IXGBE_ACI_MAX_BUFFER_SIZE, GFP_KERNEL);
+ if (!cbuf)
+ return -ENOMEM;
+
+ /* Although the driver doesn't know the number of capabilities the
+ * device will return, we can simply send a 4KB buffer, the maximum
+ * possible size that firmware can return.
+ */
+ cap_count = IXGBE_ACI_MAX_BUFFER_SIZE /
+ sizeof(struct ixgbe_aci_cmd_list_caps_elem);
+
+ err = ixgbe_aci_list_caps(hw, cbuf, IXGBE_ACI_MAX_BUFFER_SIZE,
+ &cap_count,
+ ixgbe_aci_opc_list_func_caps);
+ if (!err)
+ ixgbe_parse_func_caps(hw, func_caps, cbuf, cap_count);
+
+ kfree(cbuf);
+
+ return err;
+}
+
+/**
+ * ixgbe_get_caps - get info about the HW
+ * @hw: pointer to the hardware structure
+ *
+ * Retrieve both device and function capabilities.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_get_caps(struct ixgbe_hw *hw)
+{
+ int err;
+
+ err = ixgbe_discover_dev_caps(hw, &hw->dev_caps);
+ if (err)
+ return err;
+
+ return ixgbe_discover_func_caps(hw, &hw->func_caps);
+}
+
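+/* Illustrative usage, hypothetical caller: an init/probe path would normally
+ * discover capabilities once and abort on failure before any further setup.
+ *
+ *	err = ixgbe_get_caps(hw);
+ *	if (err)
+ *		return err;
+ *	// hw->dev_caps and hw->func_caps are now populated
+ */
+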
+/**
+ * ixgbe_aci_disable_rxen - disable RX
+ * @hw: pointer to the HW struct
+ *
+ * Request a safe disable of Receive Enable using ACI command (0x000C).
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_disable_rxen(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_disable_rxen *cmd;
+ struct ixgbe_aci_desc desc;
+
+ cmd = &desc.params.disable_rxen;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_disable_rxen);
+
+ cmd->lport_num = hw->bus.func;
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_aci_get_phy_caps - returns PHY capabilities
+ * @hw: pointer to the HW struct
+ * @qual_mods: report qualified modules
+ * @report_mode: report mode capabilities
+ * @pcaps: structure for PHY capabilities to be filled
+ *
+ * Returns the various PHY capabilities supported on the Port
+ * using ACI command (0x0600).
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_get_phy_caps(struct ixgbe_hw *hw, bool qual_mods, u8 report_mode,
+ struct ixgbe_aci_cmd_get_phy_caps_data *pcaps)
+{
+ struct ixgbe_aci_cmd_get_phy_caps *cmd;
+ u16 pcaps_size = sizeof(*pcaps);
+ struct ixgbe_aci_desc desc;
+ int err;
+
+ cmd = &desc.params.get_phy;
+
+ if (!pcaps || (report_mode & ~IXGBE_ACI_REPORT_MODE_M))
+ return -EINVAL;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_phy_caps);
+
+ if (qual_mods)
+ cmd->param0 |= cpu_to_le16(IXGBE_ACI_GET_PHY_RQM);
+
+ cmd->param0 |= cpu_to_le16(report_mode);
+ err = ixgbe_aci_send_cmd(hw, &desc, pcaps, pcaps_size);
+ if (!err && report_mode == IXGBE_ACI_REPORT_TOPO_CAP_MEDIA) {
+ hw->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
+ hw->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high);
+ memcpy(hw->link.link_info.module_type, &pcaps->module_type,
+ sizeof(hw->link.link_info.module_type));
+ }
+
+ return err;
+}
+
+/**
+ * ixgbe_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
+ * @caps: PHY ability structure to copy data from
+ * @cfg: PHY configuration structure to copy data to
+ *
+ * Helper function to copy data from PHY capabilities data structure
+ * to PHY configuration data structure
+ */
+void ixgbe_copy_phy_caps_to_cfg(struct ixgbe_aci_cmd_get_phy_caps_data *caps,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *cfg)
+{
+ if (!caps || !cfg)
+ return;
+
+ memset(cfg, 0, sizeof(*cfg));
+ cfg->phy_type_low = caps->phy_type_low;
+ cfg->phy_type_high = caps->phy_type_high;
+ cfg->caps = caps->caps;
+ cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
+ cfg->eee_cap = caps->eee_cap;
+ cfg->eeer_value = caps->eeer_value;
+ cfg->link_fec_opt = caps->link_fec_options;
+ cfg->module_compliance_enforcement =
+ caps->module_compliance_enforcement;
+}
+
+/**
+ * ixgbe_aci_set_phy_cfg - set PHY configuration
+ * @hw: pointer to the HW struct
+ * @cfg: structure with PHY configuration data to be set
+ *
+ * Set the various PHY configuration parameters supported on the Port
+ * using ACI command (0x0601).
+ * One or more of the Set PHY config parameters may be ignored in an MFP
+ * mode as the PF may not have the privilege to set some of the PHY Config
+ * parameters.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_set_phy_cfg(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *cfg)
+{
+ struct ixgbe_aci_desc desc;
+ int err;
+
+ if (!cfg)
+ return -EINVAL;
+
+ /* Ensure that only valid bits of cfg->caps can be turned on. */
+ cfg->caps &= IXGBE_ACI_PHY_ENA_VALID_MASK;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_phy_cfg);
+ desc.params.set_phy.lport_num = hw->bus.func;
+ desc.flags |= cpu_to_le16(IXGBE_ACI_FLAG_RD);
+
+ err = ixgbe_aci_send_cmd(hw, &desc, cfg, sizeof(*cfg));
+ if (!err)
+ hw->phy.curr_user_phy_cfg = *cfg;
+
+ return err;
+}
+
+/**
+ * ixgbe_aci_set_link_restart_an - set up link and restart AN
+ * @hw: pointer to the HW struct
+ * @ena_link: if true: enable link, if false: disable link
+ *
+ * Function sets up the link and restarts the Auto-Negotiation over the link.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_set_link_restart_an(struct ixgbe_hw *hw, bool ena_link)
+{
+ struct ixgbe_aci_cmd_restart_an *cmd;
+ struct ixgbe_aci_desc desc;
+
+ cmd = &desc.params.restart_an;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_restart_an);
+
+ cmd->cmd_flags = IXGBE_ACI_RESTART_AN_LINK_RESTART;
+ cmd->lport_num = hw->bus.func;
+ if (ena_link)
+ cmd->cmd_flags |= IXGBE_ACI_RESTART_AN_LINK_ENABLE;
+ else
+ cmd->cmd_flags &= ~IXGBE_ACI_RESTART_AN_LINK_ENABLE;
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_is_media_cage_present - check if media cage is present
+ * @hw: pointer to the HW struct
+ *
+ * Identify presence of media cage using the ACI command (0x06E0).
+ *
+ * Return: true if media cage is present, else false. If no cage, then
+ * media type is backplane or BASE-T.
+ */
+static bool ixgbe_is_media_cage_present(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_link_topo *cmd;
+ struct ixgbe_aci_desc desc;
+
+ cmd = &desc.params.get_link_topo;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_topo);
+
+ cmd->addr.topo_params.node_type_ctx =
+ FIELD_PREP(IXGBE_ACI_LINK_TOPO_NODE_CTX_M,
+ IXGBE_ACI_LINK_TOPO_NODE_CTX_PORT);
+
+ /* Set node type. */
+ cmd->addr.topo_params.node_type_ctx |=
+ FIELD_PREP(IXGBE_ACI_LINK_TOPO_NODE_TYPE_M,
+ IXGBE_ACI_LINK_TOPO_NODE_TYPE_CAGE);
+
+ /* Node type cage can be used to determine if a cage is present. If the ACI
+ * command returns an error (ENOENT), then no cage is present. If no cage is
+ * present, the connection type is backplane or BASE-T.
+ */
+ return !ixgbe_aci_get_netlist_node(hw, cmd, NULL, NULL);
+}
+
+/**
+ * ixgbe_get_media_type_from_phy_type - Gets media type based on phy type
+ * @hw: pointer to the HW struct
+ *
+ * Try to identify the media type based on the phy type.
+ * If more than one media type is set, ixgbe_media_type_unknown is returned.
+ * phy_type_low is checked first, then phy_type_high.
+ * If neither identifies a media type, ixgbe_media_type_unknown is returned.
+ *
+ * Return: the media type derived from the PHY type, as an enum value.
+ */
+static enum ixgbe_media_type
+ixgbe_get_media_type_from_phy_type(struct ixgbe_hw *hw)
+{
+ struct ixgbe_link_status *hw_link_info;
+
+ if (!hw)
+ return ixgbe_media_type_unknown;
+
+ hw_link_info = &hw->link.link_info;
+ if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
+ /* If more than one media type is selected, report unknown */
+ return ixgbe_media_type_unknown;
+
+ if (hw_link_info->phy_type_low) {
+ /* 1G SGMII is a special case where some DA cable PHYs
+ * may show this as an option when it really shouldn't
+ * be since SGMII is meant to be between a MAC and a PHY
+ * in a backplane. Try to detect this case and handle it
+ */
+ if (hw_link_info->phy_type_low == IXGBE_PHY_TYPE_LOW_1G_SGMII &&
+ (hw_link_info->module_type[IXGBE_ACI_MOD_TYPE_IDENT] ==
+ IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
+ hw_link_info->module_type[IXGBE_ACI_MOD_TYPE_IDENT] ==
+ IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
+ return ixgbe_media_type_da;
+
+ switch (hw_link_info->phy_type_low) {
+ case IXGBE_PHY_TYPE_LOW_1000BASE_SX:
+ case IXGBE_PHY_TYPE_LOW_1000BASE_LX:
+ case IXGBE_PHY_TYPE_LOW_10GBASE_SR:
+ case IXGBE_PHY_TYPE_LOW_10GBASE_LR:
+ case IXGBE_PHY_TYPE_LOW_25GBASE_SR:
+ case IXGBE_PHY_TYPE_LOW_25GBASE_LR:
+ return ixgbe_media_type_fiber;
+ case IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
+ case IXGBE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
+ return ixgbe_media_type_fiber;
+ case IXGBE_PHY_TYPE_LOW_100BASE_TX:
+ case IXGBE_PHY_TYPE_LOW_1000BASE_T:
+ case IXGBE_PHY_TYPE_LOW_2500BASE_T:
+ case IXGBE_PHY_TYPE_LOW_5GBASE_T:
+ case IXGBE_PHY_TYPE_LOW_10GBASE_T:
+ case IXGBE_PHY_TYPE_LOW_25GBASE_T:
+ return ixgbe_media_type_copper;
+ case IXGBE_PHY_TYPE_LOW_10G_SFI_DA:
+ case IXGBE_PHY_TYPE_LOW_25GBASE_CR:
+ case IXGBE_PHY_TYPE_LOW_25GBASE_CR_S:
+ case IXGBE_PHY_TYPE_LOW_25GBASE_CR1:
+ return ixgbe_media_type_da;
+ case IXGBE_PHY_TYPE_LOW_25G_AUI_C2C:
+ if (ixgbe_is_media_cage_present(hw))
+ return ixgbe_media_type_aui;
+ fallthrough;
+ case IXGBE_PHY_TYPE_LOW_1000BASE_KX:
+ case IXGBE_PHY_TYPE_LOW_2500BASE_KX:
+ case IXGBE_PHY_TYPE_LOW_2500BASE_X:
+ case IXGBE_PHY_TYPE_LOW_5GBASE_KR:
+ case IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1:
+ case IXGBE_PHY_TYPE_LOW_10G_SFI_C2C:
+ case IXGBE_PHY_TYPE_LOW_25GBASE_KR:
+ case IXGBE_PHY_TYPE_LOW_25GBASE_KR1:
+ case IXGBE_PHY_TYPE_LOW_25GBASE_KR_S:
+ return ixgbe_media_type_backplane;
+ }
+ } else {
+ switch (hw_link_info->phy_type_high) {
+ case IXGBE_PHY_TYPE_HIGH_10BASE_T:
+ return ixgbe_media_type_copper;
+ }
+ }
+ return ixgbe_media_type_unknown;
+}
+
+/**
+ * ixgbe_update_link_info - update status of the HW network link
+ * @hw: pointer to the HW struct
+ *
+ * Update the status of the HW network link.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_update_link_info(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data *pcaps;
+ struct ixgbe_link_status *li;
+ int err;
+
+ if (!hw)
+ return -EINVAL;
+
+ li = &hw->link.link_info;
+
+ err = ixgbe_aci_get_link_info(hw, true, NULL);
+ if (err)
+ return err;
+
+ if (!(li->link_info & IXGBE_ACI_MEDIA_AVAILABLE))
+ return 0;
+
+ pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
+ if (!pcaps)
+ return -ENOMEM;
+
+ err = ixgbe_aci_get_phy_caps(hw, false, IXGBE_ACI_REPORT_TOPO_CAP_MEDIA,
+ pcaps);
+
+ if (!err)
+ memcpy(li->module_type, &pcaps->module_type,
+ sizeof(li->module_type));
+
+ kfree(pcaps);
+
+ return err;
+}
+
+/**
+ * ixgbe_get_link_status - get status of the HW network link
+ * @hw: pointer to the HW struct
+ * @link_up: pointer to bool (true/false = linkup/linkdown)
+ *
+ * The variable link_up is set to true if the link is up and false if it is
+ * down. Its value is invalid if the return status is non-zero. As a result
+ * of this call, link status reporting becomes enabled.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_get_link_status(struct ixgbe_hw *hw, bool *link_up)
+{
+ if (!hw || !link_up)
+ return -EINVAL;
+
+ if (hw->link.get_link_info) {
+ int err = ixgbe_update_link_info(hw);
+
+ if (err)
+ return err;
+ }
+
+ *link_up = hw->link.link_info.link_info & IXGBE_ACI_LINK_UP;
+
+ return 0;
+}
+
+/**
+ * ixgbe_aci_get_link_info - get the link status
+ * @hw: pointer to the HW struct
+ * @ena_lse: enable/disable LinkStatusEvent reporting
+ * @link: pointer to link status structure - optional
+ *
+ * Get the current Link Status using ACI command (0x0607).
+ * The current link can be optionally provided to update
+ * the status.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_get_link_info(struct ixgbe_hw *hw, bool ena_lse,
+ struct ixgbe_link_status *link)
+{
+ struct ixgbe_aci_cmd_get_link_status_data link_data = {};
+ struct ixgbe_aci_cmd_get_link_status *resp;
+ struct ixgbe_link_status *li_old, *li;
+ struct ixgbe_fc_info *hw_fc_info;
+ struct ixgbe_aci_desc desc;
+ bool tx_pause, rx_pause;
+ u8 cmd_flags;
+ int err;
+
+ if (!hw)
+ return -EINVAL;
+
+ li_old = &hw->link.link_info_old;
+ li = &hw->link.link_info;
+ hw_fc_info = &hw->fc;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_status);
+ cmd_flags = (ena_lse) ? IXGBE_ACI_LSE_ENA : IXGBE_ACI_LSE_DIS;
+ resp = &desc.params.get_link_status;
+ resp->cmd_flags = cpu_to_le16(cmd_flags);
+ resp->lport_num = hw->bus.func;
+
+ err = ixgbe_aci_send_cmd(hw, &desc, &link_data, sizeof(link_data));
+ if (err)
+ return err;
+
+ /* Save off old link status information. */
+ *li_old = *li;
+
+ /* Update current link status information. */
+ li->link_speed = le16_to_cpu(link_data.link_speed);
+ li->phy_type_low = le64_to_cpu(link_data.phy_type_low);
+ li->phy_type_high = le64_to_cpu(link_data.phy_type_high);
+ li->link_info = link_data.link_info;
+ li->link_cfg_err = link_data.link_cfg_err;
+ li->an_info = link_data.an_info;
+ li->ext_info = link_data.ext_info;
+ li->max_frame_size = le16_to_cpu(link_data.max_frame_size);
+ li->fec_info = link_data.cfg & IXGBE_ACI_FEC_MASK;
+ li->topo_media_conflict = link_data.topo_media_conflict;
+ li->pacing = link_data.cfg & (IXGBE_ACI_CFG_PACING_M |
+ IXGBE_ACI_CFG_PACING_TYPE_M);
+
+ /* Update fc info. */
+ tx_pause = !!(link_data.an_info & IXGBE_ACI_LINK_PAUSE_TX);
+ rx_pause = !!(link_data.an_info & IXGBE_ACI_LINK_PAUSE_RX);
+ if (tx_pause && rx_pause)
+ hw_fc_info->current_mode = ixgbe_fc_full;
+ else if (tx_pause)
+ hw_fc_info->current_mode = ixgbe_fc_tx_pause;
+ else if (rx_pause)
+ hw_fc_info->current_mode = ixgbe_fc_rx_pause;
+ else
+ hw_fc_info->current_mode = ixgbe_fc_none;
+
+ li->lse_ena = !!(le16_to_cpu(resp->cmd_flags) &
+ IXGBE_ACI_LSE_IS_ENABLED);
+
+ /* Save link status information. */
+ if (link)
+ *link = *li;
+
+ /* Flag cleared so calling functions don't call ACI again. */
+ hw->link.get_link_info = false;
+
+ return 0;
+}
+
+/**
+ * ixgbe_aci_set_event_mask - set event mask
+ * @hw: pointer to the HW struct
+ * @port_num: port number of the physical function
+ * @mask: event mask to be set
+ *
+ * Set the event mask using ACI command (0x0613).
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_set_event_mask(struct ixgbe_hw *hw, u8 port_num, u16 mask)
+{
+ struct ixgbe_aci_cmd_set_event_mask *cmd;
+ struct ixgbe_aci_desc desc;
+
+ cmd = &desc.params.set_event_mask;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_event_mask);
+
+ cmd->lport_num = port_num;
+
+ cmd->event_mask = cpu_to_le16(mask);
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_configure_lse - enable/disable link status events
+ * @hw: pointer to the HW struct
+ * @activate: true to enable LSE, false to disable it
+ * @mask: event mask to be set; a set bit means deactivation of the
+ * corresponding event
+ *
+ * Set the event mask and then enable or disable link status events
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_configure_lse(struct ixgbe_hw *hw, bool activate, u16 mask)
+{
+ int err;
+
+ err = ixgbe_aci_set_event_mask(hw, (u8)hw->bus.func, mask);
+ if (err)
+ return err;
+
+ /* Enable or disable link status event generation by FW. */
+ return ixgbe_aci_get_link_info(hw, activate, NULL);
+}
+
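+/* Illustrative sketch (mask value is an example): since a set mask bit
+ * deactivates the corresponding event, passing a mask of 0 keeps all events
+ * active while enabling link status event reporting:
+ *
+ *	err = ixgbe_configure_lse(hw, true, 0);
+ */
+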
+/**
+ * ixgbe_start_hw_e610 - Prepare hardware for Tx/Rx
+ * @hw: pointer to hardware structure
+ *
+ * Get firmware version and start the hardware using the generic
+ * start_hw() and ixgbe_start_hw_gen2() functions.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_start_hw_e610(struct ixgbe_hw *hw)
+{
+ int err;
+
+ err = ixgbe_aci_get_fw_ver(hw);
+ if (err)
+ return err;
+
+ err = ixgbe_start_hw_generic(hw);
+ if (err)
+ return err;
+
+ ixgbe_start_hw_gen2(hw);
+
+ return 0;
+}
+
+/**
+ * ixgbe_aci_set_port_id_led - set LED value for the given port
+ * @hw: pointer to the HW struct
+ * @orig_mode: set LED original mode
+ *
+ * Set LED value for the given port (0x06E9)
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_set_port_id_led(struct ixgbe_hw *hw, bool orig_mode)
+{
+ struct ixgbe_aci_cmd_set_port_id_led *cmd;
+ struct ixgbe_aci_desc desc;
+
+ cmd = &desc.params.set_port_id_led;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_port_id_led);
+
+ cmd->lport_num = (u8)hw->bus.func;
+ cmd->lport_num_valid = IXGBE_ACI_PORT_ID_PORT_NUM_VALID;
+
+ if (orig_mode)
+ cmd->ident_mode = IXGBE_ACI_PORT_IDENT_LED_ORIG;
+ else
+ cmd->ident_mode = IXGBE_ACI_PORT_IDENT_LED_BLINK;
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_get_media_type_e610 - Gets media type
+ * @hw: pointer to the HW struct
+ *
+ * In order to get the media type, the function gets the PHY capabilities
+ * and then uses them to identify the PHY type by checking phy_type_high
+ * and phy_type_low.
+ *
+ * Return: the media type as an ixgbe_media_type enum value,
+ * or ixgbe_media_type_unknown in case of an error.
+ */
+enum ixgbe_media_type ixgbe_get_media_type_e610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
+ int rc;
+
+ rc = ixgbe_update_link_info(hw);
+ if (rc)
+ return ixgbe_media_type_unknown;
+
+ /* If there is no link but a PHY (dongle) is available, SW should use the
+ * Get PHY Caps admin command instead of Get Link Status, find the most
+ * significant bit set in the PHY types reported by the command, and use
+ * it to discover the media type.
+ */
+ if (!(hw->link.link_info.link_info & IXGBE_ACI_LINK_UP) &&
+ (hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE)) {
+ int highest_bit;
+
+ /* Get PHY Capabilities */
+ rc = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_TOPO_CAP_MEDIA,
+ &pcaps);
+ if (rc)
+ return ixgbe_media_type_unknown;
+
+ highest_bit = fls64(le64_to_cpu(pcaps.phy_type_high));
+ if (highest_bit) {
+ hw->link.link_info.phy_type_high =
+ BIT_ULL(highest_bit - 1);
+ hw->link.link_info.phy_type_low = 0;
+ } else {
+ highest_bit = fls64(le64_to_cpu(pcaps.phy_type_low));
+ if (highest_bit) {
+ hw->link.link_info.phy_type_low =
+ BIT_ULL(highest_bit - 1);
+ hw->link.link_info.phy_type_high = 0;
+ }
+ }
+ }
+
+ /* Based on link status or search above try to discover media type. */
+ hw->phy.media_type = ixgbe_get_media_type_from_phy_type(hw);
+
+ return hw->phy.media_type;
+}
+
+/**
+ * ixgbe_setup_link_e610 - Set up link
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg_wait: true when waiting for completion is needed
+ *
+ * Set up the link with the specified speed.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_setup_link_e610(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg_wait)
+{
+ /* Simply request FW to perform proper PHY setup */
+ return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait);
+}
+
+/**
+ * ixgbe_check_link_e610 - Determine link and speed status
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @link_up: true when link is up
+ * @link_up_wait_to_complete: bool used to wait for link up or not
+ *
+ * Determine if the link is up and the current link speed
+ * using ACI command (0x0607).
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_check_link_e610(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *link_up, bool link_up_wait_to_complete)
+{
+ int err;
+ u32 i;
+
+ if (!speed || !link_up)
+ return -EINVAL;
+
+ /* Set get_link_info flag to ensure that fresh
+ * link information will be obtained from FW
+ * by sending Get Link Status admin command.
+ */
+ hw->link.get_link_info = true;
+
+ /* Update link information in adapter context. */
+ err = ixgbe_get_link_status(hw, link_up);
+ if (err)
+ return err;
+
+ /* Wait for link up if it was requested. */
+ if (link_up_wait_to_complete && !(*link_up)) {
+ for (i = 0; i < hw->mac.max_link_up_time; i++) {
+ msleep(100);
+ hw->link.get_link_info = true;
+ err = ixgbe_get_link_status(hw, link_up);
+ if (err)
+ return err;
+ if (*link_up)
+ break;
+ }
+ }
+
+ /* Use link information in adapter context updated by the call
+ * to ixgbe_get_link_status() to determine current link speed.
+ * Link speed information is valid only when link up was
+ * reported by FW.
+ */
+ if (*link_up) {
+ switch (hw->link.link_info.link_speed) {
+ case IXGBE_ACI_LINK_SPEED_10MB:
+ *speed = IXGBE_LINK_SPEED_10_FULL;
+ break;
+ case IXGBE_ACI_LINK_SPEED_100MB:
+ *speed = IXGBE_LINK_SPEED_100_FULL;
+ break;
+ case IXGBE_ACI_LINK_SPEED_1000MB:
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ case IXGBE_ACI_LINK_SPEED_2500MB:
+ *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
+ break;
+ case IXGBE_ACI_LINK_SPEED_5GB:
+ *speed = IXGBE_LINK_SPEED_5GB_FULL;
+ break;
+ case IXGBE_ACI_LINK_SPEED_10GB:
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ break;
+ default:
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ break;
+ }
+ } else {
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_get_link_capabilities_e610 - Determine link capabilities
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @autoneg: true when autoneg or autotry is enabled
+ *
+ * Determine speed and AN parameters of a link.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_get_link_capabilities_e610(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *autoneg)
+{
+ if (!speed || !autoneg)
+ return -EINVAL;
+
+ *autoneg = true;
+ *speed = hw->phy.speeds_supported;
+
+ return 0;
+}
+
+/**
+ * ixgbe_cfg_phy_fc - Configure PHY Flow Control (FC) data based on FC mode
+ * @hw: pointer to hardware structure
+ * @cfg: PHY configuration data to set FC mode
+ * @req_mode: FC mode to configure
+ *
+ * Configures PHY Flow Control according to the provided configuration.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_cfg_phy_fc(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *cfg,
+ enum ixgbe_fc_mode req_mode)
+{
+ u8 pause_mask = 0x0;
+
+ if (!cfg)
+ return -EINVAL;
+
+ switch (req_mode) {
+ case ixgbe_fc_full:
+ pause_mask |= IXGBE_ACI_PHY_EN_TX_LINK_PAUSE;
+ pause_mask |= IXGBE_ACI_PHY_EN_RX_LINK_PAUSE;
+ break;
+ case ixgbe_fc_rx_pause:
+ pause_mask |= IXGBE_ACI_PHY_EN_RX_LINK_PAUSE;
+ break;
+ case ixgbe_fc_tx_pause:
+ pause_mask |= IXGBE_ACI_PHY_EN_TX_LINK_PAUSE;
+ break;
+ default:
+ break;
+ }
+
+ /* Clear the old pause settings. */
+ cfg->caps &= ~(IXGBE_ACI_PHY_EN_TX_LINK_PAUSE |
+ IXGBE_ACI_PHY_EN_RX_LINK_PAUSE);
+
+ /* Set the new capabilities. */
+ cfg->caps |= pause_mask;
+
+ return 0;
+}
+
+/**
+ * ixgbe_setup_fc_e610 - Set up flow control
+ * @hw: pointer to hardware structure
+ *
+ * Set up flow control. This has to be done during init time.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_setup_fc_e610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data pcaps = {};
+ struct ixgbe_aci_cmd_set_phy_cfg_data cfg = {};
+ int err;
+
+ /* Get the current PHY config */
+ err = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_ACTIVE_CFG, &pcaps);
+ if (err)
+ return err;
+
+ ixgbe_copy_phy_caps_to_cfg(&pcaps, &cfg);
+
+ /* Configure the set PHY data */
+ err = ixgbe_cfg_phy_fc(hw, &cfg, hw->fc.requested_mode);
+ if (err)
+ return err;
+
+ /* If the capabilities have changed, then set the new config */
+ if (cfg.caps != pcaps.caps) {
+ cfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
+
+ err = ixgbe_aci_set_phy_cfg(hw, &cfg);
+ if (err)
+ return err;
+ }
+
+ return err;
+}
+
+/**
+ * ixgbe_fc_autoneg_e610 - Configure flow control
+ * @hw: pointer to hardware structure
+ *
+ * Configure Flow Control.
+ */
+void ixgbe_fc_autoneg_e610(struct ixgbe_hw *hw)
+{
+ int err;
+
+ /* Get current link status.
+ * Current FC mode will be stored in the hw context.
+ */
+ err = ixgbe_aci_get_link_info(hw, false, NULL);
+ if (err)
+ goto no_autoneg;
+
+ /* Check if the link is up */
+ if (!(hw->link.link_info.link_info & IXGBE_ACI_LINK_UP))
+ goto no_autoneg;
+
+ /* Check if auto-negotiation has completed */
+ if (!(hw->link.link_info.an_info & IXGBE_ACI_AN_COMPLETED))
+ goto no_autoneg;
+
+ hw->fc.fc_was_autonegged = true;
+ return;
+
+no_autoneg:
+ hw->fc.fc_was_autonegged = false;
+ hw->fc.current_mode = hw->fc.requested_mode;
+}
+
+/**
+ * ixgbe_disable_rx_e610 - Disable RX unit
+ * @hw: pointer to hardware structure
+ *
+ * Disable the RX DMA unit on E610 using ACI command (0x000C).
+ */
+void ixgbe_disable_rx_e610(struct ixgbe_hw *hw)
+{
+ u32 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+ u32 pfdtxgswc;
+ int err;
+
+ if (!(rxctrl & IXGBE_RXCTRL_RXEN))
+ return;
+
+ pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
+ if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
+ pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
+ IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
+ hw->mac.set_lben = true;
+ } else {
+ hw->mac.set_lben = false;
+ }
+
+ err = ixgbe_aci_disable_rxen(hw);
+
+ /* If the ACI command fails, disable RX using a register write. */
+ if (err) {
+ rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+ if (rxctrl & IXGBE_RXCTRL_RXEN) {
+ rxctrl &= ~IXGBE_RXCTRL_RXEN;
+ IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
+ }
+ }
+}
+
+/**
+ * ixgbe_fw_recovery_mode_e610 - Check FW NVM recovery mode
+ * @hw: pointer to hardware structure
+ *
+ * Check FW NVM recovery mode by reading the value of
+ * the dedicated register.
+ *
+ * Return: true if FW is in recovery mode, otherwise false.
+ */
+static bool ixgbe_fw_recovery_mode_e610(struct ixgbe_hw *hw)
+{
+ u32 fwsm = IXGBE_READ_REG(hw, IXGBE_GL_MNG_FWSM);
+
+ return !!(fwsm & IXGBE_GL_MNG_FWSM_RECOVERY_M);
+}
+
+/**
+ * ixgbe_fw_rollback_mode_e610 - Check FW NVM rollback mode
+ * @hw: pointer to hardware structure
+ *
+ * Check FW NVM rollback mode by reading the value of
+ * the dedicated register.
+ *
+ * Return: true if FW is in rollback mode, otherwise false.
+ */
+static bool ixgbe_fw_rollback_mode_e610(struct ixgbe_hw *hw)
+{
+ u32 fwsm = IXGBE_READ_REG(hw, IXGBE_GL_MNG_FWSM);
+
+ return !!(fwsm & IXGBE_GL_MNG_FWSM_ROLLBACK_M);
+}
+
+/**
+ * ixgbe_init_phy_ops_e610 - PHY specific init
+ * @hw: pointer to hardware structure
+ *
+ * Initialize any function pointers that were not able to be
+ * set during init_shared_code because the PHY type was not known.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_init_phy_ops_e610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+ struct ixgbe_phy_info *phy = &hw->phy;
+
+ if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper)
+ phy->ops.set_phy_power = ixgbe_set_phy_power_e610;
+ else
+ phy->ops.set_phy_power = NULL;
+
+ /* Identify the PHY */
+ return phy->ops.identify(hw);
+}
+
+/**
+ * ixgbe_identify_phy_e610 - Identify PHY
+ * @hw: pointer to hardware structure
+ *
+ * Determine PHY type, supported speeds and PHY ID.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_identify_phy_e610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
+ u64 phy_type_low, phy_type_high;
+ int err;
+
+ /* Set PHY type */
+ hw->phy.type = ixgbe_phy_fw;
+
+ err = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_TOPO_CAP_MEDIA, &pcaps);
+ if (err)
+ return err;
+
+ if (!(pcaps.module_compliance_enforcement &
+ IXGBE_ACI_MOD_ENFORCE_STRICT_MODE)) {
+ /* Handle lenient mode */
+ err = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_TOPO_CAP_NO_MEDIA,
+ &pcaps);
+ if (err)
+ return err;
+ }
+
+ /* Determine supported speeds */
+ hw->phy.speeds_supported = IXGBE_LINK_SPEED_UNKNOWN;
+ phy_type_high = le64_to_cpu(pcaps.phy_type_high);
+ phy_type_low = le64_to_cpu(pcaps.phy_type_low);
+
+ if (phy_type_high & IXGBE_PHY_TYPE_HIGH_10BASE_T ||
+ phy_type_high & IXGBE_PHY_TYPE_HIGH_10M_SGMII)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_10_FULL;
+ if (phy_type_low & IXGBE_PHY_TYPE_LOW_100BASE_TX ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_100M_SGMII ||
+ phy_type_high & IXGBE_PHY_TYPE_HIGH_100M_USXGMII)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_100_FULL;
+ if (phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_T ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_SX ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_LX ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_KX ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_1G_SGMII ||
+ phy_type_high & IXGBE_PHY_TYPE_HIGH_1G_USXGMII)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_1GB_FULL;
+ if (phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_T ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_10G_SFI_DA ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_SR ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_LR ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1 ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_10G_SFI_C2C ||
+ phy_type_high & IXGBE_PHY_TYPE_HIGH_10G_USXGMII)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_10GB_FULL;
+
+ /* 2.5 and 5 Gbps link speeds must be excluded from the
+ * auto-negotiation set used during driver initialization due to
+ * compatibility issues with certain switches. Those issues do not
+ * exist in case of E610 2.5G SKU device (0x57b1).
+ */
+ if (!hw->phy.autoneg_advertised &&
+ hw->device_id != IXGBE_DEV_ID_E610_2_5G_T)
+ hw->phy.autoneg_advertised = hw->phy.speeds_supported;
+
+ if (phy_type_low & IXGBE_PHY_TYPE_LOW_2500BASE_T ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_2500BASE_X ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_2500BASE_KX ||
+ phy_type_high & IXGBE_PHY_TYPE_HIGH_2500M_SGMII ||
+ phy_type_high & IXGBE_PHY_TYPE_HIGH_2500M_USXGMII)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_2_5GB_FULL;
+
+ if (!hw->phy.autoneg_advertised &&
+ hw->device_id == IXGBE_DEV_ID_E610_2_5G_T)
+ hw->phy.autoneg_advertised = hw->phy.speeds_supported;
+
+ if (phy_type_low & IXGBE_PHY_TYPE_LOW_5GBASE_T ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_5GBASE_KR ||
+ phy_type_high & IXGBE_PHY_TYPE_HIGH_5G_USXGMII)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_5GB_FULL;
+
+ /* Set PHY ID */
+ memcpy(&hw->phy.id, pcaps.phy_id_oui, sizeof(u32));
+
+ hw->phy.eee_speeds_supported = IXGBE_LINK_SPEED_10_FULL |
+ IXGBE_LINK_SPEED_100_FULL |
+ IXGBE_LINK_SPEED_1GB_FULL;
+ hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported;
+
+ return 0;
+}
+
+/**
+ * ixgbe_identify_module_e610 - Identify SFP module type
+ * @hw: pointer to hardware structure
+ *
+ * Identify the SFP module type.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_identify_module_e610(struct ixgbe_hw *hw)
+{
+ bool media_available;
+ u8 module_type;
+ int err;
+
+ err = ixgbe_update_link_info(hw);
+ if (err)
+ return err;
+
+ media_available =
+ (hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE);
+
+ if (media_available) {
+ hw->phy.sfp_type = ixgbe_sfp_type_unknown;
+
+ /* Get module type from hw context updated by
+ * ixgbe_update_link_info()
+ */
+ module_type = hw->link.link_info.module_type[IXGBE_ACI_MOD_TYPE_IDENT];
+
+ if ((module_type & IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE) ||
+ (module_type & IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE)) {
+ hw->phy.sfp_type = ixgbe_sfp_type_da_cu;
+ } else if (module_type & IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_SR) {
+ hw->phy.sfp_type = ixgbe_sfp_type_sr;
+ } else if ((module_type & IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LR) ||
+ (module_type & IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LRM)) {
+ hw->phy.sfp_type = ixgbe_sfp_type_lr;
+ }
+ } else {
+ hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+ return -ENOENT;
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_setup_phy_link_e610 - Sets up firmware-controlled PHYs
+ * @hw: pointer to hardware structure
+ *
+ * Set the parameters for the firmware-controlled PHYs.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_setup_phy_link_e610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
+ struct ixgbe_aci_cmd_set_phy_cfg_data pcfg;
+ u8 rmode = IXGBE_ACI_REPORT_TOPO_CAP_MEDIA;
+ u64 sup_phy_type_low, sup_phy_type_high;
+ u64 phy_type_low = 0, phy_type_high = 0;
+ int err;
+
+ err = ixgbe_aci_get_link_info(hw, false, NULL);
+ if (err)
+ return err;
+
+ /* If media is not available get default config. */
+ if (!(hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE))
+ rmode = IXGBE_ACI_REPORT_DFLT_CFG;
+
+ err = ixgbe_aci_get_phy_caps(hw, false, rmode, &pcaps);
+ if (err)
+ return err;
+
+ sup_phy_type_low = le64_to_cpu(pcaps.phy_type_low);
+ sup_phy_type_high = le64_to_cpu(pcaps.phy_type_high);
+
+ /* Get Active configuration to avoid unintended changes. */
+ err = ixgbe_aci_get_phy_caps(hw, false, IXGBE_ACI_REPORT_ACTIVE_CFG,
+ &pcaps);
+ if (err)
+ return err;
+
+ ixgbe_copy_phy_caps_to_cfg(&pcaps, &pcfg);
+
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL) {
+ phy_type_high |= IXGBE_PHY_TYPE_HIGH_10BASE_T;
+ phy_type_high |= IXGBE_PHY_TYPE_HIGH_10M_SGMII;
+ }
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) {
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_100BASE_TX;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_100M_SGMII;
+ phy_type_high |= IXGBE_PHY_TYPE_HIGH_100M_USXGMII;
+ }
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) {
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_1000BASE_T;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_1000BASE_SX;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_1000BASE_LX;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_1000BASE_KX;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_1G_SGMII;
+ phy_type_high |= IXGBE_PHY_TYPE_HIGH_1G_USXGMII;
+ }
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL) {
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_2500BASE_T;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_2500BASE_X;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_2500BASE_KX;
+ phy_type_high |= IXGBE_PHY_TYPE_HIGH_2500M_SGMII;
+ phy_type_high |= IXGBE_PHY_TYPE_HIGH_2500M_USXGMII;
+ }
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_5GB_FULL) {
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_5GBASE_T;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_5GBASE_KR;
+ phy_type_high |= IXGBE_PHY_TYPE_HIGH_5G_USXGMII;
+ }
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) {
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_10GBASE_T;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_10G_SFI_DA;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_10GBASE_SR;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_10GBASE_LR;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_10G_SFI_C2C;
+ phy_type_high |= IXGBE_PHY_TYPE_HIGH_10G_USXGMII;
+ }
+
+ /* Mask the set values to avoid requesting unsupported link types. */
+ phy_type_low &= sup_phy_type_low;
+ pcfg.phy_type_low = cpu_to_le64(phy_type_low);
+ phy_type_high &= sup_phy_type_high;
+ pcfg.phy_type_high = cpu_to_le64(phy_type_high);
+
+ if (pcfg.phy_type_high != pcaps.phy_type_high ||
+ pcfg.phy_type_low != pcaps.phy_type_low ||
+ pcfg.caps != pcaps.caps) {
+ pcfg.caps |= IXGBE_ACI_PHY_ENA_LINK;
+ pcfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
+
+ err = ixgbe_aci_set_phy_cfg(hw, &pcfg);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_set_phy_power_e610 - Control power for copper PHY
+ * @hw: pointer to hardware structure
+ * @on: true for on, false for off
+ *
+ * Set the power on/off of the PHY
+ * by getting its capabilities and setting the appropriate
+ * configuration parameters.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_set_phy_power_e610(struct ixgbe_hw *hw, bool on)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data phy_caps = {};
+ struct ixgbe_aci_cmd_set_phy_cfg_data phy_cfg = {};
+ int err;
+
+ err = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_ACTIVE_CFG,
+ &phy_caps);
+ if (err)
+ return err;
+
+ ixgbe_copy_phy_caps_to_cfg(&phy_caps, &phy_cfg);
+
+ if (on)
+ phy_cfg.caps &= ~IXGBE_ACI_PHY_ENA_LOW_POWER;
+ else
+ phy_cfg.caps |= IXGBE_ACI_PHY_ENA_LOW_POWER;
+
+ /* PHY is already in requested power mode. */
+ if (phy_caps.caps == phy_cfg.caps)
+ return 0;
+
+ phy_cfg.caps |= IXGBE_ACI_PHY_ENA_LINK;
+ phy_cfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
+
+ return ixgbe_aci_set_phy_cfg(hw, &phy_cfg);
+}
+
+/**
+ * ixgbe_enter_lplu_e610 - Transition to low power states
+ * @hw: pointer to hardware structure
+ *
+ * Configures Low Power Link Up on transition to low power states
+ * (from D0 to non-D0). Link is required to enter LPLU so avoid resetting the
+ * X557 PHY immediately prior to entering LPLU.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_enter_lplu_e610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data phy_caps = {};
+ struct ixgbe_aci_cmd_set_phy_cfg_data phy_cfg = {};
+ int err;
+
+ err = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_ACTIVE_CFG,
+ &phy_caps);
+ if (err)
+ return err;
+
+ ixgbe_copy_phy_caps_to_cfg(&phy_caps, &phy_cfg);
+
+ phy_cfg.low_power_ctrl_an |= IXGBE_ACI_PHY_EN_D3COLD_LOW_POWER_AUTONEG;
+
+ return ixgbe_aci_set_phy_cfg(hw, &phy_cfg);
+}
+
+/**
+ * ixgbe_init_eeprom_params_e610 - Initialize EEPROM params
+ * @hw: pointer to hardware structure
+ *
+ * Initialize the EEPROM parameters ixgbe_eeprom_info within the ixgbe_hw
+ * struct in order to set up EEPROM access.
+ *
+ * Return: the operation exit code.
+ */
+int ixgbe_init_eeprom_params_e610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+ u32 gens_stat;
+ u8 sr_size;
+
+ if (eeprom->type != ixgbe_eeprom_uninitialized)
+ return 0;
+
+ eeprom->type = ixgbe_flash;
+
+ gens_stat = IXGBE_READ_REG(hw, GLNVM_GENS);
+ sr_size = FIELD_GET(GLNVM_GENS_SR_SIZE_M, gens_stat);
+
+ /* Switching to words (sr_size contains power of 2). */
+ eeprom->word_size = BIT(sr_size) * IXGBE_SR_WORDS_IN_1KB;
+
+ hw_dbg(hw, "Eeprom params: type = %d, size = %d\n", eeprom->type,
+ eeprom->word_size);
+
+ return 0;
+}
+
+/**
+ * ixgbe_aci_get_netlist_node - get a node handle
+ * @hw: pointer to the hw struct
+ * @cmd: get_link_topo AQ structure
+ * @node_part_number: output node part number if node found
+ * @node_handle: output node handle parameter if node found
+ *
+ * Get the netlist node and assigns it to
+ * the provided handle using ACI command (0x06E0).
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_get_netlist_node(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_get_link_topo *cmd,
+ u8 *node_part_number, u16 *node_handle)
+{
+ struct ixgbe_aci_desc desc;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_topo);
+ desc.params.get_link_topo = *cmd;
+
+ if (ixgbe_aci_send_cmd(hw, &desc, NULL, 0))
+ return -EOPNOTSUPP;
+
+ if (node_handle)
+ *node_handle =
+ le16_to_cpu(desc.params.get_link_topo.addr.handle);
+ if (node_part_number)
+ *node_part_number = desc.params.get_link_topo.node_part_num;
+
+ return 0;
+}
+
+/**
+ * ixgbe_acquire_nvm - Generic request for acquiring the NVM ownership
+ * @hw: pointer to the HW structure
+ * @access: NVM access type (read or write)
+ *
+ * Request NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_acquire_nvm(struct ixgbe_hw *hw,
+ enum ixgbe_aci_res_access_type access)
+{
+ u32 fla;
+
+ /* Skip if we are in blank NVM programming mode */
+ fla = IXGBE_READ_REG(hw, IXGBE_GLNVM_FLA);
+ if ((fla & IXGBE_GLNVM_FLA_LOCKED_M) == 0)
+ return 0;
+
+ return ixgbe_acquire_res(hw, IXGBE_NVM_RES_ID, access,
+ IXGBE_NVM_TIMEOUT);
+}
+
+/**
+ * ixgbe_release_nvm - Generic request for releasing the NVM ownership
+ * @hw: pointer to the HW structure
+ *
+ * Release NVM ownership.
+ */
+void ixgbe_release_nvm(struct ixgbe_hw *hw)
+{
+ u32 fla;
+
+ /* Skip if we are in blank NVM programming mode */
+ fla = IXGBE_READ_REG(hw, IXGBE_GLNVM_FLA);
+ if ((fla & IXGBE_GLNVM_FLA_LOCKED_M) == 0)
+ return;
+
+ ixgbe_release_res(hw, IXGBE_NVM_RES_ID);
+}
+
+/**
+ * ixgbe_aci_read_nvm - read NVM
+ * @hw: pointer to the HW struct
+ * @module_typeid: module pointer location in words from the NVM beginning
+ * @offset: byte offset from the module beginning
+ * @length: length of the section to be read (in bytes from the offset)
+ * @data: command buffer (size [bytes] = length)
+ * @last_command: tells if this is the last command in a series
+ * @read_shadow_ram: tell if this is a shadow RAM read
+ *
+ * Read the NVM using ACI command (0x0701).
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_read_nvm(struct ixgbe_hw *hw, u16 module_typeid, u32 offset,
+ u16 length, void *data, bool last_command,
+ bool read_shadow_ram)
+{
+ struct ixgbe_aci_cmd_nvm *cmd;
+ struct ixgbe_aci_desc desc;
+
+ if (offset > IXGBE_ACI_NVM_MAX_OFFSET)
+ return -EINVAL;
+
+ cmd = &desc.params.nvm;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_read);
+
+ if (!read_shadow_ram && module_typeid == IXGBE_ACI_NVM_START_POINT)
+ cmd->cmd_flags |= IXGBE_ACI_NVM_FLASH_ONLY;
+
+ /* If this is the last command in a series, set the proper flag. */
+ if (last_command)
+ cmd->cmd_flags |= IXGBE_ACI_NVM_LAST_CMD;
+ cmd->module_typeid = cpu_to_le16(module_typeid);
+ cmd->offset_low = cpu_to_le16(offset & 0xFFFF);
+ cmd->offset_high = (offset >> 16) & 0xFF;
+ cmd->length = cpu_to_le16(length);
+
+ return ixgbe_aci_send_cmd(hw, &desc, data, length);
+}
+
+/**
+ * ixgbe_aci_erase_nvm - erase NVM sector
+ * @hw: pointer to the HW struct
+ * @module_typeid: module pointer location in words from the NVM beginning
+ *
+ * Erase the NVM sector using the ACI command (0x0702).
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_erase_nvm(struct ixgbe_hw *hw, u16 module_typeid)
+{
+ struct ixgbe_aci_cmd_nvm *cmd;
+ struct ixgbe_aci_desc desc;
+ __le16 len;
+ int err;
+
+ /* Read the module length from the Shadow RAM: pass module_typeid 0 (start
+ * of the SR), convert the word offset of the module size to a byte offset
+ * (2 * module_typeid + 2), and set both last_command and read_shadow_ram
+ * to true.
+ */
+ err = ixgbe_aci_read_nvm(hw, 0, 2 * module_typeid + 2, 2, &len, true,
+ true);
+ if (err)
+ return err;
+
+ cmd = &desc.params.nvm;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_erase);
+
+ cmd->module_typeid = cpu_to_le16(module_typeid);
+ cmd->length = len;
+ cmd->offset_low = 0;
+ cmd->offset_high = 0;
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_aci_update_nvm - update NVM
+ * @hw: pointer to the HW struct
+ * @module_typeid: module pointer location in words from the NVM beginning
+ * @offset: byte offset from the module beginning
+ * @length: length of the section to be written (in bytes from the offset)
+ * @data: command buffer (size [bytes] = length)
+ * @last_command: tells if this is the last command in a series
+ * @command_flags: command parameters
+ *
+ * Update the NVM using the ACI command (0x0703).
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_update_nvm(struct ixgbe_hw *hw, u16 module_typeid,
+ u32 offset, u16 length, void *data,
+ bool last_command, u8 command_flags)
+{
+ struct ixgbe_aci_cmd_nvm *cmd;
+ struct ixgbe_aci_desc desc;
+
+ cmd = &desc.params.nvm;
+
+ /* In offset the highest byte must be zeroed. */
+ if (offset & 0xFF000000)
+ return -EINVAL;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_write);
+
+ cmd->cmd_flags |= command_flags;
+
+ /* If this is the last command in a series, set the proper flag. */
+ if (last_command)
+ cmd->cmd_flags |= IXGBE_ACI_NVM_LAST_CMD;
+ cmd->module_typeid = cpu_to_le16(module_typeid);
+ cmd->offset_low = cpu_to_le16(offset & 0xFFFF);
+ cmd->offset_high = FIELD_GET(IXGBE_ACI_NVM_OFFSET_HI_U_MASK, offset);
+ cmd->length = cpu_to_le16(length);
+
+ desc.flags |= cpu_to_le16(IXGBE_ACI_FLAG_RD);
+
+ return ixgbe_aci_send_cmd(hw, &desc, data, length);
+}
+
+/**
+ * ixgbe_nvm_write_activate - NVM activate write
+ * @hw: pointer to the HW struct
+ * @cmd_flags: flags for write activate command
+ * @response_flags: response indicators from firmware
+ *
+ * Update the control word with the required banks' validity bits
+ * and dumps the Shadow RAM to flash using ACI command (0x0707).
+ *
+ * cmd_flags controls which banks to activate, the preservation level to use
+ * when activating the NVM bank, and whether an EMP reset is required for
+ * activation.
+ *
+ * Note that the 16-bit cmd_flags value is split between two separate 1-byte
+ * flag values in the descriptor.
+ *
+ * On successful return of the firmware command, the response_flags variable
+ * is updated with the flags reported by firmware indicating certain status,
+ * such as whether EMP reset is enabled.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_nvm_write_activate(struct ixgbe_hw *hw, u16 cmd_flags,
+ u8 *response_flags)
+{
+ struct ixgbe_aci_cmd_nvm *cmd;
+ struct ixgbe_aci_desc desc;
+ int err;
+
+ cmd = &desc.params.nvm;
+ ixgbe_fill_dflt_direct_cmd_desc(&desc,
+ ixgbe_aci_opc_nvm_write_activate);
+
+ cmd->cmd_flags = (u8)(cmd_flags & 0xFF);
+ cmd->offset_high = (u8)FIELD_GET(IXGBE_ACI_NVM_OFFSET_HI_A_MASK,
+ cmd_flags);
+
+ err = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+ if (!err && response_flags)
+ *response_flags = cmd->cmd_flags;
+
+ return err;
+}
+
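+/* Worked example, assuming IXGBE_ACI_NVM_OFFSET_HI_A_MASK selects the upper
+ * byte of cmd_flags: a cmd_flags value of 0x0123 is split so that 0x23 lands
+ * in cmd->cmd_flags and 0x01 in cmd->offset_high of the descriptor.
+ */
+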
+/**
+ * ixgbe_nvm_validate_checksum - validate checksum
+ * @hw: pointer to the HW struct
+ *
+ * Verify NVM PFA checksum validity using ACI command (0x0706).
+ * If the checksum verification failed, IXGBE_ERR_NVM_CHECKSUM is returned.
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_nvm_validate_checksum(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_nvm_checksum *cmd;
+ struct ixgbe_aci_desc desc;
+ int err;
+
+ err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ if (err)
+ return err;
+
+ cmd = &desc.params.nvm_checksum;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_checksum);
+ cmd->flags = IXGBE_ACI_NVM_CHECKSUM_VERIFY;
+
+ err = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+
+ ixgbe_release_nvm(hw);
+
+ if (!err && cmd->checksum !=
+ cpu_to_le16(IXGBE_ACI_NVM_CHECKSUM_CORRECT)) {
+ struct ixgbe_adapter *adapter = container_of(hw, struct ixgbe_adapter,
+ hw);
+
+ err = -EIO;
+ netdev_err(adapter->netdev, "Invalid Shadow RAM checksum\n");
+ }
+
+ return err;
+}
+
+/**
+ * ixgbe_discover_flash_size - Discover the available flash size
+ * @hw: pointer to the HW struct
+ *
+ * The device flash could be up to 16MB in size. However, it is possible that
+ * the actual size is smaller. Use bisection to determine the accessible size
+ * of flash memory.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_discover_flash_size(struct ixgbe_hw *hw)
+{
+ u32 min_size = 0, max_size = IXGBE_ACI_NVM_MAX_OFFSET + 1;
+ int err;
+
+ err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ if (err)
+ return err;
+
+ while ((max_size - min_size) > 1) {
+ u32 offset = (max_size + min_size) / 2;
+ u32 len = 1;
+ u8 data;
+
+ err = ixgbe_read_flat_nvm(hw, offset, &len, &data, false);
+ if (err == -EIO &&
+ hw->aci.last_status == IXGBE_ACI_RC_EINVAL) {
+ err = 0;
+ max_size = offset;
+ } else if (!err) {
+ min_size = offset;
+ } else {
+ /* an unexpected error occurred */
+ goto err_read_flat_nvm;
+ }
+ }
+
+ hw->flash.flash_size = max_size;
+
+err_read_flat_nvm:
+ ixgbe_release_nvm(hw);
+
+ return err;
+}
+
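+/* Worked example (illustrative sizes): suppose only 4MB of the possible 16MB
+ * flash is accessible. A probe read at 8MB fails with -EIO and
+ * IXGBE_ACI_RC_EINVAL, shrinking max_size to 8MB; a probe just below 4MB
+ * succeeds, growing min_size. The bisection converges when
+ * max_size - min_size == 1, and max_size (4MB) is cached as the flash size.
+ */
+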
+/**
+ * ixgbe_read_sr_base_address - Read the value of a Shadow RAM pointer word
+ * @hw: pointer to the HW structure
+ * @offset: the word offset of the Shadow RAM word to read
+ * @pointer: pointer value read from Shadow RAM
+ *
+ * Read the given Shadow RAM word, and convert it to a pointer value specified
+ * in bytes. This function assumes the specified offset is a valid pointer
+ * word.
+ *
+ * Each pointer word specifies whether it is stored in word size or 4KB
+ * sector size by using the highest bit. The reported pointer value will be in
+ * bytes, intended for flat NVM reads.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_read_sr_base_address(struct ixgbe_hw *hw, u16 offset,
+ u32 *pointer)
+{
+ u16 value;
+ int err;
+
+ err = ixgbe_read_ee_aci_e610(hw, offset, &value);
+ if (err)
+ return err;
+
+ /* Determine if the pointer is in 4KB or word units */
+ if (value & IXGBE_SR_NVM_PTR_4KB_UNITS)
+ *pointer = (value & ~IXGBE_SR_NVM_PTR_4KB_UNITS) * SZ_4K;
+ else
+ *pointer = value * sizeof(u16);
+
+ return 0;
+}
+
+/**
+ * ixgbe_read_sr_area_size - Read an area size from a Shadow RAM word
+ * @hw: pointer to the HW structure
+ * @offset: the word offset of the Shadow RAM to read
+ * @size: size value read from the Shadow RAM
+ *
+ * Read the given Shadow RAM word, and convert it to an area size value
+ * specified in bytes. This function assumes the specified offset is a valid
+ * area size word.
+ *
+ * Each area size word is specified in 4KB sector units. This function reports
+ * the size in bytes, intended for flat NVM reads.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_read_sr_area_size(struct ixgbe_hw *hw, u16 offset, u32 *size)
+{
+ u16 value;
+ int err;
+
+ err = ixgbe_read_ee_aci_e610(hw, offset, &value);
+ if (err)
+ return err;
+
+ /* Area sizes are always specified in 4KB units */
+ *size = value * SZ_4K;
+
+ return 0;
+}
+
+/**
+ * ixgbe_determine_active_flash_banks - Discover active bank for each module
+ * @hw: pointer to the HW struct
+ *
+ * Read the Shadow RAM control word and determine which banks are active for
+ * the NVM, OROM, and Netlist modules. Also read and calculate the associated
+ * pointer and size. These values are then cached into the ixgbe_flash_info
+ * structure for later use in order to calculate the correct offset to read
+ * from the active module.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_determine_active_flash_banks(struct ixgbe_hw *hw)
+{
+ struct ixgbe_bank_info *banks = &hw->flash.banks;
+ u16 ctrl_word;
+ int err;
+
+ err = ixgbe_read_ee_aci_e610(hw, IXGBE_E610_SR_NVM_CTRL_WORD,
+ &ctrl_word);
+ if (err)
+ return err;
+
+ if (FIELD_GET(IXGBE_SR_CTRL_WORD_1_M, ctrl_word) !=
+ IXGBE_SR_CTRL_WORD_VALID)
+ return -ENODATA;
+
+ if (!(ctrl_word & IXGBE_SR_CTRL_WORD_NVM_BANK))
+ banks->nvm_bank = IXGBE_1ST_FLASH_BANK;
+ else
+ banks->nvm_bank = IXGBE_2ND_FLASH_BANK;
+
+ if (!(ctrl_word & IXGBE_SR_CTRL_WORD_OROM_BANK))
+ banks->orom_bank = IXGBE_1ST_FLASH_BANK;
+ else
+ banks->orom_bank = IXGBE_2ND_FLASH_BANK;
+
+ if (!(ctrl_word & IXGBE_SR_CTRL_WORD_NETLIST_BANK))
+ banks->netlist_bank = IXGBE_1ST_FLASH_BANK;
+ else
+ banks->netlist_bank = IXGBE_2ND_FLASH_BANK;
+
+ err = ixgbe_read_sr_base_address(hw, IXGBE_E610_SR_1ST_NVM_BANK_PTR,
+ &banks->nvm_ptr);
+ if (err)
+ return err;
+
+ err = ixgbe_read_sr_area_size(hw, IXGBE_E610_SR_NVM_BANK_SIZE,
+ &banks->nvm_size);
+ if (err)
+ return err;
+
+ err = ixgbe_read_sr_base_address(hw, IXGBE_E610_SR_1ST_OROM_BANK_PTR,
+ &banks->orom_ptr);
+ if (err)
+ return err;
+
+ err = ixgbe_read_sr_area_size(hw, IXGBE_E610_SR_OROM_BANK_SIZE,
+ &banks->orom_size);
+ if (err)
+ return err;
+
+ err = ixgbe_read_sr_base_address(hw, IXGBE_E610_SR_NETLIST_BANK_PTR,
+ &banks->netlist_ptr);
+ if (err)
+ return err;
+
+ err = ixgbe_read_sr_area_size(hw, IXGBE_E610_SR_NETLIST_BANK_SIZE,
+ &banks->netlist_size);
+
+ return err;
+}
+
+/**
+ * ixgbe_get_flash_bank_offset - Get offset into requested flash bank
+ * @hw: pointer to the HW structure
+ * @bank: whether to read from the active or inactive flash bank
+ * @module: the module to read from
+ *
+ * Based on the module, lookup the module offset from the beginning of the
+ * flash.
+ *
+ * Return: the flash offset. Note that a value of zero is invalid and must be
+ * treated as an error.
+ */
+static int ixgbe_get_flash_bank_offset(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ u16 module)
+{
+ struct ixgbe_bank_info *banks = &hw->flash.banks;
+ enum ixgbe_flash_bank active_bank;
+ bool second_bank_active;
+ u32 offset, size;
+
+ switch (module) {
+ case IXGBE_E610_SR_1ST_NVM_BANK_PTR:
+ offset = banks->nvm_ptr;
+ size = banks->nvm_size;
+ active_bank = banks->nvm_bank;
+ break;
+ case IXGBE_E610_SR_1ST_OROM_BANK_PTR:
+ offset = banks->orom_ptr;
+ size = banks->orom_size;
+ active_bank = banks->orom_bank;
+ break;
+ case IXGBE_E610_SR_NETLIST_BANK_PTR:
+ offset = banks->netlist_ptr;
+ size = banks->netlist_size;
+ active_bank = banks->netlist_bank;
+ break;
+ default:
+ return 0;
+ }
+
+ switch (active_bank) {
+ case IXGBE_1ST_FLASH_BANK:
+ second_bank_active = false;
+ break;
+ case IXGBE_2ND_FLASH_BANK:
+ second_bank_active = true;
+ break;
+ default:
+ return 0;
+ }
+
+ /* The second flash bank is stored immediately following the first
+ * bank. Based on whether the 1st or 2nd bank is active, and whether
+ * we want the active or inactive bank, calculate the desired offset.
+ */
+ switch (bank) {
+ case IXGBE_ACTIVE_FLASH_BANK:
+ return offset + (second_bank_active ? size : 0);
+ case IXGBE_INACTIVE_FLASH_BANK:
+ return offset + (second_bank_active ? 0 : size);
+ }
+
+ return 0;
+}
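+
+/* For example, if the second NVM bank is active, an active-bank read
+ * starts at nvm_ptr + nvm_size and the inactive bank (which would hold a
+ * pending update) starts at nvm_ptr; with the first bank active the two
+ * offsets are swapped.
+ */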
+
+/**
+ * ixgbe_read_flash_module - Read a word from one of the main NVM modules
+ * @hw: pointer to the HW structure
+ * @bank: which bank of the module to read
+ * @module: the module to read
+ * @offset: the offset into the module in bytes
+ * @data: storage for the word read from the flash
+ * @length: bytes of data to read
+ *
+ * Read data from the specified flash module. The bank parameter indicates
+ * whether to read from the active bank or the inactive bank of that
+ * module.
+ *
+ * The data is read using flat NVM access, and relies on the
+ * hw->flash.banks data being set up by ixgbe_determine_active_flash_banks()
+ * during initialization.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_read_flash_module(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ u16 module, u32 offset, u8 *data, u32 length)
+{
+ u32 start;
+ int err;
+
+ start = ixgbe_get_flash_bank_offset(hw, bank, module);
+ if (!start)
+ return -EINVAL;
+
+ err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ if (err)
+ return err;
+
+ err = ixgbe_read_flat_nvm(hw, start + offset, &length, data, false);
+
+ ixgbe_release_nvm(hw);
+
+ return err;
+}
+
+/**
+ * ixgbe_read_nvm_module - Read from the active main NVM module
+ * @hw: pointer to the HW structure
+ * @bank: whether to read from active or inactive NVM module
+ * @offset: offset into the NVM module to read, in words
+ * @data: storage for returned word value
+ *
+ * Read the specified word from the active NVM module. This includes the CSS
+ * header at the start of the NVM module.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_read_nvm_module(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ u32 offset, u16 *data)
+{
+ __le16 data_local;
+ int err;
+
+ err = ixgbe_read_flash_module(hw, bank, IXGBE_E610_SR_1ST_NVM_BANK_PTR,
+ offset * sizeof(data_local),
+ (u8 *)&data_local,
+ sizeof(data_local));
+ if (!err)
+ *data = le16_to_cpu(data_local);
+
+ return err;
+}
+
+/**
+ * ixgbe_read_netlist_module - Read data from the netlist module area
+ * @hw: pointer to the HW structure
+ * @bank: whether to read from the active or inactive module
+ * @offset: offset into the netlist to read from
+ * @data: storage for returned word value
+ *
+ * Read a word from the specified netlist bank.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_read_netlist_module(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ u32 offset, u16 *data)
+{
+ __le16 data_local;
+ int err;
+
+ err = ixgbe_read_flash_module(hw, bank, IXGBE_E610_SR_NETLIST_BANK_PTR,
+ offset * sizeof(data_local),
+ (u8 *)&data_local, sizeof(data_local));
+ if (!err)
+ *data = le16_to_cpu(data_local);
+
+ return err;
+}
+
+/**
+ * ixgbe_read_orom_module - Read from the active Option ROM module
+ * @hw: pointer to the HW structure
+ * @bank: whether to read from active or inactive OROM module
+ * @offset: offset into the OROM module to read, in words
+ * @data: storage for returned word value
+ *
+ * Read the specified word from the active Option ROM module of the flash.
+ * Note that unlike the NVM module, the CSS data is stored at the end of the
+ * module instead of at the beginning.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_read_orom_module(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ u32 offset, u16 *data)
+{
+ __le16 data_local;
+ int err;
+
+ err = ixgbe_read_flash_module(hw, bank, IXGBE_E610_SR_1ST_OROM_BANK_PTR,
+ offset * sizeof(data_local),
+ (u8 *)&data_local, sizeof(data_local));
+ if (!err)
+ *data = le16_to_cpu(data_local);
+
+ return err;
+}
+
+/**
+ * ixgbe_get_nvm_css_hdr_len - Read the CSS header length
+ * @hw: pointer to the HW struct
+ * @bank: whether to read from the active or inactive flash bank
+ * @hdr_len: storage for header length in words
+ *
+ * Read the CSS header length from the NVM CSS header and add the
+ * Authentication header size, and then convert to words.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_get_nvm_css_hdr_len(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ u32 *hdr_len)
+{
+ u16 hdr_len_l, hdr_len_h;
+ u32 hdr_len_dword;
+ int err;
+
+ err = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_HDR_LEN_L,
+ &hdr_len_l);
+ if (err)
+ return err;
+
+ err = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_HDR_LEN_H,
+ &hdr_len_h);
+ if (err)
+ return err;
+
+ /* CSS header length is in DWORD, so convert to words and add
+ * authentication header size.
+ */
+ hdr_len_dword = (hdr_len_h << 16) | hdr_len_l;
+ *hdr_len = hdr_len_dword * 2 + IXGBE_NVM_AUTH_HEADER_LEN;
+
+ return 0;
+}
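+
+/* Illustrative numbers for the conversion above: a CSS header length of
+ * 0x14C DWORDs reads back as hdr_len_l = 0x014C, hdr_len_h = 0, giving
+ * 0x298 words plus IXGBE_NVM_AUTH_HEADER_LEN, which
+ * ixgbe_read_nvm_sr_copy() later rounds up to IXGBE_HDR_LEN_ROUNDUP.
+ */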
+
+/**
+ * ixgbe_read_nvm_sr_copy - Read a word from the Shadow RAM copy
+ * @hw: pointer to the HW structure
+ * @bank: whether to read from the active or inactive NVM module
+ * @offset: offset into the Shadow RAM copy to read, in words
+ * @data: storage for returned word value
+ *
+ * Read the specified word from the copy of the Shadow RAM found in the
+ * specified NVM module.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_read_nvm_sr_copy(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ u32 offset, u16 *data)
+{
+ u32 hdr_len;
+ int err;
+
+ err = ixgbe_get_nvm_css_hdr_len(hw, bank, &hdr_len);
+ if (err)
+ return err;
+
+ hdr_len = round_up(hdr_len, IXGBE_HDR_LEN_ROUNDUP);
+
+ return ixgbe_read_nvm_module(hw, bank, hdr_len + offset, data);
+}
+
+/**
+ * ixgbe_get_nvm_srev - Read the security revision from the NVM CSS header
+ * @hw: pointer to the HW struct
+ * @bank: whether to read from the active or inactive flash bank
+ * @srev: storage for security revision
+ *
+ * Read the security revision out of the CSS header of the active NVM module
+ * bank.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_get_nvm_srev(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank, u32 *srev)
+{
+ u16 srev_l, srev_h;
+ int err;
+
+ err = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_SREV_L, &srev_l);
+ if (err)
+ return err;
+
+ err = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_SREV_H, &srev_h);
+ if (err)
+ return err;
+
+ *srev = (srev_h << 16) | srev_l;
+
+ return 0;
+}
+
+/**
+ * ixgbe_get_orom_civd_data - Get the combo version information from Option ROM
+ * @hw: pointer to the HW struct
+ * @bank: whether to read from the active or inactive flash module
+ * @civd: storage for the Option ROM CIVD data.
+ *
+ * Searches through the Option ROM flash contents to locate the CIVD data for
+ * the image.
+ *
+ * Return: the exit code of the operation.
+ */
+static int
+ixgbe_get_orom_civd_data(struct ixgbe_hw *hw, enum ixgbe_bank_select bank,
+ struct ixgbe_orom_civd_info *civd)
+{
+ struct ixgbe_orom_civd_info tmp;
+ u32 offset;
+ int err;
+
+ /* The CIVD section is located in the Option ROM aligned to 512 bytes.
+ * The first 4 bytes must contain the ASCII characters "$CIV".
+ * A simple modulo 256 sum of all of the bytes of the structure must
+ * equal 0.
+ */
+ for (offset = 0; (offset + SZ_512) <= hw->flash.banks.orom_size;
+ offset += SZ_512) {
+ u8 sum = 0;
+ u32 i;
+
+ err = ixgbe_read_flash_module(hw, bank,
+ IXGBE_E610_SR_1ST_OROM_BANK_PTR,
+ offset,
+ (u8 *)&tmp, sizeof(tmp));
+ if (err)
+ return err;
+
+ /* Skip forward until we find a matching signature */
+ if (memcmp(IXGBE_OROM_CIV_SIGNATURE, tmp.signature,
+ sizeof(tmp.signature)))
+ continue;
+
+ /* Verify that the simple checksum is zero */
+ for (i = 0; i < sizeof(tmp); i++)
+ sum += ((u8 *)&tmp)[i];
+
+ if (sum)
+ return -EDOM;
+
+ *civd = tmp;
+ return 0;
+ }
+
+ return -ENODATA;
+}
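+
+/* The CIVD checksum above is a plain byte sum: adding every byte of the
+ * structure into a u8 accumulator (so it wraps modulo 256) must yield 0.
+ * Presumably the image builder picks one byte of the structure so the
+ * total balances out; any corruption makes the sum non-zero and the
+ * search returns -EDOM.
+ */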
+
+/**
+ * ixgbe_get_orom_srev - Read the security revision from the OROM CSS header
+ * @hw: pointer to the HW struct
+ * @bank: whether to read from active or inactive flash module
+ * @srev: storage for security revision
+ *
+ * Read the security revision out of the CSS header of the active OROM module
+ * bank.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_get_orom_srev(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ u32 *srev)
+{
+ u32 orom_size_word = hw->flash.banks.orom_size / 2;
+ u32 css_start, hdr_len;
+ u16 srev_l, srev_h;
+ int err;
+
+ err = ixgbe_get_nvm_css_hdr_len(hw, bank, &hdr_len);
+ if (err)
+ return err;
+
+ if (orom_size_word < hdr_len)
+ return -EINVAL;
+
+ /* Calculate how far into the Option ROM the CSS header starts. Note
+ * that ixgbe_read_orom_module takes a word offset.
+ */
+ css_start = orom_size_word - hdr_len;
+ err = ixgbe_read_orom_module(hw, bank,
+ css_start + IXGBE_NVM_CSS_SREV_L,
+ &srev_l);
+ if (err)
+ return err;
+
+ err = ixgbe_read_orom_module(hw, bank,
+ css_start + IXGBE_NVM_CSS_SREV_H,
+ &srev_h);
+ if (err)
+ return err;
+
+ *srev = srev_h << 16 | srev_l;
+
+ return 0;
+}
+
+/**
+ * ixgbe_get_orom_ver_info - Read Option ROM version information
+ * @hw: pointer to the HW struct
+ * @bank: whether to read from the active or inactive flash module
+ * @orom: pointer to Option ROM info structure
+ *
+ * Read Option ROM version and security revision from the Option ROM flash
+ * section.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_get_orom_ver_info(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ struct ixgbe_orom_info *orom)
+{
+ struct ixgbe_orom_civd_info civd;
+ u32 combo_ver;
+ int err;
+
+ err = ixgbe_get_orom_civd_data(hw, bank, &civd);
+ if (err)
+ return err;
+
+ combo_ver = le32_to_cpu(civd.combo_ver);
+
+ orom->major = (u8)FIELD_GET(IXGBE_OROM_VER_MASK, combo_ver);
+ orom->patch = (u8)FIELD_GET(IXGBE_OROM_VER_PATCH_MASK, combo_ver);
+ orom->build = (u16)FIELD_GET(IXGBE_OROM_VER_BUILD_MASK, combo_ver);
+
+ return ixgbe_get_orom_srev(hw, bank, &orom->srev);
+}
+
+/**
+ * ixgbe_get_inactive_orom_ver - Read Option ROM version from the inactive bank
+ * @hw: pointer to the HW structure
+ * @orom: storage for Option ROM version information
+ *
+ * Read the Option ROM version and security revision data for the inactive
+ * section of flash. Used to access version data for a pending update that has
+ * not yet been activated.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_get_inactive_orom_ver(struct ixgbe_hw *hw,
+ struct ixgbe_orom_info *orom)
+{
+ return ixgbe_get_orom_ver_info(hw, IXGBE_INACTIVE_FLASH_BANK, orom);
+}
+
+/**
+ * ixgbe_get_nvm_ver_info - Read NVM version information
+ * @hw: pointer to the HW struct
+ * @bank: whether to read from the active or inactive flash bank
+ * @nvm: pointer to NVM info structure
+ *
+ * Read the NVM EETRACK ID and map version of the main NVM image bank, filling
+ * in the nvm info structure.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_get_nvm_ver_info(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ struct ixgbe_nvm_info *nvm)
+{
+ u16 eetrack_lo, eetrack_hi, ver;
+ int err;
+
+ err = ixgbe_read_nvm_sr_copy(hw, bank,
+ IXGBE_E610_SR_NVM_DEV_STARTER_VER, &ver);
+ if (err)
+ return err;
+
+ nvm->major = FIELD_GET(IXGBE_E610_NVM_VER_HI_MASK, ver);
+ nvm->minor = FIELD_GET(IXGBE_E610_NVM_VER_LO_MASK, ver);
+
+ err = ixgbe_read_nvm_sr_copy(hw, bank, IXGBE_E610_SR_NVM_EETRACK_LO,
+ &eetrack_lo);
+ if (err)
+ return err;
+
+ err = ixgbe_read_nvm_sr_copy(hw, bank, IXGBE_E610_SR_NVM_EETRACK_HI,
+ &eetrack_hi);
+ if (err)
+ return err;
+
+ nvm->eetrack = (eetrack_hi << 16) | eetrack_lo;
+
+ ixgbe_get_nvm_srev(hw, bank, &nvm->srev);
+
+ return 0;
+}
+
+/**
+ * ixgbe_get_inactive_nvm_ver - Read NVM version from the inactive bank
+ * @hw: pointer to the HW structure
+ * @nvm: storage for NVM version information
+ *
+ * Read the NVM EETRACK ID, Map version, and security revision of the
+ * inactive NVM bank. Used to access version data for a pending update that
+ * has not yet been activated.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_get_inactive_nvm_ver(struct ixgbe_hw *hw, struct ixgbe_nvm_info *nvm)
+{
+ return ixgbe_get_nvm_ver_info(hw, IXGBE_INACTIVE_FLASH_BANK, nvm);
+}
+
+/**
+ * ixgbe_get_active_nvm_ver - Read NVM version from the active bank
+ * @hw: pointer to the HW structure
+ * @nvm: storage for NVM version information
+ *
+ * Reads the NVM EETRACK ID, Map version, and security revision of the
+ * active NVM bank.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_get_active_nvm_ver(struct ixgbe_hw *hw,
+ struct ixgbe_nvm_info *nvm)
+{
+ return ixgbe_get_nvm_ver_info(hw, IXGBE_ACTIVE_FLASH_BANK, nvm);
+}
+
+/**
+ * ixgbe_get_netlist_info - Read the netlist version information
+ * @hw: pointer to the HW struct
+ * @bank: whether to read from the active or inactive flash bank
+ * @netlist: pointer to netlist version info structure
+ *
+ * Get the netlist version information from the requested bank. Reads the Link
+ * Topology section to find the Netlist ID block and extract the relevant
+ * information into the netlist version structure.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_get_netlist_info(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ struct ixgbe_netlist_info *netlist)
+{
+ u16 module_id, length, node_count, i;
+ u16 *id_blk;
+ int err;
+
+ err = ixgbe_read_netlist_module(hw, bank, IXGBE_NETLIST_TYPE_OFFSET,
+ &module_id);
+ if (err)
+ return err;
+
+ if (module_id != IXGBE_NETLIST_LINK_TOPO_MOD_ID)
+ return -EIO;
+
+ err = ixgbe_read_netlist_module(hw, bank, IXGBE_LINK_TOPO_MODULE_LEN,
+ &length);
+ if (err)
+ return err;
+
+ /* Sanity check that we have at least enough words to store the
+ * netlist ID block.
+ */
+ if (length < IXGBE_NETLIST_ID_BLK_SIZE)
+ return -EIO;
+
+ err = ixgbe_read_netlist_module(hw, bank, IXGBE_LINK_TOPO_NODE_COUNT,
+ &node_count);
+ if (err)
+ return err;
+
+ node_count &= IXGBE_LINK_TOPO_NODE_COUNT_M;
+
+ id_blk = kcalloc(IXGBE_NETLIST_ID_BLK_SIZE, sizeof(*id_blk), GFP_KERNEL);
+ if (!id_blk)
+ return -ENOMEM;
+
+ /* Read out the entire Netlist ID Block at once. */
+ err = ixgbe_read_flash_module(hw, bank, IXGBE_E610_SR_NETLIST_BANK_PTR,
+ IXGBE_NETLIST_ID_BLK_OFFSET(node_count) *
+ sizeof(*id_blk), (u8 *)id_blk,
+ IXGBE_NETLIST_ID_BLK_SIZE *
+ sizeof(*id_blk));
+ if (err)
+ goto free_id_blk;
+
+ for (i = 0; i < IXGBE_NETLIST_ID_BLK_SIZE; i++)
+ id_blk[i] = le16_to_cpu(((__le16 *)id_blk)[i]);
+
+ netlist->major = id_blk[IXGBE_NETLIST_ID_BLK_MAJOR_VER_HIGH] << 16 |
+ id_blk[IXGBE_NETLIST_ID_BLK_MAJOR_VER_LOW];
+ netlist->minor = id_blk[IXGBE_NETLIST_ID_BLK_MINOR_VER_HIGH] << 16 |
+ id_blk[IXGBE_NETLIST_ID_BLK_MINOR_VER_LOW];
+ netlist->type = id_blk[IXGBE_NETLIST_ID_BLK_TYPE_HIGH] << 16 |
+ id_blk[IXGBE_NETLIST_ID_BLK_TYPE_LOW];
+ netlist->rev = id_blk[IXGBE_NETLIST_ID_BLK_REV_HIGH] << 16 |
+ id_blk[IXGBE_NETLIST_ID_BLK_REV_LOW];
+ netlist->cust_ver = id_blk[IXGBE_NETLIST_ID_BLK_CUST_VER];
+ /* Read the left most 4 bytes of SHA */
+ netlist->hash = id_blk[IXGBE_NETLIST_ID_BLK_SHA_HASH_WORD(15)] << 16 |
+ id_blk[IXGBE_NETLIST_ID_BLK_SHA_HASH_WORD(14)];
+
+free_id_blk:
+ kfree(id_blk);
+ return err;
+}
+
+/**
+ * ixgbe_get_inactive_netlist_ver - Read netlist version from the inactive bank
+ * @hw: pointer to the HW struct
+ * @netlist: pointer to netlist version info structure
+ *
+ * Read the netlist version data from the inactive netlist bank. Used to
+ * extract version data of a pending flash update in order to display the
+ * version data.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_get_inactive_netlist_ver(struct ixgbe_hw *hw,
+ struct ixgbe_netlist_info *netlist)
+{
+ return ixgbe_get_netlist_info(hw, IXGBE_INACTIVE_FLASH_BANK, netlist);
+}
+
+/**
+ * ixgbe_get_flash_data - get flash data
+ * @hw: pointer to the HW struct
+ *
+ * Read and populate flash data such as Shadow RAM size,
+ * max_timeout and blank_nvm_mode
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_get_flash_data(struct ixgbe_hw *hw)
+{
+ struct ixgbe_flash_info *flash = &hw->flash;
+ u32 fla, gens_stat;
+ u8 sr_size;
+ int err;
+
+ /* The SR size is stored regardless of the NVM programming mode
+ * as the blank mode may be used in the factory line.
+ */
+ gens_stat = IXGBE_READ_REG(hw, GLNVM_GENS);
+ sr_size = FIELD_GET(GLNVM_GENS_SR_SIZE_M, gens_stat);
+
+ /* Switching to words (sr_size contains power of 2) */
+ flash->sr_words = BIT(sr_size) * (SZ_1K / sizeof(u16));
+
+ /* Check if we are in the normal or blank NVM programming mode */
+ fla = IXGBE_READ_REG(hw, IXGBE_GLNVM_FLA);
+ if (fla & IXGBE_GLNVM_FLA_LOCKED_M) {
+ flash->blank_nvm_mode = false;
+ } else {
+ flash->blank_nvm_mode = true;
+ return -EIO;
+ }
+
+ err = ixgbe_discover_flash_size(hw);
+ if (err)
+ return err;
+
+ err = ixgbe_determine_active_flash_banks(hw);
+ if (err)
+ return err;
+
+ err = ixgbe_get_nvm_ver_info(hw, IXGBE_ACTIVE_FLASH_BANK,
+ &flash->nvm);
+ if (err)
+ return err;
+
+ err = ixgbe_get_orom_ver_info(hw, IXGBE_ACTIVE_FLASH_BANK,
+ &flash->orom);
+ if (err)
+ return err;
+
+ err = ixgbe_get_netlist_info(hw, IXGBE_ACTIVE_FLASH_BANK,
+ &flash->netlist);
+ return err;
+}
+
+/**
+ * ixgbe_aci_nvm_update_empr - update NVM using EMPR
+ * @hw: pointer to the HW struct
+ *
+ * Force EMP reset using ACI command (0x0709). This command allows SW to
+ * request an EMPR to activate new FW.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_nvm_update_empr(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_desc desc;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_update_empr);
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_nvm_set_pkg_data - NVM set package data
+ * @hw: pointer to the HW struct
+ * @del_pkg_data_flag: if set, the pkg_data currently stored by the FW
+ * is deleted; in that case the buffer must have length 0.
+ * @data: pointer to buffer
+ * @length: length of the buffer
+ *
+ * Set package data using ACI command (0x070A).
+ * This command is equivalent to the reception of
+ * a PLDM FW Update GetPackageData cmd. This command should be sent
+ * as part of the NVM update as the first cmd in the flow.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_nvm_set_pkg_data(struct ixgbe_hw *hw, bool del_pkg_data_flag,
+ u8 *data, u16 length)
+{
+ struct ixgbe_aci_cmd_nvm_pkg_data *cmd;
+ struct ixgbe_aci_desc desc;
+
+ if (length != 0 && !data)
+ return -EINVAL;
+
+ cmd = &desc.params.pkg_data;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_pkg_data);
+ desc.flags |= cpu_to_le16(IXGBE_ACI_FLAG_RD);
+
+ if (del_pkg_data_flag)
+ cmd->cmd_flags |= IXGBE_ACI_NVM_PKG_DELETE;
+
+ return ixgbe_aci_send_cmd(hw, &desc, data, length);
+}
+
+/**
+ * ixgbe_nvm_pass_component_tbl - NVM pass component table
+ * @hw: pointer to the HW struct
+ * @data: pointer to buffer
+ * @length: length of the buffer
+ * @transfer_flag: parameter determining the stage of the update
+ * @comp_response: a pointer to the response from ACI command 0x070B
+ * @comp_response_code: a pointer to the response code from ACI command 0x070B
+ *
+ * Pass component table using ACI command (0x070B). This command is equivalent
+ * to the reception of a PLDM FW Update PassComponentTable cmd.
+ * This command should be sent once per component. It can only be sent after
+ * Set Package Data cmd and before actual update. FW will assume these
+ * commands are going to be sent until the TransferFlag is set to End or
+ * StartAndEnd.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_nvm_pass_component_tbl(struct ixgbe_hw *hw, u8 *data, u16 length,
+ u8 transfer_flag, u8 *comp_response,
+ u8 *comp_response_code)
+{
+ struct ixgbe_aci_cmd_nvm_pass_comp_tbl *cmd;
+ struct ixgbe_aci_desc desc;
+ int err;
+
+ if (!data || !comp_response || !comp_response_code)
+ return -EINVAL;
+
+ cmd = &desc.params.pass_comp_tbl;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc,
+ ixgbe_aci_opc_nvm_pass_component_tbl);
+ desc.flags |= cpu_to_le16(IXGBE_ACI_FLAG_RD);
+
+ cmd->transfer_flag = transfer_flag;
+ err = ixgbe_aci_send_cmd(hw, &desc, data, length);
+ if (!err) {
+ *comp_response = cmd->component_response;
+ *comp_response_code = cmd->component_response_code;
+ }
+
+ return err;
+}
+
+/**
+ * ixgbe_read_sr_word_aci - Reads Shadow RAM via ACI
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @data: word read from the Shadow RAM
+ *
+ * Reads one 16 bit word from the Shadow RAM using ixgbe_read_flat_nvm.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_read_sr_word_aci(struct ixgbe_hw *hw, u16 offset, u16 *data)
+{
+ u32 bytes = sizeof(u16);
+ u16 data_local;
+ int err;
+
+ err = ixgbe_read_flat_nvm(hw, offset * sizeof(u16), &bytes,
+ (u8 *)&data_local, true);
+ if (err)
+ return err;
+
+ *data = data_local;
+ return 0;
+}
+
+/**
+ * ixgbe_read_flat_nvm - Read portion of NVM by flat offset
+ * @hw: pointer to the HW struct
+ * @offset: offset from beginning of NVM
+ * @length: (in) number of bytes to read; (out) number of bytes actually read
+ * @data: buffer to return data in (sized to fit the specified length)
+ * @read_shadow_ram: if true, read from shadow RAM instead of NVM
+ *
+ * Reads a portion of the NVM, as a flat memory space. This function correctly
+ * breaks read requests across Shadow RAM sectors, prevents Shadow RAM size
+ * from being exceeded in case of Shadow RAM read requests and ensures that no
+ * single read request exceeds the maximum 4KB read for a single admin command.
+ *
+ * Returns an error code on failure. Note that the data pointer may be
+ * partially updated if some reads succeed before a failure.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_read_flat_nvm(struct ixgbe_hw *hw, u32 offset, u32 *length,
+ u8 *data, bool read_shadow_ram)
+{
+ u32 inlen = *length;
+ u32 bytes_read = 0;
+ bool last_cmd;
+ int err;
+
+ /* Verify the length of the read if this is for the Shadow RAM */
+ if (read_shadow_ram && ((offset + inlen) >
+ (hw->eeprom.word_size * 2u)))
+ return -EINVAL;
+
+ do {
+ u32 read_size, sector_offset;
+
+ /* ixgbe_aci_read_nvm cannot read more than 4KB at a time.
+ * Additionally, a read from the Shadow RAM may not cross over
+ * a sector boundary. Conveniently, the sector size is also 4KB.
+ */
+ sector_offset = offset % IXGBE_ACI_MAX_BUFFER_SIZE;
+ read_size = min_t(u32,
+ IXGBE_ACI_MAX_BUFFER_SIZE - sector_offset,
+ inlen - bytes_read);
+
+ last_cmd = !(bytes_read + read_size < inlen);
+
+ /* ixgbe_aci_read_nvm takes the length as a u16. Our read_size
+ * is calculated using a u32, but the IXGBE_ACI_MAX_BUFFER_SIZE
+ * maximum size guarantees that it will fit within the 2 bytes.
+ */
+ err = ixgbe_aci_read_nvm(hw, IXGBE_ACI_NVM_START_POINT,
+ offset, (u16)read_size,
+ data + bytes_read, last_cmd,
+ read_shadow_ram);
+ if (err)
+ break;
+
+ bytes_read += read_size;
+ offset += read_size;
+ } while (!last_cmd);
+
+ *length = bytes_read;
+ return err;
+}
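+
+/* Example of the splitting above: a 6000-byte read starting at offset
+ * 0x0F00 is issued as 256 bytes (up to the 4KB sector boundary at
+ * 0x1000), then 4096 bytes, then the remaining 1648 bytes with last_cmd
+ * set on the final admin command.
+ */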
+
+/**
+ * ixgbe_read_sr_buf_aci - Read Shadow RAM buffer via ACI
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM words to read (0x000000 - 0x001FFF)
+ * @words: (in) number of words to read; (out) number of words actually read
+ * @data: words read from the Shadow RAM
+ *
+ * Read 16 bit words (data buf) from the Shadow RAM. Acquire/release the NVM
+ * ownership.
+ *
+ * Return: the operation exit code.
+ */
+int ixgbe_read_sr_buf_aci(struct ixgbe_hw *hw, u16 offset, u16 *words,
+ u16 *data)
+{
+ u32 bytes = *words * 2;
+ int err;
+
+ err = ixgbe_read_flat_nvm(hw, offset * 2, &bytes, (u8 *)data, true);
+ if (err)
+ return err;
+
+ *words = bytes / 2;
+
+ for (int i = 0; i < *words; i++)
+ data[i] = le16_to_cpu(((__le16 *)data)[i]);
+
+ return 0;
+}
+
+/**
+ * ixgbe_read_ee_aci_e610 - Read EEPROM word using the admin command.
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @data: word read from the EEPROM
+ *
+ * Reads a 16 bit word from the EEPROM using the ACI.
+ * If the EEPROM params are not initialized, the function
+ * initializes them before proceeding with the read.
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_read_ee_aci_e610(struct ixgbe_hw *hw, u16 offset, u16 *data)
+{
+ int err;
+
+ if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
+ err = hw->eeprom.ops.init_params(hw);
+ if (err)
+ return err;
+ }
+
+ err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ if (err)
+ return err;
+
+ err = ixgbe_read_sr_word_aci(hw, offset, data);
+ ixgbe_release_nvm(hw);
+
+ return err;
+}
+
+/**
+ * ixgbe_read_ee_aci_buffer_e610 - Read EEPROM words via ACI
+ * @hw: pointer to hardware structure
+ * @offset: offset of words in the EEPROM to read
+ * @words: number of words to read
+ * @data: words to read from the EEPROM
+ *
+ * Read 16 bit words from the EEPROM via the ACI. Initialize the EEPROM params
+ * prior to the read. Acquire/release the NVM ownership.
+ *
+ * Return: the operation exit code.
+ */
+int ixgbe_read_ee_aci_buffer_e610(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data)
+{
+ int err;
+
+ if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
+ err = hw->eeprom.ops.init_params(hw);
+ if (err)
+ return err;
+ }
+
+ err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ if (err)
+ return err;
+
+ err = ixgbe_read_sr_buf_aci(hw, offset, &words, data);
+ ixgbe_release_nvm(hw);
+
+ return err;
+}
+
+/**
+ * ixgbe_validate_eeprom_checksum_e610 - Validate EEPROM checksum
+ * @hw: pointer to hardware structure
+ * @checksum_val: calculated checksum
+ *
+ * Performs checksum calculation and validates the EEPROM checksum. If the
+ * caller does not need checksum_val, the value can be NULL.
+ * If the EEPROM params are not initialized, the function
+ * initializes them before proceeding.
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_validate_eeprom_checksum_e610(struct ixgbe_hw *hw, u16 *checksum_val)
+{
+ int err;
+
+ if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
+ err = hw->eeprom.ops.init_params(hw);
+ if (err)
+ return err;
+ }
+
+ err = ixgbe_nvm_validate_checksum(hw);
+ if (err)
+ return err;
+
+ if (checksum_val) {
+ u16 tmp_checksum;
+
+ err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ if (err)
+ return err;
+
+ err = ixgbe_read_sr_word_aci(hw, IXGBE_E610_SR_SW_CHECKSUM_WORD,
+ &tmp_checksum);
+ ixgbe_release_nvm(hw);
+
+ if (!err)
+ *checksum_val = tmp_checksum;
+ }
+
+ return err;
+}
+
+/**
+ * ixgbe_reset_hw_e610 - Perform hardware reset
+ * @hw: pointer to hardware structure
+ *
+ * Resets the hardware by resetting the transmit and receive units, masks
+ * and clears all interrupts, and performs a reset.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_reset_hw_e610(struct ixgbe_hw *hw)
+{
+ u32 swfw_mask = hw->phy.phy_semaphore_mask;
+ u32 ctrl, i;
+ int err;
+
+ /* Call adapter stop to disable tx/rx and clear interrupts */
+ err = hw->mac.ops.stop_adapter(hw);
+ if (err)
+ goto reset_hw_out;
+
+ /* Flush pending Tx transactions. */
+ ixgbe_clear_tx_pending(hw);
+
+ hw->phy.ops.init(hw);
+mac_reset_top:
+ err = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
+ if (err)
+ return -EBUSY;
+ ctrl = IXGBE_CTRL_RST;
+ ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
+ IXGBE_WRITE_FLUSH(hw);
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+
+ /* Poll for reset bit to self-clear indicating reset is complete */
+ for (i = 0; i < 10; i++) {
+ udelay(1);
+ ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
+ if (!(ctrl & IXGBE_CTRL_RST_MASK))
+ break;
+ }
+
+ if (ctrl & IXGBE_CTRL_RST_MASK) {
+ struct ixgbe_adapter *adapter = container_of(hw, struct ixgbe_adapter,
+ hw);
+
+ err = -EIO;
+ netdev_err(adapter->netdev, "Reset polling failed to complete.\n");
+ }
+
+ /* Double resets are required for recovery from certain error
+ * conditions. Between resets, it is necessary to stall to allow time
+ * for any pending HW events to complete.
+ */
+ msleep(100);
+ if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
+ hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+ goto mac_reset_top;
+ }
+
+ /* Set the Rx packet buffer size. */
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), GENMASK(18, 17));
+
+ /* Store the permanent mac address */
+ hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
+
+ /* Maximum number of Receive Address Registers. */
+#define IXGBE_MAX_NUM_RAR 128
+
+ /* Store MAC address from RAR0, clear receive address registers, and
+ * clear the multicast table. Also reset num_rar_entries to the
+ * maximum number of Receive Address Registers, since we modify this
+ * value when programming the SAN MAC address.
+ */
+ hw->mac.num_rar_entries = IXGBE_MAX_NUM_RAR;
+ hw->mac.ops.init_rx_addrs(hw);
+
+ /* Initialize bus function number */
+ hw->mac.ops.set_lan_id(hw);
+
+reset_hw_out:
+ return err;
+}
+
+/**
+ * ixgbe_get_pfa_module_tlv - Read sub module TLV from NVM PFA
+ * @hw: pointer to hardware structure
+ * @module_tlv: pointer to module TLV to return
+ * @module_tlv_len: pointer to module TLV length to return
+ * @module_type: module type requested
+ *
+ * Find the requested sub module TLV type from the Preserved Field
+ * Area (PFA) and returns the TLV pointer and length. The caller can
+ * use these to read the variable length TLV value.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_get_pfa_module_tlv(struct ixgbe_hw *hw, u16 *module_tlv,
+ u16 *module_tlv_len, u16 module_type)
+{
+ u16 pfa_len, pfa_ptr, pfa_end_ptr;
+ u16 next_tlv;
+ int err;
+
+ err = ixgbe_read_ee_aci_e610(hw, IXGBE_E610_SR_PFA_PTR, &pfa_ptr);
+ if (err)
+ return err;
+
+ err = ixgbe_read_ee_aci_e610(hw, pfa_ptr, &pfa_len);
+ if (err)
+ return err;
+
+ /* Starting with first TLV after PFA length, iterate through the list
+ * of TLVs to find the requested one.
+ */
+ next_tlv = pfa_ptr + 1;
+ pfa_end_ptr = pfa_ptr + pfa_len;
+ while (next_tlv < pfa_end_ptr) {
+ u16 tlv_sub_module_type, tlv_len;
+
+ /* Read TLV type */
+ err = ixgbe_read_ee_aci_e610(hw, next_tlv,
+ &tlv_sub_module_type);
+ if (err)
+ break;
+
+ /* Read TLV length */
+ err = ixgbe_read_ee_aci_e610(hw, next_tlv + 1, &tlv_len);
+ if (err)
+ break;
+
+ if (tlv_sub_module_type == module_type) {
+ if (tlv_len) {
+ *module_tlv = next_tlv;
+ *module_tlv_len = tlv_len;
+ return 0;
+ }
+ return -EIO;
+ }
+ /* Check next TLV, i.e. current TLV pointer + length + 2 words
+ * (for current TLV's type and length).
+ */
+ next_tlv = next_tlv + tlv_len + 2;
+ }
+ /* Module does not exist */
+ return -ENODATA;
+}
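+
+/* PFA layout assumed by the walk above, in words:
+ *   pfa_ptr + 0 : PFA length
+ *   pfa_ptr + 1 : type of the first TLV
+ *   pfa_ptr + 2 : length of the first TLV value
+ *   pfa_ptr + 3 : first TLV value (tlv_len words)
+ * so each step advances by tlv_len + 2 words to the next TLV header.
+ */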
+
+/**
+ * ixgbe_read_pba_string_e610 - Read PBA string from NVM
+ * @hw: pointer to hardware structure
+ * @pba_num: stores the part number string from the NVM
+ * @pba_num_size: part number string buffer length
+ *
+ * Read the part number string from the NVM.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_read_pba_string_e610(struct ixgbe_hw *hw, u8 *pba_num,
+ u32 pba_num_size)
+{
+ u16 pba_tlv, pba_tlv_len;
+ u16 pba_word, pba_size;
+ int err;
+
+ *pba_num = '\0';
+
+ err = ixgbe_get_pfa_module_tlv(hw, &pba_tlv, &pba_tlv_len,
+ IXGBE_E610_SR_PBA_BLOCK_PTR);
+ if (err)
+ return err;
+
+ /* pba_size is the next word */
+ err = ixgbe_read_ee_aci_e610(hw, (pba_tlv + 2), &pba_size);
+ if (err)
+ return err;
+
+ if (pba_tlv_len < pba_size)
+ return -EINVAL;
+
+ /* Subtract one to get PBA word count (PBA Size word is included in
+ * total size).
+ */
+ pba_size--;
+
+ if (pba_num_size < (((u32)pba_size * 2) + 1))
+ return -EINVAL;
+
+ for (u16 i = 0; i < pba_size; i++) {
+ err = ixgbe_read_ee_aci_e610(hw, (pba_tlv + 2 + 1) + i,
+ &pba_word);
+ if (err)
+ return err;
+
+ pba_num[(i * 2)] = FIELD_GET(IXGBE_E610_SR_PBA_BLOCK_MASK,
+ pba_word);
+ pba_num[(i * 2) + 1] = pba_word & 0xFF;
+ }
+
+ pba_num[(pba_size * 2)] = '\0';
+
+ return err;
+}
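+
+/* Each PBA word read above carries two ASCII characters, the high byte
+ * (per IXGBE_E610_SR_PBA_BLOCK_MASK) first: e.g. a stored word of 0x4B32
+ * contributes "K2" to pba_num. The string is then NUL-terminated after
+ * pba_size * 2 characters.
+ */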
+
+static const struct ixgbe_mac_operations mac_ops_e610 = {
+ .init_hw = ixgbe_init_hw_generic,
+ .start_hw = ixgbe_start_hw_e610,
+ .clear_hw_cntrs = ixgbe_clear_hw_cntrs_generic,
+ .enable_rx_dma = ixgbe_enable_rx_dma_generic,
+ .get_mac_addr = ixgbe_get_mac_addr_generic,
+ .get_device_caps = ixgbe_get_device_caps_generic,
+ .stop_adapter = ixgbe_stop_adapter_generic,
+ .set_lan_id = ixgbe_set_lan_id_multi_port_pcie,
+ .set_rxpba = ixgbe_set_rxpba_generic,
+ .check_link = ixgbe_check_link_e610,
+ .blink_led_start = ixgbe_blink_led_start_X540,
+ .blink_led_stop = ixgbe_blink_led_stop_X540,
+ .set_rar = ixgbe_set_rar_generic,
+ .clear_rar = ixgbe_clear_rar_generic,
+ .set_vmdq = ixgbe_set_vmdq_generic,
+ .set_vmdq_san_mac = ixgbe_set_vmdq_san_mac_generic,
+ .clear_vmdq = ixgbe_clear_vmdq_generic,
+ .init_rx_addrs = ixgbe_init_rx_addrs_generic,
+ .update_mc_addr_list = ixgbe_update_mc_addr_list_generic,
+ .enable_mc = ixgbe_enable_mc_generic,
+ .disable_mc = ixgbe_disable_mc_generic,
+ .clear_vfta = ixgbe_clear_vfta_generic,
+ .set_vfta = ixgbe_set_vfta_generic,
+ .fc_enable = ixgbe_fc_enable_generic,
+ .set_fw_drv_ver = ixgbe_set_fw_drv_ver_x550,
+ .init_uta_tables = ixgbe_init_uta_tables_generic,
+ .set_mac_anti_spoofing = ixgbe_set_mac_anti_spoofing,
+ .set_vlan_anti_spoofing = ixgbe_set_vlan_anti_spoofing,
+ .set_source_address_pruning =
+ ixgbe_set_source_address_pruning_x550,
+ .set_ethertype_anti_spoofing =
+ ixgbe_set_ethertype_anti_spoofing_x550,
+ .disable_rx_buff = ixgbe_disable_rx_buff_generic,
+ .enable_rx_buff = ixgbe_enable_rx_buff_generic,
+ .enable_rx = ixgbe_enable_rx_generic,
+ .disable_rx = ixgbe_disable_rx_e610,
+ .led_on = ixgbe_led_on_generic,
+ .led_off = ixgbe_led_off_generic,
+ .init_led_link_act = ixgbe_init_led_link_act_generic,
+ .reset_hw = ixgbe_reset_hw_e610,
+ .get_fw_ver = ixgbe_aci_get_fw_ver,
+ .get_media_type = ixgbe_get_media_type_e610,
+ .setup_link = ixgbe_setup_link_e610,
+ .fw_recovery_mode = ixgbe_fw_recovery_mode_e610,
+ .fw_rollback_mode = ixgbe_fw_rollback_mode_e610,
+ .get_nvm_ver = ixgbe_get_active_nvm_ver,
+ .get_link_capabilities = ixgbe_get_link_capabilities_e610,
+ .get_bus_info = ixgbe_get_bus_info_generic,
+ .acquire_swfw_sync = ixgbe_acquire_swfw_sync_X540,
+ .release_swfw_sync = ixgbe_release_swfw_sync_X540,
+ .init_swfw_sync = ixgbe_init_swfw_sync_X540,
+ .prot_autoc_read = prot_autoc_read_generic,
+ .prot_autoc_write = prot_autoc_write_generic,
+ .setup_fc = ixgbe_setup_fc_e610,
+ .fc_autoneg = ixgbe_fc_autoneg_e610,
+};
+
+static const struct ixgbe_phy_operations phy_ops_e610 = {
+ .init = ixgbe_init_phy_ops_e610,
+ .identify = ixgbe_identify_phy_e610,
+ .identify_sfp = ixgbe_identify_module_e610,
+ .setup_link_speed = ixgbe_setup_phy_link_speed_generic,
+ .setup_link = ixgbe_setup_phy_link_e610,
+ .enter_lplu = ixgbe_enter_lplu_e610,
+};
+
+static const struct ixgbe_eeprom_operations eeprom_ops_e610 = {
+ .read = ixgbe_read_ee_aci_e610,
+ .read_buffer = ixgbe_read_ee_aci_buffer_e610,
+ .validate_checksum = ixgbe_validate_eeprom_checksum_e610,
+ .read_pba_string = ixgbe_read_pba_string_e610,
+ .init_params = ixgbe_init_eeprom_params_e610,
+};
+
+const struct ixgbe_info ixgbe_e610_info = {
+ .mac = ixgbe_mac_e610,
+ .get_invariants = ixgbe_get_invariants_X540,
+ .mac_ops = &mac_ops_e610,
+ .eeprom_ops = &eeprom_ops_e610,
+ .phy_ops = &phy_ops_e610,
+ .mbx_ops = &mbx_ops_generic,
+ .mvals = ixgbe_mvals_x550em_a,
+};
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.h
new file mode 100644
index 000000000000..bb31d65bd1c8
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2024 Intel Corporation. */
+
+#ifndef _IXGBE_E610_H_
+#define _IXGBE_E610_H_
+
+#include "ixgbe_type.h"
+
+int ixgbe_aci_send_cmd(struct ixgbe_hw *hw, struct ixgbe_aci_desc *desc,
+ void *buf, u16 buf_size);
+bool ixgbe_aci_check_event_pending(struct ixgbe_hw *hw);
+int ixgbe_aci_get_event(struct ixgbe_hw *hw, struct ixgbe_aci_event *e,
+ bool *pending);
+void ixgbe_fill_dflt_direct_cmd_desc(struct ixgbe_aci_desc *desc, u16 opcode);
+int ixgbe_acquire_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res,
+ enum ixgbe_aci_res_access_type access, u32 timeout);
+void ixgbe_release_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res);
+int ixgbe_aci_list_caps(struct ixgbe_hw *hw, void *buf, u16 buf_size,
+ u32 *cap_count, enum ixgbe_aci_opc opc);
+int ixgbe_discover_dev_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_caps);
+int ixgbe_discover_func_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_func_caps *func_caps);
+int ixgbe_get_caps(struct ixgbe_hw *hw);
+int ixgbe_aci_disable_rxen(struct ixgbe_hw *hw);
+int ixgbe_aci_get_phy_caps(struct ixgbe_hw *hw, bool qual_mods, u8 report_mode,
+ struct ixgbe_aci_cmd_get_phy_caps_data *pcaps);
+void ixgbe_copy_phy_caps_to_cfg(struct ixgbe_aci_cmd_get_phy_caps_data *caps,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *cfg);
+int ixgbe_aci_set_phy_cfg(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *cfg);
+int ixgbe_aci_set_link_restart_an(struct ixgbe_hw *hw, bool ena_link);
+int ixgbe_update_link_info(struct ixgbe_hw *hw);
+int ixgbe_get_link_status(struct ixgbe_hw *hw, bool *link_up);
+int ixgbe_aci_get_link_info(struct ixgbe_hw *hw, bool ena_lse,
+ struct ixgbe_link_status *link);
+int ixgbe_aci_set_event_mask(struct ixgbe_hw *hw, u8 port_num, u16 mask);
+int ixgbe_configure_lse(struct ixgbe_hw *hw, bool activate, u16 mask);
+int ixgbe_aci_set_port_id_led(struct ixgbe_hw *hw, bool orig_mode);
+enum ixgbe_media_type ixgbe_get_media_type_e610(struct ixgbe_hw *hw);
+int ixgbe_setup_link_e610(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg_wait);
+int ixgbe_check_link_e610(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *link_up, bool link_up_wait_to_complete);
+int ixgbe_get_link_capabilities_e610(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *autoneg);
+int ixgbe_cfg_phy_fc(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *cfg,
+ enum ixgbe_fc_mode req_mode);
+int ixgbe_setup_fc_e610(struct ixgbe_hw *hw);
+void ixgbe_fc_autoneg_e610(struct ixgbe_hw *hw);
+void ixgbe_disable_rx_e610(struct ixgbe_hw *hw);
+int ixgbe_init_phy_ops_e610(struct ixgbe_hw *hw);
+int ixgbe_identify_phy_e610(struct ixgbe_hw *hw);
+int ixgbe_identify_module_e610(struct ixgbe_hw *hw);
+int ixgbe_setup_phy_link_e610(struct ixgbe_hw *hw);
+int ixgbe_set_phy_power_e610(struct ixgbe_hw *hw, bool on);
+int ixgbe_enter_lplu_e610(struct ixgbe_hw *hw);
+int ixgbe_init_eeprom_params_e610(struct ixgbe_hw *hw);
+int ixgbe_aci_get_netlist_node(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_get_link_topo *cmd,
+ u8 *node_part_number, u16 *node_handle);
+int ixgbe_acquire_nvm(struct ixgbe_hw *hw,
+ enum ixgbe_aci_res_access_type access);
+void ixgbe_release_nvm(struct ixgbe_hw *hw);
+int ixgbe_aci_read_nvm(struct ixgbe_hw *hw, u16 module_typeid, u32 offset,
+ u16 length, void *data, bool last_command,
+ bool read_shadow_ram);
+int ixgbe_nvm_validate_checksum(struct ixgbe_hw *hw);
+int ixgbe_get_inactive_orom_ver(struct ixgbe_hw *hw,
+ struct ixgbe_orom_info *orom);
+int ixgbe_get_inactive_nvm_ver(struct ixgbe_hw *hw, struct ixgbe_nvm_info *nvm);
+int ixgbe_get_inactive_netlist_ver(struct ixgbe_hw *hw,
+ struct ixgbe_netlist_info *netlist);
+int ixgbe_read_sr_word_aci(struct ixgbe_hw *hw, u16 offset, u16 *data);
+int ixgbe_read_flat_nvm(struct ixgbe_hw *hw, u32 offset, u32 *length,
+ u8 *data, bool read_shadow_ram);
+int ixgbe_read_sr_buf_aci(struct ixgbe_hw *hw, u16 offset, u16 *words,
+ u16 *data);
+int ixgbe_read_ee_aci_e610(struct ixgbe_hw *hw, u16 offset, u16 *data);
+int ixgbe_read_ee_aci_buffer_e610(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data);
+int ixgbe_validate_eeprom_checksum_e610(struct ixgbe_hw *hw, u16 *checksum_val);
+int ixgbe_reset_hw_e610(struct ixgbe_hw *hw);
+int ixgbe_get_flash_data(struct ixgbe_hw *hw);
+int ixgbe_aci_nvm_update_empr(struct ixgbe_hw *hw);
+int ixgbe_nvm_set_pkg_data(struct ixgbe_hw *hw, bool del_pkg_data_flag,
+ u8 *data, u16 length);
+int ixgbe_nvm_pass_component_tbl(struct ixgbe_hw *hw, u8 *data, u16 length,
+ u8 transfer_flag, u8 *comp_response,
+ u8 *comp_response_code);
+int ixgbe_aci_erase_nvm(struct ixgbe_hw *hw, u16 module_typeid);
+int ixgbe_aci_update_nvm(struct ixgbe_hw *hw, u16 module_typeid,
+ u32 offset, u16 length, void *data,
+ bool last_command, u8 command_flags);
+int ixgbe_nvm_write_activate(struct ixgbe_hw *hw, u16 cmd_flags,
+ u8 *response_flags);
+
+#endif /* _IXGBE_E610_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 9482e0cca8b7..d8a919ab7027 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
/* ethtool support for ixgbe */
@@ -213,7 +213,7 @@ static void ixgbe_set_advertising_10gtypes(struct ixgbe_hw *hw,
static int ixgbe_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
ixgbe_link_speed supported_link;
bool autoneg = false;
@@ -458,7 +458,7 @@ static int ixgbe_get_link_ksettings(struct net_device *netdev,
static int ixgbe_set_link_ksettings(struct net_device *netdev,
const struct ethtool_link_ksettings *cmd)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
u32 advertised, old;
int err = 0;
@@ -535,7 +535,7 @@ static int ixgbe_set_link_ksettings(struct net_device *netdev,
static void ixgbe_get_pause_stats(struct net_device *netdev,
struct ethtool_pause_stats *stats)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw_stats *hwstats = &adapter->stats;
stats->tx_pause_frames = hwstats->lxontxc + hwstats->lxofftxc;
@@ -545,7 +545,7 @@ static void ixgbe_get_pause_stats(struct net_device *netdev,
static void ixgbe_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
if (ixgbe_device_supports_autoneg_fc(hw) &&
@@ -564,10 +564,26 @@ static void ixgbe_get_pauseparam(struct net_device *netdev,
}
}
+static void ixgbe_set_pauseparam_finalize(struct net_device *netdev,
+ struct ixgbe_fc_info *fc)
+{
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ /* If the settings changed, store them and reinit or reset the device. */
+ if (memcmp(fc, &hw->fc, sizeof(*fc))) {
+ hw->fc = *fc;
+ if (netif_running(netdev))
+ ixgbe_reinit_locked(adapter);
+ else
+ ixgbe_reset(adapter);
+ }
+}
+
static int ixgbe_set_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
struct ixgbe_fc_info fc = hw->fc;
@@ -592,27 +608,52 @@ static int ixgbe_set_pauseparam(struct net_device *netdev,
else
fc.requested_mode = ixgbe_fc_none;
- /* if the thing changed then we'll update and use new autoneg */
- if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) {
- hw->fc = fc;
- if (netif_running(netdev))
- ixgbe_reinit_locked(adapter);
- else
- ixgbe_reset(adapter);
+ ixgbe_set_pauseparam_finalize(netdev, &fc);
+
+ return 0;
+}
+
+static int ixgbe_set_pauseparam_e610(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_fc_info fc = hw->fc;
+
+ if (!ixgbe_device_supports_autoneg_fc(hw))
+ return -EOPNOTSUPP;
+
+ if (pause->autoneg == AUTONEG_DISABLE) {
+ netdev_info(netdev,
+ "Cannot disable autonegotiation on this device.\n");
+ return -EOPNOTSUPP;
}
+ fc.disable_fc_autoneg = false;
+
+ if (pause->rx_pause && pause->tx_pause)
+ fc.requested_mode = ixgbe_fc_full;
+ else if (pause->rx_pause)
+ fc.requested_mode = ixgbe_fc_rx_pause;
+ else if (pause->tx_pause)
+ fc.requested_mode = ixgbe_fc_tx_pause;
+ else
+ fc.requested_mode = ixgbe_fc_none;
+
+ ixgbe_set_pauseparam_finalize(netdev, &fc);
+
return 0;
}
static u32 ixgbe_get_msglevel(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
return adapter->msg_enable;
}
static void ixgbe_set_msglevel(struct net_device *netdev, u32 data)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
adapter->msg_enable = data;
}
@@ -627,7 +668,7 @@ static int ixgbe_get_regs_len(struct net_device *netdev)
static void ixgbe_get_regs(struct net_device *netdev,
struct ethtool_regs *regs, void *p)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
u32 *regs_buff = p;
u8 i;
@@ -690,6 +731,7 @@ static void ixgbe_get_regs(struct net_device *netdev,
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i));
regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
break;
@@ -993,14 +1035,14 @@ static void ixgbe_get_regs(struct net_device *netdev,
static int ixgbe_get_eeprom_len(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
return adapter->hw.eeprom.word_size * 2;
}
static int ixgbe_get_eeprom(struct net_device *netdev,
struct ethtool_eeprom *eeprom, u8 *bytes)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
u16 *eeprom_buff;
int first_word, last_word, eeprom_len;
@@ -1036,7 +1078,7 @@ static int ixgbe_get_eeprom(struct net_device *netdev,
static int ixgbe_set_eeprom(struct net_device *netdev,
struct ethtool_eeprom *eeprom, u8 *bytes)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
u16 *eeprom_buff;
void *ptr;
@@ -1103,10 +1145,22 @@ err:
return ret_val;
}
+void ixgbe_refresh_fw_version(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ ixgbe_get_flash_data(hw);
+ ixgbe_set_fw_version_e610(adapter);
+}
+
static void ixgbe_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
+
+ /* Need to refresh info for E610 in case the FW reloads at runtime. */
+ if (adapter->hw.mac.type == ixgbe_mac_e610)
+ ixgbe_refresh_fw_version(adapter);
strscpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));
@@ -1160,7 +1214,7 @@ static void ixgbe_get_ringparam(struct net_device *netdev,
struct kernel_ethtool_ringparam *kernel_ring,
struct netlink_ext_ack *extack)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
@@ -1175,7 +1229,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
struct kernel_ethtool_ringparam *kernel_ring,
struct netlink_ext_ack *extack)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_ring *temp_ring;
int i, j, err = 0;
u32 new_rx_count, new_tx_count;
@@ -1335,7 +1389,7 @@ static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
static void ixgbe_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct rtnl_link_stats64 temp;
const struct rtnl_link_stats64 *net_stats;
unsigned int start;
@@ -1613,6 +1667,7 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
toggle = 0x7FFFF30F;
test = reg_test_82599;
break;
@@ -1708,7 +1763,7 @@ static int ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data)
static irqreturn_t ixgbe_test_intr(int irq, void *data)
{
struct net_device *netdev = (struct net_device *) data;
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
adapter->test_icr |= IXGBE_READ_REG(&adapter->hw, IXGBE_EICR);
@@ -1874,6 +1929,7 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL);
reg_data |= IXGBE_DMATXCTL_TE;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data);
@@ -1935,6 +1991,7 @@ static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
reg_data = IXGBE_READ_REG(hw, IXGBE_MACC);
reg_data |= IXGBE_MACC_FLU;
IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data);
@@ -2179,7 +2236,7 @@ out:
static void ixgbe_diag_test(struct net_device *netdev,
struct ethtool_test *eth_test, u64 *data)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
bool if_running = netif_running(netdev);
if (ixgbe_removed(adapter->hw.hw_addr)) {
@@ -2302,7 +2359,7 @@ static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
static void ixgbe_get_wol(struct net_device *netdev,
struct ethtool_wolinfo *wol)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
wol->supported = WAKE_UCAST | WAKE_MCAST |
WAKE_BCAST | WAKE_MAGIC;
@@ -2324,7 +2381,7 @@ static void ixgbe_get_wol(struct net_device *netdev,
static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE |
WAKE_FILTER))
@@ -2349,9 +2406,53 @@ static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
return 0;
}
+static int ixgbe_set_wol_acpi(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
+{
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 grc;
+
+ if (ixgbe_wol_exclusion(adapter, wol))
+ return wol->wolopts ? -EOPNOTSUPP : 0;
+
+ /* disable APM wakeup */
+ grc = IXGBE_READ_REG(hw, IXGBE_GRC_X550EM_a);
+ grc &= ~IXGBE_GRC_APME;
+ IXGBE_WRITE_REG(hw, IXGBE_GRC_X550EM_a, grc);
+
+ /* erase existing filters */
+ IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
+ adapter->wol = 0;
+
+ if (wol->wolopts & WAKE_UCAST)
+ adapter->wol |= IXGBE_WUFC_EX;
+ if (wol->wolopts & WAKE_MCAST)
+ adapter->wol |= IXGBE_WUFC_MC;
+ if (wol->wolopts & WAKE_BCAST)
+ adapter->wol |= IXGBE_WUFC_BC;
+
+ IXGBE_WRITE_REG(hw, IXGBE_WUC, IXGBE_WUC_PME_EN);
+ IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wol);
+
+ hw->wol_enabled = adapter->wol;
+ device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+
+ return 0;
+}
+
+static int ixgbe_set_wol_e610(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
+{
+ if (wol->wolopts & (WAKE_UCAST | WAKE_MCAST | WAKE_BCAST))
+ return ixgbe_set_wol_acpi(netdev, wol);
+ else
+ return ixgbe_set_wol(netdev, wol);
+}
+
static int ixgbe_nway_reset(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
if (netif_running(netdev))
ixgbe_reinit_locked(adapter);
@@ -2362,7 +2463,7 @@ static int ixgbe_nway_reset(struct net_device *netdev)
static int ixgbe_set_phys_id(struct net_device *netdev,
enum ethtool_phys_id_state state)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
if (!hw->mac.ops.led_on || !hw->mac.ops.led_off)
@@ -2390,12 +2491,32 @@ static int ixgbe_set_phys_id(struct net_device *netdev,
return 0;
}
+static int ixgbe_set_phys_id_e610(struct net_device *netdev,
+ enum ethtool_phys_id_state state)
+{
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
+ bool led_active;
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ led_active = true;
+ break;
+ case ETHTOOL_ID_INACTIVE:
+ led_active = false;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return ixgbe_aci_set_port_id_led(&adapter->hw, !led_active);
+}
+
static int ixgbe_get_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
/* only valid if in constant ITR mode */
if (adapter->rx_itr_setting <= 1)
@@ -2451,7 +2572,7 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_q_vector *q_vector;
int i;
u16 tx_itr_param, rx_itr_param, tx_itr_prev;
@@ -2677,7 +2798,7 @@ static int ixgbe_rss_indir_tbl_max(struct ixgbe_adapter *adapter)
static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
u32 *rule_locs)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
int ret = -EOPNOTSUPP;
switch (cmd->cmd) {
@@ -3065,7 +3186,7 @@ static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter,
static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
int ret = -EOPNOTSUPP;
switch (cmd->cmd) {
@@ -3092,7 +3213,7 @@ static u32 ixgbe_get_rxfh_key_size(struct net_device *netdev)
static u32 ixgbe_rss_indir_size(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
return ixgbe_rss_indir_tbl_entries(adapter);
}
@@ -3112,7 +3233,7 @@ static void ixgbe_get_reta(struct ixgbe_adapter *adapter, u32 *indir)
static int ixgbe_get_rxfh(struct net_device *netdev,
struct ethtool_rxfh_param *rxfh)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
rxfh->hfunc = ETH_RSS_HASH_TOP;
@@ -3130,7 +3251,7 @@ static int ixgbe_set_rxfh(struct net_device *netdev,
struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
int i;
u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
@@ -3172,7 +3293,7 @@ static int ixgbe_set_rxfh(struct net_device *netdev,
static int ixgbe_get_ts_info(struct net_device *dev,
struct kernel_ethtool_ts_info *info)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
/* we always support timestamping disabled */
info->rx_filters = BIT(HWTSTAMP_FILTER_NONE);
@@ -3181,6 +3302,7 @@ static int ixgbe_get_ts_info(struct net_device *dev,
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
info->rx_filters |= BIT(HWTSTAMP_FILTER_ALL);
break;
case ixgbe_mac_X540:
@@ -3247,7 +3369,7 @@ static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter)
static void ixgbe_get_channels(struct net_device *dev,
struct ethtool_channels *ch)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
/* report maximum channels */
ch->max_combined = ixgbe_max_channels(adapter);
@@ -3284,7 +3406,7 @@ static void ixgbe_get_channels(struct net_device *dev,
static int ixgbe_set_channels(struct net_device *dev,
struct ethtool_channels *ch)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
unsigned int count = ch->combined_count;
u8 max_rss_indices = ixgbe_max_rss_indices(adapter);
@@ -3322,7 +3444,7 @@ static int ixgbe_set_channels(struct net_device *dev,
static int ixgbe_get_module_info(struct net_device *dev,
struct ethtool_modinfo *modinfo)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ixgbe_hw *hw = &adapter->hw;
u8 sff8472_rev, addr_mode;
bool page_swap = false;
@@ -3368,7 +3490,7 @@ static int ixgbe_get_module_eeprom(struct net_device *dev,
struct ethtool_eeprom *ee,
u8 *data)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ixgbe_hw *hw = &adapter->hw;
int status = -EFAULT;
u8 databyte = 0xFF;
@@ -3464,7 +3586,7 @@ ixgbe_get_eee_fw(struct ixgbe_adapter *adapter, struct ethtool_keee *edata)
static int ixgbe_get_eee(struct net_device *netdev, struct ethtool_keee *edata)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
if (!(adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE))
@@ -3478,7 +3600,7 @@ static int ixgbe_get_eee(struct net_device *netdev, struct ethtool_keee *edata)
static int ixgbe_set_eee(struct net_device *netdev, struct ethtool_keee *edata)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
struct ethtool_keee eee_data;
int ret_val;
@@ -3533,7 +3655,7 @@ static int ixgbe_set_eee(struct net_device *netdev, struct ethtool_keee *edata)
static u32 ixgbe_get_priv_flags(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
u32 priv_flags = 0;
if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
@@ -3550,7 +3672,7 @@ static u32 ixgbe_get_priv_flags(struct net_device *netdev)
static int ixgbe_set_priv_flags(struct net_device *netdev, u32 priv_flags)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
unsigned int flags2 = adapter->flags2;
unsigned int i;
@@ -3633,7 +3755,57 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
.set_link_ksettings = ixgbe_set_link_ksettings,
};
+static const struct ethtool_ops ixgbe_ethtool_ops_e610 = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
+ .get_drvinfo = ixgbe_get_drvinfo,
+ .get_regs_len = ixgbe_get_regs_len,
+ .get_regs = ixgbe_get_regs,
+ .get_wol = ixgbe_get_wol,
+ .set_wol = ixgbe_set_wol_e610,
+ .nway_reset = ixgbe_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_eeprom_len = ixgbe_get_eeprom_len,
+ .get_eeprom = ixgbe_get_eeprom,
+ .set_eeprom = ixgbe_set_eeprom,
+ .get_ringparam = ixgbe_get_ringparam,
+ .set_ringparam = ixgbe_set_ringparam,
+ .get_pause_stats = ixgbe_get_pause_stats,
+ .get_pauseparam = ixgbe_get_pauseparam,
+ .set_pauseparam = ixgbe_set_pauseparam_e610,
+ .get_msglevel = ixgbe_get_msglevel,
+ .set_msglevel = ixgbe_set_msglevel,
+ .self_test = ixgbe_diag_test,
+ .get_strings = ixgbe_get_strings,
+ .set_phys_id = ixgbe_set_phys_id_e610,
+ .get_sset_count = ixgbe_get_sset_count,
+ .get_ethtool_stats = ixgbe_get_ethtool_stats,
+ .get_coalesce = ixgbe_get_coalesce,
+ .set_coalesce = ixgbe_set_coalesce,
+ .get_rxnfc = ixgbe_get_rxnfc,
+ .set_rxnfc = ixgbe_set_rxnfc,
+ .get_rxfh_indir_size = ixgbe_rss_indir_size,
+ .get_rxfh_key_size = ixgbe_get_rxfh_key_size,
+ .get_rxfh = ixgbe_get_rxfh,
+ .set_rxfh = ixgbe_set_rxfh,
+ .get_eee = ixgbe_get_eee,
+ .set_eee = ixgbe_set_eee,
+ .get_channels = ixgbe_get_channels,
+ .set_channels = ixgbe_set_channels,
+ .get_priv_flags = ixgbe_get_priv_flags,
+ .set_priv_flags = ixgbe_set_priv_flags,
+ .get_ts_info = ixgbe_get_ts_info,
+ .get_module_info = ixgbe_get_module_info,
+ .get_module_eeprom = ixgbe_get_module_eeprom,
+ .get_link_ksettings = ixgbe_get_link_ksettings,
+ .set_link_ksettings = ixgbe_set_link_ksettings,
+};
+
void ixgbe_set_ethtool_ops(struct net_device *netdev)
{
- netdev->ethtool_ops = &ixgbe_ethtool_ops;
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
+
+ if (adapter->hw.mac.type == ixgbe_mac_e610)
+ netdev->ethtool_ops = &ixgbe_ethtool_ops_e610;
+ else
+ netdev->ethtool_ops = &ixgbe_ethtool_ops;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index 955dced844a9..7dcf6ecd157b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -56,7 +56,7 @@ int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
if (xid >= netdev->fcoe_ddp_xid)
return 0;
- adapter = netdev_priv(netdev);
+ adapter = ixgbe_from_netdev(netdev);
fcoe = &adapter->fcoe;
ddp = &fcoe->ddp[xid];
if (!ddp->udl)
@@ -153,7 +153,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
if (!netdev || !sgl)
return 0;
- adapter = netdev_priv(netdev);
+ adapter = ixgbe_from_netdev(netdev);
if (xid >= netdev->fcoe_ddp_xid) {
e_warn(drv, "xid=0x%x out-of-range\n", xid);
return 0;
@@ -834,7 +834,7 @@ static void ixgbe_fcoe_ddp_disable(struct ixgbe_adapter *adapter)
*/
int ixgbe_fcoe_enable(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_fcoe *fcoe = &adapter->fcoe;
atomic_inc(&fcoe->refcnt);
@@ -881,7 +881,7 @@ int ixgbe_fcoe_enable(struct net_device *netdev)
*/
int ixgbe_fcoe_disable(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
if (!atomic_dec_and_test(&adapter->fcoe.refcnt))
return -EINVAL;
@@ -927,7 +927,7 @@ int ixgbe_fcoe_disable(struct net_device *netdev)
int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
{
u16 prefix = 0xffff;
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_mac_info *mac = &adapter->hw.mac;
switch (type) {
@@ -967,7 +967,7 @@ int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
struct netdev_fcoe_hbainfo *info)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
u64 dsn;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fw_update.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fw_update.c
new file mode 100644
index 000000000000..49d3b66add7e
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fw_update.c
@@ -0,0 +1,707 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2025 Intel Corporation. */
+
+#include <linux/crc32.h>
+#include <linux/pldmfw.h>
+#include <linux/uuid.h>
+
+#include "ixgbe.h"
+#include "ixgbe_fw_update.h"
+
+struct ixgbe_fwu_priv {
+ struct pldmfw context;
+
+ struct ixgbe_adapter *adapter;
+ struct netlink_ext_ack *extack;
+
+ /* Track which NVM banks to activate at the end of the update */
+ u8 activate_flags;
+ bool emp_reset_available;
+};
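The update callbacks below recover this wrapper from the embedded pldmfw context via container_of(); as a minimal sketch of that pattern (the helper name is hypothetical and not part of the patch):

	static struct ixgbe_fwu_priv *ixgbe_fwu_priv_of(struct pldmfw *context)
	{
		/* context is embedded in ixgbe_fwu_priv, so container_of()
		 * walks back from the member to the wrapper structure.
		 */
		return container_of(context, struct ixgbe_fwu_priv, context);
	}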
+
+/**
+ * ixgbe_send_package_data - Send record package data to firmware
+ * @context: PLDM fw update structure
+ * @data: pointer to the package data
+ * @length: length of the package data
+ *
+ * Send a copy of the package data associated with the PLDM record matching
+ * this device to the firmware.
+ *
+ * Note that this function sends an AdminQ command that will fail unless the
+ * NVM resource has been acquired.
+ *
+ * Return: zero on success, or a negative error code on failure.
+ */
+static int ixgbe_send_package_data(struct pldmfw *context,
+ const u8 *data, u16 length)
+{
+ struct ixgbe_fwu_priv *priv = container_of(context,
+ struct ixgbe_fwu_priv,
+ context);
+ struct ixgbe_adapter *adapter = priv->adapter;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u8 *package_data;
+ int err;
+
+ package_data = kmemdup(data, length, GFP_KERNEL);
+ if (!package_data)
+ return -ENOMEM;
+
+ err = ixgbe_nvm_set_pkg_data(hw, false, package_data, length);
+
+ kfree(package_data);
+
+ return err;
+}
+
+/**
+ * ixgbe_check_component_response - Report firmware response to a component
+ * @adapter: device private data structure
+ * @response: indicates whether this component can be updated
+ * @code: code indicating reason for response
+ * @extack: netlink extended ACK structure
+ *
+ * Check whether firmware indicates if this component can be updated. Report
+ * a suitable error message over the netlink extended ACK if the component
+ * cannot be updated.
+ *
+ * Return: 0 if the component can be updated, or -ECANCELED if the
+ * firmware indicates the component cannot be updated.
+ */
+static int ixgbe_check_component_response(struct ixgbe_adapter *adapter,
+ u8 response, u8 code,
+ struct netlink_ext_ack *extack)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ switch (response) {
+ case IXGBE_ACI_NVM_PASS_COMP_CAN_BE_UPDATED:
+ /* Firmware indicated this update is good to proceed. */
+ return 0;
+ case IXGBE_ACI_NVM_PASS_COMP_CAN_MAY_BE_UPDATEABLE:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Firmware recommends not updating, as it may result in a downgrade. Continuing anyways");
+ return 0;
+ case IXGBE_ACI_NVM_PASS_COMP_CAN_NOT_BE_UPDATED:
+ NL_SET_ERR_MSG_MOD(extack, "Firmware has rejected updating.");
+ break;
+ case IXGBE_ACI_NVM_PASS_COMP_PARTIAL_CHECK:
+ if (hw->mac.ops.fw_recovery_mode &&
+ hw->mac.ops.fw_recovery_mode(hw))
+ return 0;
+ break;
+ }
+
+ switch (code) {
+ case IXGBE_ACI_NVM_PASS_COMP_STAMP_IDENTICAL_CODE:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Component comparison stamp is identical to running image");
+ break;
+ case IXGBE_ACI_NVM_PASS_COMP_STAMP_LOWER:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Component comparison stamp is lower than running image");
+ break;
+ case IXGBE_ACI_NVM_PASS_COMP_INVALID_STAMP_CODE:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Component comparison stamp is invalid");
+ break;
+ case IXGBE_ACI_NVM_PASS_COMP_CONFLICT_CODE:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Component table conflict occurred");
+ break;
+ case IXGBE_ACI_NVM_PASS_COMP_PRE_REQ_NOT_MET_CODE:
+ NL_SET_ERR_MSG_MOD(extack, "Component pre-requisites not met");
+ break;
+ case IXGBE_ACI_NVM_PASS_COMP_NOT_SUPPORTED_CODE:
+ NL_SET_ERR_MSG_MOD(extack, "Component not supported");
+ break;
+ case IXGBE_ACI_NVM_PASS_COMP_CANNOT_DOWNGRADE_CODE:
+ NL_SET_ERR_MSG_MOD(extack, "Component cannot be downgraded");
+ break;
+ case IXGBE_ACI_NVM_PASS_COMP_INCOMPLETE_IMAGE_CODE:
+ NL_SET_ERR_MSG_MOD(extack, "Incomplete component image");
+ break;
+ case IXGBE_ACI_NVM_PASS_COMP_VER_STR_IDENTICAL_CODE:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Component version is identical to running image");
+ break;
+ case IXGBE_ACI_NVM_PASS_COMP_VER_STR_LOWER_CODE:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Component version is lower than the running image");
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Received unexpected response code from firmware");
+ break;
+ }
+
+ return -ECANCELED;
+}
+
+/**
+ * ixgbe_send_component_table - Send PLDM component table to firmware
+ * @context: PLDM fw update structure
+ * @component: the component to process
+ * @transfer_flag: relative transfer order of this component
+ *
+ * Read relevant data from the component and forward it to the device
+ * firmware. Check the response to determine if the firmware indicates that
+ * the update can proceed.
+ *
+ * This function sends ACI commands related to the NVM, and assumes that
+ * the NVM resource has been acquired.
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+static int ixgbe_send_component_table(struct pldmfw *context,
+ struct pldmfw_component *component,
+ u8 transfer_flag)
+{
+ struct ixgbe_fwu_priv *priv = container_of(context,
+ struct ixgbe_fwu_priv,
+ context);
+ struct ixgbe_adapter *adapter = priv->adapter;
+ struct netlink_ext_ack *extack = priv->extack;
+ struct ixgbe_aci_cmd_nvm_comp_tbl *comp_tbl;
+ u8 comp_response, comp_response_code;
+ struct ixgbe_hw *hw = &adapter->hw;
+ size_t length;
+ int err;
+
+ switch (component->identifier) {
+ case NVM_COMP_ID_OROM:
+ case NVM_COMP_ID_NVM:
+ case NVM_COMP_ID_NETLIST:
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unable to update due to unknown firmware component");
+ return -EOPNOTSUPP;
+ }
+
+ length = struct_size(comp_tbl, cvs, component->version_len);
+ comp_tbl = kzalloc(length, GFP_KERNEL);
+ if (!comp_tbl)
+ return -ENOMEM;
+
+ comp_tbl->comp_class = cpu_to_le16(component->classification);
+ comp_tbl->comp_id = cpu_to_le16(component->identifier);
+ comp_tbl->comp_class_idx = FWU_COMP_CLASS_IDX_NOT_USE;
+ comp_tbl->comp_cmp_stamp = cpu_to_le32(component->comparison_stamp);
+ comp_tbl->cvs_type = component->version_type;
+ comp_tbl->cvs_len = component->version_len;
+
+ memcpy(comp_tbl->cvs, component->version_string,
+ component->version_len);
+
+ err = ixgbe_nvm_pass_component_tbl(hw, (u8 *)comp_tbl, length,
+ transfer_flag, &comp_response,
+ &comp_response_code);
+
+ kfree(comp_tbl);
+
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to transfer component table to firmware");
+ return -EIO;
+ }
+
+ return ixgbe_check_component_response(adapter,
+ comp_response,
+ comp_response_code, extack);
+}
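The allocation above sizes the trailing flexible cvs[] array with struct_size(); a minimal illustration of the same idiom, with a made-up version-string length:

	struct ixgbe_aci_cmd_nvm_comp_tbl *tbl;
	size_t len = struct_size(tbl, cvs, 12);	/* header plus 12 bytes of cvs[] */

	tbl = kzalloc(len, GFP_KERNEL);	/* overflow-checked, unlike open-coded sizeof math */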
+
+/**
+ * ixgbe_write_one_nvm_block - Write an NVM block and await completion response
+ * @adapter: the PF data structure
+ * @module: the module to write to
+ * @offset: offset in bytes
+ * @block_size: size of the block to write, up to 4k
+ * @block: pointer to block of data to write
+ * @last_cmd: whether this is the last command
+ * @extack: netlink extended ACK structure
+ *
+ * Write a block of data to a flash module and wait for the completion
+ * response message from firmware.
+ *
+ * Note this function assumes the caller has acquired the NVM resource.
+ *
+ * On successful return, reset level indicates the device reset required to
+ * complete the update.
+ *
+ * 0 - IXGBE_ACI_NVM_POR_FLAG - A full power on is required
+ * 1 - IXGBE_ACI_NVM_PERST_FLAG - A cold PCIe reset is required
+ * 2 - IXGBE_ACI_NVM_EMPR_FLAG - An EMP reset is required
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+static int ixgbe_write_one_nvm_block(struct ixgbe_adapter *adapter,
+ u16 module, u32 offset,
+ u16 block_size, u8 *block, bool last_cmd,
+ struct netlink_ext_ack *extack)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ return ixgbe_aci_update_nvm(hw, module, offset, block_size, block,
+ last_cmd, 0);
+}
+
+/**
+ * ixgbe_write_nvm_module - Write data to an NVM module
+ * @adapter: the PF driver structure
+ * @module: the module id to program
+ * @component: the name of the component being updated
+ * @image: buffer of image data to write to the NVM
+ * @length: length of the buffer
+ * @extack: netlink extended ACK structure
+ *
+ * Loop over the data for a given NVM module and program it in 4 KB
+ * blocks, notifying the devlink core of progress after each block is
+ * programmed.
+ *
+ * Note this function assumes the caller has acquired the NVM resource.
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+static int ixgbe_write_nvm_module(struct ixgbe_adapter *adapter, u16 module,
+ const char *component, const u8 *image,
+ u32 length,
+ struct netlink_ext_ack *extack)
+{
+ struct devlink *devlink = adapter->devlink;
+ u32 offset = 0;
+ bool last_cmd;
+ u8 *block;
+ int err;
+
+ devlink_flash_update_status_notify(devlink, "Flashing",
+ component, 0, length);
+
+ block = kzalloc(IXGBE_ACI_MAX_BUFFER_SIZE, GFP_KERNEL);
+ if (!block)
+ return -ENOMEM;
+
+ do {
+ u32 block_size;
+
+ block_size = min_t(u32, IXGBE_ACI_MAX_BUFFER_SIZE,
+ length - offset);
+ last_cmd = !(offset + block_size < length);
+
+ memcpy(block, image + offset, block_size);
+
+ err = ixgbe_write_one_nvm_block(adapter, module, offset,
+ block_size, block, last_cmd,
+ extack);
+ if (err)
+ break;
+
+ offset += block_size;
+
+ devlink_flash_update_status_notify(devlink, "Flashing",
+ component, offset, length);
+ } while (!last_cmd);
+
+ if (err)
+ devlink_flash_update_status_notify(devlink, "Flashing failed",
+ component, length, length);
+ else
+ devlink_flash_update_status_notify(devlink, "Flashing done",
+ component, length, length);
+
+ kfree(block);
+
+ return err;
+}
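A worked example of the chunking loop, assuming IXGBE_ACI_MAX_BUFFER_SIZE is 4096 bytes (consistent with the 4 KB blocks described in the kerneldoc):

	/* length = 10000 bytes is written in three calls:
	 *   offset    0, block_size 4096, last_cmd = false
	 *   offset 4096, block_size 4096, last_cmd = false
	 *   offset 8192, block_size 1808, last_cmd = true
	 * last_cmd turns true exactly when offset + block_size reaches length.
	 */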
+
+/* Length in seconds to wait before timing out when erasing a flash module.
+ * Yes, erasing really can take minutes to complete.
+ */
+#define IXGBE_FW_ERASE_TIMEOUT 300
+
+/**
+ * ixgbe_erase_nvm_module - Erase an NVM module and await firmware completion
+ * @adapter: the PF data structure
+ * @module: the module to erase
+ * @component: name of the component being updated
+ * @extack: netlink extended ACK structure
+ *
+ * Erase the inactive NVM bank associated with this module and wait for
+ * a completion response message from firmware.
+ *
+ * Note this function assumes the caller has acquired the NVM resource.
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+static int ixgbe_erase_nvm_module(struct ixgbe_adapter *adapter, u16 module,
+ const char *component,
+ struct netlink_ext_ack *extack)
+{
+ struct devlink *devlink = adapter->devlink;
+ struct ixgbe_hw *hw = &adapter->hw;
+ int err;
+
+ devlink_flash_update_timeout_notify(devlink, "Erasing", component,
+ IXGBE_FW_ERASE_TIMEOUT);
+
+ err = ixgbe_aci_erase_nvm(hw, module);
+ if (err)
+ devlink_flash_update_status_notify(devlink, "Erasing failed",
+ component, 0, 0);
+ else
+ devlink_flash_update_status_notify(devlink, "Erasing done",
+ component, 0, 0);
+
+ return err;
+}
+
+/**
+ * ixgbe_switch_flash_banks - Tell firmware to switch NVM banks
+ * @adapter: Pointer to the PF data structure
+ * @activate_flags: flags used for the activation command
+ * @emp_reset_available: on return, indicates if EMP reset is available
+ * @extack: netlink extended ACK structure
+ *
+ * Notify firmware to activate the newly written flash banks, and wait for the
+ * firmware response.
+ *
+ * Return: 0 on success or an error code on failure.
+ */
+static int ixgbe_switch_flash_banks(struct ixgbe_adapter *adapter,
+ u8 activate_flags,
+ bool *emp_reset_available,
+ struct netlink_ext_ack *extack)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u8 response_flags;
+ int err;
+
+ err = ixgbe_nvm_write_activate(hw, activate_flags, &response_flags);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to switch active flash banks");
+ return err;
+ }
+
+ if (emp_reset_available) {
+ if (hw->dev_caps.common_cap.reset_restrict_support)
+ *emp_reset_available =
+ response_flags & IXGBE_ACI_NVM_EMPR_ENA;
+ else
+ *emp_reset_available = true;
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_flash_component - Flash a component of the NVM
+ * @context: PLDM fw update structure
+ * @component: the component table to program
+ *
+ * Program the flash contents for a given component. First, determine the
+ * module id. Then, erase the secondary bank for this module. Finally, write
+ * the contents of the component to the NVM.
+ *
+ * Note this function assumes the caller has acquired the NVM resource.
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+static int ixgbe_flash_component(struct pldmfw *context,
+ struct pldmfw_component *component)
+{
+ struct ixgbe_fwu_priv *priv = container_of(context,
+ struct ixgbe_fwu_priv,
+ context);
+ struct netlink_ext_ack *extack = priv->extack;
+ struct ixgbe_adapter *adapter = priv->adapter;
+ const char *name;
+ u16 module;
+ int err;
+ u8 flag;
+
+ switch (component->identifier) {
+ case NVM_COMP_ID_OROM:
+ module = IXGBE_E610_SR_1ST_OROM_BANK_PTR;
+ flag = IXGBE_ACI_NVM_ACTIV_SEL_OROM;
+ name = "fw.undi";
+ break;
+ case NVM_COMP_ID_NVM:
+ module = IXGBE_E610_SR_1ST_NVM_BANK_PTR;
+ flag = IXGBE_ACI_NVM_ACTIV_SEL_NVM;
+ name = "fw.mgmt";
+ break;
+ case NVM_COMP_ID_NETLIST:
+ module = IXGBE_E610_SR_NETLIST_BANK_PTR;
+ flag = IXGBE_ACI_NVM_ACTIV_SEL_NETLIST;
+ name = "fw.netlist";
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ /* Mark this component for activating at the end. */
+ priv->activate_flags |= flag;
+
+ err = ixgbe_erase_nvm_module(adapter, module, name, extack);
+ if (err)
+ return err;
+
+ return ixgbe_write_nvm_module(adapter, module, name,
+ component->component_data,
+ component->component_size, extack);
+}
+
+/**
+ * ixgbe_finalize_update - Perform last steps to complete device update
+ * @context: PLDM fw update structure
+ *
+ * Called as the last step of the update process. Complete the update by
+ * telling the firmware to switch active banks, and performing a reset if
+ * configured.
+ *
+ * Return: 0 on success, or an error code on failure.
+ */
+static int ixgbe_finalize_update(struct pldmfw *context)
+{
+ struct ixgbe_fwu_priv *priv = container_of(context,
+ struct ixgbe_fwu_priv,
+ context);
+ struct ixgbe_adapter *adapter = priv->adapter;
+ struct netlink_ext_ack *extack = priv->extack;
+ struct devlink *devlink = adapter->devlink;
+ int err;
+
+ /* Finally, notify firmware to activate the written NVM banks */
+ err = ixgbe_switch_flash_banks(adapter, priv->activate_flags,
+ &priv->emp_reset_available, extack);
+ if (err)
+ return err;
+
+ adapter->fw_emp_reset_disabled = !priv->emp_reset_available;
+
+ if (!adapter->fw_emp_reset_disabled)
+ devlink_flash_update_status_notify(devlink,
+ "Suggested is to activate new firmware by devlink reload, if it doesn't work then a power cycle is required",
+ NULL, 0, 0);
+
+ return 0;
+}
+
+static const struct pldmfw_ops ixgbe_fwu_ops_e610 = {
+ .match_record = &pldmfw_op_pci_match_record,
+ .send_package_data = &ixgbe_send_package_data,
+ .send_component_table = &ixgbe_send_component_table,
+ .flash_component = &ixgbe_flash_component,
+ .finalize_update = &ixgbe_finalize_update,
+};
+
+/**
+ * ixgbe_get_pending_updates - Check if the component has a pending update
+ * @adapter: the PF driver structure
+ * @pending: on return, bitmap of updates pending
+ * @extack: Netlink extended ACK
+ *
+ * Check if the device has any pending updates on any flash components.
+ *
+ * Return: 0 on success, or a negative error code on failure. On success,
+ * @pending is filled with the bitmap of pending updates.
+ */
+int ixgbe_get_pending_updates(struct ixgbe_adapter *adapter, u8 *pending,
+ struct netlink_ext_ack *extack)
+{
+ struct ixgbe_hw_dev_caps *dev_caps;
+ struct ixgbe_hw *hw = &adapter->hw;
+ int err;
+
+ dev_caps = kzalloc(sizeof(*dev_caps), GFP_KERNEL);
+ if (!dev_caps)
+ return -ENOMEM;
+
+ err = ixgbe_discover_dev_caps(hw, dev_caps);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unable to read device capabilities");
+ kfree(dev_caps);
+ return -EIO;
+ }
+
+ *pending = 0;
+
+ if (dev_caps->common_cap.nvm_update_pending_nvm)
+ *pending |= IXGBE_ACI_NVM_ACTIV_SEL_NVM;
+
+ if (dev_caps->common_cap.nvm_update_pending_orom)
+ *pending |= IXGBE_ACI_NVM_ACTIV_SEL_OROM;
+
+ if (dev_caps->common_cap.nvm_update_pending_netlist)
+ *pending |= IXGBE_ACI_NVM_ACTIV_SEL_NETLIST;
+
+ kfree(dev_caps);
+
+ return 0;
+}
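A hypothetical caller sketch (the helper name and log messages are illustrative, not part of this patch) showing how the returned bitmap can be consumed:

	static void ixgbe_log_pending_updates(struct ixgbe_adapter *adapter,
					      struct netlink_ext_ack *extack)
	{
		u8 pending = 0;

		if (ixgbe_get_pending_updates(adapter, &pending, extack))
			return;

		if (pending & IXGBE_ACI_NVM_ACTIV_SEL_NVM)
			netdev_info(adapter->netdev, "fw.mgmt update pending activation\n");
		if (pending & IXGBE_ACI_NVM_ACTIV_SEL_OROM)
			netdev_info(adapter->netdev, "fw.undi update pending activation\n");
		if (pending & IXGBE_ACI_NVM_ACTIV_SEL_NETLIST)
			netdev_info(adapter->netdev, "fw.netlist update pending activation\n");
	}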
+
+/**
+ * ixgbe_cancel_pending_update - Cancel any pending update for a component
+ * @adapter: the PF driver structure
+ * @component: if not NULL, the name of the component being updated
+ * @extack: Netlink extended ACK structure
+ *
+ * Cancel any pending update for the specified component. If component is
+ * NULL, all device updates will be canceled.
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+static int ixgbe_cancel_pending_update(struct ixgbe_adapter *adapter,
+ const char *component,
+ struct netlink_ext_ack *extack)
+{
+ struct devlink *devlink = adapter->devlink;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u8 pending;
+ int err;
+
+ err = ixgbe_get_pending_updates(adapter, &pending, extack);
+ if (err)
+ return err;
+
+ /* If the flash_update request is for a specific component, ignore all
+ * of the other components.
+ */
+ if (component) {
+ if (strcmp(component, "fw.mgmt") == 0)
+ pending &= IXGBE_ACI_NVM_ACTIV_SEL_NVM;
+ else if (strcmp(component, "fw.undi") == 0)
+ pending &= IXGBE_ACI_NVM_ACTIV_SEL_OROM;
+ else if (strcmp(component, "fw.netlist") == 0)
+ pending &= IXGBE_ACI_NVM_ACTIV_SEL_NETLIST;
+ else
+ return -EINVAL;
+ }
+
+ /* There is no previous pending update, so this request may continue */
+ if (!pending)
+ return 0;
+
+ /* In order to allow overwriting a previous pending update, notify
+ * firmware to cancel that update by issuing the appropriate command.
+ */
+ devlink_flash_update_status_notify(devlink,
+ "Canceling previous pending update",
+ component, 0, 0);
+
+ err = ixgbe_acquire_nvm(hw, IXGBE_RES_WRITE);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to acquire device flash lock");
+ return -EIO;
+ }
+
+ pending |= IXGBE_ACI_NVM_REVERT_LAST_ACTIV;
+ err = ixgbe_switch_flash_banks(adapter, pending, NULL, extack);
+
+ ixgbe_release_nvm(hw);
+
+ return err;
+}
+
+/**
+ * ixgbe_flash_pldm_image - Write a PLDM-formatted firmware image to the device
+ * @devlink: pointer to devlink associated with the device to update
+ * @params: devlink flash update parameters
+ * @extack: netlink extended ACK structure
+ *
+ * Parse the data for a given firmware file, verifying that it is a valid PLDM
+ * formatted image that matches this device.
+ *
+ * Extract the device record Package Data and Component Tables and send them
+ * to the firmware. Extract and write the flash data for each of the three
+ * main flash components, "fw.mgmt", "fw.undi", and "fw.netlist". Notify
+ * firmware once the data is written to the inactive banks.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int ixgbe_flash_pldm_image(struct devlink *devlink,
+ struct devlink_flash_update_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct ixgbe_adapter *adapter = devlink_priv(devlink);
+ struct device *dev = &adapter->pdev->dev;
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_fwu_priv priv;
+ u8 preservation;
+ int err;
+
+ if (hw->mac.type != ixgbe_mac_e610)
+ return -EOPNOTSUPP;
+
+ switch (params->overwrite_mask) {
+ case 0:
+ /* preserve all settings and identifiers */
+ preservation = IXGBE_ACI_NVM_PRESERVE_ALL;
+ break;
+ case DEVLINK_FLASH_OVERWRITE_SETTINGS:
+ /* Overwrite settings, but preserve vital information such as
+ * device identifiers.
+ */
+ preservation = IXGBE_ACI_NVM_PRESERVE_SELECTED;
+ break;
+ case (DEVLINK_FLASH_OVERWRITE_SETTINGS |
+ DEVLINK_FLASH_OVERWRITE_IDENTIFIERS):
+ /* overwrite both settings and identifiers, preserve nothing */
+ preservation = IXGBE_ACI_NVM_NO_PRESERVATION;
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Requested overwrite mask is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ /* Capabilities cannot be read in recovery mode, so a missing
+ * nvm_unified_update bit must not be treated as an error.
+ */
+ if (!hw->dev_caps.common_cap.nvm_unified_update &&
+ (hw->mac.ops.fw_recovery_mode &&
+ !hw->mac.ops.fw_recovery_mode(hw))) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Current firmware does not support unified update");
+ return -EOPNOTSUPP;
+ }
+
+ memset(&priv, 0, sizeof(priv));
+
+ priv.context.ops = &ixgbe_fwu_ops_e610;
+ priv.context.dev = dev;
+ priv.extack = extack;
+ priv.adapter = adapter;
+ priv.activate_flags = preservation;
+
+ devlink_flash_update_status_notify(devlink,
+ "Preparing to flash", NULL, 0, 0);
+
+ err = ixgbe_cancel_pending_update(adapter, NULL, extack);
+ if (err)
+ return err;
+
+ err = ixgbe_acquire_nvm(hw, IXGBE_RES_WRITE);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to acquire device flash lock");
+ return -EIO;
+ }
+
+ err = pldmfw_flash_image(&priv.context, params->fw);
+ if (err == -ENOENT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Firmware image has no record matching this device");
+ } else if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to flash PLDM image");
+ }
+
+ ixgbe_release_nvm(hw);
+
+ return err;
+}
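For context, a minimal sketch of how this entry point is typically reached from a devlink flash_update callback; the callback and ops names are illustrative, and the real wiring lives in the driver's devlink code rather than in this file:

	static int ixgbe_devlink_flash_update(struct devlink *devlink,
					      struct devlink_flash_update_params *params,
					      struct netlink_ext_ack *extack)
	{
		return ixgbe_flash_pldm_image(devlink, params, extack);
	}

	static const struct devlink_ops ixgbe_devlink_ops_sketch = {
		.supported_flash_update_params =
			DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK,
		.flash_update = ixgbe_devlink_flash_update,
	};

From userspace this path corresponds to a devlink dev flash request, with params->overwrite_mask derived from the command's overwrite options.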
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fw_update.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_fw_update.h
new file mode 100644
index 000000000000..abdd708c93df
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fw_update.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2025 Intel Corporation. */
+
+#ifndef _IXGBE_FW_UPDATE_H_
+#define _IXGBE_FW_UPDATE_H_
+
+int ixgbe_flash_pldm_image(struct devlink *devlink,
+ struct devlink_flash_update_params *params,
+ struct netlink_ext_ack *extack);
+int ixgbe_get_pending_updates(struct ixgbe_adapter *adapter, u8 *pending,
+ struct netlink_ext_ack *extack);
+#endif
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
index 866024f2b9ee..d1f4073b36f9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
@@ -9,7 +9,7 @@
#define IXGBE_IPSEC_KEY_BITS 160
static const char aes_gcm_name[] = "rfc4106(gcm(aes))";
-static void ixgbe_ipsec_del_sa(struct xfrm_state *xs);
+static void ixgbe_ipsec_del_sa(struct net_device *dev, struct xfrm_state *xs);
/**
* ixgbe_ipsec_set_tx_sa - set the Tx SA registers
@@ -321,7 +321,7 @@ void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter)
if (r->used) {
if (r->mode & IXGBE_RXTXMOD_VF)
- ixgbe_ipsec_del_sa(r->xs);
+ ixgbe_ipsec_del_sa(adapter->netdev, r->xs);
else
ixgbe_ipsec_set_rx_sa(hw, i, r->xs->id.spi,
r->key, r->salt,
@@ -330,7 +330,7 @@ void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter)
if (t->used) {
if (t->mode & IXGBE_RXTXMOD_VF)
- ixgbe_ipsec_del_sa(t->xs);
+ ixgbe_ipsec_del_sa(adapter->netdev, t->xs);
else
ixgbe_ipsec_set_tx_sa(hw, i, t->key, t->salt);
}
@@ -417,6 +417,7 @@ static struct xfrm_state *ixgbe_ipsec_find_rx_state(struct ixgbe_ipsec *ipsec,
/**
* ixgbe_ipsec_parse_proto_keys - find the key and salt based on the protocol
+ * @dev: pointer to net device
* @xs: pointer to xfrm_state struct
* @mykey: pointer to key array to populate
* @mysalt: pointer to salt value to populate
@@ -424,10 +425,10 @@ static struct xfrm_state *ixgbe_ipsec_find_rx_state(struct ixgbe_ipsec *ipsec,
* This copies the protocol keys and salt to our own data tables. The
* 82599 family only supports the one algorithm.
**/
-static int ixgbe_ipsec_parse_proto_keys(struct xfrm_state *xs,
+static int ixgbe_ipsec_parse_proto_keys(struct net_device *dev,
+ struct xfrm_state *xs,
u32 *mykey, u32 *mysalt)
{
- struct net_device *dev = xs->xso.real_dev;
unsigned char *key_data;
char *alg_name = NULL;
int key_len;
@@ -473,12 +474,13 @@ static int ixgbe_ipsec_parse_proto_keys(struct xfrm_state *xs,
/**
* ixgbe_ipsec_check_mgmt_ip - make sure there is no clash with mgmt IP filters
+ * @dev: pointer to net device
* @xs: pointer to transformer state struct
**/
-static int ixgbe_ipsec_check_mgmt_ip(struct xfrm_state *xs)
+static int ixgbe_ipsec_check_mgmt_ip(struct net_device *dev,
+ struct xfrm_state *xs)
{
- struct net_device *dev = xs->xso.real_dev;
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ixgbe_hw *hw = &adapter->hw;
u32 mfval, manc, reg;
int num_filters = 4;
@@ -556,14 +558,15 @@ static int ixgbe_ipsec_check_mgmt_ip(struct xfrm_state *xs)
/**
* ixgbe_ipsec_add_sa - program device with a security association
+ * @dev: pointer to device to program
* @xs: pointer to transformer state struct
* @extack: extack point to fill failure reason
**/
-static int ixgbe_ipsec_add_sa(struct xfrm_state *xs,
+static int ixgbe_ipsec_add_sa(struct net_device *dev,
+ struct xfrm_state *xs,
struct netlink_ext_ack *extack)
{
- struct net_device *dev = xs->xso.real_dev;
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ixgbe_ipsec *ipsec = adapter->ipsec;
struct ixgbe_hw *hw = &adapter->hw;
int checked, match, first;
@@ -581,7 +584,7 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs,
return -EINVAL;
}
- if (ixgbe_ipsec_check_mgmt_ip(xs)) {
+ if (ixgbe_ipsec_check_mgmt_ip(dev, xs)) {
NL_SET_ERR_MSG_MOD(extack, "IPsec IP addr clash with mgmt filters");
return -EINVAL;
}
@@ -615,7 +618,7 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs,
rsa.decrypt = xs->ealg || xs->aead;
/* get the key and salt */
- ret = ixgbe_ipsec_parse_proto_keys(xs, rsa.key, &rsa.salt);
+ ret = ixgbe_ipsec_parse_proto_keys(dev, xs, rsa.key, &rsa.salt);
if (ret) {
NL_SET_ERR_MSG_MOD(extack, "Failed to get key data for Rx SA table");
return ret;
@@ -724,7 +727,7 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs,
if (xs->id.proto & IPPROTO_ESP)
tsa.encrypt = xs->ealg || xs->aead;
- ret = ixgbe_ipsec_parse_proto_keys(xs, tsa.key, &tsa.salt);
+ ret = ixgbe_ipsec_parse_proto_keys(dev, xs, tsa.key, &tsa.salt);
if (ret) {
NL_SET_ERR_MSG_MOD(extack, "Failed to get key data for Tx SA table");
memset(&tsa, 0, sizeof(tsa));
@@ -752,12 +755,12 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs,
/**
* ixgbe_ipsec_del_sa - clear out this specific SA
+ * @dev: pointer to device to program
* @xs: pointer to transformer state struct
**/
-static void ixgbe_ipsec_del_sa(struct xfrm_state *xs)
+static void ixgbe_ipsec_del_sa(struct net_device *dev, struct xfrm_state *xs)
{
- struct net_device *dev = xs->xso.real_dev;
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ixgbe_ipsec *ipsec = adapter->ipsec;
struct ixgbe_hw *hw = &adapter->hw;
u32 zerobuf[4] = {0, 0, 0, 0};
@@ -817,30 +820,9 @@ static void ixgbe_ipsec_del_sa(struct xfrm_state *xs)
}
}
-/**
- * ixgbe_ipsec_offload_ok - can this packet use the xfrm hw offload
- * @skb: current data packet
- * @xs: pointer to transformer state struct
- **/
-static bool ixgbe_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
-{
- if (xs->props.family == AF_INET) {
- /* Offload with IPv4 options is not supported yet */
- if (ip_hdr(skb)->ihl != 5)
- return false;
- } else {
- /* Offload with IPv6 extension headers is not support yet */
- if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
- return false;
- }
-
- return true;
-}
-
static const struct xfrmdev_ops ixgbe_xfrmdev_ops = {
.xdo_dev_state_add = ixgbe_ipsec_add_sa,
.xdo_dev_state_delete = ixgbe_ipsec_del_sa,
- .xdo_dev_offload_ok = ixgbe_ipsec_offload_ok,
};
/**
@@ -862,7 +844,8 @@ void ixgbe_ipsec_vf_clear(struct ixgbe_adapter *adapter, u32 vf)
continue;
if (ipsec->rx_tbl[i].mode & IXGBE_RXTXMOD_VF &&
ipsec->rx_tbl[i].vf == vf)
- ixgbe_ipsec_del_sa(ipsec->rx_tbl[i].xs);
+ ixgbe_ipsec_del_sa(adapter->netdev,
+ ipsec->rx_tbl[i].xs);
}
/* search tx sa table */
@@ -871,7 +854,8 @@ void ixgbe_ipsec_vf_clear(struct ixgbe_adapter *adapter, u32 vf)
continue;
if (ipsec->tx_tbl[i].mode & IXGBE_RXTXMOD_VF &&
ipsec->tx_tbl[i].vf == vf)
- ixgbe_ipsec_del_sa(ipsec->tx_tbl[i].xs);
+ ixgbe_ipsec_del_sa(adapter->netdev,
+ ipsec->tx_tbl[i].xs);
}
}
@@ -951,7 +935,7 @@ int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
memcpy(xs->aead->alg_name, aes_gcm_name, sizeof(aes_gcm_name));
/* set up the HW offload */
- err = ixgbe_ipsec_add_sa(xs, NULL);
+ err = ixgbe_ipsec_add_sa(adapter->netdev, xs, NULL);
if (err)
goto err_aead;
@@ -1055,7 +1039,7 @@ int ixgbe_ipsec_vf_del_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
xs = ipsec->tx_tbl[sa_idx].xs;
}
- ixgbe_ipsec_del_sa(xs);
+ ixgbe_ipsec_del_sa(adapter->netdev, xs);
/* remove the xs that was made-up in the add request */
kfree_sensitive(xs);
@@ -1073,7 +1057,7 @@ int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring,
struct ixgbe_tx_buffer *first,
struct ixgbe_ipsec_tx_data *itd)
{
- struct ixgbe_adapter *adapter = netdev_priv(tx_ring->netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(tx_ring->netdev);
struct ixgbe_ipsec *ipsec = adapter->ipsec;
struct xfrm_state *xs;
struct sec_path *sp;
@@ -1163,7 +1147,7 @@ void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring,
union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
- struct ixgbe_adapter *adapter = netdev_priv(rx_ring->netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(rx_ring->netdev);
__le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
__le16 ipsec_pkt_types = cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH |
IXGBE_RXDADV_PKTTYPE_IPSEC_ESP);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 16fa621ce0ff..336d47ffb95a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#include "ixgbe.h"
#include "ixgbe_sriov.h"
@@ -107,6 +107,7 @@ static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
if (num_tcs > 4) {
/*
* TCs : TC0/1 TC2/3 TC4-7
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 2e38e8f6fac1..cba860f0e1f1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#include <linux/types.h>
#include <linux/module.h>
@@ -42,12 +42,14 @@
#include "ixgbe.h"
#include "ixgbe_common.h"
+#include "ixgbe_e610.h"
#include "ixgbe_dcb_82599.h"
#include "ixgbe_mbx.h"
#include "ixgbe_phy.h"
#include "ixgbe_sriov.h"
#include "ixgbe_model.h"
#include "ixgbe_txrx_common.h"
+#include "devlink/devlink.h"
char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
@@ -73,6 +75,7 @@ static const struct ixgbe_info *ixgbe_info_tbl[] = {
[board_x550em_x_fw] = &ixgbe_x550em_x_fw_info,
[board_x550em_a] = &ixgbe_x550em_a_info,
[board_x550em_a_fw] = &ixgbe_x550em_a_fw_info,
+ [board_e610] = &ixgbe_e610_info,
};
/* ixgbe_pci_tbl - PCI Device ID Table
@@ -131,6 +134,11 @@ static const struct pci_device_id ixgbe_pci_tbl[] = {
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP), board_x550em_a },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T), board_x550em_a_fw },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T_L), board_x550em_a_fw },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_E610_BACKPLANE), board_e610},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_E610_SFP), board_e610},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_E610_10G_T), board_e610},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_E610_2_5G_T), board_e610},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_E610_SGMII), board_e610},
/* required last entry */
{0, }
};
@@ -173,6 +181,8 @@ static struct workqueue_struct *ixgbe_wq;
static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev);
static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *);
+static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *);
+static void ixgbe_watchdog_update_link(struct ixgbe_adapter *);
static const struct net_device_ops ixgbe_netdev_ops;
@@ -236,8 +246,11 @@ static int ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter)
* bandwidth details should be gathered from the parent bus instead of from the
* device. Used to ensure that various locations all have the correct device ID
* checks.
+ *
+ * Return: true if information should be collected from the parent bus, false
+ * otherwise
*/
-static inline bool ixgbe_pcie_from_parent(struct ixgbe_hw *hw)
+static bool ixgbe_pcie_from_parent(struct ixgbe_hw *hw)
{
switch (hw->device_id) {
case IXGBE_DEV_ID_82599_SFP_SF_QP:
@@ -876,6 +889,7 @@ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
if (direction == -1) {
/* other causes */
msix_vector |= IXGBE_IVAR_ALLOC_VAL;
@@ -915,6 +929,7 @@ void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
mask = (qmask & 0xFFFFFFFF);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
mask = (qmask >> 32);
@@ -1025,7 +1040,7 @@ static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
return ((head <= tail) ? tail : tail + ring->count) - head;
}
-static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
+static bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
{
u32 tx_done = ixgbe_get_tx_completed(tx_ring);
u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
@@ -1081,7 +1096,7 @@ static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter)
static int ixgbe_tx_maxrate(struct net_device *netdev,
int queue_index, u32 maxrate)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
u32 bcnrc_val = ixgbe_link_mbps(adapter);
@@ -1909,10 +1924,6 @@ bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
{
struct net_device *netdev = rx_ring->netdev;
- /* XDP packets use error pointer so abort at this point */
- if (IS_ERR(skb))
- return true;
-
/* Verify netdev is present, and that packet does not have any
* errors that would be unacceptable to the netdev.
*/
@@ -2095,7 +2106,7 @@ static void ixgbe_put_rx_buffer(struct ixgbe_ring *rx_ring,
/* hand second half of page back to the ring */
ixgbe_reuse_rx_page(rx_ring, rx_buffer);
} else {
- if (!IS_ERR(skb) && IXGBE_CB(skb)->dma == rx_buffer->dma) {
+ if (skb && IXGBE_CB(skb)->dma == rx_buffer->dma) {
/* the page has been released from the ring */
IXGBE_CB(skb)->page_released = true;
} else {
@@ -2220,9 +2231,9 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
return skb;
}
-static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *rx_ring,
- struct xdp_buff *xdp)
+static int ixgbe_run_xdp(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *rx_ring,
+ struct xdp_buff *xdp)
{
int err, result = IXGBE_XDP_PASS;
struct bpf_prog *xdp_prog;
@@ -2272,7 +2283,7 @@ out_failure:
break;
}
xdp_out:
- return ERR_PTR(-result);
+ return result;
}
static unsigned int ixgbe_rx_frame_truesize(struct ixgbe_ring *rx_ring,
@@ -2330,6 +2341,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
unsigned int offset = rx_ring->rx_offset;
unsigned int xdp_xmit = 0;
struct xdp_buff xdp;
+ int xdp_res = 0;
/* Frame size depend on rx_ring setup when PAGE_SIZE=4K */
#if (PAGE_SIZE < 8192)
@@ -2375,12 +2387,10 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
/* At larger PAGE_SIZE, frame_sz depend on len size */
xdp.frame_sz = ixgbe_rx_frame_truesize(rx_ring, size);
#endif
- skb = ixgbe_run_xdp(adapter, rx_ring, &xdp);
+ xdp_res = ixgbe_run_xdp(adapter, rx_ring, &xdp);
}
- if (IS_ERR(skb)) {
- unsigned int xdp_res = -PTR_ERR(skb);
-
+ if (xdp_res) {
if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)) {
xdp_xmit |= xdp_res;
ixgbe_rx_buffer_flip(rx_ring, rx_buffer, size);
@@ -2400,7 +2410,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
}
/* exit if we failed to retrieve a buffer */
- if (!skb) {
+ if (!xdp_res && !skb) {
rx_ring->rx_stats.alloc_rx_buff_failed++;
rx_buffer->pagecnt_bias++;
break;
@@ -2414,7 +2424,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
continue;
/* verify the packet layout is correct */
- if (ixgbe_cleanup_headers(rx_ring, rx_desc, skb))
+ if (xdp_res || ixgbe_cleanup_headers(rx_ring, rx_desc, skb))
continue;
/* probably a little skewed due to removing CRC */
@@ -2515,6 +2525,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
ixgbe_set_ivar(adapter, -1, 1, v_idx);
break;
default:
@@ -2528,6 +2539,9 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
IXGBE_EIMS_MAILBOX |
IXGBE_EIMS_LSC);
+ if (adapter->hw.mac.type == ixgbe_mac_e610)
+ mask &= ~IXGBE_EIMS_FW_EVENT;
+
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
}
@@ -2744,6 +2758,7 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
/*
* set the WDIS bit to not clear the timer bits and cause an
* immediate assertion of the interrupt
@@ -2966,6 +2981,222 @@ static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
}
}
+/**
+ * ixgbe_check_phy_fw_load - check if PHY FW load failed
+ * @adapter: pointer to adapter structure
+ * @link_cfg_err: bitmap from the link info structure
+ *
+ * Check if external PHY FW load failed and print an error message if it did.
+ */
+static void ixgbe_check_phy_fw_load(struct ixgbe_adapter *adapter,
+ u8 link_cfg_err)
+{
+ if (!(link_cfg_err & IXGBE_ACI_LINK_EXTERNAL_PHY_LOAD_FAILURE)) {
+ adapter->flags2 &= ~IXGBE_FLAG2_PHY_FW_LOAD_FAILED;
+ return;
+ }
+
+ if (adapter->flags2 & IXGBE_FLAG2_PHY_FW_LOAD_FAILED)
+ return;
+
+ if (link_cfg_err & IXGBE_ACI_LINK_EXTERNAL_PHY_LOAD_FAILURE) {
+ netdev_err(adapter->netdev, "Device failed to load the FW for the external PHY. Please download and install the latest NVM for your device and try again\n");
+ adapter->flags2 |= IXGBE_FLAG2_PHY_FW_LOAD_FAILED;
+ }
+}
+
+/**
+ * ixgbe_check_module_power - check module power level
+ * @adapter: pointer to adapter structure
+ * @link_cfg_err: bitmap from the link info structure
+ *
+ * Check module power level returned by a previous call to aci_get_link_info
+ * and print error messages if module power level is not supported.
+ */
+static void ixgbe_check_module_power(struct ixgbe_adapter *adapter,
+ u8 link_cfg_err)
+{
+ /* If module power level is supported, clear the flag. */
+ if (!(link_cfg_err & (IXGBE_ACI_LINK_INVAL_MAX_POWER_LIMIT |
+ IXGBE_ACI_LINK_MODULE_POWER_UNSUPPORTED))) {
+ adapter->flags2 &= ~IXGBE_FLAG2_MOD_POWER_UNSUPPORTED;
+ return;
+ }
+
+ /* If IXGBE_FLAG2_MOD_POWER_UNSUPPORTED was previously set and the
+ * above block didn't clear this bit, there's nothing to do.
+ */
+ if (adapter->flags2 & IXGBE_FLAG2_MOD_POWER_UNSUPPORTED)
+ return;
+
+ if (link_cfg_err & IXGBE_ACI_LINK_INVAL_MAX_POWER_LIMIT) {
+ netdev_err(adapter->netdev, "The installed module is incompatible with the device's NVM image. Cannot start link.\n");
+ adapter->flags2 |= IXGBE_FLAG2_MOD_POWER_UNSUPPORTED;
+ } else if (link_cfg_err & IXGBE_ACI_LINK_MODULE_POWER_UNSUPPORTED) {
+ netdev_err(adapter->netdev, "The module's power requirements exceed the device's power supply. Cannot start link.\n");
+ adapter->flags2 |= IXGBE_FLAG2_MOD_POWER_UNSUPPORTED;
+ }
+}
+
+/**
+ * ixgbe_check_link_cfg_err - check if link configuration failed
+ * @adapter: pointer to adapter structure
+ * @link_cfg_err: bitmap from the link info structure
+ *
+ * Print if any link configuration failure happens due to the value in the
+ * link_cfg_err parameter in the link info structure.
+ */
+static void ixgbe_check_link_cfg_err(struct ixgbe_adapter *adapter,
+ u8 link_cfg_err)
+{
+ ixgbe_check_module_power(adapter, link_cfg_err);
+ ixgbe_check_phy_fw_load(adapter, link_cfg_err);
+}
+
+/**
+ * ixgbe_process_link_status_event - process the link event
+ * @adapter: pointer to adapter structure
+ * @link_up: true if the physical link is up and false if it is down
+ * @link_speed: current link speed received from the link event
+ *
+ * Return: 0 on success or negative value on failure.
+ */
+static int
+ixgbe_process_link_status_event(struct ixgbe_adapter *adapter, bool link_up,
+ u16 link_speed)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int status;
+
+ /* Update the link info structures and re-enable link events;
+ * don't bail on failure since other bookkeeping is still needed.
+ */
+ status = ixgbe_update_link_info(hw);
+ if (status)
+ e_dev_err("Failed to update link status, err %d aq_err %d\n",
+ status, hw->aci.last_status);
+
+ ixgbe_check_link_cfg_err(adapter, hw->link.link_info.link_cfg_err);
+
+ /* Check if the link state is up after updating link info, and treat
+ * this event as an UP event since the link is actually UP now.
+ */
+ if (hw->link.link_info.link_info & IXGBE_ACI_LINK_UP)
+ link_up = true;
+
+ /* Turn off PHY if media was removed. */
+ if (!(adapter->flags2 & IXGBE_FLAG2_NO_MEDIA) &&
+ !(hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE))
+ adapter->flags2 |= IXGBE_FLAG2_NO_MEDIA;
+
+ if (link_up == adapter->link_up &&
+ link_up == netif_carrier_ok(adapter->netdev) &&
+ link_speed == adapter->link_speed)
+ return 0;
+
+ adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
+ adapter->link_check_timeout = jiffies;
+ ixgbe_watchdog_update_link(adapter);
+
+ if (link_up)
+ ixgbe_watchdog_link_is_up(adapter);
+ else
+ ixgbe_watchdog_link_is_down(adapter);
+
+ return 0;
+}
+
+/**
+ * ixgbe_handle_link_status_event - handle link status event via ACI
+ * @adapter: pointer to adapter structure
+ * @e: event structure containing link status info
+ */
+static void
+ixgbe_handle_link_status_event(struct ixgbe_adapter *adapter,
+ struct ixgbe_aci_event *e)
+{
+ struct ixgbe_aci_cmd_get_link_status_data *link_data;
+ u16 link_speed;
+ bool link_up;
+
+ link_data = (struct ixgbe_aci_cmd_get_link_status_data *)e->msg_buf;
+
+ link_up = !!(link_data->link_info & IXGBE_ACI_LINK_UP);
+ link_speed = le16_to_cpu(link_data->link_speed);
+
+ if (ixgbe_process_link_status_event(adapter, link_up, link_speed))
+ e_dev_warn("Could not process link status event");
+}
+
+/**
+ * ixgbe_schedule_fw_event - schedule Firmware event
+ * @adapter: pointer to the adapter structure
+ *
+ * If the adapter is not in a down, removing or resetting state,
+ * an event is scheduled.
+ */
+static void ixgbe_schedule_fw_event(struct ixgbe_adapter *adapter)
+{
+ if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
+ !test_bit(__IXGBE_REMOVING, &adapter->state) &&
+ !test_bit(__IXGBE_RESETTING, &adapter->state)) {
+ adapter->flags2 |= IXGBE_FLAG2_FW_ASYNC_EVENT;
+ ixgbe_service_event_schedule(adapter);
+ }
+}
+
+/**
+ * ixgbe_aci_event_cleanup - release msg_buf memory
+ * @event: pointer to the event holding msg_buf to be released
+ *
+ * Free the memory allocated for the event's msg_buf; used for scope-based auto cleanup.
+ */
+static void ixgbe_aci_event_cleanup(struct ixgbe_aci_event *event)
+{
+ kfree(event->msg_buf);
+}
+
+/**
+ * ixgbe_handle_fw_event - handle Firmware event
+ * @adapter: pointer to the adapter structure
+ *
+ * Obtain an event from the ACI and then process it according to the
+ * type of the event and the opcode.
+ */
+static void ixgbe_handle_fw_event(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_aci_event event __cleanup(ixgbe_aci_event_cleanup);
+ struct ixgbe_hw *hw = &adapter->hw;
+ bool pending = false;
+ int err;
+
+ if (adapter->flags2 & IXGBE_FLAG2_FW_ASYNC_EVENT)
+ adapter->flags2 &= ~IXGBE_FLAG2_FW_ASYNC_EVENT;
+ event.buf_len = IXGBE_ACI_MAX_BUFFER_SIZE;
+ event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
+ if (!event.msg_buf)
+ return;
+
+ do {
+ err = ixgbe_aci_get_event(hw, &event, &pending);
+ if (err)
+ break;
+
+ switch (le16_to_cpu(event.desc.opcode)) {
+ case ixgbe_aci_opc_get_link_status:
+ ixgbe_handle_link_status_event(adapter, &event);
+ break;
+ case ixgbe_aci_opc_temp_tca_event:
+ e_crit(drv, "%s\n", ixgbe_overheat_msg);
+ ixgbe_down(adapter);
+ break;
+ default:
+ e_warn(hw, "unknown FW async event captured\n");
+ break;
+ }
+ } while (pending);
+}
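The __cleanup() annotation above is what guarantees msg_buf is freed on every return path; a standalone sketch of the same scope-based pattern (the function name is illustrative):

	static void ixgbe_fw_event_scope_sketch(struct ixgbe_hw *hw)
	{
		struct ixgbe_aci_event event __cleanup(ixgbe_aci_event_cleanup) = {};
		bool pending;

		event.buf_len = IXGBE_ACI_MAX_BUFFER_SIZE;
		event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
		if (!event.msg_buf)
			return;		/* msg_buf is NULL, kfree(NULL) is a no-op */

		if (ixgbe_aci_get_event(hw, &event, &pending))
			return;		/* msg_buf is freed automatically here */

		/* ... consume event.desc and event.msg_buf ... */
	}				/* ... and on the normal path, freed here */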
+
static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
u64 qmask)
{
@@ -2982,6 +3213,7 @@ static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
mask = (qmask & 0xFFFFFFFF);
if (mask)
IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
@@ -3035,6 +3267,9 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_e610:
+ mask |= IXGBE_EIMS_FW_EVENT;
+ fallthrough;
case ixgbe_mac_x550em_a:
if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
@@ -3091,12 +3326,16 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data)
if (eicr & IXGBE_EICR_MAILBOX)
ixgbe_msg_task(adapter);
+ if (eicr & IXGBE_EICR_FW_EVENT)
+ ixgbe_schedule_fw_event(adapter);
+
switch (hw->mac.type) {
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
if (hw->phy.type == ixgbe_phy_x550em_ext_t &&
(eicr & IXGBE_EICR_GPI_SDP0_X540)) {
adapter->flags2 |= IXGBE_FLAG2_PHY_INTERRUPT;
@@ -3334,6 +3573,9 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
if (eicr & IXGBE_EICR_LSC)
ixgbe_check_lsc(adapter);
+ if (eicr & IXGBE_EICR_FW_EVENT)
+ ixgbe_schedule_fw_event(adapter);
+
switch (hw->mac.type) {
case ixgbe_mac_82599EB:
ixgbe_check_sfp_event(adapter, eicr);
@@ -3342,6 +3584,7 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
if (eicr & IXGBE_EICR_ECC) {
e_info(link, "Received ECC Err, initiating reset\n");
set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
@@ -3442,6 +3685,7 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
@@ -4359,6 +4603,7 @@ static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
if (adapter->num_vfs)
rdrxctl |= IXGBE_RDRXCTL_PSP;
fallthrough;
@@ -4434,7 +4679,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
static int ixgbe_vlan_rx_add_vid(struct net_device *netdev,
__be16 proto, u16 vid)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
/* add VID to filter table */
@@ -4493,7 +4738,7 @@ void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid)
static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev,
__be16 proto, u16 vid)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
/* remove VID from filter table */
@@ -4526,6 +4771,7 @@ static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
for (i = 0; i < adapter->num_rx_queues; i++) {
struct ixgbe_ring *ring = adapter->rx_ring[i];
@@ -4564,6 +4810,7 @@ static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
for (i = 0; i < adapter->num_rx_queues; i++) {
struct ixgbe_ring *ring = adapter->rx_ring[i];
@@ -4716,7 +4963,7 @@ static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
**/
static int ixgbe_write_mc_addr_list(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
if (!netif_running(netdev))
@@ -4892,7 +5139,7 @@ int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter,
static int ixgbe_uc_sync(struct net_device *netdev, const unsigned char *addr)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
int ret;
ret = ixgbe_add_mac_filter(adapter, addr, VMDQ_P(0));
@@ -4902,7 +5149,7 @@ static int ixgbe_uc_sync(struct net_device *netdev, const unsigned char *addr)
static int ixgbe_uc_unsync(struct net_device *netdev, const unsigned char *addr)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
ixgbe_del_mac_filter(adapter, addr, VMDQ_P(0));
@@ -4920,7 +5167,7 @@ static int ixgbe_uc_unsync(struct net_device *netdev, const unsigned char *addr)
**/
void ixgbe_set_rx_mode(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
netdev_features_t features = netdev->features;
@@ -5022,7 +5269,7 @@ static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
static int ixgbe_udp_tunnel_sync(struct net_device *dev, unsigned int table)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ixgbe_hw *hw = &adapter->hw;
struct udp_tunnel_info ti;
@@ -5148,6 +5395,7 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
dv_id = IXGBE_DV_X540(link, tc);
break;
default:
@@ -5208,6 +5456,7 @@ static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
dv_id = IXGBE_LOW_DV_X540(tc);
break;
default:
@@ -5510,6 +5759,48 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
}
/**
+ * ixgbe_enable_link_status_events - enable link status events
+ * @adapter: pointer to the adapter structure
+ * @mask: event mask to be set
+ *
+ * Enables link status events by invoking ixgbe_configure_lse()
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_enable_link_status_events(struct ixgbe_adapter *adapter,
+ u16 mask)
+{
+ int err;
+
+ err = ixgbe_configure_lse(&adapter->hw, true, mask);
+ if (err)
+ return err;
+
+ adapter->lse_mask = mask;
+ return 0;
+}
+
+/**
+ * ixgbe_disable_link_status_events - disable link status events
+ * @adapter: pointer to the adapter structure
+ *
+ * Disables link status events by invoking ixgbe_configure_lse()
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_disable_link_status_events(struct ixgbe_adapter *adapter)
+{
+ int err;
+
+ err = ixgbe_configure_lse(&adapter->hw, false, adapter->lse_mask);
+ if (err)
+ return err;
+
+ adapter->lse_mask = 0;
+ return 0;
+}
+
+/**
* ixgbe_sfp_link_config - set up SFP+ link
* @adapter: pointer to private adapter struct
**/
@@ -5532,13 +5823,21 @@ static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
* ixgbe_non_sfp_link_config - set up non-SFP+ link
* @hw: pointer to private hardware struct
*
- * Returns 0 on success, negative on failure
+ * Configure non-SFP link.
+ *
+ * Return: 0 on success, negative on failure
**/
static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
{
- u32 speed;
+ struct ixgbe_adapter *adapter = container_of(hw, struct ixgbe_adapter,
+ hw);
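+ /* Mask off all link events except the ones listed below, so only those are reported. */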
+ u16 mask = ~((u16)(IXGBE_ACI_LINK_EVENT_UPDOWN |
+ IXGBE_ACI_LINK_EVENT_MEDIA_NA |
+ IXGBE_ACI_LINK_EVENT_MODULE_QUAL_FAIL |
+ IXGBE_ACI_LINK_EVENT_PHY_FW_LOAD_FAIL));
bool autoneg, link_up = false;
int ret = -EIO;
+ u32 speed;
if (hw->mac.ops.check_link)
ret = hw->mac.ops.check_link(hw, &speed, &link_up, false);
@@ -5561,13 +5860,53 @@ static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
if (ret)
return ret;
- if (hw->mac.ops.setup_link)
+ if (hw->mac.ops.setup_link) {
+ if (adapter->hw.mac.type == ixgbe_mac_e610) {
+ ret = ixgbe_enable_link_status_events(adapter, mask);
+ if (ret)
+ return ret;
+ }
ret = hw->mac.ops.setup_link(hw, speed, link_up);
+ }
return ret;
}
/**
+ * ixgbe_check_media_subtask - check for media
+ * @adapter: pointer to adapter structure
+ *
+ * If media is available then initialize PHY user configuration. Configure the
+ * PHY if the interface is up.
+ */
+static void ixgbe_check_media_subtask(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ /* No need to check for media if it's already present */
+ if (!(adapter->flags2 & IXGBE_FLAG2_NO_MEDIA))
+ return;
+
+ /* Refresh link info and check if media is present */
+ if (ixgbe_update_link_info(hw))
+ return;
+
+ ixgbe_check_link_cfg_err(adapter, hw->link.link_info.link_cfg_err);
+
+ if (hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE) {
+ /* PHY settings are reset on media insertion, reconfigure
+ * PHY to preserve settings.
+ */
+ if (!(ixgbe_non_sfp_link_config(&adapter->hw)))
+ adapter->flags2 &= ~IXGBE_FLAG2_NO_MEDIA;
+
+ /* A Link Status Event will be generated; the event handler
+ * will complete bringing the interface up
+ */
+ }
+}
+
+/**
* ixgbe_clear_vf_stats_counters - Clear out VF stats after reset
* @adapter: board private structure
*
@@ -5630,6 +5969,7 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
default:
IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
@@ -5980,6 +6320,7 @@ dma_engine_disable:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
(IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
~IXGBE_DMATXCTL_TE));
@@ -6198,7 +6539,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
- del_timer_sync(&adapter->service_timer);
+ timer_delete_sync(&adapter->service_timer);
if (adapter->num_vfs) {
/* Clear EITR Select mapping */
@@ -6224,6 +6565,8 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
ixgbe_clean_all_tx_rings(adapter);
ixgbe_clean_all_rx_rings(adapter);
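+ /* E610: stop FW link status event reporting while the interface is down. */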
+ if (adapter->hw.mac.type == ixgbe_mac_e610)
+ ixgbe_disable_link_status_events(adapter);
}
/**
@@ -6258,7 +6601,7 @@ static void ixgbe_set_eee_capable(struct ixgbe_adapter *adapter)
**/
static void ixgbe_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
/* Do the reset outside of interrupt context */
ixgbe_tx_timeout_reset(adapter);
@@ -6279,6 +6622,7 @@ static void ixgbe_init_dcb(struct ixgbe_adapter *adapter)
break;
case ixgbe_mac_X540:
case ixgbe_mac_X550:
+ case ixgbe_mac_e610:
adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS;
adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS;
break;
@@ -6342,6 +6686,8 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
hw->subsystem_vendor_id = pdev->subsystem_vendor;
hw->subsystem_device_id = pdev->subsystem_device;
+ hw->mac.max_link_up_time = IXGBE_LINK_UP_TIME;
+
/* get_invariants needs the device IDs */
ii->get_invariants(hw);
@@ -6504,7 +6850,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK;
/* initialize eeprom parameters */
- if (ixgbe_init_eeprom_params_generic(hw)) {
+ if (hw->eeprom.ops.init_params(hw)) {
e_dev_err("EEPROM initialization failed\n");
return -EIO;
}
@@ -6820,7 +7166,7 @@ static int ixgbe_max_xdp_frame_size(struct ixgbe_adapter *adapter)
**/
static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
if (ixgbe_enabled_xdp_adapter(adapter)) {
int new_frame_size = new_mtu + IXGBE_PKT_HDR_PAD;
@@ -6867,7 +7213,7 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
**/
int ixgbe_open(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
int err, queues;
@@ -6909,6 +7255,19 @@ int ixgbe_open(struct net_device *netdev)
ixgbe_up_complete(adapter);
udp_tunnel_nic_reset_ntf(netdev);
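+ /* E610 links are managed by FW: refresh link info and configure the link now that the interface is up. */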
+ if (adapter->hw.mac.type == ixgbe_mac_e610) {
+ int err = ixgbe_update_link_info(&adapter->hw);
+
+ if (err)
+ e_dev_err("Failed to update link info, err %d.\n", err);
+
+ ixgbe_check_link_cfg_err(adapter,
+ adapter->hw.link.link_info.link_cfg_err);
+
+ err = ixgbe_non_sfp_link_config(&adapter->hw);
+ if (err)
+ e_dev_err("Link setup failed, err %d.\n", err);
+ }
return 0;
@@ -6958,7 +7317,7 @@ static void ixgbe_close_suspend(struct ixgbe_adapter *adapter)
**/
int ixgbe_close(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
ixgbe_ptp_stop(adapter);
@@ -7062,6 +7421,7 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
pci_wake_from_d3(pdev, !!wufc);
break;
default:
@@ -7209,6 +7569,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
hwstats->pxonrxc[i] +=
IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
break;
@@ -7221,11 +7582,12 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
for (i = 0; i < 16; i++) {
hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
- if ((hw->mac.type == ixgbe_mac_82599EB) ||
- (hw->mac.type == ixgbe_mac_X540) ||
- (hw->mac.type == ixgbe_mac_X550) ||
- (hw->mac.type == ixgbe_mac_X550EM_x) ||
- (hw->mac.type == ixgbe_mac_x550em_a)) {
+ if (hw->mac.type == ixgbe_mac_82599EB ||
+ hw->mac.type == ixgbe_mac_X540 ||
+ hw->mac.type == ixgbe_mac_X550 ||
+ hw->mac.type == ixgbe_mac_X550EM_x ||
+ hw->mac.type == ixgbe_mac_x550em_a ||
+ hw->mac.type == ixgbe_mac_e610) {
hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); /* to clear */
hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
@@ -7251,6 +7613,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
/* OS2BMC stats are X540 and later */
hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC);
hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC);
@@ -7551,6 +7914,7 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
case ixgbe_mac_82599EB: {
u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
@@ -7946,7 +8310,8 @@ static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
**/
static void ixgbe_service_timer(struct timer_list *t)
{
- struct ixgbe_adapter *adapter = from_timer(adapter, t, service_timer);
+ struct ixgbe_adapter *adapter = timer_container_of(adapter, t,
+ service_timer);
unsigned long next_event_offset;
/* poll faster when waiting for link */
@@ -8001,6 +8366,34 @@ static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
rtnl_unlock();
}
+static int ixgbe_check_fw_api_mismatch(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ if (hw->mac.type != ixgbe_mac_e610)
+ return 0;
+
+ if (hw->mac.ops.get_fw_ver && hw->mac.ops.get_fw_ver(hw))
+ return 0;
+
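+ /* A FW API major version newer than the driver supports is fatal; other mismatches are only logged. */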
+ if (hw->api_maj_ver > IXGBE_FW_API_VER_MAJOR) {
+ e_dev_err("The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
+
+ adapter->flags2 |= IXGBE_FLAG2_API_MISMATCH;
+ return -EOPNOTSUPP;
+ } else if (hw->api_maj_ver == IXGBE_FW_API_VER_MAJOR &&
+ hw->api_min_ver > IXGBE_FW_API_VER_MINOR + IXGBE_FW_API_VER_DIFF_ALLOWED) {
+ e_dev_info("The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
+ adapter->flags2 |= IXGBE_FLAG2_API_MISMATCH;
+ } else if (hw->api_maj_ver < IXGBE_FW_API_VER_MAJOR ||
+ hw->api_min_ver < IXGBE_FW_API_VER_MINOR - IXGBE_FW_API_VER_DIFF_ALLOWED) {
+ e_dev_info("The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
+ adapter->flags2 |= IXGBE_FLAG2_API_MISMATCH;
+ }
+
+ return 0;
+}
+
/**
* ixgbe_check_fw_error - Check firmware for errors
* @adapter: the adapter private structure
@@ -8011,12 +8404,14 @@ static bool ixgbe_check_fw_error(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 fwsm;
+ int err;
/* read fwsm.ext_err_ind register and log errors */
fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
+ /* Skip the FW validity warning on E610; its FW may just be reloading, which would make the warning misleading */
if (fwsm & IXGBE_FWSM_EXT_ERR_IND_MASK ||
- !(fwsm & IXGBE_FWSM_FW_VAL_BIT))
+ (!(fwsm & IXGBE_FWSM_FW_VAL_BIT) && hw->mac.type != ixgbe_mac_e610))
e_dev_warn("Warning firmware error detected FWSM: 0x%08X\n",
fwsm);
@@ -8024,10 +8419,53 @@ static bool ixgbe_check_fw_error(struct ixgbe_adapter *adapter)
e_dev_err("Firmware recovery mode detected. Limiting functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
return true;
}
+ if (!(adapter->flags2 & IXGBE_FLAG2_API_MISMATCH)) {
+ err = ixgbe_check_fw_api_mismatch(adapter);
+ if (err)
+ return true;
+ }
+
+ /* return here if FW rollback mode has been already detected */
+ if (adapter->flags2 & IXGBE_FLAG2_FW_ROLLBACK)
+ return false;
+
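+ /* Warn about rollback mode once, including NVM/FW versions when they can be read. */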
+ if (hw->mac.ops.fw_rollback_mode && hw->mac.ops.fw_rollback_mode(hw)) {
+ struct ixgbe_nvm_info *nvm_info = &adapter->hw.flash.nvm;
+ char ver_buff[64] = "";
+
+ if (hw->mac.ops.get_fw_ver && hw->mac.ops.get_fw_ver(hw))
+ goto no_version;
+
+ if (hw->mac.ops.get_nvm_ver &&
+ hw->mac.ops.get_nvm_ver(hw, nvm_info))
+ goto no_version;
+
+ snprintf(ver_buff, sizeof(ver_buff),
+ "Current version is NVM:%x.%x.%x, FW:%d.%d. ",
+ nvm_info->major, nvm_info->minor, nvm_info->eetrack,
+ hw->fw_maj_ver, hw->fw_min_ver);
+no_version:
+ e_dev_warn("Firmware rollback mode detected. %sDevice may exhibit limited functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware rollback mode.",
+ ver_buff);
+
+ adapter->flags2 |= IXGBE_FLAG2_FW_ROLLBACK;
+ }
return false;
}
+static void ixgbe_recovery_service_task(struct work_struct *work)
+{
+ struct ixgbe_adapter *adapter = container_of(work,
+ struct ixgbe_adapter,
+ service_task);
+
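+ /* In recovery mode only FW events are serviced; poll again in roughly 100 ms. */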
+ ixgbe_handle_fw_event(adapter);
+ ixgbe_service_event_complete(adapter);
+
+ mod_timer(&adapter->service_timer, jiffies + msecs_to_jiffies(100));
+}
+
/**
* ixgbe_service_task - manages and runs subtasks
* @work: pointer to work_struct containing our data
@@ -8047,11 +8485,21 @@ static void ixgbe_service_task(struct work_struct *work)
return;
}
if (ixgbe_check_fw_error(adapter)) {
- if (!test_bit(__IXGBE_DOWN, &adapter->state))
+ if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
+ if (adapter->mii_bus) {
+ mdiobus_unregister(adapter->mii_bus);
+ adapter->mii_bus = NULL;
+ }
unregister_netdev(adapter->netdev);
+ }
ixgbe_service_event_complete(adapter);
return;
}
+ if (adapter->hw.mac.type == ixgbe_mac_e610) {
+ if (adapter->flags2 & IXGBE_FLAG2_FW_ASYNC_EVENT)
+ ixgbe_handle_fw_event(adapter);
+ ixgbe_check_media_subtask(adapter);
+ }
ixgbe_reset_subtask(adapter);
ixgbe_phy_interrupt_subtask(adapter);
ixgbe_sfp_detection_subtask(adapter);
@@ -8633,7 +9081,7 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
switch (vlan_get_protocol(skb)) {
case htons(ETH_P_FCOE):
case htons(ETH_P_FIP):
- adapter = netdev_priv(dev);
+ adapter = ixgbe_from_netdev(dev);
if (!sb_dev && (adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
break;
@@ -8892,7 +9340,7 @@ static netdev_tx_t __ixgbe_xmit_frame(struct sk_buff *skb,
struct net_device *netdev,
struct ixgbe_ring *ring)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_ring *tx_ring;
/*
@@ -8924,7 +9372,7 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
**/
static int ixgbe_set_mac(struct net_device *netdev, void *p)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
struct sockaddr *addr = p;
@@ -8942,7 +9390,7 @@ static int ixgbe_set_mac(struct net_device *netdev, void *p)
static int
ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
u16 value;
int rc;
@@ -8968,7 +9416,7 @@ ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr)
static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad,
u16 addr, u16 value)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
if (adapter->mii_bus) {
@@ -8988,7 +9436,7 @@ static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad,
static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
switch (cmd) {
case SIOCSHWTSTAMP:
@@ -9014,7 +9462,7 @@ static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
static int ixgbe_add_sanmac_netdev(struct net_device *dev)
{
int err = 0;
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ixgbe_hw *hw = &adapter->hw;
if (is_valid_ether_addr(hw->mac.san_addr)) {
@@ -9038,7 +9486,7 @@ static int ixgbe_add_sanmac_netdev(struct net_device *dev)
static int ixgbe_del_sanmac_netdev(struct net_device *dev)
{
int err = 0;
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ixgbe_mac_info *mac = &adapter->hw.mac;
if (is_valid_ether_addr(mac->san_addr)) {
@@ -9069,7 +9517,7 @@ static void ixgbe_get_ring_stats64(struct rtnl_link_stats64 *stats,
static void ixgbe_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
int i;
rcu_read_lock();
@@ -9112,7 +9560,7 @@ static void ixgbe_get_stats64(struct net_device *netdev,
static int ixgbe_ndo_get_vf_stats(struct net_device *netdev, int vf,
struct ifla_vf_stats *vf_stats)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
if (vf < 0 || vf >= adapter->num_vfs)
return -EINVAL;
@@ -9229,7 +9677,7 @@ static int ixgbe_reassign_macvlan_pool(struct net_device *vdev,
static void ixgbe_defrag_macvlan_pools(struct net_device *dev)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct netdev_nested_priv priv = {
.data = (void *)adapter,
};
@@ -9250,7 +9698,7 @@ static void ixgbe_defrag_macvlan_pools(struct net_device *dev)
*/
int ixgbe_setup_tc(struct net_device *dev, u8 tc)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ixgbe_hw *hw = &adapter->hw;
/* Hardware supports up to 8 traffic classes */
@@ -9808,7 +10256,7 @@ static LIST_HEAD(ixgbe_block_cb_list);
static int __ixgbe_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
switch (type) {
case TC_SETUP_BLOCK:
@@ -9836,7 +10284,7 @@ void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter)
#endif
void ixgbe_do_reset(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
if (netif_running(netdev))
ixgbe_reinit_locked(adapter);
@@ -9847,7 +10295,7 @@ void ixgbe_do_reset(struct net_device *netdev)
static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
netdev_features_t features)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
/* If Rx checksum is disabled, then RSC/LRO should also be disabled */
if (!(features & NETIF_F_RXCSUM))
@@ -9884,7 +10332,7 @@ static void ixgbe_reset_l2fw_offload(struct ixgbe_adapter *adapter)
static int ixgbe_set_features(struct net_device *netdev,
netdev_features_t features)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
netdev_features_t changed = netdev->features ^ features;
bool need_reset = false;
@@ -9960,7 +10408,7 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
{
/* guarantee we can provide a unique filter for the unicast address */
if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
u16 pool = VMDQ_P(0);
if (netdev_uc_count(dev) >= ixgbe_available_rars(adapter, pool))
@@ -10048,7 +10496,7 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
struct nlmsghdr *nlh, u16 flags,
struct netlink_ext_ack *extack)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct nlattr *attr, *br_spec;
int rem;
@@ -10076,7 +10524,7 @@ static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
struct net_device *dev,
u32 filter_mask, int nlflags)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
return 0;
@@ -10088,7 +10536,7 @@ static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(pdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(pdev);
struct ixgbe_fwd_adapter *accel;
int tcs = adapter->hw_tcs ? : 1;
int pool, err;
@@ -10185,7 +10633,7 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
{
struct ixgbe_fwd_adapter *accel = priv;
- struct ixgbe_adapter *adapter = netdev_priv(pdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(pdev);
unsigned int rxbase = accel->rx_base_queue;
unsigned int i;
@@ -10263,7 +10711,7 @@ ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
{
int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct bpf_prog *old_prog;
bool need_reset;
int num_queues;
@@ -10335,7 +10783,7 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
static int ixgbe_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
switch (xdp->command) {
case XDP_SETUP_PROG:
@@ -10370,7 +10818,7 @@ void ixgbe_xdp_ring_update_tail_locked(struct ixgbe_ring *ring)
static int ixgbe_xdp_xmit(struct net_device *dev, int n,
struct xdp_frame **frames, u32 flags)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ixgbe_ring *ring;
int nxmit = 0;
int i;
@@ -10771,6 +11219,24 @@ bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
}
/**
+ * ixgbe_set_fw_version_e610 - Set FW version specifically on E610 adapters
+ * @adapter: the adapter private structure
+ *
+ * This function is used by probe and ethtool to determine the FW version to
+ * display. The FW version is taken from the EEPROM/NVM.
+ */
+void ixgbe_set_fw_version_e610(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_orom_info *orom = &adapter->hw.flash.orom;
+ struct ixgbe_nvm_info *nvm = &adapter->hw.flash.nvm;
+
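+ /* Format: "<NVM major>.<NVM minor> 0x<eetrack> <OROM major>.<build>.<patch>" */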
+ snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
+ "%x.%02x 0x%x %d.%d.%d", nvm->major, nvm->minor,
+ nvm->eetrack, orom->major, orom->build, orom->patch);
+}
+
+/**
* ixgbe_set_fw_version - Set FW version
* @adapter: the adapter private structure
*
@@ -10782,6 +11248,11 @@ static void ixgbe_set_fw_version(struct ixgbe_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
struct ixgbe_nvm_version nvm_ver;
+ if (adapter->hw.mac.type == ixgbe_mac_e610) {
+ ixgbe_set_fw_version_e610(adapter);
+ return;
+ }
+
ixgbe_get_oem_prod_version(hw, &nvm_ver);
if (nvm_ver.oem_valid) {
snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
@@ -10806,6 +11277,66 @@ static void ixgbe_set_fw_version(struct ixgbe_adapter *adapter)
}
/**
+ * ixgbe_recovery_probe - Handle FW recovery mode during probe
+ * @adapter: the adapter private structure
+ *
+ * Perform limited driver initialization when FW error is detected.
+ *
+ * Return: 0 on successful probe for E610, -EIO if recovery mode is detected
+ * for a non-E610 adapter, or another error status code in any other case.
+ */
+static int ixgbe_recovery_probe(struct ixgbe_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ struct ixgbe_hw *hw = &adapter->hw;
+ bool disable_dev;
+ int err = -EIO;
+
+ if (hw->mac.type != ixgbe_mac_e610)
+ goto clean_up_probe;
+
+ ixgbe_get_hw_control(adapter);
+ mutex_init(&hw->aci.lock);
+ err = ixgbe_get_flash_data(&adapter->hw);
+ if (err)
+ goto shutdown_aci;
+
+ timer_setup(&adapter->service_timer, ixgbe_service_timer, 0);
+ INIT_WORK(&adapter->service_task, ixgbe_recovery_service_task);
+ set_bit(__IXGBE_SERVICE_INITED, &adapter->state);
+ clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
+
+ if (hw->mac.ops.get_bus_info)
+ hw->mac.ops.get_bus_info(hw);
+
+ pci_set_drvdata(pdev, adapter);
+ /* Create a devlink interface so the NIC can still be managed,
+ * e.g. a new NVM image can be loaded
+ */
+ devl_lock(adapter->devlink);
+ ixgbe_devlink_register_port(adapter);
+ SET_NETDEV_DEVLINK_PORT(adapter->netdev,
+ &adapter->devlink_port);
+ ixgbe_devlink_init_regions(adapter);
+ devl_register(adapter->devlink);
+ devl_unlock(adapter->devlink);
+
+ return 0;
+shutdown_aci:
+ mutex_destroy(&adapter->hw.aci.lock);
+ ixgbe_release_hw_control(adapter);
+ devlink_free(adapter->devlink);
+clean_up_probe:
+ disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
+ free_netdev(netdev);
+ pci_release_mem_regions(pdev);
+ if (disable_dev)
+ pci_disable_device(pdev);
+ return err;
+}
+
+/**
* ixgbe_probe - Device Initialization Routine
* @pdev: PCI device information struct
* @ent: entry in ixgbe_pci_tbl
@@ -10819,6 +11350,7 @@ static void ixgbe_set_fw_version(struct ixgbe_adapter *adapter)
static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *netdev;
+ struct ixgbe_netdevice_priv *netdev_priv_wrapper;
struct ixgbe_adapter *adapter = NULL;
struct ixgbe_hw *hw;
const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
@@ -10868,9 +11400,17 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
#else
indices = IXGBE_MAX_RSS_INDICES;
#endif
+ } else if (ii->mac == ixgbe_mac_e610) {
+ indices = IXGBE_MAX_RSS_INDICES_X550;
}
- netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
+ adapter = ixgbe_allocate_devlink(&pdev->dev);
+ if (IS_ERR(adapter)) {
+ err = PTR_ERR(adapter);
+ goto err_devlink;
+ }
+
+ netdev = alloc_etherdev_mq(sizeof(*netdev_priv_wrapper), indices);
if (!netdev) {
err = -ENOMEM;
goto err_alloc_etherdev;
@@ -10878,7 +11418,8 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
SET_NETDEV_DEV(netdev, &pdev->dev);
- adapter = netdev_priv(netdev);
+ netdev_priv_wrapper = netdev_priv(netdev);
+ netdev_priv_wrapper->adapter = adapter;
adapter->netdev = netdev;
adapter->pdev = pdev;
@@ -10894,11 +11435,6 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_ioremap;
}
- netdev->netdev_ops = &ixgbe_netdev_ops;
- ixgbe_set_ethtool_ops(netdev);
- netdev->watchdog_timeo = 5 * HZ;
- strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
-
/* Setup hw api */
hw->mac.ops = *ii->mac_ops;
hw->mac.type = ii->mac;
@@ -10928,17 +11464,40 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->phy.mdio.mdio_read = ixgbe_mdio_read;
hw->phy.mdio.mdio_write = ixgbe_mdio_write;
+ netdev->netdev_ops = &ixgbe_netdev_ops;
+ ixgbe_set_ethtool_ops(netdev);
+ netdev->watchdog_timeo = 5 * HZ;
+ strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
+
/* setup the private structure */
err = ixgbe_sw_init(adapter, ii);
if (err)
goto err_sw_init;
+ /* Make sure the SWFW semaphore is in a valid state */
+ if (hw->mac.ops.init_swfw_sync)
+ hw->mac.ops.init_swfw_sync(hw);
+
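+ /* If the FW reports an error (e.g. recovery mode), fall back to the limited recovery probe. */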
+ if (ixgbe_check_fw_error(adapter))
+ return ixgbe_recovery_probe(adapter);
+
+ if (adapter->hw.mac.type == ixgbe_mac_e610) {
+ err = ixgbe_get_caps(&adapter->hw);
+ if (err)
+ dev_err(&pdev->dev, "ixgbe_get_caps failed %d\n", err);
+
+ err = ixgbe_get_flash_data(&adapter->hw);
+ if (err)
+ goto err_sw_init;
+ }
+
if (adapter->hw.mac.type == ixgbe_mac_82599EB)
adapter->flags2 |= IXGBE_FLAG2_AUTO_DISABLE_VF;
switch (adapter->hw.mac.type) {
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_e610:
netdev->udp_tunnel_nic_info = &ixgbe_udp_tunnels_x550;
break;
case ixgbe_mac_x550em_a:
@@ -10948,10 +11507,6 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
break;
}
- /* Make sure the SWFW semaphore is in a valid state */
- if (hw->mac.ops.init_swfw_sync)
- hw->mac.ops.init_swfw_sync(hw);
-
/* Make it possible the adapter to be woken up via WOL */
switch (adapter->hw.mac.type) {
case ixgbe_mac_82599EB:
@@ -10959,6 +11514,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
break;
default:
@@ -11103,11 +11659,6 @@ skip_sriov:
if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
netdev->features |= NETIF_F_LRO;
- if (ixgbe_check_fw_error(adapter)) {
- err = -EIO;
- goto err_sw_init;
- }
-
/* make sure the EEPROM is good */
if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
e_dev_err("The EEPROM Checksum Is Not Valid\n");
@@ -11130,6 +11681,8 @@ skip_sriov:
ether_addr_copy(hw->mac.addr, hw->mac.perm_addr);
ixgbe_mac_set_default_filter(adapter);
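+ /* The ACI mutex serializes Admin Command Interface access to the E610 FW. */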
+ if (hw->mac.type == ixgbe_mac_e610)
+ mutex_init(&hw->aci.lock);
timer_setup(&adapter->service_timer, ixgbe_service_timer, 0);
if (ixgbe_removed(hw->hw_addr)) {
@@ -11188,7 +11741,7 @@ skip_sriov:
if (expected_gts > 0)
ixgbe_check_minimum_link(adapter, expected_gts);
- err = ixgbe_read_pba_string_generic(hw, part_str, sizeof(part_str));
+ err = hw->eeprom.ops.read_pba_string(hw, part_str, sizeof(part_str));
if (err)
strscpy(part_str, "Unknown", sizeof(part_str));
if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
@@ -11214,6 +11767,11 @@ skip_sriov:
}
strcpy(netdev->name, "eth%d");
pci_set_drvdata(pdev, adapter);
+
+ devl_lock(adapter->devlink);
+ ixgbe_devlink_register_port(adapter);
+ SET_NETDEV_DEVLINK_PORT(adapter->netdev, &adapter->devlink_port);
+
err = register_netdev(netdev);
if (err)
goto err_register;
@@ -11268,13 +11826,20 @@ skip_sriov:
if (err)
goto err_netdev;
+ ixgbe_devlink_init_regions(adapter);
+ devl_register(adapter->devlink);
+ devl_unlock(adapter->devlink);
return 0;
err_netdev:
unregister_netdev(netdev);
err_register:
+ devl_port_unregister(&adapter->devlink_port);
+ devl_unlock(adapter->devlink);
ixgbe_release_hw_control(adapter);
ixgbe_clear_interrupt_scheme(adapter);
+ if (hw->mac.type == ixgbe_mac_e610)
+ mutex_destroy(&adapter->hw.aci.lock);
err_sw_init:
ixgbe_disable_sriov(adapter);
adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
@@ -11287,7 +11852,9 @@ err_ioremap:
disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
free_netdev(netdev);
err_alloc_etherdev:
+ devlink_free(adapter->devlink);
pci_release_mem_regions(pdev);
+err_devlink:
err_pci_reg:
err_dma:
if (!adapter || disable_dev)
@@ -11316,11 +11883,19 @@ static void ixgbe_remove(struct pci_dev *pdev)
return;
netdev = adapter->netdev;
+ devl_lock(adapter->devlink);
+ devl_unregister(adapter->devlink);
+ ixgbe_devlink_destroy_regions(adapter);
ixgbe_dbg_adapter_exit(adapter);
set_bit(__IXGBE_REMOVING, &adapter->state);
cancel_work_sync(&adapter->service_task);
+ if (adapter->hw.mac.type == ixgbe_mac_e610) {
+ ixgbe_disable_link_status_events(adapter);
+ mutex_destroy(&adapter->hw.aci.lock);
+ }
+
if (adapter->mii_bus)
mdiobus_unregister(adapter->mii_bus);
@@ -11346,6 +11921,10 @@ static void ixgbe_remove(struct pci_dev *pdev)
if (netdev->reg_state == NETREG_REGISTERED)
unregister_netdev(netdev);
+ devl_port_unregister(&adapter->devlink_port);
+ devl_unlock(adapter->devlink);
+ devlink_free(adapter->devlink);
+
ixgbe_stop_ipsec_offload(adapter);
ixgbe_clear_interrupt_scheme(adapter);
@@ -11452,6 +12031,9 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
case ixgbe_mac_x550em_a:
device_id = IXGBE_DEV_ID_X550EM_A_VF;
break;
+ case ixgbe_mac_e610:
+ device_id = IXGBE_DEV_ID_E610_VF;
+ break;
default:
device_id = 0;
break;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
index d67d77e5dacc..788b5af07c70 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#include <linux/pci.h>
#include <linux/delay.h>
@@ -283,6 +283,7 @@ static int ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
vflre = IXGBE_READ_REG(hw, IXGBE_VFLREC(reg_offset));
break;
default:
@@ -407,6 +408,7 @@ void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
hw->mac.type != ixgbe_mac_X550 &&
hw->mac.type != ixgbe_mac_X550EM_x &&
hw->mac.type != ixgbe_mac_x550em_a &&
+ hw->mac.type != ixgbe_mac_e610 &&
hw->mac.type != ixgbe_mac_X540)
return;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 07eaa3c3f4d3..2d54828bdfbb 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#include <linux/pci.h>
#include <linux/delay.h>
@@ -167,7 +167,7 @@ int ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr,
u16 reg, u16 val, bool lock)
{
u32 swfw_mask = hw->phy.phy_semaphore_mask;
- int max_retry = 1;
+ int max_retry = 3;
int retry = 0;
u8 reg_high;
u8 csum;
@@ -1117,7 +1117,7 @@ int ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
MDIO_MMD_AN, &autoneg_reg);
- if (hw->mac.type == ixgbe_mac_X550) {
+ if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_e610) {
/* Set or unset auto-negotiation 5G advertisement */
autoneg_reg &= ~IXGBE_MII_5GBASE_T_ADVERTISE;
if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_5GB_FULL) &&
@@ -1233,6 +1233,7 @@ static int ixgbe_get_copper_speeds_supported(struct ixgbe_hw *hw)
switch (hw->mac.type) {
case ixgbe_mac_X550:
+ case ixgbe_mac_e610:
hw->phy.speeds_supported |= IXGBE_LINK_SPEED_2_5GB_FULL;
hw->phy.speeds_supported |= IXGBE_LINK_SPEED_5GB_FULL;
break;
@@ -2284,7 +2285,7 @@ static int ixgbe_write_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 data, bool lock)
{
u32 swfw_mask = hw->phy.phy_semaphore_mask;
- u32 max_retry = 1;
+ u32 max_retry = 3;
u32 retry = 0;
int status;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index 14aa2ca51f70..81179c60af4e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -40,7 +40,7 @@
#define IXGBE_SFF_1GBASESX_CAPABLE 0x1
#define IXGBE_SFF_1GBASELX_CAPABLE 0x2
#define IXGBE_SFF_1GBASET_CAPABLE 0x8
-#define IXGBE_SFF_BASEBX10_CAPABLE 0x64
+#define IXGBE_SFF_BASEBX10_CAPABLE 0x40
#define IXGBE_SFF_10GBASESR_CAPABLE 0x10
#define IXGBE_SFF_10GBASELR_CAPABLE 0x20
#define IXGBE_SFF_SOFT_RS_SELECT_MASK 0x8
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 9339edbd9082..eef25e11d938 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -140,6 +140,7 @@
* proper mult and shift to convert the cycles into nanoseconds of time.
*/
#define IXGBE_X550_BASE_PERIOD 0xC80000000ULL
+#define IXGBE_E610_BASE_PERIOD 0x333333333ULL
#define INCVALUE_MASK 0x7FFFFFFF
#define ISGN 0x80000000
@@ -415,6 +416,7 @@ static void ixgbe_ptp_convert_to_hwtstamp(struct ixgbe_adapter *adapter,
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
/* Upper 32 bits represent billions of cycles, lower 32 bits
* represent cycles. However, we use timespec64_to_ns for the
* correct math even though the units haven't been corrected
@@ -492,11 +494,13 @@ static int ixgbe_ptp_adjfine_X550(struct ptp_clock_info *ptp, long scaled_ppm)
struct ixgbe_adapter *adapter =
container_of(ptp, struct ixgbe_adapter, ptp_caps);
struct ixgbe_hw *hw = &adapter->hw;
+ u64 rate, base;
bool neg_adj;
- u64 rate;
u32 inca;
- neg_adj = diff_by_scaled_ppm(IXGBE_X550_BASE_PERIOD, scaled_ppm, &rate);
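+ /* E610 uses a different SYSTIME base period than the X550 family. */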
+ base = hw->mac.type == ixgbe_mac_e610 ? IXGBE_E610_BASE_PERIOD :
+ IXGBE_X550_BASE_PERIOD;
+ neg_adj = diff_by_scaled_ppm(base, scaled_ppm, &rate);
/* warn if rate is too large */
if (rate >= INCVALUE_MASK)
@@ -559,6 +563,7 @@ static int ixgbe_ptp_gettimex(struct ptp_clock_info *ptp,
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
/* Upper 32 bits represent billions of cycles, lower 32 bits
* represent cycles. However, we use timespec64_to_ns for the
* correct math even though the units haven't been corrected
@@ -1067,6 +1072,7 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
/* enable timestamping all packets only if at least some
* packets were requested. Otherwise, play nice and disable
* timestamping
@@ -1233,6 +1239,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
fallthrough;
case ixgbe_mac_x550em_a:
case ixgbe_mac_X550:
+ case ixgbe_mac_e610:
cc.read = ixgbe_ptp_read_X550;
break;
case ixgbe_mac_X540:
@@ -1280,6 +1287,7 @@ static void ixgbe_ptp_init_systime(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
case ixgbe_mac_X550:
+ case ixgbe_mac_e610:
tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC);
/* Reset SYSTIME registers to 0 */
@@ -1407,6 +1415,7 @@ static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
snprintf(adapter->ptp_caps.name, 16, "%s", netdev->name);
adapter->ptp_caps.owner = THIS_MODULE;
adapter->ptp_caps.max_adj = 30000000;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 9631559a5aea..0dbbd2befd4d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -1048,7 +1048,7 @@ static int ixgbe_negotiate_vf_api(struct ixgbe_adapter *adapter,
break;
}
- e_info(drv, "VF %d requested invalid api version %u\n", vf, api);
+ e_dbg(drv, "VF %d requested unsupported api version %u\n", vf, api);
return -1;
}
@@ -1418,7 +1418,7 @@ void ixgbe_set_all_vfs(struct ixgbe_adapter *adapter)
int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
int retval;
if (vf >= adapter->num_vfs)
@@ -1526,7 +1526,7 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
u8 qos, __be16 vlan_proto)
{
int err = 0;
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7))
return -EINVAL;
@@ -1644,7 +1644,7 @@ void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter)
int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate,
int max_tx_rate)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
int link_speed;
/* verify VF is active */
@@ -1679,7 +1679,7 @@ int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate,
int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
if (vf >= adapter->num_vfs)
@@ -1757,7 +1757,7 @@ void ixgbe_set_vf_link_state(struct ixgbe_adapter *adapter, int vf, int state)
**/
int ixgbe_ndo_set_vf_link_state(struct net_device *netdev, int vf, int state)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
int ret = 0;
if (vf < 0 || vf >= adapter->num_vfs) {
@@ -1794,7 +1794,7 @@ int ixgbe_ndo_set_vf_link_state(struct net_device *netdev, int vf, int state)
int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf,
bool setting)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
/* This operation is currently supported only for 82599 and x540
* devices.
@@ -1813,7 +1813,7 @@ int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf,
int ixgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
if (vf >= adapter->num_vfs)
return -EINVAL;
@@ -1836,7 +1836,7 @@ int ixgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting)
int ixgbe_ndo_get_vf_config(struct net_device *netdev,
int vf, struct ifla_vf_info *ivi)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
if (vf >= adapter->num_vfs)
return -EINVAL;
ivi->vf = vf;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 9baccacd02a1..892fa6c1f879 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#ifndef _IXGBE_TYPE_H_
#define _IXGBE_TYPE_H_
@@ -7,6 +7,7 @@
#include <linux/types.h>
#include <linux/mdio.h>
#include <linux/netdevice.h>
+#include "ixgbe_type_e610.h"
/* Device IDs */
#define IXGBE_DEV_ID_82598 0x10B6
@@ -71,12 +72,19 @@
#define IXGBE_DEV_ID_X550EM_A_1G_T 0x15E4
#define IXGBE_DEV_ID_X550EM_A_1G_T_L 0x15E5
+#define IXGBE_DEV_ID_E610_BACKPLANE 0x57AE
+#define IXGBE_DEV_ID_E610_SFP 0x57AF
+#define IXGBE_DEV_ID_E610_10G_T 0x57B0
+#define IXGBE_DEV_ID_E610_2_5G_T 0x57B1
+#define IXGBE_DEV_ID_E610_SGMII 0x57B2
+
/* VF Device IDs */
#define IXGBE_DEV_ID_82599_VF 0x10ED
#define IXGBE_DEV_ID_X540_VF 0x1515
#define IXGBE_DEV_ID_X550_VF 0x1565
#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8
#define IXGBE_DEV_ID_X550EM_A_VF 0x15C5
+#define IXGBE_DEV_ID_E610_VF 0x57AD
#define IXGBE_CAT(r, m) IXGBE_##r##_##m
@@ -1600,7 +1608,7 @@ enum {
#define IXGBE_EICR_PCI 0x00040000 /* PCI Exception */
#define IXGBE_EICR_MAILBOX 0x00080000 /* VF to PF Mailbox Interrupt */
#define IXGBE_EICR_LSC 0x00100000 /* Link Status Change */
-#define IXGBE_EICR_LINKSEC 0x00200000 /* PN Threshold */
+#define IXGBE_EICR_FW_EVENT 0x00200000 /* Async FW event */
#define IXGBE_EICR_MNG 0x00400000 /* Manageability Event Interrupt */
#define IXGBE_EICR_TS 0x00800000 /* Thermal Sensor Event */
#define IXGBE_EICR_TIMESYNC 0x01000000 /* Timesync Event */
@@ -1636,6 +1644,7 @@ enum {
#define IXGBE_EICS_PCI IXGBE_EICR_PCI /* PCI Exception */
#define IXGBE_EICS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
#define IXGBE_EICS_LSC IXGBE_EICR_LSC /* Link Status Change */
+#define IXGBE_EICS_FW_EVENT IXGBE_EICR_FW_EVENT /* Async FW event */
#define IXGBE_EICS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
#define IXGBE_EICS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */
#define IXGBE_EICS_GPI_SDP0(_hw) IXGBE_EICR_GPI_SDP0(_hw)
@@ -1654,6 +1663,7 @@ enum {
#define IXGBE_EIMS_PCI IXGBE_EICR_PCI /* PCI Exception */
#define IXGBE_EIMS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
#define IXGBE_EIMS_LSC IXGBE_EICR_LSC /* Link Status Change */
+#define IXGBE_EIMS_FW_EVENT IXGBE_EICR_FW_EVENT /* Async FW event */
#define IXGBE_EIMS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
#define IXGBE_EIMS_TS IXGBE_EICR_TS /* Thermel Sensor Event */
#define IXGBE_EIMS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */
@@ -1673,6 +1683,7 @@ enum {
#define IXGBE_EIMC_PCI IXGBE_EICR_PCI /* PCI Exception */
#define IXGBE_EIMC_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
#define IXGBE_EIMC_LSC IXGBE_EICR_LSC /* Link Status Change */
+#define IXGBE_EIMC_FW_EVENT IXGBE_EICR_FW_EVENT /* Async FW event */
#define IXGBE_EIMC_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
#define IXGBE_EIMC_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */
#define IXGBE_EIMC_GPI_SDP0(_hw) IXGBE_EICR_GPI_SDP0(_hw)
@@ -2068,6 +2079,7 @@ enum {
#define IXGBE_SAN_MAC_ADDR_PTR 0x28
#define IXGBE_DEVICE_CAPS 0x2C
#define IXGBE_SERIAL_NUMBER_MAC_ADDR 0x11
+#define IXGBE_PCIE_MSIX_E610_CAPS 0xB2
#define IXGBE_PCIE_MSIX_82599_CAPS 0x72
#define IXGBE_MAX_MSIX_VECTORS_82599 0x40
#define IXGBE_PCIE_MSIX_82598_CAPS 0x62
@@ -2168,6 +2180,7 @@ enum {
#define IXGBE_PCI_DEVICE_STATUS 0xAA
#define IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING 0x0020
#define IXGBE_PCI_LINK_STATUS 0xB2
+#define IXGBE_PCI_LINK_STATUS_E610 0x82
#define IXGBE_PCI_DEVICE_CONTROL2 0xC8
#define IXGBE_PCI_LINK_WIDTH 0x3F0
#define IXGBE_PCI_LINK_WIDTH_1 0x10
@@ -2288,6 +2301,7 @@ enum {
#define IXGBE_RXMTRL_V2_MGMT_MSG 0x0D00
#define IXGBE_FCTRL_SBP 0x00000002 /* Store Bad Packet */
+#define IXGBE_FCTRL_TPE 0x00000080 /* Tag Promiscuous Ena*/
#define IXGBE_FCTRL_MPE 0x00000100 /* Multicast Promiscuous Ena*/
#define IXGBE_FCTRL_UPE 0x00000200 /* Unicast Promiscuous Ena */
#define IXGBE_FCTRL_BAM 0x00000400 /* Broadcast Accept Mode */
@@ -2351,6 +2365,7 @@ enum {
/* Multiple Transmit Queue Command Register */
#define IXGBE_MTQC_RT_ENA 0x1 /* DCB Enable */
#define IXGBE_MTQC_VT_ENA 0x2 /* VMDQ2 Enable */
+#define IXGBE_MTQC_NUM_TC_OR_Q 0xC /* Number of TCs or TxQs per pool */
#define IXGBE_MTQC_64Q_1PB 0x0 /* 64 queues 1 pack buffer */
#define IXGBE_MTQC_32VF 0x8 /* 4 TX Queues per pool w/32VF's */
#define IXGBE_MTQC_64VF 0x4 /* 2 TX Queues per pool w/64VF's */
@@ -2970,6 +2985,29 @@ typedef u32 ixgbe_link_speed;
IXGBE_LINK_SPEED_1GB_FULL | \
IXGBE_LINK_SPEED_10GB_FULL)
+/* Physical layer type */
+typedef u64 ixgbe_physical_layer;
+#define IXGBE_PHYSICAL_LAYER_UNKNOWN 0
+#define IXGBE_PHYSICAL_LAYER_10GBASE_T 0x00001
+#define IXGBE_PHYSICAL_LAYER_1000BASE_T 0x00002
+#define IXGBE_PHYSICAL_LAYER_100BASE_TX 0x00004
+#define IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU 0x00008
+#define IXGBE_PHYSICAL_LAYER_10GBASE_LR 0x00010
+#define IXGBE_PHYSICAL_LAYER_10GBASE_LRM 0x00020
+#define IXGBE_PHYSICAL_LAYER_10GBASE_SR 0x00040
+#define IXGBE_PHYSICAL_LAYER_10GBASE_KX4 0x00080
+#define IXGBE_PHYSICAL_LAYER_10GBASE_CX4 0x00100
+#define IXGBE_PHYSICAL_LAYER_1000BASE_KX 0x00200
+#define IXGBE_PHYSICAL_LAYER_1000BASE_BX 0x00400
+#define IXGBE_PHYSICAL_LAYER_10GBASE_KR 0x00800
+#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x01000
+#define IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x02000
+#define IXGBE_PHYSICAL_LAYER_1000BASE_SX 0x04000
+#define IXGBE_PHYSICAL_LAYER_10BASE_T 0x08000
+#define IXGBE_PHYSICAL_LAYER_2500BASE_KX 0x10000
+#define IXGBE_PHYSICAL_LAYER_2500BASE_T 0x20000
+#define IXGBE_PHYSICAL_LAYER_5000BASE_T 0x40000
+
/* Flow Control Data Sheet defined values
* Calculation and defines taken from 802.1bb Annex O
*/
@@ -3145,6 +3183,8 @@ enum ixgbe_mac_type {
ixgbe_mac_X550,
ixgbe_mac_X550EM_x,
ixgbe_mac_x550em_a,
+ ixgbe_mac_e610,
+ ixgbe_mac_e610_vf,
ixgbe_num_macs
};
@@ -3224,7 +3264,9 @@ enum ixgbe_media_type {
ixgbe_media_type_copper,
ixgbe_media_type_backplane,
ixgbe_media_type_cx4,
- ixgbe_media_type_virtual
+ ixgbe_media_type_virtual,
+ ixgbe_media_type_da,
+ ixgbe_media_type_aui,
};
/* Flow Control Settings */
@@ -3233,7 +3275,8 @@ enum ixgbe_fc_mode {
ixgbe_fc_rx_pause,
ixgbe_fc_tx_pause,
ixgbe_fc_full,
- ixgbe_fc_default
+ ixgbe_fc_default,
+ ixgbe_fc_pfc,
};
/* Smart Speed Settings */
@@ -3403,6 +3446,8 @@ struct ixgbe_eeprom_operations {
int (*validate_checksum)(struct ixgbe_hw *, u16 *);
int (*update_checksum)(struct ixgbe_hw *);
int (*calc_checksum)(struct ixgbe_hw *);
+ int (*read_pba_string)(struct ixgbe_hw *hw, u8 *pba_num,
+ u32 pba_num_size);
};
struct ixgbe_mac_operations {
@@ -3411,6 +3456,7 @@ struct ixgbe_mac_operations {
int (*start_hw)(struct ixgbe_hw *);
int (*clear_hw_cntrs)(struct ixgbe_hw *);
enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *);
+ int (*get_fw_ver)(struct ixgbe_hw *hw);
int (*get_mac_addr)(struct ixgbe_hw *, u8 *);
int (*get_san_mac_addr)(struct ixgbe_hw *, u8 *);
int (*get_device_caps)(struct ixgbe_hw *, u16 *);
@@ -3479,6 +3525,8 @@ struct ixgbe_mac_operations {
int (*get_thermal_sensor_data)(struct ixgbe_hw *);
int (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw);
bool (*fw_recovery_mode)(struct ixgbe_hw *hw);
+ bool (*fw_rollback_mode)(struct ixgbe_hw *hw);
+ int (*get_nvm_ver)(struct ixgbe_hw *hw, struct ixgbe_nvm_info *nvm);
void (*disable_rx)(struct ixgbe_hw *hw);
void (*enable_rx)(struct ixgbe_hw *hw);
void (*set_source_address_pruning)(struct ixgbe_hw *, bool,
@@ -3533,6 +3581,9 @@ struct ixgbe_link_operations {
struct ixgbe_link_info {
struct ixgbe_link_operations ops;
u8 addr;
+ struct ixgbe_link_status link_info;
+ struct ixgbe_link_status link_info_old;
+ u8 get_link_info;
};
struct ixgbe_eeprom_info {
@@ -3575,6 +3626,7 @@ struct ixgbe_mac_info {
u8 san_mac_rar_index;
struct ixgbe_thermal_sensor_data thermal_sensor_data;
bool set_lben;
+ u32 max_link_up_time;
u8 led_link_act;
};
@@ -3599,6 +3651,10 @@ struct ixgbe_phy_info {
bool reset_if_overtemp;
bool qsfp_shared_i2c_bus;
u32 nw_mng_if_sel;
+ u64 phy_type_low;
+ u64 phy_type_high;
+ u16 curr_user_speed_req;
+ struct ixgbe_aci_cmd_set_phy_cfg_data curr_user_phy_cfg;
};
struct ixgbe_mbx_stats {
@@ -3643,6 +3699,19 @@ struct ixgbe_hw {
bool allow_unsupported_sfp;
bool wol_enabled;
bool need_crosstalk_fix;
+ u8 api_branch;
+ u8 api_maj_ver;
+ u8 api_min_ver;
+ u8 api_patch;
+ u8 fw_branch;
+ u8 fw_maj_ver;
+ u8 fw_min_ver;
+ u8 fw_patch;
+ u32 fw_build;
+ struct ixgbe_aci_info aci;
+ struct ixgbe_flash_info flash;
+ struct ixgbe_hw_dev_caps dev_caps;
+ struct ixgbe_hw_func_caps func_caps;
};
struct ixgbe_info {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h
new file mode 100644
index 000000000000..09df67f03cf4
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h
@@ -0,0 +1,1250 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2024 Intel Corporation. */
+
+#ifndef _IXGBE_TYPE_E610_H_
+#define _IXGBE_TYPE_E610_H_
+
+#define BYTES_PER_DWORD 4
+
+/* General E610 defines */
+#define IXGBE_MAX_VSI 768
+
+/* Checksum and Shadow RAM pointers */
+#define IXGBE_E610_SR_NVM_CTRL_WORD 0x00
+#define IXGBE_E610_SR_PBA_BLOCK_PTR 0x16
+#define IXGBE_E610_SR_PBA_BLOCK_MASK GENMASK(15, 8)
+#define IXGBE_E610_SR_NVM_DEV_STARTER_VER 0x18
+#define IXGBE_E610_SR_NVM_EETRACK_LO 0x2D
+#define IXGBE_E610_SR_NVM_EETRACK_HI 0x2E
+#define IXGBE_E610_NVM_VER_LO_MASK GENMASK(7, 0)
+#define IXGBE_E610_NVM_VER_HI_MASK GENMASK(15, 12)
+#define IXGBE_E610_SR_SW_CHECKSUM_WORD 0x3F
+#define IXGBE_E610_SR_PFA_PTR 0x40
+#define IXGBE_E610_SR_1ST_NVM_BANK_PTR 0x42
+#define IXGBE_E610_SR_NVM_BANK_SIZE 0x43
+#define IXGBE_E610_SR_1ST_OROM_BANK_PTR 0x44
+#define IXGBE_E610_SR_OROM_BANK_SIZE 0x45
+#define IXGBE_E610_SR_NETLIST_BANK_PTR 0x46
+#define IXGBE_E610_SR_NETLIST_BANK_SIZE 0x47
+
+/* The OROM version topology */
+#define IXGBE_OROM_VER_PATCH_MASK GENMASK_ULL(7, 0)
+#define IXGBE_OROM_VER_BUILD_MASK GENMASK_ULL(23, 8)
+#define IXGBE_OROM_VER_MASK GENMASK_ULL(31, 24)
+
+/* CSS Header words */
+#define IXGBE_NVM_CSS_HDR_LEN_L 0x02
+#define IXGBE_NVM_CSS_HDR_LEN_H 0x03
+#define IXGBE_NVM_CSS_SREV_L 0x14
+#define IXGBE_NVM_CSS_SREV_H 0x15
+
+#define IXGBE_HDR_LEN_ROUNDUP 32
+
+/* Length of Authentication header section in words */
+#define IXGBE_NVM_AUTH_HEADER_LEN 0x08
+
+/* Shadow RAM related */
+#define IXGBE_SR_WORDS_IN_1KB 512
+
+/* The Netlist ID Block is located after all of the Link Topology nodes. */
+#define IXGBE_NETLIST_ID_BLK_SIZE 0x30
+#define IXGBE_NETLIST_ID_BLK_OFFSET(n) IXGBE_NETLIST_LINK_TOPO_OFFSET(0x0004 + 2 * (n))
+
+/* netlist ID block field offsets (word offsets) */
+#define IXGBE_NETLIST_ID_BLK_MAJOR_VER_LOW 0x02
+#define IXGBE_NETLIST_ID_BLK_MAJOR_VER_HIGH 0x03
+#define IXGBE_NETLIST_ID_BLK_MINOR_VER_LOW 0x04
+#define IXGBE_NETLIST_ID_BLK_MINOR_VER_HIGH 0x05
+#define IXGBE_NETLIST_ID_BLK_TYPE_LOW 0x06
+#define IXGBE_NETLIST_ID_BLK_TYPE_HIGH 0x07
+#define IXGBE_NETLIST_ID_BLK_REV_LOW 0x08
+#define IXGBE_NETLIST_ID_BLK_REV_HIGH 0x09
+#define IXGBE_NETLIST_ID_BLK_SHA_HASH_WORD(n) (0x0A + (n))
+#define IXGBE_NETLIST_ID_BLK_CUST_VER 0x2F
+
+/* The Link Topology Netlist section is stored as a series of words. It is
+ * stored in the NVM as a TLV, with the first two words containing the type
+ * and length.
+ */
+#define IXGBE_NETLIST_LINK_TOPO_MOD_ID 0x011B
+#define IXGBE_NETLIST_TYPE_OFFSET 0x0000
+#define IXGBE_NETLIST_LEN_OFFSET 0x0001
+
+/* The Link Topology section follows the TLV header. When reading the netlist
+ * using ixgbe_read_netlist_module, we need to account for the 2-word TLV
+ * header.
+ */
+#define IXGBE_NETLIST_LINK_TOPO_OFFSET(n) ((n) + 2)
+#define IXGBE_LINK_TOPO_MODULE_LEN IXGBE_NETLIST_LINK_TOPO_OFFSET(0x0000)
+#define IXGBE_LINK_TOPO_NODE_COUNT IXGBE_NETLIST_LINK_TOPO_OFFSET(0x0001)
+#define IXGBE_LINK_TOPO_NODE_COUNT_M GENMASK_ULL(9, 0)
+
+/* Firmware Status Register (GL_FWSTS) */
+#define GL_FWSTS 0x00083048 /* Reset Source: POR */
+#define GL_FWSTS_EP_PF0 BIT(24)
+#define GL_FWSTS_EP_PF1 BIT(25)
+
+/* Global NVM General Status Register */
+#define GLNVM_GENS 0x000B6100 /* Reset Source: POR */
+#define GLNVM_GENS_SR_SIZE_M GENMASK(7, 5)
+
+#define IXGBE_GL_MNG_FWSM 0x000B6134 /* Reset Source: POR */
+#define IXGBE_GL_MNG_FWSM_RECOVERY_M BIT(1)
+#define IXGBE_GL_MNG_FWSM_ROLLBACK_M BIT(2)
+
+/* Flash Access Register */
+#define IXGBE_GLNVM_FLA 0x000B6108 /* Reset Source: POR */
+#define IXGBE_GLNVM_FLA_LOCKED_S 6
+#define IXGBE_GLNVM_FLA_LOCKED_M BIT(6)
+
+/* Auxiliary field, mask and shift definition for Shadow RAM and NVM Flash */
+#define IXGBE_SR_CTRL_WORD_1_M GENMASK(7, 6)
+#define IXGBE_SR_CTRL_WORD_VALID BIT(0)
+#define IXGBE_SR_CTRL_WORD_OROM_BANK BIT(3)
+#define IXGBE_SR_CTRL_WORD_NETLIST_BANK BIT(4)
+#define IXGBE_SR_CTRL_WORD_NVM_BANK BIT(5)
+#define IXGBE_SR_NVM_PTR_4KB_UNITS BIT(15)
+
+/* Admin Command Interface (ACI) registers */
+#define IXGBE_PF_HIDA(_i) (0x00085000 + ((_i) * 4))
+#define IXGBE_PF_HIDA_2(_i) (0x00085020 + ((_i) * 4))
+#define IXGBE_PF_HIBA(_i) (0x00084000 + ((_i) * 4))
+#define IXGBE_PF_HICR 0x00082048
+
+#define IXGBE_PF_HICR_EN BIT(0)
+#define IXGBE_PF_HICR_C BIT(1)
+#define IXGBE_PF_HICR_SV BIT(2)
+#define IXGBE_PF_HICR_EV BIT(3)
+
+#define IXGBE_FW_API_VER_MAJOR 0x01
+#define IXGBE_FW_API_VER_MINOR 0x07
+#define IXGBE_FW_API_VER_DIFF_ALLOWED 0x02
+
+#define IXGBE_ACI_DESC_SIZE 32
+#define IXGBE_ACI_DESC_SIZE_IN_DWORDS (IXGBE_ACI_DESC_SIZE / BYTES_PER_DWORD)
+
+#define IXGBE_ACI_MAX_BUFFER_SIZE 4096 /* Size in bytes */
+#define IXGBE_ACI_SEND_DELAY_TIME_MS 10
+#define IXGBE_ACI_SEND_MAX_EXECUTE 3
+#define IXGBE_ACI_SEND_TIMEOUT_MS \
+ (IXGBE_ACI_SEND_MAX_EXECUTE * IXGBE_ACI_SEND_DELAY_TIME_MS)
+/* [ms] timeout of waiting for sync response */
+#define IXGBE_ACI_SYNC_RESPONSE_TIMEOUT 100000
+/* [ms] timeout of waiting for async response */
+#define IXGBE_ACI_ASYNC_RESPONSE_TIMEOUT 150000
+/* [ms] timeout of waiting for resource release */
+#define IXGBE_ACI_RELEASE_RES_TIMEOUT 10000
+
+/* FW defined boundary for a large buffer, 4k >= Large buffer > 512 bytes */
+#define IXGBE_ACI_LG_BUF 512
+
+/* Flags sub-structure
+ * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 |
+ * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE |
+ */
+
+#define IXGBE_ACI_FLAG_DD BIT(0) /* 0x1 */
+#define IXGBE_ACI_FLAG_CMP BIT(1) /* 0x2 */
+#define IXGBE_ACI_FLAG_ERR BIT(2) /* 0x4 */
+#define IXGBE_ACI_FLAG_VFE BIT(3) /* 0x8 */
+#define IXGBE_ACI_FLAG_LB BIT(9) /* 0x200 */
+#define IXGBE_ACI_FLAG_RD BIT(10) /* 0x400 */
+#define IXGBE_ACI_FLAG_VFC BIT(11) /* 0x800 */
+#define IXGBE_ACI_FLAG_BUF BIT(12) /* 0x1000 */
+#define IXGBE_ACI_FLAG_SI BIT(13) /* 0x2000 */
+#define IXGBE_ACI_FLAG_EI BIT(14) /* 0x4000 */
+#define IXGBE_ACI_FLAG_FE BIT(15) /* 0x8000 */
+
+/* Admin Command Interface (ACI) error codes */
+enum ixgbe_aci_err {
+ IXGBE_ACI_RC_OK = 0, /* Success */
+ IXGBE_ACI_RC_EPERM = 1, /* Operation not permitted */
+ IXGBE_ACI_RC_ENOENT = 2, /* No such element */
+ IXGBE_ACI_RC_ESRCH = 3, /* Bad opcode */
+ IXGBE_ACI_RC_EINTR = 4, /* Operation interrupted */
+ IXGBE_ACI_RC_EIO = 5, /* I/O error */
+ IXGBE_ACI_RC_ENXIO = 6, /* No such resource */
+ IXGBE_ACI_RC_E2BIG = 7, /* Arg too long */
+ IXGBE_ACI_RC_EAGAIN = 8, /* Try again */
+ IXGBE_ACI_RC_ENOMEM = 9, /* Out of memory */
+ IXGBE_ACI_RC_EACCES = 10, /* Permission denied */
+ IXGBE_ACI_RC_EFAULT = 11, /* Bad address */
+ IXGBE_ACI_RC_EBUSY = 12, /* Device or resource busy */
+ IXGBE_ACI_RC_EEXIST = 13, /* Object already exists */
+ IXGBE_ACI_RC_EINVAL = 14, /* Invalid argument */
+ IXGBE_ACI_RC_ENOTTY = 15, /* Not a typewriter */
+ IXGBE_ACI_RC_ENOSPC = 16, /* No space left or alloc failure */
+ IXGBE_ACI_RC_ENOSYS = 17, /* Function not implemented */
+ IXGBE_ACI_RC_ERANGE = 18, /* Parameter out of range */
+ IXGBE_ACI_RC_EFLUSHED = 19, /* Cmd flushed due to prev cmd error */
+ IXGBE_ACI_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */
+ IXGBE_ACI_RC_EMODE = 21, /* Op not allowed in current dev mode */
+ IXGBE_ACI_RC_EFBIG = 22, /* File too big */
+ IXGBE_ACI_RC_ESBCOMP = 23, /* SB-IOSF completion unsuccessful */
+ IXGBE_ACI_RC_ENOSEC = 24, /* Missing security manifest */
+ IXGBE_ACI_RC_EBADSIG = 25, /* Bad RSA signature */
+ IXGBE_ACI_RC_ESVN = 26, /* SVN number prohibits this package */
+ IXGBE_ACI_RC_EBADMAN = 27, /* Manifest hash mismatch */
+ IXGBE_ACI_RC_EBADBUF = 28, /* Buffer hash mismatches manifest */
+ IXGBE_ACI_RC_EACCES_BMCU = 29, /* BMC Update in progress */
+};
+
+/* Admin Command Interface (ACI) opcodes */
+enum ixgbe_aci_opc {
+ ixgbe_aci_opc_get_ver = 0x0001,
+ ixgbe_aci_opc_driver_ver = 0x0002,
+ ixgbe_aci_opc_get_exp_err = 0x0005,
+
+ /* resource ownership */
+ ixgbe_aci_opc_req_res = 0x0008,
+ ixgbe_aci_opc_release_res = 0x0009,
+
+ /* device/function capabilities */
+ ixgbe_aci_opc_list_func_caps = 0x000A,
+ ixgbe_aci_opc_list_dev_caps = 0x000B,
+
+ /* safe disable of RXEN */
+ ixgbe_aci_opc_disable_rxen = 0x000C,
+
+ /* FW events */
+ ixgbe_aci_opc_get_fw_event = 0x0014,
+
+ /* PHY commands */
+ ixgbe_aci_opc_get_phy_caps = 0x0600,
+ ixgbe_aci_opc_set_phy_cfg = 0x0601,
+ ixgbe_aci_opc_restart_an = 0x0605,
+ ixgbe_aci_opc_get_link_status = 0x0607,
+ ixgbe_aci_opc_set_event_mask = 0x0613,
+ ixgbe_aci_opc_get_link_topo = 0x06E0,
+ ixgbe_aci_opc_get_link_topo_pin = 0x06E1,
+ ixgbe_aci_opc_read_i2c = 0x06E2,
+ ixgbe_aci_opc_write_i2c = 0x06E3,
+ ixgbe_aci_opc_read_mdio = 0x06E4,
+ ixgbe_aci_opc_write_mdio = 0x06E5,
+ ixgbe_aci_opc_set_gpio_by_func = 0x06E6,
+ ixgbe_aci_opc_get_gpio_by_func = 0x06E7,
+ ixgbe_aci_opc_set_port_id_led = 0x06E9,
+ ixgbe_aci_opc_set_gpio = 0x06EC,
+ ixgbe_aci_opc_get_gpio = 0x06ED,
+ ixgbe_aci_opc_sff_eeprom = 0x06EE,
+ ixgbe_aci_opc_prog_topo_dev_nvm = 0x06F2,
+ ixgbe_aci_opc_read_topo_dev_nvm = 0x06F3,
+
+ /* NVM commands */
+ ixgbe_aci_opc_nvm_read = 0x0701,
+ ixgbe_aci_opc_nvm_erase = 0x0702,
+ ixgbe_aci_opc_nvm_write = 0x0703,
+ ixgbe_aci_opc_nvm_cfg_read = 0x0704,
+ ixgbe_aci_opc_nvm_cfg_write = 0x0705,
+ ixgbe_aci_opc_nvm_checksum = 0x0706,
+ ixgbe_aci_opc_nvm_write_activate = 0x0707,
+ ixgbe_aci_opc_nvm_sr_dump = 0x0707,
+ ixgbe_aci_opc_nvm_save_factory_settings = 0x0708,
+ ixgbe_aci_opc_nvm_update_empr = 0x0709,
+ ixgbe_aci_opc_nvm_pkg_data = 0x070A,
+ ixgbe_aci_opc_nvm_pass_component_tbl = 0x070B,
+
+ /* Alternate Structure Commands */
+ ixgbe_aci_opc_write_alt_direct = 0x0900,
+ ixgbe_aci_opc_write_alt_indirect = 0x0901,
+ ixgbe_aci_opc_read_alt_direct = 0x0902,
+ ixgbe_aci_opc_read_alt_indirect = 0x0903,
+ ixgbe_aci_opc_done_alt_write = 0x0904,
+ ixgbe_aci_opc_clear_port_alt_write = 0x0906,
+
+ /* TCA Events */
+ ixgbe_aci_opc_temp_tca_event = 0x0C94,
+
+ /* debug commands */
+ ixgbe_aci_opc_debug_dump_internals = 0xFF08,
+
+ /* SystemDiagnostic commands */
+ ixgbe_aci_opc_set_health_status_config = 0xFF20,
+ ixgbe_aci_opc_get_supported_health_status_codes = 0xFF21,
+ ixgbe_aci_opc_get_health_status = 0xFF22,
+ ixgbe_aci_opc_clear_health_status = 0xFF23,
+};
+
+/* Get version (direct 0x0001) */
+struct ixgbe_aci_cmd_get_ver {
+ __le32 rom_ver;
+ __le32 fw_build;
+ u8 fw_branch;
+ u8 fw_major;
+ u8 fw_minor;
+ u8 fw_patch;
+ u8 api_branch;
+ u8 api_major;
+ u8 api_minor;
+ u8 api_patch;
+};
+
+#define IXGBE_DRV_VER_STR_LEN_E610 32
+
+/* Send driver version (indirect 0x0002) */
+struct ixgbe_aci_cmd_driver_ver {
+ u8 major_ver;
+ u8 minor_ver;
+ u8 build_ver;
+ u8 subbuild_ver;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Get Expanded Error Code (0x0005, direct) */
+struct ixgbe_aci_cmd_get_exp_err {
+ __le32 reason;
+#define IXGBE_ACI_EXPANDED_ERROR_NOT_PROVIDED 0xFFFFFFFF
+ __le32 identifier;
+ u8 rsvd[8];
+};
+
+/* FW update timeout definitions are in milliseconds */
+#define IXGBE_NVM_TIMEOUT 180000
+
+enum ixgbe_aci_res_access_type {
+ IXGBE_RES_READ = 1,
+ IXGBE_RES_WRITE
+};
+
+enum ixgbe_aci_res_ids {
+ IXGBE_NVM_RES_ID = 1,
+ IXGBE_SPD_RES_ID,
+ IXGBE_CHANGE_LOCK_RES_ID,
+ IXGBE_GLOBAL_CFG_LOCK_RES_ID
+};
+
+/* Request resource ownership (direct 0x0008)
+ * Release resource ownership (direct 0x0009)
+ */
+struct ixgbe_aci_cmd_req_res {
+ __le16 res_id;
+ __le16 access_type;
+
+ /* Upon successful completion, FW writes this value and driver is
+ * expected to release resource before timeout. This value is provided
+ * in milliseconds.
+ */
+ __le32 timeout;
+#define IXGBE_ACI_RES_NVM_READ_DFLT_TIMEOUT_MS 3000
+#define IXGBE_ACI_RES_NVM_WRITE_DFLT_TIMEOUT_MS 180000
+#define IXGBE_ACI_RES_CHNG_LOCK_DFLT_TIMEOUT_MS 1000
+#define IXGBE_ACI_RES_GLBL_LOCK_DFLT_TIMEOUT_MS 3000
+ /* For SDP: pin ID of the SDP */
+ __le32 res_number;
+ __le16 status;
+#define IXGBE_ACI_RES_GLBL_SUCCESS 0
+#define IXGBE_ACI_RES_GLBL_IN_PROG 1
+#define IXGBE_ACI_RES_GLBL_DONE 2
+ u8 reserved[2];
+};
+
+/* Get function capabilities (indirect 0x000A)
+ * Get device capabilities (indirect 0x000B)
+ */
+struct ixgbe_aci_cmd_list_caps {
+ u8 cmd_flags;
+ u8 pf_index;
+ u8 reserved[2];
+ __le32 count;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Device/Function buffer entry, repeated per reported capability */
+struct ixgbe_aci_cmd_list_caps_elem {
+ __le16 cap;
+#define IXGBE_ACI_CAPS_VALID_FUNCTIONS 0x0005
+#define IXGBE_ACI_MAX_VALID_FUNCTIONS 0x8
+#define IXGBE_ACI_CAPS_SRIOV 0x0012
+#define IXGBE_ACI_CAPS_VF 0x0013
+#define IXGBE_ACI_CAPS_VMDQ 0x0014
+#define IXGBE_ACI_CAPS_VSI 0x0017
+#define IXGBE_ACI_CAPS_DCB 0x0018
+#define IXGBE_ACI_CAPS_RSS 0x0040
+#define IXGBE_ACI_CAPS_RXQS 0x0041
+#define IXGBE_ACI_CAPS_TXQS 0x0042
+#define IXGBE_ACI_CAPS_MSIX 0x0043
+#define IXGBE_ACI_CAPS_FD 0x0045
+#define IXGBE_ACI_CAPS_1588 0x0046
+#define IXGBE_ACI_CAPS_MAX_MTU 0x0047
+#define IXGBE_ACI_CAPS_NVM_VER 0x0048
+#define IXGBE_ACI_CAPS_PENDING_NVM_VER 0x0049
+#define IXGBE_ACI_CAPS_OROM_VER 0x004A
+#define IXGBE_ACI_CAPS_PENDING_OROM_VER 0x004B
+#define IXGBE_ACI_CAPS_PENDING_NET_VER 0x004D
+#define IXGBE_ACI_CAPS_INLINE_IPSEC 0x0070
+#define IXGBE_ACI_CAPS_NUM_ENABLED_PORTS 0x0072
+#define IXGBE_ACI_CAPS_PCIE_RESET_AVOIDANCE 0x0076
+#define IXGBE_ACI_CAPS_POST_UPDATE_RESET_RESTRICT 0x0077
+#define IXGBE_ACI_CAPS_NVM_MGMT 0x0080
+#define IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG0 0x0081
+#define IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG1 0x0082
+#define IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG2 0x0083
+#define IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG3 0x0084
+ u8 major_ver;
+ u8 minor_ver;
+ /* Number of resources described by this capability */
+ __le32 number;
+ /* Only meaningful for some types of resources */
+ __le32 logical_id;
+ /* Only meaningful for some types of resources */
+ __le32 phys_id;
+ __le64 rsvd1;
+ __le64 rsvd2;
+};
+
+/* Disable RXEN (direct 0x000C) */
+struct ixgbe_aci_cmd_disable_rxen {
+ u8 lport_num;
+ u8 reserved[15];
+};
+
+/* Get PHY capabilities (indirect 0x0600) */
+struct ixgbe_aci_cmd_get_phy_caps {
+ u8 lport_num;
+ u8 reserved;
+ __le16 param0;
+ /* 18.0 - Report qualified modules */
+#define IXGBE_ACI_GET_PHY_RQM BIT(0)
+ /* 18.1 - 18.3 : Report mode
+ * 000b - Report topology capabilities, without media
+ * 001b - Report topology capabilities, with media
+ * 010b - Report Active configuration
+ * 011b - Report PHY Type and FEC mode capabilities
+ * 100b - Report Default capabilities
+ */
+#define IXGBE_ACI_REPORT_MODE_M GENMASK(3, 1)
+#define IXGBE_ACI_REPORT_TOPO_CAP_NO_MEDIA 0
+#define IXGBE_ACI_REPORT_TOPO_CAP_MEDIA BIT(1)
+#define IXGBE_ACI_REPORT_ACTIVE_CFG BIT(2)
+#define IXGBE_ACI_REPORT_DFLT_CFG BIT(3)
+ __le32 reserved1;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* This is #define of PHY type (Extended):
+ * The first set of defines is for phy_type_low.
+ */
+#define IXGBE_PHY_TYPE_LOW_100BASE_TX BIT_ULL(0)
+#define IXGBE_PHY_TYPE_LOW_100M_SGMII BIT_ULL(1)
+#define IXGBE_PHY_TYPE_LOW_1000BASE_T BIT_ULL(2)
+#define IXGBE_PHY_TYPE_LOW_1000BASE_SX BIT_ULL(3)
+#define IXGBE_PHY_TYPE_LOW_1000BASE_LX BIT_ULL(4)
+#define IXGBE_PHY_TYPE_LOW_1000BASE_KX BIT_ULL(5)
+#define IXGBE_PHY_TYPE_LOW_1G_SGMII BIT_ULL(6)
+#define IXGBE_PHY_TYPE_LOW_2500BASE_T BIT_ULL(7)
+#define IXGBE_PHY_TYPE_LOW_2500BASE_X BIT_ULL(8)
+#define IXGBE_PHY_TYPE_LOW_2500BASE_KX BIT_ULL(9)
+#define IXGBE_PHY_TYPE_LOW_5GBASE_T BIT_ULL(10)
+#define IXGBE_PHY_TYPE_LOW_5GBASE_KR BIT_ULL(11)
+#define IXGBE_PHY_TYPE_LOW_10GBASE_T BIT_ULL(12)
+#define IXGBE_PHY_TYPE_LOW_10G_SFI_DA BIT_ULL(13)
+#define IXGBE_PHY_TYPE_LOW_10GBASE_SR BIT_ULL(14)
+#define IXGBE_PHY_TYPE_LOW_10GBASE_LR BIT_ULL(15)
+#define IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1 BIT_ULL(16)
+#define IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC BIT_ULL(17)
+#define IXGBE_PHY_TYPE_LOW_10G_SFI_C2C BIT_ULL(18)
+#define IXGBE_PHY_TYPE_LOW_25GBASE_T BIT_ULL(19)
+#define IXGBE_PHY_TYPE_LOW_25GBASE_CR BIT_ULL(20)
+#define IXGBE_PHY_TYPE_LOW_25GBASE_CR_S BIT_ULL(21)
+#define IXGBE_PHY_TYPE_LOW_25GBASE_CR1 BIT_ULL(22)
+#define IXGBE_PHY_TYPE_LOW_25GBASE_SR BIT_ULL(23)
+#define IXGBE_PHY_TYPE_LOW_25GBASE_LR BIT_ULL(24)
+#define IXGBE_PHY_TYPE_LOW_25GBASE_KR BIT_ULL(25)
+#define IXGBE_PHY_TYPE_LOW_25GBASE_KR_S BIT_ULL(26)
+#define IXGBE_PHY_TYPE_LOW_25GBASE_KR1 BIT_ULL(27)
+#define IXGBE_PHY_TYPE_LOW_25G_AUI_AOC_ACC BIT_ULL(28)
+#define IXGBE_PHY_TYPE_LOW_25G_AUI_C2C BIT_ULL(29)
+#define IXGBE_PHY_TYPE_LOW_MAX_INDEX 29
+/* The second set of defines is for phy_type_high. */
+#define IXGBE_PHY_TYPE_HIGH_10BASE_T BIT_ULL(1)
+#define IXGBE_PHY_TYPE_HIGH_10M_SGMII BIT_ULL(2)
+#define IXGBE_PHY_TYPE_HIGH_2500M_SGMII BIT_ULL(56)
+#define IXGBE_PHY_TYPE_HIGH_100M_USXGMII BIT_ULL(57)
+#define IXGBE_PHY_TYPE_HIGH_1G_USXGMII BIT_ULL(58)
+#define IXGBE_PHY_TYPE_HIGH_2500M_USXGMII BIT_ULL(59)
+#define IXGBE_PHY_TYPE_HIGH_5G_USXGMII BIT_ULL(60)
+#define IXGBE_PHY_TYPE_HIGH_10G_USXGMII BIT_ULL(61)
+#define IXGBE_PHY_TYPE_HIGH_MAX_INDEX 61
+
+struct ixgbe_aci_cmd_get_phy_caps_data {
+ __le64 phy_type_low; /* Use values from IXGBE_PHY_TYPE_LOW_* */
+ __le64 phy_type_high; /* Use values from IXGBE_PHY_TYPE_HIGH_* */
+ u8 caps;
+#define IXGBE_ACI_PHY_EN_TX_LINK_PAUSE BIT(0)
+#define IXGBE_ACI_PHY_EN_RX_LINK_PAUSE BIT(1)
+#define IXGBE_ACI_PHY_LOW_POWER_MODE BIT(2)
+#define IXGBE_ACI_PHY_EN_LINK BIT(3)
+#define IXGBE_ACI_PHY_AN_MODE BIT(4)
+#define IXGBE_ACI_PHY_EN_MOD_QUAL BIT(5)
+#define IXGBE_ACI_PHY_EN_LESM BIT(6)
+#define IXGBE_ACI_PHY_EN_AUTO_FEC BIT(7)
+#define IXGBE_ACI_PHY_CAPS_MASK GENMASK(7, 0)
+ u8 low_power_ctrl_an;
+#define IXGBE_ACI_PHY_EN_D3COLD_LOW_POWER_AUTONEG BIT(0)
+#define IXGBE_ACI_PHY_AN_EN_CLAUSE28 BIT(1)
+#define IXGBE_ACI_PHY_AN_EN_CLAUSE73 BIT(2)
+#define IXGBE_ACI_PHY_AN_EN_CLAUSE37 BIT(3)
+ __le16 eee_cap;
+#define IXGBE_ACI_PHY_EEE_EN_100BASE_TX BIT(0)
+#define IXGBE_ACI_PHY_EEE_EN_1000BASE_T BIT(1)
+#define IXGBE_ACI_PHY_EEE_EN_10GBASE_T BIT(2)
+#define IXGBE_ACI_PHY_EEE_EN_1000BASE_KX BIT(3)
+#define IXGBE_ACI_PHY_EEE_EN_10GBASE_KR BIT(4)
+#define IXGBE_ACI_PHY_EEE_EN_25GBASE_KR BIT(5)
+#define IXGBE_ACI_PHY_EEE_EN_10BASE_T BIT(11)
+ __le16 eeer_value;
+ u8 phy_id_oui[4]; /* PHY/Module ID connected on the port */
+ u8 phy_fw_ver[8];
+ u8 link_fec_options;
+#define IXGBE_ACI_PHY_FEC_10G_KR_40G_KR4_EN BIT(0)
+#define IXGBE_ACI_PHY_FEC_10G_KR_40G_KR4_REQ BIT(1)
+#define IXGBE_ACI_PHY_FEC_25G_RS_528_REQ BIT(2)
+#define IXGBE_ACI_PHY_FEC_25G_KR_REQ BIT(3)
+#define IXGBE_ACI_PHY_FEC_25G_RS_544_REQ BIT(4)
+#define IXGBE_ACI_PHY_FEC_25G_RS_CLAUSE91_EN BIT(6)
+#define IXGBE_ACI_PHY_FEC_25G_KR_CLAUSE74_EN BIT(7)
+#define IXGBE_ACI_PHY_FEC_MASK 0xdf
+ u8 module_compliance_enforcement;
+#define IXGBE_ACI_MOD_ENFORCE_STRICT_MODE BIT(0)
+ u8 extended_compliance_code;
+#define IXGBE_ACI_MODULE_TYPE_TOTAL_BYTE 3
+ u8 module_type[IXGBE_ACI_MODULE_TYPE_TOTAL_BYTE];
+#define IXGBE_ACI_MOD_TYPE_BYTE0_SFP_PLUS 0xA0
+#define IXGBE_ACI_MOD_TYPE_BYTE0_QSFP_PLUS 0x80
+#define IXGBE_ACI_MOD_TYPE_IDENT 1
+#define IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE BIT(0)
+#define IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE BIT(1)
+#define IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_SR BIT(4)
+#define IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LR BIT(5)
+#define IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LRM BIT(6)
+#define IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_ER BIT(7)
+#define IXGBE_ACI_MOD_TYPE_BYTE2_SFP_PLUS 0xA0
+#define IXGBE_ACI_MOD_TYPE_BYTE2_QSFP_PLUS 0x86
+ u8 qualified_module_count;
+ u8 rsvd2[7]; /* Bytes 47:41 reserved */
+#define IXGBE_ACI_QUAL_MOD_COUNT_MAX 16
+ struct {
+ u8 v_oui[3];
+ u8 rsvd3;
+ u8 v_part[16];
+ __le32 v_rev;
+ __le64 rsvd4;
+ } qual_modules[IXGBE_ACI_QUAL_MOD_COUNT_MAX];
+};
+
+/* Set PHY capabilities (direct 0x0601)
+ * NOTE: This command must be followed by setup link and restart auto-neg
+ */
+struct ixgbe_aci_cmd_set_phy_cfg {
+ u8 lport_num;
+ u8 reserved[7];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Set PHY config command data structure */
+struct ixgbe_aci_cmd_set_phy_cfg_data {
+ __le64 phy_type_low; /* Use values from IXGBE_PHY_TYPE_LOW_* */
+ __le64 phy_type_high; /* Use values from IXGBE_PHY_TYPE_HIGH_* */
+ u8 caps;
+#define IXGBE_ACI_PHY_ENA_VALID_MASK 0xef
+#define IXGBE_ACI_PHY_ENA_TX_PAUSE_ABILITY BIT(0)
+#define IXGBE_ACI_PHY_ENA_RX_PAUSE_ABILITY BIT(1)
+#define IXGBE_ACI_PHY_ENA_LOW_POWER BIT(2)
+#define IXGBE_ACI_PHY_ENA_LINK BIT(3)
+#define IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT BIT(5)
+#define IXGBE_ACI_PHY_ENA_LESM BIT(6)
+#define IXGBE_ACI_PHY_ENA_AUTO_FEC BIT(7)
+ u8 low_power_ctrl_an;
+ __le16 eee_cap; /* Value from ixgbe_aci_get_phy_caps */
+ __le16 eeer_value; /* Use defines from ixgbe_aci_get_phy_caps */
+ u8 link_fec_opt; /* Use defines from ixgbe_aci_get_phy_caps */
+ u8 module_compliance_enforcement;
+};
+
+/* Restart AN command data structure (direct 0x0605)
+ * Also used for response, with only the lport_num field present.
+ */
+struct ixgbe_aci_cmd_restart_an {
+ u8 lport_num;
+ u8 reserved;
+ u8 cmd_flags;
+#define IXGBE_ACI_RESTART_AN_LINK_RESTART BIT(1)
+#define IXGBE_ACI_RESTART_AN_LINK_ENABLE BIT(2)
+ u8 reserved2[13];
+};
+
+/* Get link status (indirect 0x0607), also used for Link Status Event */
+struct ixgbe_aci_cmd_get_link_status {
+ u8 lport_num;
+ u8 reserved;
+ __le16 cmd_flags;
+#define IXGBE_ACI_LSE_M GENMASK(1, 0)
+#define IXGBE_ACI_LSE_NOP 0x0
+#define IXGBE_ACI_LSE_DIS 0x2
+#define IXGBE_ACI_LSE_ENA 0x3
+ /* only response uses this flag */
+#define IXGBE_ACI_LSE_IS_ENABLED 0x1
+ __le32 reserved2;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Get link status response data structure, also used for Link Status Event */
+struct ixgbe_aci_cmd_get_link_status_data {
+ u8 topo_media_conflict;
+#define IXGBE_ACI_LINK_TOPO_CONFLICT BIT(0)
+#define IXGBE_ACI_LINK_MEDIA_CONFLICT BIT(1)
+#define IXGBE_ACI_LINK_TOPO_CORRUPT BIT(2)
+#define IXGBE_ACI_LINK_TOPO_UNREACH_PRT BIT(4)
+#define IXGBE_ACI_LINK_TOPO_UNDRUTIL_PRT BIT(5)
+#define IXGBE_ACI_LINK_TOPO_UNDRUTIL_MEDIA BIT(6)
+#define IXGBE_ACI_LINK_TOPO_UNSUPP_MEDIA BIT(7)
+ u8 link_cfg_err;
+#define IXGBE_ACI_LINK_CFG_ERR BIT(0)
+#define IXGBE_ACI_LINK_CFG_COMPLETED BIT(1)
+#define IXGBE_ACI_LINK_ACT_PORT_OPT_INVAL BIT(2)
+#define IXGBE_ACI_LINK_FEAT_ID_OR_CONFIG_ID_INVAL BIT(3)
+#define IXGBE_ACI_LINK_TOPO_CRITICAL_SDP_ERR BIT(4)
+#define IXGBE_ACI_LINK_MODULE_POWER_UNSUPPORTED BIT(5)
+#define IXGBE_ACI_LINK_EXTERNAL_PHY_LOAD_FAILURE BIT(6)
+#define IXGBE_ACI_LINK_INVAL_MAX_POWER_LIMIT BIT(7)
+ u8 link_info;
+#define IXGBE_ACI_LINK_UP BIT(0) /* Link Status */
+#define IXGBE_ACI_LINK_FAULT BIT(1)
+#define IXGBE_ACI_LINK_FAULT_TX BIT(2)
+#define IXGBE_ACI_LINK_FAULT_RX BIT(3)
+#define IXGBE_ACI_LINK_FAULT_REMOTE BIT(4)
+#define IXGBE_ACI_LINK_UP_PORT BIT(5) /* External Port Link Status */
+#define IXGBE_ACI_MEDIA_AVAILABLE BIT(6)
+#define IXGBE_ACI_SIGNAL_DETECT BIT(7)
+ u8 an_info;
+#define IXGBE_ACI_AN_COMPLETED BIT(0)
+#define IXGBE_ACI_LP_AN_ABILITY BIT(1)
+#define IXGBE_ACI_PD_FAULT BIT(2) /* Parallel Detection Fault */
+#define IXGBE_ACI_FEC_EN BIT(3)
+#define IXGBE_ACI_PHY_LOW_POWER BIT(4) /* Low Power State */
+#define IXGBE_ACI_LINK_PAUSE_TX BIT(5)
+#define IXGBE_ACI_LINK_PAUSE_RX BIT(6)
+#define IXGBE_ACI_QUALIFIED_MODULE BIT(7)
+ u8 ext_info;
+#define IXGBE_ACI_LINK_PHY_TEMP_ALARM BIT(0)
+#define IXGBE_ACI_LINK_EXCESSIVE_ERRORS BIT(1) /* Excessive Link Errors */
+ /* Port Tx Suspended */
+#define IXGBE_ACI_LINK_TX_ACTIVE 0
+#define IXGBE_ACI_LINK_TX_DRAINED 1
+#define IXGBE_ACI_LINK_TX_FLUSHED 3
+ u8 lb_status;
+#define IXGBE_ACI_LINK_LB_PHY_LCL BIT(0)
+#define IXGBE_ACI_LINK_LB_PHY_RMT BIT(1)
+#define IXGBE_ACI_LINK_LB_MAC_LCL BIT(2)
+ __le16 max_frame_size;
+ u8 cfg;
+#define IXGBE_ACI_LINK_25G_KR_FEC_EN BIT(0)
+#define IXGBE_ACI_LINK_25G_RS_528_FEC_EN BIT(1)
+#define IXGBE_ACI_LINK_25G_RS_544_FEC_EN BIT(2)
+#define IXGBE_ACI_FEC_MASK GENMASK(2, 0)
+ /* Pacing Config */
+#define IXGBE_ACI_CFG_PACING_M GENMASK(6, 3)
+#define IXGBE_ACI_CFG_PACING_TYPE_M BIT(7)
+#define IXGBE_ACI_CFG_PACING_TYPE_AVG 0
+#define IXGBE_ACI_CFG_PACING_TYPE_FIXED IXGBE_ACI_CFG_PACING_TYPE_M
+ /* External Device Power Ability */
+ u8 power_desc;
+#define IXGBE_ACI_PWR_CLASS_M GENMASK(5, 0)
+#define IXGBE_ACI_LINK_PWR_BASET_LOW_HIGH 0
+#define IXGBE_ACI_LINK_PWR_BASET_HIGH 1
+#define IXGBE_ACI_LINK_PWR_QSFP_CLASS_1 0
+#define IXGBE_ACI_LINK_PWR_QSFP_CLASS_2 1
+#define IXGBE_ACI_LINK_PWR_QSFP_CLASS_3 2
+#define IXGBE_ACI_LINK_PWR_QSFP_CLASS_4 3
+ __le16 link_speed;
+#define IXGBE_ACI_LINK_SPEED_M GENMASK(10, 0)
+#define IXGBE_ACI_LINK_SPEED_10MB BIT(0)
+#define IXGBE_ACI_LINK_SPEED_100MB BIT(1)
+#define IXGBE_ACI_LINK_SPEED_1000MB BIT(2)
+#define IXGBE_ACI_LINK_SPEED_2500MB BIT(3)
+#define IXGBE_ACI_LINK_SPEED_5GB BIT(4)
+#define IXGBE_ACI_LINK_SPEED_10GB BIT(5)
+#define IXGBE_ACI_LINK_SPEED_20GB BIT(6)
+#define IXGBE_ACI_LINK_SPEED_25GB BIT(7)
+#define IXGBE_ACI_LINK_SPEED_40GB BIT(8)
+#define IXGBE_ACI_LINK_SPEED_50GB BIT(9)
+#define IXGBE_ACI_LINK_SPEED_100GB BIT(10)
+#define IXGBE_ACI_LINK_SPEED_200GB BIT(11)
+#define IXGBE_ACI_LINK_SPEED_UNKNOWN BIT(15)
+ __le16 reserved3;
+ u8 ext_fec_status;
+#define IXGBE_ACI_LINK_RS_272_FEC_EN BIT(0) /* RS 272 FEC enabled */
+ u8 reserved4;
+	__le64 phy_type_low; /* Use values from IXGBE_PHY_TYPE_LOW_* */
+	__le64 phy_type_high; /* Use values from IXGBE_PHY_TYPE_HIGH_* */
+	/* Get link status version 2 link partner data */
+	__le64 lp_phy_type_low; /* Use values from IXGBE_PHY_TYPE_LOW_* */
+	__le64 lp_phy_type_high; /* Use values from IXGBE_PHY_TYPE_HIGH_* */
+ u8 lp_fec_adv;
+#define IXGBE_ACI_LINK_LP_10G_KR_FEC_CAP BIT(0)
+#define IXGBE_ACI_LINK_LP_25G_KR_FEC_CAP BIT(1)
+#define IXGBE_ACI_LINK_LP_RS_528_FEC_CAP BIT(2)
+#define IXGBE_ACI_LINK_LP_50G_KR_272_FEC_CAP BIT(3)
+#define IXGBE_ACI_LINK_LP_100G_KR_272_FEC_CAP BIT(4)
+#define IXGBE_ACI_LINK_LP_200G_KR_272_FEC_CAP BIT(5)
+ u8 lp_fec_req;
+#define IXGBE_ACI_LINK_LP_10G_KR_FEC_REQ BIT(0)
+#define IXGBE_ACI_LINK_LP_25G_KR_FEC_REQ BIT(1)
+#define IXGBE_ACI_LINK_LP_RS_528_FEC_REQ BIT(2)
+#define IXGBE_ACI_LINK_LP_KR_272_FEC_REQ BIT(3)
+ u8 lp_flowcontrol;
+#define IXGBE_ACI_LINK_LP_PAUSE_ADV BIT(0)
+#define IXGBE_ACI_LINK_LP_ASM_DIR_ADV BIT(1)
+ u8 reserved5[5];
+} __packed;
+
+/* Set event mask command (direct 0x0613) */
+struct ixgbe_aci_cmd_set_event_mask {
+ u8 lport_num;
+ u8 reserved[7];
+ __le16 event_mask;
+#define IXGBE_ACI_LINK_EVENT_UPDOWN BIT(1)
+#define IXGBE_ACI_LINK_EVENT_MEDIA_NA BIT(2)
+#define IXGBE_ACI_LINK_EVENT_LINK_FAULT BIT(3)
+#define IXGBE_ACI_LINK_EVENT_PHY_TEMP_ALARM BIT(4)
+#define IXGBE_ACI_LINK_EVENT_EXCESSIVE_ERRORS BIT(5)
+#define IXGBE_ACI_LINK_EVENT_SIGNAL_DETECT BIT(6)
+#define IXGBE_ACI_LINK_EVENT_AN_COMPLETED BIT(7)
+#define IXGBE_ACI_LINK_EVENT_MODULE_QUAL_FAIL BIT(8)
+#define IXGBE_ACI_LINK_EVENT_PORT_TX_SUSPENDED BIT(9)
+#define IXGBE_ACI_LINK_EVENT_TOPO_CONFLICT BIT(10)
+#define IXGBE_ACI_LINK_EVENT_MEDIA_CONFLICT BIT(11)
+#define IXGBE_ACI_LINK_EVENT_PHY_FW_LOAD_FAIL BIT(12)
+ u8 reserved1[6];
+};
+
+struct ixgbe_aci_cmd_link_topo_params {
+ u8 lport_num;
+ u8 lport_num_valid;
+#define IXGBE_ACI_LINK_TOPO_PORT_NUM_VALID BIT(0)
+ u8 node_type_ctx;
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_M GENMASK(3, 0)
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_PHY 0
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_GPIO_CTRL 1
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_MUX_CTRL 2
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_LED_CTRL 3
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_LED 4
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_THERMAL 5
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_CAGE 6
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_MEZZ 7
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_ID_EEPROM 8
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_CLK_CTRL 9
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_CLK_MUX 10
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_GPS 11
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_S 4
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_M GENMASK(7, 4)
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_GLOBAL 0
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_BOARD 1
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_PORT 2
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_NODE 3
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_NODE_HANDLE 4
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_DIRECT_BUS_ACCESS 5
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_NODE_HANDLE_BUS_ADDRESS 6
+ u8 index;
+};
+
+struct ixgbe_aci_cmd_link_topo_addr {
+ struct ixgbe_aci_cmd_link_topo_params topo_params;
+ __le16 handle;
+/* Used to decode the handle field */
+#define IXGBE_ACI_LINK_TOPO_HANDLE_BRD_TYPE_M BIT(9)
+#define IXGBE_ACI_LINK_TOPO_HANDLE_BRD_TYPE_LOM BIT(9)
+#define IXGBE_ACI_LINK_TOPO_HANDLE_BRD_TYPE_MEZZ 0
+};
+
+/* Get Link Topology Handle (direct, 0x06E0) */
+struct ixgbe_aci_cmd_get_link_topo {
+ struct ixgbe_aci_cmd_link_topo_addr addr;
+ u8 node_part_num;
+#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_PCA9575 0x21
+#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_ZL30632_80032 0x24
+#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_SI5384 0x25
+#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_C827 0x31
+#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_GEN_CLK_MUX 0x47
+#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_GEN_GPS 0x48
+#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_E610_PTC 0x49
+ u8 rsvd[9];
+};
+
+/* Get Link Topology Pin (direct, 0x06E1) */
+struct ixgbe_aci_cmd_get_link_topo_pin {
+ struct ixgbe_aci_cmd_link_topo_addr addr;
+ u8 input_io_params;
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_GPIO 0
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_RESET_N 1
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_INT_N 2
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_PRESENT_N 3
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_TX_DIS 4
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_MODSEL_N 5
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_LPMODE 6
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_TX_FAULT 7
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_RX_LOSS 8
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_RS0 9
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_RS1 10
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_EEPROM_WP 11
+/* 12 repeats intentionally due to two different uses depending on context */
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_LED 12
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_RED_LED 12
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_GREEN_LED 13
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_BLUE_LED 14
+#define IXGBE_ACI_LINK_TOPO_INPUT_IO_TYPE_GPIO 3
+/* Use IXGBE_ACI_LINK_TOPO_NODE_TYPE_* for the type values */
+ u8 output_io_params;
+/* Use IXGBE_ACI_LINK_TOPO_NODE_TYPE_* for the type values */
+ u8 output_io_flags;
+#define IXGBE_ACI_LINK_TOPO_OUTPUT_POLARITY BIT(5)
+#define IXGBE_ACI_LINK_TOPO_OUTPUT_VALUE BIT(6)
+#define IXGBE_ACI_LINK_TOPO_OUTPUT_DRIVEN BIT(7)
+ u8 rsvd[7];
+};
+
+/* Set Port Identification LED (direct, 0x06E9) */
+struct ixgbe_aci_cmd_set_port_id_led {
+ u8 lport_num;
+ u8 lport_num_valid;
+ u8 ident_mode;
+ u8 rsvd[13];
+};
+
+#define IXGBE_ACI_PORT_ID_PORT_NUM_VALID BIT(0)
+#define IXGBE_ACI_PORT_IDENT_LED_ORIG 0
+#define IXGBE_ACI_PORT_IDENT_LED_BLINK BIT(0)
+
+/* Read/Write SFF EEPROM command (indirect 0x06EE) */
+struct ixgbe_aci_cmd_sff_eeprom {
+ u8 lport_num;
+ u8 lport_num_valid;
+#define IXGBE_ACI_SFF_PORT_NUM_VALID BIT(0)
+ __le16 i2c_bus_addr;
+#define IXGBE_ACI_SFF_I2CBUS_7BIT_M GENMASK(6, 0)
+#define IXGBE_ACI_SFF_I2CBUS_10BIT_M GENMASK(9, 0)
+#define IXGBE_ACI_SFF_I2CBUS_TYPE_M BIT(10)
+#define IXGBE_ACI_SFF_I2CBUS_TYPE_7BIT 0
+#define IXGBE_ACI_SFF_I2CBUS_TYPE_10BIT IXGBE_ACI_SFF_I2CBUS_TYPE_M
+#define IXGBE_ACI_SFF_NO_PAGE_BANK_UPDATE 0
+#define IXGBE_ACI_SFF_UPDATE_PAGE 1
+#define IXGBE_ACI_SFF_UPDATE_BANK 2
+#define IXGBE_ACI_SFF_UPDATE_PAGE_BANK 3
+#define IXGBE_ACI_SFF_IS_WRITE BIT(15)
+ __le16 i2c_offset;
+ u8 module_bank;
+ u8 module_page;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* NVM Read command (indirect 0x0701)
+ * NVM Erase commands (direct 0x0702)
+ * NVM Write commands (indirect 0x0703)
+ * NVM Write Activate commands (direct 0x0707)
+ * NVM Shadow RAM Dump commands (direct 0x0707)
+ */
+struct ixgbe_aci_cmd_nvm {
+#define IXGBE_ACI_NVM_MAX_OFFSET 0xFFFFFF
+ __le16 offset_low;
+ u8 offset_high; /* For Write Activate offset_high is used as flags2 */
+#define IXGBE_ACI_NVM_OFFSET_HI_A_MASK GENMASK(15, 8)
+#define IXGBE_ACI_NVM_OFFSET_HI_U_MASK GENMASK(23, 16)
+ u8 cmd_flags;
+#define IXGBE_ACI_NVM_LAST_CMD BIT(0)
+#define IXGBE_ACI_NVM_PCIR_REQ BIT(0) /* Used by NVM Write reply */
+#define IXGBE_ACI_NVM_PRESERVE_ALL BIT(1)
+#define IXGBE_ACI_NVM_ACTIV_SEL_NVM BIT(3) /* Write Activate/SR Dump only */
+#define IXGBE_ACI_NVM_ACTIV_SEL_OROM BIT(4)
+#define IXGBE_ACI_NVM_ACTIV_SEL_NETLIST BIT(5)
+#define IXGBE_ACI_NVM_SPECIAL_UPDATE BIT(6)
+#define IXGBE_ACI_NVM_REVERT_LAST_ACTIV BIT(6) /* Write Activate only */
+#define IXGBE_ACI_NVM_FLASH_ONLY BIT(7)
+#define IXGBE_ACI_NVM_RESET_LVL_M GENMASK(1, 0) /* Write reply only */
+#define IXGBE_ACI_NVM_POR_FLAG 0
+#define IXGBE_ACI_NVM_PERST_FLAG 1
+#define IXGBE_ACI_NVM_EMPR_FLAG 2
+#define IXGBE_ACI_NVM_EMPR_ENA BIT(0) /* Write Activate reply only */
+#define IXGBE_ACI_NVM_NO_PRESERVATION 0x0
+#define IXGBE_ACI_NVM_PRESERVE_SELECTED 0x6
+
+ /* For Write Activate, several flags are sent as part of a separate
+ * flags2 field using a separate byte. For simplicity of the software
+ * interface, we pass the flags as a 16 bit value so these flags are
+ * all offset by 8 bits
+ */
+#define IXGBE_ACI_NVM_ACTIV_REQ_EMPR BIT(8) /* NVM Write Activate only */
+ __le16 module_typeid;
+ __le16 length;
+#define IXGBE_ACI_NVM_ERASE_LEN 0xFFFF
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* NVM Module_Type ID, needed offset and read_len for
+ * struct ixgbe_aci_cmd_nvm.
+ */
+#define IXGBE_ACI_NVM_START_POINT 0
+
+/* NVM Checksum Command (direct, 0x0706) */
+struct ixgbe_aci_cmd_nvm_checksum {
+ u8 flags;
+#define IXGBE_ACI_NVM_CHECKSUM_VERIFY BIT(0)
+#define IXGBE_ACI_NVM_CHECKSUM_RECALC BIT(1)
+ u8 rsvd;
+ __le16 checksum; /* Used only by response */
+#define IXGBE_ACI_NVM_CHECKSUM_CORRECT 0xBABA
+ u8 rsvd2[12];
+};
+
+/* Used for NVM Set Package Data command - 0x070A */
+struct ixgbe_aci_cmd_nvm_pkg_data {
+ u8 reserved[3];
+ u8 cmd_flags;
+#define IXGBE_ACI_NVM_PKG_DELETE BIT(0) /* used for command call */
+
+ u32 reserved1;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Used for Pass Component Table command - 0x070B */
+struct ixgbe_aci_cmd_nvm_pass_comp_tbl {
+ u8 component_response; /* Response only */
+#define IXGBE_ACI_NVM_PASS_COMP_CAN_BE_UPDATED 0x0
+#define IXGBE_ACI_NVM_PASS_COMP_CAN_MAY_BE_UPDATEABLE 0x1
+#define IXGBE_ACI_NVM_PASS_COMP_CAN_NOT_BE_UPDATED 0x2
+#define IXGBE_ACI_NVM_PASS_COMP_PARTIAL_CHECK 0x3
+ u8 component_response_code; /* Response only */
+#define IXGBE_ACI_NVM_PASS_COMP_CAN_BE_UPDATED_CODE 0x0
+#define IXGBE_ACI_NVM_PASS_COMP_STAMP_IDENTICAL_CODE 0x1
+#define IXGBE_ACI_NVM_PASS_COMP_STAMP_LOWER 0x2
+#define IXGBE_ACI_NVM_PASS_COMP_INVALID_STAMP_CODE 0x3
+#define IXGBE_ACI_NVM_PASS_COMP_CONFLICT_CODE 0x4
+#define IXGBE_ACI_NVM_PASS_COMP_PRE_REQ_NOT_MET_CODE 0x5
+#define IXGBE_ACI_NVM_PASS_COMP_NOT_SUPPORTED_CODE 0x6
+#define IXGBE_ACI_NVM_PASS_COMP_CANNOT_DOWNGRADE_CODE 0x7
+#define IXGBE_ACI_NVM_PASS_COMP_INCOMPLETE_IMAGE_CODE 0x8
+#define IXGBE_ACI_NVM_PASS_COMP_VER_STR_IDENTICAL_CODE 0xA
+#define IXGBE_ACI_NVM_PASS_COMP_VER_STR_LOWER_CODE 0xB
+ u8 reserved;
+ u8 transfer_flag;
+ __le32 reserved1;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct ixgbe_aci_cmd_nvm_comp_tbl {
+ __le16 comp_class;
+#define NVM_COMP_CLASS_ALL_FW 0x000A
+
+ __le16 comp_id;
+#define NVM_COMP_ID_OROM 0x5
+#define NVM_COMP_ID_NVM 0x6
+#define NVM_COMP_ID_NETLIST 0x8
+
+ u8 comp_class_idx;
+#define FWU_COMP_CLASS_IDX_NOT_USE 0x0
+
+ __le32 comp_cmp_stamp;
+ u8 cvs_type;
+#define NVM_CVS_TYPE_ASCII 0x1
+
+ u8 cvs_len;
+ u8 cvs[]; /* Component Version String */
+} __packed;
+
+/**
+ * struct ixgbe_aci_desc - Admin Command (AC) descriptor
+ * @flags: IXGBE_ACI_FLAG_* flags
+ * @opcode: Admin command opcode
+ * @datalen: length in bytes of indirect/external data buffer
+ * @retval: return value from firmware
+ * @cookie_high: opaque data high-half
+ * @cookie_low: opaque data low-half
+ * @params: command-specific parameters
+ *
+ * Descriptor format for commands the driver posts via the
+ * Admin Command Interface (ACI).
+ * The firmware writes back onto the command descriptor and returns
+ * the result of the command. Asynchronous events that are not an immediate
+ * result of the command are written to the Admin Command Interface (ACI) using
+ * the same descriptor format. Descriptors are in little-endian notation with
+ * 32-bit words.
+ */
+struct ixgbe_aci_desc {
+ __le16 flags;
+ __le16 opcode;
+ __le16 datalen;
+ __le16 retval;
+ __le32 cookie_high;
+ __le32 cookie_low;
+ union {
+ u8 raw[16];
+ struct ixgbe_aci_cmd_get_ver get_ver;
+ struct ixgbe_aci_cmd_driver_ver driver_ver;
+ struct ixgbe_aci_cmd_get_exp_err exp_err;
+ struct ixgbe_aci_cmd_req_res res_owner;
+ struct ixgbe_aci_cmd_list_caps get_cap;
+ struct ixgbe_aci_cmd_disable_rxen disable_rxen;
+ struct ixgbe_aci_cmd_get_phy_caps get_phy;
+ struct ixgbe_aci_cmd_set_phy_cfg set_phy;
+ struct ixgbe_aci_cmd_restart_an restart_an;
+ struct ixgbe_aci_cmd_get_link_status get_link_status;
+ struct ixgbe_aci_cmd_set_event_mask set_event_mask;
+ struct ixgbe_aci_cmd_set_port_id_led set_port_id_led;
+ struct ixgbe_aci_cmd_get_link_topo get_link_topo;
+ struct ixgbe_aci_cmd_get_link_topo_pin get_link_topo_pin;
+ struct ixgbe_aci_cmd_sff_eeprom read_write_sff_param;
+ struct ixgbe_aci_cmd_nvm nvm;
+ struct ixgbe_aci_cmd_nvm_checksum nvm_checksum;
+ struct ixgbe_aci_cmd_nvm_pkg_data pkg_data;
+ struct ixgbe_aci_cmd_nvm_pass_comp_tbl pass_comp_tbl;
+ } params;
+};
+
+/* E610-specific adapter context structures */
+
+struct ixgbe_link_status {
+ /* Refer to ixgbe_aci_phy_type for bits definition */
+ u64 phy_type_low;
+ u64 phy_type_high;
+ u16 max_frame_size;
+ u16 link_speed;
+ u16 req_speeds;
+ u8 topo_media_conflict;
+ u8 link_cfg_err;
+ u8 lse_ena; /* Link Status Event notification */
+ u8 link_info;
+ u8 an_info;
+ u8 ext_info;
+ u8 fec_info;
+ u8 pacing;
+ /* Refer to #define from module_type[IXGBE_ACI_MODULE_TYPE_TOTAL_BYTE]
+ * of ixgbe_aci_get_phy_caps structure
+ */
+ u8 module_type[IXGBE_ACI_MODULE_TYPE_TOTAL_BYTE];
+};
+
+/* Common HW capabilities for SW use */
+struct ixgbe_hw_caps {
+ /* Write CSR protection */
+ u64 wr_csr_prot;
+ u32 switching_mode;
+ /* switching mode supported - EVB switching (including cloud) */
+#define IXGBE_NVM_IMAGE_TYPE_EVB 0x0
+
+ /* Manageability mode & supported protocols over MCTP */
+ u32 mgmt_mode;
+#define IXGBE_MGMT_MODE_PASS_THRU_MODE_M GENMASK(3, 0)
+#define IXGBE_MGMT_MODE_CTL_INTERFACE_M GENMASK(7, 4)
+#define IXGBE_MGMT_MODE_REDIR_SB_INTERFACE_M GENMASK(11, 8)
+
+ u32 mgmt_protocols_mctp;
+#define IXGBE_MGMT_MODE_PROTO_RSVD BIT(0)
+#define IXGBE_MGMT_MODE_PROTO_PLDM BIT(1)
+#define IXGBE_MGMT_MODE_PROTO_OEM BIT(2)
+#define IXGBE_MGMT_MODE_PROTO_NC_SI BIT(3)
+
+ u32 os2bmc;
+ u32 valid_functions;
+ /* DCB capabilities */
+ u32 active_tc_bitmap;
+ u32 maxtc;
+
+ /* RSS related capabilities */
+ u32 rss_table_size; /* 512 for PFs and 64 for VFs */
+ u32 rss_table_entry_width; /* RSS Entry width in bits */
+
+ /* Tx/Rx queues */
+ u32 num_rxq; /* Number/Total Rx queues */
+ u32 rxq_first_id; /* First queue ID for Rx queues */
+ u32 num_txq; /* Number/Total Tx queues */
+ u32 txq_first_id; /* First queue ID for Tx queues */
+
+ /* MSI-X vectors */
+ u32 num_msix_vectors;
+ u32 msix_vector_first_id;
+
+ /* Max MTU for function or device */
+ u32 max_mtu;
+
+ /* WOL related */
+ u32 num_wol_proxy_fltr;
+ u32 wol_proxy_vsi_seid;
+
+ /* LED/SDP pin count */
+ u32 led_pin_num;
+ u32 sdp_pin_num;
+
+ /* LED/SDP - Supports up to 12 LED pins and 8 SDP signals */
+#define IXGBE_MAX_SUPPORTED_GPIO_LED 12
+#define IXGBE_MAX_SUPPORTED_GPIO_SDP 8
+ u8 led[IXGBE_MAX_SUPPORTED_GPIO_LED];
+ u8 sdp[IXGBE_MAX_SUPPORTED_GPIO_SDP];
+ /* SR-IOV virtualization */
+ u8 sr_iov_1_1; /* SR-IOV enabled */
+ /* VMDQ */
+ u8 vmdq; /* VMDQ supported */
+
+ /* EVB capabilities */
+ u8 evb_802_1_qbg; /* Edge Virtual Bridging */
+ u8 evb_802_1_qbh; /* Bridge Port Extension */
+
+ u8 dcb;
+ u8 iscsi;
+ u8 ieee_1588;
+ u8 mgmt_cem;
+
+ /* WoL and APM support */
+#define IXGBE_WOL_SUPPORT_M BIT(0)
+#define IXGBE_ACPI_PROG_MTHD_M BIT(1)
+#define IXGBE_PROXY_SUPPORT_M BIT(2)
+ u8 apm_wol_support;
+ u8 acpi_prog_mthd;
+ u8 proxy_support;
+ bool nvm_update_pending_nvm;
+ bool nvm_update_pending_orom;
+ bool nvm_update_pending_netlist;
+#define IXGBE_NVM_PENDING_NVM_IMAGE BIT(0)
+#define IXGBE_NVM_PENDING_OROM BIT(1)
+#define IXGBE_NVM_PENDING_NETLIST BIT(2)
+ bool sec_rev_disabled;
+ bool update_disabled;
+ bool nvm_unified_update;
+ bool netlist_auth;
+#define IXGBE_NVM_MGMT_SEC_REV_DISABLED BIT(0)
+#define IXGBE_NVM_MGMT_UPDATE_DISABLED BIT(1)
+#define IXGBE_NVM_MGMT_UNIFIED_UPD_SUPPORT BIT(3)
+#define IXGBE_NVM_MGMT_NETLIST_AUTH_SUPPORT BIT(5)
+ bool no_drop_policy_support;
+ /* PCIe reset avoidance */
+ bool pcie_reset_avoidance; /* false: not supported, true: supported */
+ /* Post update reset restriction */
+ bool reset_restrict_support; /* false: not supported, true: supported */
+
+ /* External topology device images within the NVM */
+#define IXGBE_EXT_TOPO_DEV_IMG_COUNT 4
+ u32 ext_topo_dev_img_ver_high[IXGBE_EXT_TOPO_DEV_IMG_COUNT];
+ u32 ext_topo_dev_img_ver_low[IXGBE_EXT_TOPO_DEV_IMG_COUNT];
+ u8 ext_topo_dev_img_part_num[IXGBE_EXT_TOPO_DEV_IMG_COUNT];
+#define IXGBE_EXT_TOPO_DEV_IMG_PART_NUM_S 8
+#define IXGBE_EXT_TOPO_DEV_IMG_PART_NUM_M GENMASK(15, 8)
+ bool ext_topo_dev_img_load_en[IXGBE_EXT_TOPO_DEV_IMG_COUNT];
+#define IXGBE_EXT_TOPO_DEV_IMG_LOAD_EN BIT(0)
+ bool ext_topo_dev_img_prog_en[IXGBE_EXT_TOPO_DEV_IMG_COUNT];
+#define IXGBE_EXT_TOPO_DEV_IMG_PROG_EN BIT(1)
+} __packed;
+
+#define IXGBE_OROM_CIV_SIGNATURE "$CIV"
+
+struct ixgbe_orom_civd_info {
+ u8 signature[4]; /* Must match ASCII '$CIV' characters */
+ u8 checksum; /* Simple modulo 256 sum of all structure bytes must equal 0 */
+ __le32 combo_ver; /* Combo Image Version number */
+ u8 combo_name_len; /* Length of the unicode combo image version string, max of 32 */
+ __le16 combo_name[32]; /* Unicode string representing the Combo Image version */
+};
+
+/* Function specific capabilities */
+struct ixgbe_hw_func_caps {
+ u32 num_allocd_vfs; /* Number of allocated VFs */
+ u32 vf_base_id; /* Logical ID of the first VF */
+ u32 guar_num_vsi;
+ struct ixgbe_hw_caps common_cap;
+ bool no_drop_policy_ena;
+};
+
+/* Device wide capabilities */
+struct ixgbe_hw_dev_caps {
+ struct ixgbe_hw_caps common_cap;
+ u32 num_vfs_exposed; /* Total number of VFs exposed */
+ u32 num_vsi_allocd_to_host; /* Excluding EMP VSI */
+ u32 num_flow_director_fltr; /* Number of FD filters available */
+ u32 num_funcs;
+};
+
+/* ACI event information */
+struct ixgbe_aci_event {
+ struct ixgbe_aci_desc desc;
+ u8 *msg_buf;
+ u16 msg_len;
+ u16 buf_len;
+};
+
+struct ixgbe_aci_info {
+ struct mutex lock; /* admin command interface lock */
+ enum ixgbe_aci_err last_status; /* last status of sent admin command */
+};
+
+enum ixgbe_bank_select {
+ IXGBE_ACTIVE_FLASH_BANK,
+ IXGBE_INACTIVE_FLASH_BANK,
+};
+
+/* Option ROM version information */
+struct ixgbe_orom_info {
+ u8 major; /* Major version of OROM */
+ u8 patch; /* Patch version of OROM */
+ u16 build; /* Build version of OROM */
+ u32 srev; /* Security revision */
+};
+
+/* NVM version information */
+struct ixgbe_nvm_info {
+ u32 eetrack;
+ u32 srev;
+ u8 major;
+ u8 minor;
+} __packed;
+
+/* netlist version information */
+struct ixgbe_netlist_info {
+ u32 major; /* major high/low */
+ u32 minor; /* minor high/low */
+ u32 type; /* type high/low */
+ u32 rev; /* revision high/low */
+ u32 hash; /* SHA-1 hash word */
+ u16 cust_ver; /* customer version */
+} __packed;
+
+/* Enumeration of possible flash banks for the NVM, OROM, and Netlist modules
+ * of the flash image.
+ */
+enum ixgbe_flash_bank {
+ IXGBE_INVALID_FLASH_BANK,
+ IXGBE_1ST_FLASH_BANK,
+ IXGBE_2ND_FLASH_BANK,
+};
+
+/* information for accessing NVM, OROM, and Netlist flash banks */
+struct ixgbe_bank_info {
+ u32 nvm_ptr; /* Pointer to 1st NVM bank */
+ u32 nvm_size; /* Size of NVM bank */
+ u32 orom_ptr; /* Pointer to 1st OROM bank */
+ u32 orom_size; /* Size of OROM bank */
+ u32 netlist_ptr; /* Ptr to 1st Netlist bank */
+ u32 netlist_size; /* Size of Netlist bank */
+ enum ixgbe_flash_bank nvm_bank; /* Active NVM bank */
+ enum ixgbe_flash_bank orom_bank; /* Active OROM bank */
+ enum ixgbe_flash_bank netlist_bank; /* Active Netlist bank */
+};
+
+/* Flash Chip Information */
+struct ixgbe_flash_info {
+ struct ixgbe_orom_info orom; /* Option ROM version info */
+ u32 flash_size; /* Available flash size in bytes */
+ struct ixgbe_nvm_info nvm; /* NVM version information */
+ struct ixgbe_netlist_info netlist; /* Netlist version info */
+ struct ixgbe_bank_info banks; /* Flash Bank information */
+ u16 sr_words; /* Shadow RAM size in words */
+ u8 blank_nvm_mode; /* is NVM empty (no FW present) */
+};
+
+#endif /* _IXGBE_TYPE_E610_H_ */
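
A minimal usage sketch (illustrative only, not part of the patch): a direct ACI command such as Get Version (0x0001) is issued by filling one 32-byte struct ixgbe_aci_desc from the definitions above. The helper name below is hypothetical and assumes the normal kernel build environment plus this header.

/* Illustrative sketch, not part of the patch: prepare a direct (bufferless)
 * ACI descriptor for ixgbe_aci_opc_get_ver. The helper name is hypothetical.
 */
static void example_prep_aci_get_ver(struct ixgbe_aci_desc *desc)
{
	memset(desc, 0, sizeof(*desc));
	desc->opcode = cpu_to_le16(ixgbe_aci_opc_get_ver);
	/* Direct command: no external buffer, so datalen stays 0 and the
	 * BUF/RD flags stay clear. Firmware writes DD/CMP (and ERR on
	 * failure) back into flags and fills params.get_ver on completion.
	 */
	desc->flags = cpu_to_le16(IXGBE_ACI_FLAG_SI);
}

An indirect command would additionally set IXGBE_ACI_FLAG_BUF (and IXGBE_ACI_FLAG_RD when the buffer carries data toward the firmware), program datalen with the buffer length up to IXGBE_ACI_MAX_BUFFER_SIZE, and place the DMA address in the addr_high/addr_low words of the command-specific params.
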
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index 81e1df83f136..f1ab95aa8c83 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#include <linux/pci.h>
#include <linux/delay.h>
@@ -66,7 +66,9 @@ int ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed,
* Resets the hardware by resetting the transmit and receive units, masks
* and clears all interrupts, perform a PHY reset, and perform a link (MAC)
* reset.
- **/
+ *
+ * Return: 0 on success or negative value on failure
+ */
int ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
{
u32 swfw_mask = hw->phy.phy_semaphore_mask;
@@ -133,10 +135,14 @@ mac_reset_top:
hw->mac.num_rar_entries = IXGBE_X540_MAX_TX_QUEUES;
hw->mac.ops.init_rx_addrs(hw);
+ /* The following is not supported by E610. */
+ if (hw->mac.type == ixgbe_mac_e610)
+ return status;
+
/* Store the permanent SAN mac address */
hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
- /* Add the SAN MAC address to the RAR only if it's a valid address */
+ /* Add the SAN MAC address to RAR if it's a valid address */
if (is_valid_ether_addr(hw->mac.san_addr)) {
/* Save the SAN MAC RAR index */
hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
@@ -888,6 +894,7 @@ static const struct ixgbe_eeprom_operations eeprom_ops_X540 = {
.calc_checksum = &ixgbe_calc_eeprom_checksum_X540,
.validate_checksum = &ixgbe_validate_eeprom_checksum_X540,
.update_checksum = &ixgbe_update_eeprom_checksum_X540,
+ .read_pba_string = &ixgbe_read_pba_string_generic,
};
static const struct ixgbe_phy_operations phy_ops_X540 = {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h
index b69a680d3ab5..6ed360c5b605 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h
@@ -1,5 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
+
+#ifndef _IXGBE_X540_H_
+#define _IXGBE_X540_H_
#include "ixgbe_type.h"
@@ -17,3 +20,5 @@ int ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask);
void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask);
void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw);
int ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw);
+
+#endif /* _IXGBE_X540_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index d9a8cf018d3b..1d2acdb64f45 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#include "ixgbe_x540.h"
+#include "ixgbe_x550.h"
#include "ixgbe_type.h"
#include "ixgbe_common.h"
#include "ixgbe_mbx.h"
@@ -2770,9 +2771,9 @@ static int ixgbe_led_off_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
* semaphore, -EIO when command fails or -ENIVAL when incorrect
* params passed.
**/
-static int ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
- u8 build, u8 sub, u16 len,
- const char *driver_ver)
+int ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
+ u8 build, u8 sub, u16 len,
+ const char *driver_ver)
{
struct ixgbe_hic_drv_info2 fw_cmd;
int ret_val;
@@ -3505,14 +3506,14 @@ mac_reset_top:
return status;
}
-/** ixgbe_set_ethertype_anti_spoofing_X550 - Enable/Disable Ethertype
+/** ixgbe_set_ethertype_anti_spoofing_x550 - Enable/Disable Ethertype
* anti-spoofing
* @hw: pointer to hardware structure
* @enable: enable or disable switch for Ethertype anti-spoofing
* @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing
**/
-static void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw,
- bool enable, int vf)
+void ixgbe_set_ethertype_anti_spoofing_x550(struct ixgbe_hw *hw,
+ bool enable, int vf)
{
int vf_target_reg = vf >> 3;
int vf_target_shift = vf % 8 + IXGBE_SPOOF_ETHERTYPEAS_SHIFT;
@@ -3527,14 +3528,14 @@ static void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw,
IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
}
-/** ixgbe_set_source_address_pruning_X550 - Enable/Disbale src address pruning
+/** ixgbe_set_source_address_pruning_x550 - Enable/Disable src address pruning
* @hw: pointer to hardware structure
* @enable: enable or disable source address pruning
* @pool: Rx pool to set source address pruning for
**/
-static void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw,
- bool enable,
- unsigned int pool)
+void ixgbe_set_source_address_pruning_x550(struct ixgbe_hw *hw,
+ bool enable,
+ unsigned int pool)
{
u64 pfflp;
@@ -3831,9 +3832,9 @@ static int ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
.set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing, \
.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, \
.set_source_address_pruning = \
- &ixgbe_set_source_address_pruning_X550, \
+ &ixgbe_set_source_address_pruning_x550, \
.set_ethertype_anti_spoofing = \
- &ixgbe_set_ethertype_anti_spoofing_X550, \
+ &ixgbe_set_ethertype_anti_spoofing_x550, \
.disable_rx_buff = &ixgbe_disable_rx_buff_generic, \
.enable_rx_buff = &ixgbe_enable_rx_buff_generic, \
.get_thermal_sensor_data = NULL, \
@@ -3958,6 +3959,7 @@ static const struct ixgbe_mac_operations mac_ops_x550em_a_fw = {
.validate_checksum = &ixgbe_validate_eeprom_checksum_X550, \
.update_checksum = &ixgbe_update_eeprom_checksum_X550, \
.calc_checksum = &ixgbe_calc_eeprom_checksum_X550, \
+ .read_pba_string = &ixgbe_read_pba_string_generic, \
static const struct ixgbe_eeprom_operations eeprom_ops_X550 = {
X550_COMMON_EEP
@@ -4047,7 +4049,7 @@ static const u32 ixgbe_mvals_X550EM_x[IXGBE_MVALS_IDX_LIMIT] = {
IXGBE_MVALS_INIT(X550EM_x)
};
-static const u32 ixgbe_mvals_x550em_a[IXGBE_MVALS_IDX_LIMIT] = {
+const u32 ixgbe_mvals_x550em_a[IXGBE_MVALS_IDX_LIMIT] = {
IXGBE_MVALS_INIT(X550EM_a)
};
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.h
new file mode 100644
index 000000000000..3e4092f8da3e
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2024 Intel Corporation. */
+
+#ifndef _IXGBE_X550_H_
+#define _IXGBE_X550_H_
+
+#include "ixgbe_type.h"
+
+extern const u32 ixgbe_mvals_x550em_a[IXGBE_MVALS_IDX_LIMIT];
+
+int ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
+ u8 build, u8 sub, u16 len,
+ const char *driver_ver);
+void ixgbe_set_source_address_pruning_x550(struct ixgbe_hw *hw,
+ bool enable,
+ unsigned int pool);
+void ixgbe_set_ethertype_anti_spoofing_x550(struct ixgbe_hw *hw,
+ bool enable, int vf);
+
+#endif /* _IXGBE_X550_H_ */
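
With ixgbe_set_fw_drv_ver_x550(), ixgbe_set_source_address_pruning_x550() and ixgbe_set_ethertype_anti_spoofing_x550() now exported here instead of being static in ixgbe_x550.c, other MAC families can plug them straight into their ops tables. A hedged, illustrative sketch follows (the table name is hypothetical; the field names are the existing ixgbe_mac_operations members seen in the x550 diff above):

/* Illustrative only, not part of the patch. */
static const struct ixgbe_mac_operations mac_ops_example_reuse = {
	.set_source_address_pruning	= &ixgbe_set_source_address_pruning_x550,
	.set_ethertype_anti_spoofing	= &ixgbe_set_ethertype_anti_spoofing_x550,
};
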
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index 3e3b471e53f0..ac58964b2f08 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -508,7 +508,7 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
int ixgbe_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ixgbe_ring *ring;
if (test_bit(__IXGBE_DOWN, &adapter->state))
diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h
index 5f08779c0e4e..a9bc96f6399d 100644
--- a/drivers/net/ethernet/intel/ixgbevf/defines.h
+++ b/drivers/net/ethernet/intel/ixgbevf/defines.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#ifndef _IXGBEVF_DEFINES_H_
#define _IXGBEVF_DEFINES_H_
@@ -16,6 +16,9 @@
#define IXGBE_DEV_ID_X550_VF_HV 0x1564
#define IXGBE_DEV_ID_X550EM_X_VF_HV 0x15A9
+#define IXGBE_DEV_ID_E610_VF 0x57AD
+#define IXGBE_SUBDEV_ID_E610_VF_HV 0x00FF
+
#define IXGBE_VF_IRQ_CLEAR_MASK 7
#define IXGBE_VF_MAX_TX_QUEUES 8
#define IXGBE_VF_MAX_RX_QUEUES 8
diff --git a/drivers/net/ethernet/intel/ixgbevf/ipsec.c b/drivers/net/ethernet/intel/ixgbevf/ipsec.c
index 66cf17f19408..65580b9cb06f 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ipsec.c
@@ -201,6 +201,7 @@ struct xfrm_state *ixgbevf_ipsec_find_rx_state(struct ixgbevf_ipsec *ipsec,
/**
* ixgbevf_ipsec_parse_proto_keys - find the key and salt based on the protocol
+ * @dev: pointer to net device to program
* @xs: pointer to xfrm_state struct
* @mykey: pointer to key array to populate
* @mysalt: pointer to salt value to populate
@@ -208,10 +209,10 @@ struct xfrm_state *ixgbevf_ipsec_find_rx_state(struct ixgbevf_ipsec *ipsec,
* This copies the protocol keys and salt to our own data tables. The
* 82599 family only supports the one algorithm.
**/
-static int ixgbevf_ipsec_parse_proto_keys(struct xfrm_state *xs,
+static int ixgbevf_ipsec_parse_proto_keys(struct net_device *dev,
+ struct xfrm_state *xs,
u32 *mykey, u32 *mysalt)
{
- struct net_device *dev = xs->xso.real_dev;
unsigned char *key_data;
char *alg_name = NULL;
int key_len;
@@ -256,13 +257,14 @@ static int ixgbevf_ipsec_parse_proto_keys(struct xfrm_state *xs,
/**
* ixgbevf_ipsec_add_sa - program device with a security association
+ * @dev: pointer to net device to program
* @xs: pointer to transformer state struct
* @extack: extack point to fill failure reason
**/
-static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs,
+static int ixgbevf_ipsec_add_sa(struct net_device *dev,
+ struct xfrm_state *xs,
struct netlink_ext_ack *extack)
{
- struct net_device *dev = xs->xso.real_dev;
struct ixgbevf_adapter *adapter;
struct ixgbevf_ipsec *ipsec;
u16 sa_idx;
@@ -310,7 +312,8 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs,
rsa.decrypt = xs->ealg || xs->aead;
/* get the key and salt */
- ret = ixgbevf_ipsec_parse_proto_keys(xs, rsa.key, &rsa.salt);
+ ret = ixgbevf_ipsec_parse_proto_keys(dev, xs, rsa.key,
+ &rsa.salt);
if (ret) {
NL_SET_ERR_MSG_MOD(extack, "Failed to get key data for Rx SA table");
return ret;
@@ -363,7 +366,8 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs,
if (xs->id.proto & IPPROTO_ESP)
tsa.encrypt = xs->ealg || xs->aead;
- ret = ixgbevf_ipsec_parse_proto_keys(xs, tsa.key, &tsa.salt);
+ ret = ixgbevf_ipsec_parse_proto_keys(dev, xs, tsa.key,
+ &tsa.salt);
if (ret) {
NL_SET_ERR_MSG_MOD(extack, "Failed to get key data for Tx SA table");
memset(&tsa, 0, sizeof(tsa));
@@ -388,11 +392,12 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs,
/**
* ixgbevf_ipsec_del_sa - clear out this specific SA
+ * @dev: pointer to net device to program
* @xs: pointer to transformer state struct
**/
-static void ixgbevf_ipsec_del_sa(struct xfrm_state *xs)
+static void ixgbevf_ipsec_del_sa(struct net_device *dev,
+ struct xfrm_state *xs)
{
- struct net_device *dev = xs->xso.real_dev;
struct ixgbevf_adapter *adapter;
struct ixgbevf_ipsec *ipsec;
u16 sa_idx;
@@ -428,30 +433,9 @@ static void ixgbevf_ipsec_del_sa(struct xfrm_state *xs)
}
}
-/**
- * ixgbevf_ipsec_offload_ok - can this packet use the xfrm hw offload
- * @skb: current data packet
- * @xs: pointer to transformer state struct
- **/
-static bool ixgbevf_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
-{
- if (xs->props.family == AF_INET) {
- /* Offload with IPv4 options is not supported yet */
- if (ip_hdr(skb)->ihl != 5)
- return false;
- } else {
- /* Offload with IPv6 extension headers is not support yet */
- if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
- return false;
- }
-
- return true;
-}
-
static const struct xfrmdev_ops ixgbevf_xfrmdev_ops = {
.xdo_dev_state_add = ixgbevf_ipsec_add_sa,
.xdo_dev_state_delete = ixgbevf_ipsec_del_sa,
- .xdo_dev_offload_ok = ixgbevf_ipsec_offload_ok,
};
/**
@@ -629,7 +613,6 @@ void ixgbevf_init_ipsec_offload(struct ixgbevf_adapter *adapter)
switch (adapter->hw.api_version) {
case ixgbe_mbox_api_14:
- case ixgbe_mbox_api_15:
break;
default:
return;
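
The ipsec callback changes above hand the offloading net_device to the driver as an explicit parameter instead of having it fish the device out of xs->xso.real_dev. A minimal sketch of a callback under the new xdo_dev_state_add prototype (the function name and the family check are illustrative, not taken from the patch):

/* Illustrative only, not part of the patch. */
static int example_xdo_dev_state_add(struct net_device *dev,
				     struct xfrm_state *xs,
				     struct netlink_ext_ack *extack)
{
	/* The device is passed in directly; no xs->xso.real_dev lookup. */
	if (xs->props.family != AF_INET && xs->props.family != AF_INET6) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported address family");
		return -EINVAL;
	}
	return 0;
}
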
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 130cb868774c..4384e892f967 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#ifndef _IXGBEVF_H_
#define _IXGBEVF_H_
@@ -418,6 +418,8 @@ enum ixgbevf_boards {
board_X550EM_x_vf,
board_X550EM_x_vf_hv,
board_x550em_a_vf,
+ board_e610_vf,
+ board_e610_vf_hv,
};
enum ixgbevf_xcast_modes {
@@ -434,12 +436,13 @@ extern const struct ixgbevf_info ixgbevf_X550EM_x_vf_info;
extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops;
extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops_legacy;
extern const struct ixgbevf_info ixgbevf_x550em_a_vf_info;
+extern const struct ixgbevf_info ixgbevf_e610_vf_info;
extern const struct ixgbevf_info ixgbevf_82599_vf_hv_info;
extern const struct ixgbevf_info ixgbevf_X540_vf_hv_info;
extern const struct ixgbevf_info ixgbevf_X550_vf_hv_info;
extern const struct ixgbevf_info ixgbevf_X550EM_x_vf_hv_info;
-extern const struct ixgbe_mbx_operations ixgbevf_hv_mbx_ops;
+extern const struct ixgbevf_info ixgbevf_e610_vf_hv_info;
/* needed by ethtool.c */
extern const char ixgbevf_driver_name[];
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 149911e3002a..535d0f71f521 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
/******************************************************************************
Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
@@ -39,7 +39,7 @@ static const char ixgbevf_driver_string[] =
"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
static char ixgbevf_copyright[] =
- "Copyright (c) 2009 - 2018 Intel Corporation.";
+ "Copyright (c) 2009 - 2024 Intel Corporation.";
static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
[board_82599_vf] = &ixgbevf_82599_vf_info,
@@ -51,6 +51,8 @@ static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
[board_X550EM_x_vf] = &ixgbevf_X550EM_x_vf_info,
[board_X550EM_x_vf_hv] = &ixgbevf_X550EM_x_vf_hv_info,
[board_x550em_a_vf] = &ixgbevf_x550em_a_vf_info,
+ [board_e610_vf] = &ixgbevf_e610_vf_info,
+ [board_e610_vf_hv] = &ixgbevf_e610_vf_hv_info,
};
/* ixgbevf_pci_tbl - PCI Device ID Table
@@ -71,6 +73,9 @@ static const struct pci_device_id ixgbevf_pci_tbl[] = {
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF), board_X550EM_x_vf },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF_HV), board_X550EM_x_vf_hv},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_VF), board_x550em_a_vf },
+ {PCI_VDEVICE_SUB(INTEL, IXGBE_DEV_ID_E610_VF, PCI_ANY_ID,
+ IXGBE_SUBDEV_ID_E610_VF_HV), board_e610_vf_hv},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_E610_VF), board_e610_vf},
/* required last entry */
{0, }
};
@@ -732,10 +737,6 @@ static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring,
union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
- /* XDP packets use error pointer so abort at this point */
- if (IS_ERR(skb))
- return true;
-
/* verify that the packet does not have any known errors */
if (unlikely(ixgbevf_test_staterr(rx_desc,
IXGBE_RXDADV_ERR_FRAME_ERR_MASK))) {
@@ -1044,9 +1045,9 @@ static int ixgbevf_xmit_xdp_ring(struct ixgbevf_ring *ring,
return IXGBEVF_XDP_TX;
}
-static struct sk_buff *ixgbevf_run_xdp(struct ixgbevf_adapter *adapter,
- struct ixgbevf_ring *rx_ring,
- struct xdp_buff *xdp)
+static int ixgbevf_run_xdp(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *rx_ring,
+ struct xdp_buff *xdp)
{
int result = IXGBEVF_XDP_PASS;
struct ixgbevf_ring *xdp_ring;
@@ -1080,7 +1081,7 @@ out_failure:
break;
}
xdp_out:
- return ERR_PTR(-result);
+ return result;
}
static unsigned int ixgbevf_rx_frame_truesize(struct ixgbevf_ring *rx_ring,
@@ -1122,6 +1123,7 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
struct sk_buff *skb = rx_ring->skb;
bool xdp_xmit = false;
struct xdp_buff xdp;
+ int xdp_res = 0;
/* Frame size depend on rx_ring setup when PAGE_SIZE=4K */
#if (PAGE_SIZE < 8192)
@@ -1165,11 +1167,11 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
/* At larger PAGE_SIZE, frame_sz depend on len size */
xdp.frame_sz = ixgbevf_rx_frame_truesize(rx_ring, size);
#endif
- skb = ixgbevf_run_xdp(adapter, rx_ring, &xdp);
+ xdp_res = ixgbevf_run_xdp(adapter, rx_ring, &xdp);
}
- if (IS_ERR(skb)) {
- if (PTR_ERR(skb) == -IXGBEVF_XDP_TX) {
+ if (xdp_res) {
+ if (xdp_res == IXGBEVF_XDP_TX) {
xdp_xmit = true;
ixgbevf_rx_buffer_flip(rx_ring, rx_buffer,
size);
@@ -1189,7 +1191,7 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
}
/* exit if we failed to retrieve a buffer */
- if (!skb) {
+ if (!xdp_res && !skb) {
rx_ring->rx_stats.alloc_rx_buff_failed++;
rx_buffer->pagecnt_bias++;
break;
@@ -1203,7 +1205,7 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
continue;
/* verify the packet layout is correct */
- if (ixgbevf_cleanup_headers(rx_ring, rx_desc, skb)) {
+ if (xdp_res || ixgbevf_cleanup_headers(rx_ring, rx_desc, skb)) {
skb = NULL;
continue;
}
@@ -2512,7 +2514,7 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter)
ixgbevf_napi_disable_all(adapter);
- del_timer_sync(&adapter->service_timer);
+ timer_delete_sync(&adapter->service_timer);
/* disable transmits in the hardware now that interrupts are off */
for (i = 0; i < adapter->num_tx_queues; i++) {
@@ -3174,8 +3176,8 @@ void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
**/
static void ixgbevf_service_timer(struct timer_list *t)
{
- struct ixgbevf_adapter *adapter = from_timer(adapter, t,
- service_timer);
+ struct ixgbevf_adapter *adapter = timer_container_of(adapter, t,
+ service_timer);
/* Reset the timer */
mod_timer(&adapter->service_timer, (HZ * 2) + jiffies);
@@ -4693,6 +4695,9 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
case ixgbe_mac_X540_vf:
dev_info(&pdev->dev, "Intel(R) X540 Virtual Function\n");
break;
+ case ixgbe_mac_e610_vf:
+ dev_info(&pdev->dev, "Intel(R) E610 Virtual Function\n");
+ break;
case ixgbe_mac_82599_vf:
default:
dev_info(&pdev->dev, "Intel(R) 82599 Virtual Function\n");
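Besides the E610 VF device IDs and copyright bumps, the ixgbevf_main.c hunks rework the XDP path so ixgbevf_run_xdp() returns a plain int verdict rather than smuggling it through an ERR_PTR()-encoded skb; the Rx loop then keys off xdp_res instead of IS_ERR(skb). A minimal sketch of that pattern under hypothetical names (my_run_xdp and the MY_XDP_* verdicts are placeholders, not the driver's symbols):

#define MY_XDP_PASS	0
#define MY_XDP_CONSUMED	BIT(0)
#define MY_XDP_TX	BIT(1)

static int my_run_xdp(struct bpf_prog *prog, struct xdp_buff *xdp)
{
	switch (bpf_prog_run_xdp(prog, xdp)) {
	case XDP_PASS:
		return MY_XDP_PASS;		/* caller builds an skb as usual */
	case XDP_TX:
		return MY_XDP_TX;		/* caller flips the buffer for Tx */
	default:
		return MY_XDP_CONSUMED;		/* dropped, aborted or otherwise consumed */
	}
}

The caller can then treat any non-zero verdict as "no skb will be built for this buffer", which is what the xdp_res checks in the hunks above express.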
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.c b/drivers/net/ethernet/intel/ixgbevf/mbx.c
index a55dd978f7ca..24d0237e7a99 100644
--- a/drivers/net/ethernet/intel/ixgbevf/mbx.c
+++ b/drivers/net/ethernet/intel/ixgbevf/mbx.c
@@ -505,15 +505,3 @@ const struct ixgbe_mbx_operations ixgbevf_mbx_ops_legacy = {
.check_for_ack = ixgbevf_check_for_ack_vf,
.check_for_rst = ixgbevf_check_for_rst_vf,
};
-
-/* Mailbox operations when running on Hyper-V.
- * On Hyper-V, PF/VF communication is not through the
- * hardware mailbox; this communication is through
- * a software mediated path.
- * Most mail box operations are noop while running on
- * Hyper-V.
- */
-const struct ixgbe_mbx_operations ixgbevf_hv_mbx_ops = {
- .init_params = ixgbevf_init_mbx_params_vf,
- .check_for_rst = ixgbevf_check_for_rst_vf,
-};
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index 1641d00d8ed3..da7a72ecce7a 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#include "vf.h"
#include "ixgbevf.h"
@@ -1076,3 +1076,13 @@ const struct ixgbevf_info ixgbevf_x550em_a_vf_info = {
.mac = ixgbe_mac_x550em_a_vf,
.mac_ops = &ixgbevf_mac_ops,
};
+
+const struct ixgbevf_info ixgbevf_e610_vf_info = {
+ .mac = ixgbe_mac_e610_vf,
+ .mac_ops = &ixgbevf_mac_ops,
+};
+
+const struct ixgbevf_info ixgbevf_e610_vf_hv_info = {
+ .mac = ixgbe_mac_e610_vf,
+ .mac_ops = &ixgbevf_hv_mac_ops,
+};
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
index b4eef5b6c172..2d791bc26ae4 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#ifndef __IXGBE_VF_H__
#define __IXGBE_VF_H__
@@ -54,6 +54,8 @@ enum ixgbe_mac_type {
ixgbe_mac_X550_vf,
ixgbe_mac_X550EM_x_vf,
ixgbe_mac_x550em_a_vf,
+ ixgbe_mac_e610,
+ ixgbe_mac_e610_vf,
ixgbe_num_macs
};
diff --git a/drivers/net/ethernet/intel/libeth/rx.c b/drivers/net/ethernet/intel/libeth/rx.c
index f20926669318..66d1d23b8ad2 100644
--- a/drivers/net/ethernet/intel/libeth/rx.c
+++ b/drivers/net/ethernet/intel/libeth/rx.c
@@ -186,7 +186,7 @@ err_buf:
return -ENOMEM;
}
-EXPORT_SYMBOL_NS_GPL(libeth_rx_fq_create, LIBETH);
+EXPORT_SYMBOL_NS_GPL(libeth_rx_fq_create, "LIBETH");
/**
* libeth_rx_fq_destroy - destroy a &page_pool created by libeth
@@ -197,7 +197,7 @@ void libeth_rx_fq_destroy(struct libeth_fq *fq)
kvfree(fq->fqes);
page_pool_destroy(fq->pp);
}
-EXPORT_SYMBOL_NS_GPL(libeth_rx_fq_destroy, LIBETH);
+EXPORT_SYMBOL_NS_GPL(libeth_rx_fq_destroy, "LIBETH");
/**
* libeth_rx_recycle_slow - recycle a libeth page from the NAPI context
@@ -209,7 +209,7 @@ void libeth_rx_recycle_slow(struct page *page)
{
page_pool_recycle_direct(page->pp, page);
}
-EXPORT_SYMBOL_NS_GPL(libeth_rx_recycle_slow, LIBETH);
+EXPORT_SYMBOL_NS_GPL(libeth_rx_recycle_slow, "LIBETH");
/* Converting abstract packet type numbers into a software structure with
* the packet parameters to do O(1) lookup on Rx.
@@ -251,7 +251,7 @@ void libeth_rx_pt_gen_hash_type(struct libeth_rx_pt *pt)
pt->hash_type |= libeth_rx_pt_xdp_iprot[pt->inner_prot];
pt->hash_type |= libeth_rx_pt_xdp_pl[pt->payload_layer];
}
-EXPORT_SYMBOL_NS_GPL(libeth_rx_pt_gen_hash_type, LIBETH);
+EXPORT_SYMBOL_NS_GPL(libeth_rx_pt_gen_hash_type, "LIBETH");
/* Module */
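The libeth hunks above (and the libie ones below) are part of the tree-wide switch of symbol namespaces from bare preprocessor tokens to string literals; the exporting and importing modules simply have to agree on the same string. A minimal sketch with a hypothetical namespace and symbol:

/* exporting module */
int libfoo_do_thing(void);			/* hypothetical symbol */
EXPORT_SYMBOL_NS_GPL(libfoo_do_thing, "LIBFOO");

/* any consumer of that symbol */
MODULE_IMPORT_NS("LIBFOO");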
diff --git a/drivers/net/ethernet/intel/libie/rx.c b/drivers/net/ethernet/intel/libie/rx.c
index aceb8d8813c4..66a9825fe11f 100644
--- a/drivers/net/ethernet/intel/libie/rx.c
+++ b/drivers/net/ethernet/intel/libie/rx.c
@@ -116,8 +116,8 @@ const struct libeth_rx_pt libie_rx_pt_lut[LIBIE_RX_PT_NUM] = {
LIBIE_RX_PT_IP(4),
LIBIE_RX_PT_IP(6),
};
-EXPORT_SYMBOL_NS_GPL(libie_rx_pt_lut, LIBIE);
+EXPORT_SYMBOL_NS_GPL(libie_rx_pt_lut, "LIBIE");
MODULE_DESCRIPTION("Intel(R) Ethernet common library");
-MODULE_IMPORT_NS(LIBETH);
+MODULE_IMPORT_NS("LIBETH");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index 87c7e6251a4f..891a94d89f4b 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -900,7 +900,8 @@ static void korina_check_media(struct net_device *dev, unsigned int init_media)
static void korina_poll_media(struct timer_list *t)
{
- struct korina_private *lp = from_timer(lp, t, media_check_timer);
+ struct korina_private *lp = timer_container_of(lp, t,
+ media_check_timer);
struct net_device *dev = lp->dev;
korina_check_media(dev, 0);
@@ -1239,7 +1240,7 @@ static int korina_close(struct net_device *dev)
struct korina_private *lp = netdev_priv(dev);
u32 tmp;
- del_timer(&lp->media_check_timer);
+ timer_delete(&lp->media_check_timer);
/* Disable interrupts */
disable_irq(lp->rx_irq);
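The korina hunks (and matching ones in several drivers below) follow the timer API renames: from_timer() becomes timer_container_of(), and del_timer()/del_timer_sync() become timer_delete()/timer_delete_sync(), with unchanged semantics. A minimal sketch using a hypothetical my_priv with a watchdog field:

struct my_priv {
	struct timer_list watchdog;
};

static void my_watchdog_fn(struct timer_list *t)
{
	struct my_priv *priv = timer_container_of(priv, t, watchdog);

	/* ... do periodic work ... */
	mod_timer(&priv->watchdog, jiffies + HZ);
}

static void my_stop(struct my_priv *priv)
{
	timer_delete_sync(&priv->watchdog);	/* also waits for a running callback */
}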
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 660dff5426e7..83ce3bfefa5c 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -90,7 +90,6 @@ struct ltq_etop_priv {
struct net_device *netdev;
struct platform_device *pdev;
struct ltq_eth_data *pldata;
- struct resource *res;
struct mii_bus *mii_bus;
@@ -643,31 +642,14 @@ ltq_etop_probe(struct platform_device *pdev)
{
struct net_device *dev;
struct ltq_etop_priv *priv;
- struct resource *res;
int err;
int i;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "failed to get etop resource\n");
- err = -ENOENT;
- goto err_out;
- }
-
- res = devm_request_mem_region(&pdev->dev, res->start,
- resource_size(res), dev_name(&pdev->dev));
- if (!res) {
- dev_err(&pdev->dev, "failed to request etop resource\n");
- err = -EBUSY;
- goto err_out;
- }
-
- ltq_etop_membase = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
- if (!ltq_etop_membase) {
+ ltq_etop_membase = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ltq_etop_membase)) {
dev_err(&pdev->dev, "failed to remap etop engine %d\n",
pdev->id);
- err = -ENOMEM;
+ err = PTR_ERR(ltq_etop_membase);
goto err_out;
}
@@ -679,7 +661,6 @@ ltq_etop_probe(struct platform_device *pdev)
dev->netdev_ops = &ltq_eth_netdev_ops;
dev->ethtool_ops = &ltq_etop_ethtool_ops;
priv = netdev_priv(dev);
- priv->res = res;
priv->pdev = pdev;
priv->pldata = dev_get_platdata(&pdev->dev);
priv->netdev = dev;
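The lantiq_etop probe cleanup collapses the usual platform_get_resource() + devm_request_mem_region() + devm_ioremap() triplet into devm_platform_ioremap_resource(), which also changes the failure convention from NULL to an ERR_PTR. A minimal sketch of a probe using it; my_probe and what the mapping is used for are hypothetical:

static int my_probe(struct platform_device *pdev)
{
	void __iomem *base;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);	/* the helper already logs the failure */

	/* ... use base ... */
	return 0;
}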
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index 837295fecd17..50f7c59e8a04 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -34,7 +34,6 @@ config MV643XX_ETH
config MVMDIO
tristate "Marvell MDIO interface support"
depends on HAS_IOMEM
- select MDIO_DEVRES
select PHYLIB
help
This driver supports the MDIO interface found in the network
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index a06048719e84..0ab52c57c648 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1333,7 +1333,8 @@ static void mib_counters_update(struct mv643xx_eth_private *mp)
static void mib_counters_timer_wrapper(struct timer_list *t)
{
- struct mv643xx_eth_private *mp = from_timer(mp, t, mib_counters_timer);
+ struct mv643xx_eth_private *mp = timer_container_of(mp, t,
+ mib_counters_timer);
mib_counters_update(mp);
mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
}
@@ -2247,7 +2248,7 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
if (unlikely(mp->oom)) {
mp->oom = 0;
- del_timer(&mp->rx_oom);
+ timer_delete(&mp->rx_oom);
}
work_done = 0;
@@ -2306,7 +2307,7 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
static inline void oom_timer_wrapper(struct timer_list *t)
{
- struct mv643xx_eth_private *mp = from_timer(mp, t, rx_oom);
+ struct mv643xx_eth_private *mp = timer_container_of(mp, t, rx_oom);
napi_schedule(&mp->napi);
}
@@ -2521,7 +2522,7 @@ static int mv643xx_eth_stop(struct net_device *dev)
napi_disable(&mp->napi);
- del_timer_sync(&mp->rx_oom);
+ timer_delete_sync(&mp->rx_oom);
netif_carrier_off(dev);
if (dev->phydev)
@@ -2531,7 +2532,7 @@ static int mv643xx_eth_stop(struct net_device *dev)
port_reset(mp);
mv643xx_eth_get_stats(dev);
mib_counters_update(mp);
- del_timer_sync(&mp->mib_counters_timer);
+ timer_delete_sync(&mp->mib_counters_timer);
for (i = 0; i < mp->rxq_count; i++)
rxq_deinit(mp->rxq + i);
@@ -2704,9 +2705,15 @@ static struct platform_device *port_platdev[3];
static void mv643xx_eth_shared_of_remove(void)
{
+ struct mv643xx_eth_platform_data *pd;
int n;
for (n = 0; n < 3; n++) {
+ if (!port_platdev[n])
+ continue;
+ pd = dev_get_platdata(&port_platdev[n]->dev);
+ if (pd)
+ of_node_put(pd->phy_node);
platform_device_del(port_platdev[n]);
port_platdev[n] = NULL;
}
@@ -2769,8 +2776,10 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
}
ppdev = platform_device_alloc(MV643XX_ETH_NAME, dev_num);
- if (!ppdev)
- return -ENOMEM;
+ if (!ppdev) {
+ ret = -ENOMEM;
+ goto put_err;
+ }
ppdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
ppdev->dev.of_node = pnp;
@@ -2792,6 +2801,8 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
port_err:
platform_device_put(ppdev);
+put_err:
+ of_node_put(ppd.phy_node);
return ret;
}
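The mv643xx_eth hunks fix an OF node reference leak: the phy_node stashed in each port's platform data (typically obtained via of_parse_phandle(), which returns a node with an elevated refcount) is now released with of_node_put() both on the add-port error paths and when the ports are removed. A tiny sketch of the underlying rule, with hypothetical helpers:

static int my_add_port(struct device_node *np)
{
	struct device_node *phy;
	int ret;

	phy = of_parse_phandle(np, "phy-handle", 0);	/* takes a reference */
	if (!phy)
		return -ENODEV;

	ret = my_register_port(phy);	/* hypothetical; keeps phy on success */
	if (ret)
		of_node_put(phy);	/* balance the reference on failure */

	return ret;
}

The matching of_node_put() for the success case then belongs in the remove/teardown path, which is exactly what mv643xx_eth_shared_of_remove() gains above.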
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 1fb285fa0bdb..147571fdada3 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -284,8 +284,12 @@
MVNETA_TXQ_BUCKET_REFILL_PERIOD))
#define MVNETA_LPI_CTRL_0 0x2cc0
+#define MVNETA_LPI_CTRL_0_TS (0xff << 8)
#define MVNETA_LPI_CTRL_1 0x2cc4
-#define MVNETA_LPI_REQUEST_ENABLE BIT(0)
+#define MVNETA_LPI_CTRL_1_REQUEST_ENABLE BIT(0)
+#define MVNETA_LPI_CTRL_1_REQUEST_FORCE BIT(1)
+#define MVNETA_LPI_CTRL_1_MANUAL_MODE BIT(2)
+#define MVNETA_LPI_CTRL_1_TW (0xfff << 4)
#define MVNETA_LPI_CTRL_2 0x2cc8
#define MVNETA_LPI_STATUS 0x2ccc
@@ -541,10 +545,6 @@ struct mvneta_port {
struct mvneta_bm_pool *pool_short;
int bm_win_id;
- bool eee_enabled;
- bool eee_active;
- bool tx_lpi_enabled;
-
u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)];
u32 indir[MVNETA_RSS_LU_TABLE_SIZE];
@@ -2342,7 +2342,7 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
prefetch(data);
xdp_buff_clear_frags_flag(xdp);
xdp_prepare_buff(xdp, data, pp->rx_offset_correction + MVNETA_MH_SIZE,
- data_len, false);
+ data_len, true);
}
static void
@@ -2396,6 +2396,7 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool,
struct xdp_buff *xdp, u32 desc_status)
{
struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
+ u32 metasize = xdp->data - xdp->data_meta;
struct sk_buff *skb;
u8 num_frags;
@@ -2410,6 +2411,8 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool,
skb_reserve(skb, xdp->data - xdp->data_hard_start);
skb_put(skb, xdp->data_end - xdp->data);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
skb->ip_summed = mvneta_rx_csum(pp, desc_status);
if (unlikely(xdp_buff_has_frags(xdp)))
@@ -3960,23 +3963,30 @@ static struct mvneta_port *mvneta_pcs_to_port(struct phylink_pcs *pcs)
return container_of(pcs, struct mvneta_port, phylink_pcs);
}
-static int mvneta_pcs_validate(struct phylink_pcs *pcs,
- unsigned long *supported,
- const struct phylink_link_state *state)
+static unsigned int mvneta_pcs_inband_caps(struct phylink_pcs *pcs,
+ phy_interface_t interface)
{
- /* We only support QSGMII, SGMII, 802.3z and RGMII modes.
- * When in 802.3z mode, we must have AN enabled:
+ /* When operating in an 802.3z mode, we must have AN enabled:
* "Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ...
* When <PortType> = 1 (1000BASE-X) this field must be set to 1."
+ * Therefore, inband is "required".
*/
- if (phy_interface_mode_is_8023z(state->interface) &&
- !phylink_test(state->advertising, Autoneg))
- return -EINVAL;
+ if (phy_interface_mode_is_8023z(interface))
+ return LINK_INBAND_ENABLE;
- return 0;
+ /* QSGMII, SGMII and RGMII can be configured to use inband
+ * signalling of the AN result. Indicate these as "possible".
+ */
+ if (interface == PHY_INTERFACE_MODE_SGMII ||
+ interface == PHY_INTERFACE_MODE_QSGMII ||
+ phy_interface_mode_is_rgmii(interface))
+ return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE;
+
+ /* For any other modes, indicate that inband is not supported. */
+ return LINK_INBAND_DISABLE;
}
-static void mvneta_pcs_get_state(struct phylink_pcs *pcs,
+static void mvneta_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
struct phylink_link_state *state)
{
struct mvneta_port *pp = mvneta_pcs_to_port(pcs);
@@ -4071,7 +4081,7 @@ static void mvneta_pcs_an_restart(struct phylink_pcs *pcs)
}
static const struct phylink_pcs_ops mvneta_phylink_pcs_ops = {
- .pcs_validate = mvneta_pcs_validate,
+ .pcs_inband_caps = mvneta_pcs_inband_caps,
.pcs_get_state = mvneta_pcs_get_state,
.pcs_config = mvneta_pcs_config,
.pcs_an_restart = mvneta_pcs_an_restart,
@@ -4206,18 +4216,6 @@ static int mvneta_mac_finish(struct phylink_config *config, unsigned int mode,
return 0;
}
-static void mvneta_set_eee(struct mvneta_port *pp, bool enable)
-{
- u32 lpi_ctl1;
-
- lpi_ctl1 = mvreg_read(pp, MVNETA_LPI_CTRL_1);
- if (enable)
- lpi_ctl1 |= MVNETA_LPI_REQUEST_ENABLE;
- else
- lpi_ctl1 &= ~MVNETA_LPI_REQUEST_ENABLE;
- mvreg_write(pp, MVNETA_LPI_CTRL_1, lpi_ctl1);
-}
-
static void mvneta_mac_link_down(struct phylink_config *config,
unsigned int mode, phy_interface_t interface)
{
@@ -4233,9 +4231,6 @@ static void mvneta_mac_link_down(struct phylink_config *config,
val |= MVNETA_GMAC_FORCE_LINK_DOWN;
mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
}
-
- pp->eee_active = false;
- mvneta_set_eee(pp, false);
}
static void mvneta_mac_link_up(struct phylink_config *config,
@@ -4284,11 +4279,56 @@ static void mvneta_mac_link_up(struct phylink_config *config,
}
mvneta_port_up(pp);
+}
+
+static void mvneta_mac_disable_tx_lpi(struct phylink_config *config)
+{
+ struct mvneta_port *pp = netdev_priv(to_net_dev(config->dev));
+ u32 lpi1;
+
+ lpi1 = mvreg_read(pp, MVNETA_LPI_CTRL_1);
+ lpi1 &= ~(MVNETA_LPI_CTRL_1_REQUEST_ENABLE |
+ MVNETA_LPI_CTRL_1_REQUEST_FORCE |
+ MVNETA_LPI_CTRL_1_MANUAL_MODE);
+ mvreg_write(pp, MVNETA_LPI_CTRL_1, lpi1);
+}
+
+static int mvneta_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
+ bool tx_clk_stop)
+{
+ struct mvneta_port *pp = netdev_priv(to_net_dev(config->dev));
+ u32 ts, tw, lpi0, lpi1, status;
- if (phy && pp->eee_enabled) {
- pp->eee_active = phy_init_eee(phy, false) >= 0;
- mvneta_set_eee(pp, pp->eee_active && pp->tx_lpi_enabled);
+ status = mvreg_read(pp, MVNETA_GMAC_STATUS);
+ if (status & MVNETA_GMAC_SPEED_1000) {
+ /* At 1G speeds, the timer resolution is 1us, and
+ * 802.3 says tw is 16.5us. Round up to 17us.
+ */
+ tw = 17;
+ ts = timer;
+ } else {
+ /* At 100M speeds, the timer resolution is 10us, and
+ * 802.3 says tw is 30us.
+ */
+ tw = 3;
+ ts = DIV_ROUND_UP(timer, 10);
}
+
+ if (ts > 255)
+ ts = 255;
+
+ /* Configure ts */
+ lpi0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);
+ lpi0 = u32_replace_bits(lpi0, ts, MVNETA_LPI_CTRL_0_TS);
+ mvreg_write(pp, MVNETA_LPI_CTRL_0, lpi0);
+
+ /* Configure tw and enable LPI generation */
+ lpi1 = mvreg_read(pp, MVNETA_LPI_CTRL_1);
+ lpi1 = u32_replace_bits(lpi1, tw, MVNETA_LPI_CTRL_1_TW);
+ lpi1 |= MVNETA_LPI_CTRL_1_REQUEST_ENABLE;
+ mvreg_write(pp, MVNETA_LPI_CTRL_1, lpi1);
+
+ return 0;
}
static const struct phylink_mac_ops mvneta_phylink_ops = {
@@ -4298,6 +4338,8 @@ static const struct phylink_mac_ops mvneta_phylink_ops = {
.mac_finish = mvneta_mac_finish,
.mac_link_down = mvneta_mac_link_down,
.mac_link_up = mvneta_mac_link_up,
+ .mac_disable_tx_lpi = mvneta_mac_disable_tx_lpi,
+ .mac_enable_tx_lpi = mvneta_mac_enable_tx_lpi,
};
static int mvneta_mdio_probe(struct mvneta_port *pp)
@@ -4385,6 +4427,7 @@ static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node)
if (pp->neta_armada3700)
return 0;
+ netdev_lock(port->napi.dev);
spin_lock(&pp->lock);
/*
* Configuring the driver for a new CPU while the driver is
@@ -4392,6 +4435,7 @@ static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node)
*/
if (pp->is_stopped) {
spin_unlock(&pp->lock);
+ netdev_unlock(port->napi.dev);
return 0;
}
netif_tx_stop_all_queues(pp->dev);
@@ -4411,7 +4455,7 @@ static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node)
/* Mask all ethernet port interrupts */
on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
- napi_enable(&port->napi);
+ napi_enable_locked(&port->napi);
/*
* Enable per-CPU interrupts on the CPU that is
@@ -4432,6 +4476,8 @@ static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node)
MVNETA_CAUSE_LINK_CHANGE);
netif_tx_start_all_queues(pp->dev);
spin_unlock(&pp->lock);
+ netdev_unlock(port->napi.dev);
+
return 0;
}
@@ -5099,14 +5145,6 @@ static int mvneta_ethtool_get_eee(struct net_device *dev,
struct ethtool_keee *eee)
{
struct mvneta_port *pp = netdev_priv(dev);
- u32 lpi_ctl0;
-
- lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);
-
- eee->eee_enabled = pp->eee_enabled;
- eee->eee_active = pp->eee_active;
- eee->tx_lpi_enabled = pp->tx_lpi_enabled;
- eee->tx_lpi_timer = (lpi_ctl0) >> 8; // * scale;
return phylink_ethtool_get_eee(pp->phylink, eee);
}
@@ -5115,7 +5153,6 @@ static int mvneta_ethtool_set_eee(struct net_device *dev,
struct ethtool_keee *eee)
{
struct mvneta_port *pp = netdev_priv(dev);
- u32 lpi_ctl0;
/* The Armada 37x documents do not give limits for this other than
* it being an 8-bit register.
@@ -5123,16 +5160,6 @@ static int mvneta_ethtool_set_eee(struct net_device *dev,
if (eee->tx_lpi_enabled && eee->tx_lpi_timer > 255)
return -EINVAL;
- lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);
- lpi_ctl0 &= ~(0xff << 8);
- lpi_ctl0 |= eee->tx_lpi_timer << 8;
- mvreg_write(pp, MVNETA_LPI_CTRL_0, lpi_ctl0);
-
- pp->eee_enabled = eee->eee_enabled;
- pp->tx_lpi_enabled = eee->tx_lpi_enabled;
-
- mvneta_set_eee(pp, eee->tx_lpi_enabled && eee->eee_enabled);
-
return phylink_ethtool_set_eee(pp->phylink, eee);
}
@@ -5446,6 +5473,9 @@ static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
!phy_interface_mode_is_rgmii(phy_mode))
return -EINVAL;
+ /* Ensure LPI is disabled */
+ mvneta_mac_disable_tx_lpi(&pp->phylink_config);
+
return 0;
}
@@ -5530,13 +5560,19 @@ static int mvneta_probe(struct platform_device *pdev)
clk_prepare_enable(pp->clk_bus);
pp->phylink_pcs.ops = &mvneta_phylink_pcs_ops;
- pp->phylink_pcs.neg_mode = true;
pp->phylink_config.dev = &dev->dev;
pp->phylink_config.type = PHYLINK_NETDEV;
pp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_10 |
MAC_100 | MAC_1000FD | MAC_2500FD;
+ /* Setup EEE. Choose 250us idle. Only supported in SGMII modes. */
+ __set_bit(PHY_INTERFACE_MODE_QSGMII, pp->phylink_config.lpi_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII, pp->phylink_config.lpi_interfaces);
+ pp->phylink_config.lpi_capabilities = MAC_100FD | MAC_1000FD;
+ pp->phylink_config.lpi_timer_default = 250;
+ pp->phylink_config.eee_enabled_default = true;
+
phy_interface_set_rgmii(pp->phylink_config.supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_QSGMII,
pp->phylink_config.supported_interfaces);
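A large part of the mvneta diff moves EEE handling into phylink: the driver drops its private eee_enabled/eee_active bookkeeping, exposes LPI capabilities and a 250us default timer through phylink_config, and implements mac_enable_tx_lpi()/mac_disable_tx_lpi(). The enable path scales the requested timer into the hardware "ts" field in 1us units at 1G and 10us units at 100M, clamps it to the 8-bit register, and programs the 802.3 wake time tw (17us at 1G; 30us, i.e. 3 x 10us, at 100M). With the 250us default this gives ts = 250 at 1G and ts = 25 at 100M, both within the 255 limit. A standalone sketch of just that arithmetic (lpi_times() is a made-up helper, not driver code):

static void lpi_times(unsigned int timer_us, bool gigabit,
		      unsigned int *ts, unsigned int *tw)
{
	if (gigabit) {
		*tw = 17;			/* 802.3 tw = 16.5us, rounded up */
		*ts = timer_us;			/* 1us resolution */
	} else {
		*tw = 3;			/* 30us expressed in 10us steps */
		*ts = DIV_ROUND_UP(timer_us, 10);
	}

	if (*ts > 255)				/* ts is an 8-bit field */
		*ts = 255;
}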
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
index 9e02e4367bec..061fcd444d50 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
@@ -481,6 +481,11 @@
#define MVPP22_GMAC_INT_SUM_MASK 0xa4
#define MVPP22_GMAC_INT_SUM_MASK_LINK_STAT BIT(1)
#define MVPP22_GMAC_INT_SUM_MASK_PTP BIT(2)
+#define MVPP2_GMAC_LPI_CTRL0 0xc0
+#define MVPP2_GMAC_LPI_CTRL0_TS_MASK GENMASK(15, 8)
+#define MVPP2_GMAC_LPI_CTRL1 0xc4
+#define MVPP2_GMAC_LPI_CTRL1_REQ_EN BIT(0)
+#define MVPP2_GMAC_LPI_CTRL1_TW_MASK GENMASK(15, 4)
/* Per-port XGMAC registers. PPv2.2 and PPv2.3, only for GOP port 0,
* relative to port->base.
@@ -1108,6 +1113,9 @@ struct mvpp2 {
/* Spinlocks for CM3 shared memory configuration */
spinlock_t mss_spinlock;
+
+ /* Spinlock for shared PRS parser memory and shadow table */
+ spinlock_t prs_spinlock;
};
struct mvpp2_pcpu_stats {
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
index 1641791a2d5b..8ed83fb98862 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
@@ -324,7 +324,7 @@ static const struct mvpp2_cls_flow cls_flows[MVPP2_N_PRS_FLOWS] = {
MVPP2_PRS_RI_VLAN_MASK),
/* Non IP flow, with vlan tag */
MVPP2_DEF_FLOW(MVPP22_FLOW_ETHERNET, MVPP2_FL_NON_IP_TAG,
- MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP22_CLS_HEK_TAGGED,
0, 0),
};
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 571631a30320..a7872d14a49d 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -3915,13 +3915,13 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
while (rx_done < rx_todo) {
struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
+ u32 rx_status, timestamp, metasize = 0;
struct mvpp2_bm_pool *bm_pool;
struct page_pool *pp = NULL;
struct sk_buff *skb;
unsigned int frag_size;
dma_addr_t dma_addr;
phys_addr_t phys_addr;
- u32 rx_status, timestamp;
int pool, rx_bytes, err, ret;
struct page *page;
void *data;
@@ -3983,7 +3983,7 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
xdp_init_buff(&xdp, PAGE_SIZE, xdp_rxq);
xdp_prepare_buff(&xdp, data,
MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM,
- rx_bytes, false);
+ rx_bytes, true);
ret = mvpp2_run_xdp(port, xdp_prog, &xdp, pp, &ps);
@@ -3999,6 +3999,8 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
ps.rx_bytes += rx_bytes;
continue;
}
+
+ metasize = xdp.data - xdp.data_meta;
}
if (frag_size)
@@ -4038,6 +4040,8 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
skb_reserve(skb, MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM);
skb_put(skb, rx_bytes);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
skb->ip_summed = mvpp2_rx_csum(port, rx_status);
skb->protocol = eth_type_trans(skb, dev);
@@ -5169,38 +5173,40 @@ mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
stats->tx_dropped = dev->stats.tx_dropped;
}
-static int mvpp2_set_ts_config(struct mvpp2_port *port, struct ifreq *ifr)
+static int mvpp2_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
- struct hwtstamp_config config;
+ struct mvpp2_port *port = netdev_priv(dev);
void __iomem *ptp;
u32 gcr, int_mask;
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
+ if (!port->hwtstamp)
+ return -EOPNOTSUPP;
- if (config.tx_type != HWTSTAMP_TX_OFF &&
- config.tx_type != HWTSTAMP_TX_ON)
+ if (config->tx_type != HWTSTAMP_TX_OFF &&
+ config->tx_type != HWTSTAMP_TX_ON)
return -ERANGE;
ptp = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id);
int_mask = gcr = 0;
- if (config.tx_type != HWTSTAMP_TX_OFF) {
+ if (config->tx_type != HWTSTAMP_TX_OFF) {
gcr |= MVPP22_PTP_GCR_TSU_ENABLE | MVPP22_PTP_GCR_TX_RESET;
int_mask |= MVPP22_PTP_INT_MASK_QUEUE1 |
MVPP22_PTP_INT_MASK_QUEUE0;
}
/* It seems we must also release the TX reset when enabling the TSU */
- if (config.rx_filter != HWTSTAMP_FILTER_NONE)
+ if (config->rx_filter != HWTSTAMP_FILTER_NONE)
gcr |= MVPP22_PTP_GCR_TSU_ENABLE | MVPP22_PTP_GCR_RX_RESET |
MVPP22_PTP_GCR_TX_RESET;
if (gcr & MVPP22_PTP_GCR_TSU_ENABLE)
mvpp22_tai_start(port->priv->tai);
- if (config.rx_filter != HWTSTAMP_FILTER_NONE) {
- config.rx_filter = HWTSTAMP_FILTER_ALL;
+ if (config->rx_filter != HWTSTAMP_FILTER_NONE) {
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
mvpp2_modify(ptp + MVPP22_PTP_GCR,
MVPP22_PTP_GCR_RX_RESET |
MVPP22_PTP_GCR_TX_RESET |
@@ -5221,26 +5227,22 @@ static int mvpp2_set_ts_config(struct mvpp2_port *port, struct ifreq *ifr)
if (!(gcr & MVPP22_PTP_GCR_TSU_ENABLE))
mvpp22_tai_stop(port->priv->tai);
- port->tx_hwtstamp_type = config.tx_type;
-
- if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
- return -EFAULT;
+ port->tx_hwtstamp_type = config->tx_type;
return 0;
}
-static int mvpp2_get_ts_config(struct mvpp2_port *port, struct ifreq *ifr)
+static int mvpp2_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *config)
{
- struct hwtstamp_config config;
-
- memset(&config, 0, sizeof(config));
+ struct mvpp2_port *port = netdev_priv(dev);
- config.tx_type = port->tx_hwtstamp_type;
- config.rx_filter = port->rx_hwtstamp ?
- HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
+ if (!port->hwtstamp)
+ return -EOPNOTSUPP;
- if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
- return -EFAULT;
+ config->tx_type = port->tx_hwtstamp_type;
+ config->rx_filter = port->rx_hwtstamp ? HWTSTAMP_FILTER_ALL :
+ HWTSTAMP_FILTER_NONE;
return 0;
}
@@ -5270,18 +5272,6 @@ static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct mvpp2_port *port = netdev_priv(dev);
- switch (cmd) {
- case SIOCSHWTSTAMP:
- if (port->hwtstamp)
- return mvpp2_set_ts_config(port, ifr);
- break;
-
- case SIOCGHWTSTAMP:
- if (port->hwtstamp)
- return mvpp2_get_ts_config(port, ifr);
- break;
- }
-
if (!port->phylink)
return -ENOTSUPP;
@@ -5757,6 +5747,28 @@ static int mvpp2_ethtool_set_rxfh(struct net_device *dev,
return mvpp2_modify_rxfh_context(dev, NULL, rxfh, extack);
}
+static int mvpp2_ethtool_get_eee(struct net_device *dev,
+ struct ethtool_keee *eee)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ if (!port->phylink)
+ return -EOPNOTSUPP;
+
+ return phylink_ethtool_get_eee(port->phylink, eee);
+}
+
+static int mvpp2_ethtool_set_eee(struct net_device *dev,
+ struct ethtool_keee *eee)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ if (!port->phylink)
+ return -EOPNOTSUPP;
+
+ return phylink_ethtool_set_eee(port->phylink, eee);
+}
+
/* Device ops */
static const struct net_device_ops mvpp2_netdev_ops = {
@@ -5773,6 +5785,8 @@ static const struct net_device_ops mvpp2_netdev_ops = {
.ndo_set_features = mvpp2_set_features,
.ndo_bpf = mvpp2_xdp,
.ndo_xdp_xmit = mvpp2_xdp_xmit,
+ .ndo_hwtstamp_get = mvpp2_hwtstamp_get,
+ .ndo_hwtstamp_set = mvpp2_hwtstamp_set,
};
static const struct ethtool_ops mvpp2_eth_tool_ops = {
@@ -5802,6 +5816,8 @@ static const struct ethtool_ops mvpp2_eth_tool_ops = {
.create_rxfh_context = mvpp2_create_rxfh_context,
.modify_rxfh_context = mvpp2_modify_rxfh_context,
.remove_rxfh_context = mvpp2_remove_rxfh_context,
+ .get_eee = mvpp2_ethtool_get_eee,
+ .set_eee = mvpp2_ethtool_set_eee,
};
/* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that
@@ -6188,6 +6204,7 @@ static struct mvpp2_port *mvpp2_pcs_gmac_to_port(struct phylink_pcs *pcs)
}
static void mvpp2_xlg_pcs_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
struct mvpp2_port *port = mvpp2_pcs_xlg_to_port(pcs);
@@ -6224,22 +6241,30 @@ static const struct phylink_pcs_ops mvpp2_phylink_xlg_pcs_ops = {
.pcs_config = mvpp2_xlg_pcs_config,
};
-static int mvpp2_gmac_pcs_validate(struct phylink_pcs *pcs,
- unsigned long *supported,
- const struct phylink_link_state *state)
+static unsigned int mvpp2_gmac_pcs_inband_caps(struct phylink_pcs *pcs,
+ phy_interface_t interface)
{
- /* When in 802.3z mode, we must have AN enabled:
+ /* When operating in an 802.3z mode, we must have AN enabled:
* Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ...
* When <PortType> = 1 (1000BASE-X) this field must be set to 1.
+ * Therefore, inband is "required".
*/
- if (phy_interface_mode_is_8023z(state->interface) &&
- !phylink_test(state->advertising, Autoneg))
- return -EINVAL;
+ if (phy_interface_mode_is_8023z(interface))
+ return LINK_INBAND_ENABLE;
- return 0;
+ /* SGMII and RGMII can be configured to use inband signalling of the
+ * AN result. Indicate these as "possible".
+ */
+ if (interface == PHY_INTERFACE_MODE_SGMII ||
+ phy_interface_mode_is_rgmii(interface))
+ return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE;
+
+ /* For any other modes, indicate that inband is not supported. */
+ return LINK_INBAND_DISABLE;
}
static void mvpp2_gmac_pcs_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
struct mvpp2_port *port = mvpp2_pcs_gmac_to_port(pcs);
@@ -6343,7 +6368,7 @@ static void mvpp2_gmac_pcs_an_restart(struct phylink_pcs *pcs)
}
static const struct phylink_pcs_ops mvpp2_phylink_gmac_pcs_ops = {
- .pcs_validate = mvpp2_gmac_pcs_validate,
+ .pcs_inband_caps = mvpp2_gmac_pcs_inband_caps,
.pcs_get_state = mvpp2_gmac_pcs_get_state,
.pcs_config = mvpp2_gmac_pcs_config,
.pcs_an_restart = mvpp2_gmac_pcs_an_restart,
@@ -6665,6 +6690,55 @@ static void mvpp2_mac_link_down(struct phylink_config *config,
mvpp2_port_disable(port);
}
+static void mvpp2_mac_disable_tx_lpi(struct phylink_config *config)
+{
+ struct mvpp2_port *port = mvpp2_phylink_to_port(config);
+
+ mvpp2_modify(port->base + MVPP2_GMAC_LPI_CTRL1,
+ MVPP2_GMAC_LPI_CTRL1_REQ_EN, 0);
+}
+
+static int mvpp2_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
+ bool tx_clk_stop)
+{
+ struct mvpp2_port *port = mvpp2_phylink_to_port(config);
+ u32 ts, tw, lpi1, status;
+
+ status = readl(port->base + MVPP2_GMAC_STATUS0);
+ if (status & MVPP2_GMAC_STATUS0_GMII_SPEED) {
+ /* At 1G speeds, the timer resolution is 1us, and
+ * 802.3 says tw is 16.5us. Round up to 17us.
+ */
+ tw = 17;
+ ts = timer;
+ } else {
+ /* At 100M speeds, the timer resolution is 10us, and
+ * 802.3 says tw is 30us.
+ */
+ tw = 3;
+ ts = DIV_ROUND_UP(timer, 10);
+ }
+
+ if (ts > 255)
+ ts = 255;
+
+ /* Configure ts */
+ mvpp2_modify(port->base + MVPP2_GMAC_LPI_CTRL0,
+ MVPP2_GMAC_LPI_CTRL0_TS_MASK,
+ FIELD_PREP(MVPP2_GMAC_LPI_CTRL0_TS_MASK, ts));
+
+ lpi1 = readl(port->base + MVPP2_GMAC_LPI_CTRL1);
+
+ /* Configure tw */
+ lpi1 = u32_replace_bits(lpi1, tw, MVPP2_GMAC_LPI_CTRL1_TW_MASK);
+
+ /* Enable LPI generation */
+ writel(lpi1 | MVPP2_GMAC_LPI_CTRL1_REQ_EN,
+ port->base + MVPP2_GMAC_LPI_CTRL1);
+
+ return 0;
+}
+
static const struct phylink_mac_ops mvpp2_phylink_ops = {
.mac_select_pcs = mvpp2_select_pcs,
.mac_prepare = mvpp2_mac_prepare,
@@ -6672,6 +6746,8 @@ static const struct phylink_mac_ops mvpp2_phylink_ops = {
.mac_finish = mvpp2_mac_finish,
.mac_link_up = mvpp2_mac_link_up,
.mac_link_down = mvpp2_mac_link_down,
+ .mac_enable_tx_lpi = mvpp2_mac_enable_tx_lpi,
+ .mac_disable_tx_lpi = mvpp2_mac_disable_tx_lpi,
};
/* Work-around for ACPI */
@@ -6901,9 +6977,8 @@ static int mvpp2_port_probe(struct platform_device *pdev,
for (thread = 0; thread < priv->nthreads; thread++) {
port_pcpu = per_cpu_ptr(port->pcpu, thread);
- hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL_PINNED_SOFT);
- port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
+ hrtimer_setup(&port_pcpu->tx_done_timer, mvpp2_hr_timer_cb, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED_SOFT);
port_pcpu->timer_scheduled = false;
port_pcpu->dev = dev;
}
@@ -6940,9 +7015,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
dev->dev_port = port->id;
port->pcs_gmac.ops = &mvpp2_phylink_gmac_pcs_ops;
- port->pcs_gmac.neg_mode = true;
port->pcs_xlg.ops = &mvpp2_phylink_xlg_pcs_ops;
- port->pcs_xlg.neg_mode = true;
if (!mvpp2_use_acpi_compat_mode(port_fwnode)) {
port->phylink_config.dev = &dev->dev;
@@ -6950,6 +7023,15 @@ static int mvpp2_port_probe(struct platform_device *pdev,
port->phylink_config.mac_capabilities =
MAC_2500FD | MAC_1000FD | MAC_100 | MAC_10;
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ port->phylink_config.lpi_interfaces);
+
+ port->phylink_config.lpi_capabilities = MAC_1000FD | MAC_100FD;
+
+ /* Setup EEE. Choose 250us idle. */
+ port->phylink_config.lpi_timer_default = 250;
+ port->phylink_config.eee_enabled_default = true;
+
if (port->priv->global_tx_fc)
port->phylink_config.mac_capabilities |=
MAC_SYM_PAUSE | MAC_ASYM_PAUSE;
@@ -7024,6 +7106,8 @@ static int mvpp2_port_probe(struct platform_device *pdev,
goto err_free_port_pcpu;
}
port->phylink = phylink;
+
+ mvpp2_mac_disable_tx_lpi(&port->phylink_config);
} else {
dev_warn(&pdev->dev, "Use link irqs for port#%d. FW update required\n", port->id);
port->phylink = NULL;
@@ -7627,8 +7711,9 @@ static int mvpp2_probe(struct platform_device *pdev)
if (mvpp2_read(priv, MVPP2_VER_ID_REG) == MVPP2_VER_PP23)
priv->hw_version = MVPP23;
- /* Init mss lock */
+ /* Init locks for shared packet processor resources */
spin_lock_init(&priv->mss_spinlock);
+ spin_lock_init(&priv->prs_spinlock);
/* Initialize network controller */
err = mvpp2_init(pdev, priv);
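Among the mvpp2_main.c changes, the SIOC[GS]HWTSTAMP handling moves out of ndo_eth_ioctl into the dedicated ndo_hwtstamp_get/ndo_hwtstamp_set callbacks, so the driver operates on a kernel_hwtstamp_config that the core has already copied from (and will copy back to) user space, and the driver-side copy_from_user()/copy_to_user() calls disappear. A minimal sketch of the set side with a hypothetical my_priv:

static int my_hwtstamp_set(struct net_device *dev,
			   struct kernel_hwtstamp_config *cfg,
			   struct netlink_ext_ack *extack)
{
	struct my_priv *priv = netdev_priv(dev);	/* hypothetical */

	if (cfg->tx_type != HWTSTAMP_TX_OFF &&
	    cfg->tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	priv->tx_hwtstamp_type = cfg->tx_type;
	return 0;	/* the core copies cfg back to user space */
}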
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
index 9af22f497a40..93e978bdf303 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
@@ -23,6 +23,8 @@ static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
int i;
+ lockdep_assert_held(&priv->prs_spinlock);
+
if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
return -EINVAL;
@@ -43,11 +45,13 @@ static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
}
/* Initialize tcam entry from hw */
-int mvpp2_prs_init_from_hw(struct mvpp2 *priv, struct mvpp2_prs_entry *pe,
- int tid)
+static int __mvpp2_prs_init_from_hw(struct mvpp2 *priv,
+ struct mvpp2_prs_entry *pe, int tid)
{
int i;
+ lockdep_assert_held(&priv->prs_spinlock);
+
if (tid > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
return -EINVAL;
@@ -73,6 +77,18 @@ int mvpp2_prs_init_from_hw(struct mvpp2 *priv, struct mvpp2_prs_entry *pe,
return 0;
}
+int mvpp2_prs_init_from_hw(struct mvpp2 *priv, struct mvpp2_prs_entry *pe,
+ int tid)
+{
+ int err;
+
+ spin_lock_bh(&priv->prs_spinlock);
+ err = __mvpp2_prs_init_from_hw(priv, pe, tid);
+ spin_unlock_bh(&priv->prs_spinlock);
+
+ return err;
+}
+
/* Invalidate tcam hw entry */
static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
{
@@ -374,7 +390,7 @@ static int mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
continue;
- mvpp2_prs_init_from_hw(priv, &pe, tid);
+ __mvpp2_prs_init_from_hw(priv, &pe, tid);
bits = mvpp2_prs_sram_ai_get(&pe);
/* Sram store classification lookup ID in AI bits [5:0] */
@@ -441,7 +457,7 @@ static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
/* Entry exist - update port only */
- mvpp2_prs_init_from_hw(priv, &pe, MVPP2_PE_DROP_ALL);
+ __mvpp2_prs_init_from_hw(priv, &pe, MVPP2_PE_DROP_ALL);
} else {
/* Entry doesn't exist - create new */
memset(&pe, 0, sizeof(pe));
@@ -469,14 +485,17 @@ static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
}
/* Set port to unicast or multicast promiscuous mode */
-void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port,
- enum mvpp2_prs_l2_cast l2_cast, bool add)
+static void __mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port,
+ enum mvpp2_prs_l2_cast l2_cast,
+ bool add)
{
struct mvpp2_prs_entry pe;
unsigned char cast_match;
unsigned int ri;
int tid;
+ lockdep_assert_held(&priv->prs_spinlock);
+
if (l2_cast == MVPP2_PRS_L2_UNI_CAST) {
cast_match = MVPP2_PRS_UCAST_VAL;
tid = MVPP2_PE_MAC_UC_PROMISCUOUS;
@@ -489,7 +508,7 @@ void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port,
/* promiscuous mode - Accept unknown unicast or multicast packets */
if (priv->prs_shadow[tid].valid) {
- mvpp2_prs_init_from_hw(priv, &pe, tid);
+ __mvpp2_prs_init_from_hw(priv, &pe, tid);
} else {
memset(&pe, 0, sizeof(pe));
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
@@ -522,6 +541,14 @@ void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port,
mvpp2_prs_hw_write(priv, &pe);
}
+void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port,
+ enum mvpp2_prs_l2_cast l2_cast, bool add)
+{
+ spin_lock_bh(&priv->prs_spinlock);
+ __mvpp2_prs_mac_promisc_set(priv, port, l2_cast, add);
+ spin_unlock_bh(&priv->prs_spinlock);
+}
+
/* Set entry for dsa packets */
static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add,
bool tagged, bool extend)
@@ -539,7 +566,7 @@ static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add,
if (priv->prs_shadow[tid].valid) {
/* Entry exist - update port only */
- mvpp2_prs_init_from_hw(priv, &pe, tid);
+ __mvpp2_prs_init_from_hw(priv, &pe, tid);
} else {
/* Entry doesn't exist - create new */
memset(&pe, 0, sizeof(pe));
@@ -610,7 +637,7 @@ static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port,
if (priv->prs_shadow[tid].valid) {
/* Entry exist - update port only */
- mvpp2_prs_init_from_hw(priv, &pe, tid);
+ __mvpp2_prs_init_from_hw(priv, &pe, tid);
} else {
/* Entry doesn't exist - create new */
memset(&pe, 0, sizeof(pe));
@@ -673,7 +700,7 @@ static int mvpp2_prs_vlan_find(struct mvpp2 *priv, unsigned short tpid, int ai)
priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
continue;
- mvpp2_prs_init_from_hw(priv, &pe, tid);
+ __mvpp2_prs_init_from_hw(priv, &pe, tid);
match = mvpp2_prs_tcam_data_cmp(&pe, 0, tpid);
if (!match)
continue;
@@ -726,7 +753,7 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
continue;
- mvpp2_prs_init_from_hw(priv, &pe, tid_aux);
+ __mvpp2_prs_init_from_hw(priv, &pe, tid_aux);
ri_bits = mvpp2_prs_sram_ri_get(&pe);
if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) ==
MVPP2_PRS_RI_VLAN_DOUBLE)
@@ -760,7 +787,7 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
} else {
- mvpp2_prs_init_from_hw(priv, &pe, tid);
+ __mvpp2_prs_init_from_hw(priv, &pe, tid);
}
/* Update ports' mask */
mvpp2_prs_tcam_port_map_set(&pe, port_map);
@@ -800,7 +827,7 @@ static int mvpp2_prs_double_vlan_find(struct mvpp2 *priv, unsigned short tpid1,
priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
continue;
- mvpp2_prs_init_from_hw(priv, &pe, tid);
+ __mvpp2_prs_init_from_hw(priv, &pe, tid);
match = mvpp2_prs_tcam_data_cmp(&pe, 0, tpid1) &&
mvpp2_prs_tcam_data_cmp(&pe, 4, tpid2);
@@ -849,7 +876,7 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
continue;
- mvpp2_prs_init_from_hw(priv, &pe, tid_aux);
+ __mvpp2_prs_init_from_hw(priv, &pe, tid_aux);
ri_bits = mvpp2_prs_sram_ri_get(&pe);
ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
@@ -880,7 +907,7 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
} else {
- mvpp2_prs_init_from_hw(priv, &pe, tid);
+ __mvpp2_prs_init_from_hw(priv, &pe, tid);
}
/* Update ports' mask */
@@ -1213,8 +1240,8 @@ static void mvpp2_prs_mac_init(struct mvpp2 *priv)
/* Create dummy entries for drop all and promiscuous modes */
mvpp2_prs_drop_fc(priv);
mvpp2_prs_mac_drop_all_set(priv, 0, false);
- mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_UNI_CAST, false);
- mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_MULTI_CAST, false);
+ __mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_UNI_CAST, false);
+ __mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_MULTI_CAST, false);
}
/* Set default entries for various types of dsa packets */
@@ -1533,12 +1560,6 @@ static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
struct mvpp2_prs_entry pe;
int err;
- priv->prs_double_vlans = devm_kcalloc(&pdev->dev, sizeof(bool),
- MVPP2_PRS_DBL_VLANS_MAX,
- GFP_KERNEL);
- if (!priv->prs_double_vlans)
- return -ENOMEM;
-
/* Double VLAN: 0x88A8, 0x8100 */
err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021AD, ETH_P_8021Q,
MVPP2_PRS_PORT_MASK);
@@ -1941,7 +1962,7 @@ static int mvpp2_prs_vid_range_find(struct mvpp2_port *port, u16 vid, u16 mask)
port->priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VID)
continue;
- mvpp2_prs_init_from_hw(port->priv, &pe, tid);
+ __mvpp2_prs_init_from_hw(port->priv, &pe, tid);
mvpp2_prs_tcam_data_byte_get(&pe, 2, &byte[0], &enable[0]);
mvpp2_prs_tcam_data_byte_get(&pe, 3, &byte[1], &enable[1]);
@@ -1970,6 +1991,8 @@ int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid)
memset(&pe, 0, sizeof(pe));
+ spin_lock_bh(&priv->prs_spinlock);
+
/* Scan TCAM and see if entry with this <vid,port> already exist */
tid = mvpp2_prs_vid_range_find(port, vid, mask);
@@ -1988,8 +2011,10 @@ int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid)
MVPP2_PRS_VLAN_FILT_MAX_ENTRY);
/* There isn't room for a new VID filter */
- if (tid < 0)
+ if (tid < 0) {
+ spin_unlock_bh(&priv->prs_spinlock);
return tid;
+ }
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);
pe.index = tid;
@@ -1997,7 +2022,7 @@ int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid)
/* Mask all ports */
mvpp2_prs_tcam_port_map_set(&pe, 0);
} else {
- mvpp2_prs_init_from_hw(priv, &pe, tid);
+ __mvpp2_prs_init_from_hw(priv, &pe, tid);
}
/* Enable the current port */
@@ -2019,6 +2044,7 @@ int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid)
mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
mvpp2_prs_hw_write(priv, &pe);
+ spin_unlock_bh(&priv->prs_spinlock);
return 0;
}
@@ -2028,15 +2054,16 @@ void mvpp2_prs_vid_entry_remove(struct mvpp2_port *port, u16 vid)
struct mvpp2 *priv = port->priv;
int tid;
- /* Scan TCAM and see if entry with this <vid,port> already exist */
- tid = mvpp2_prs_vid_range_find(port, vid, 0xfff);
+ spin_lock_bh(&priv->prs_spinlock);
- /* No such entry */
- if (tid < 0)
- return;
+ /* Invalidate TCAM entry with this <vid,port>, if it exists */
+ tid = mvpp2_prs_vid_range_find(port, vid, 0xfff);
+ if (tid >= 0) {
+ mvpp2_prs_hw_inv(priv, tid);
+ priv->prs_shadow[tid].valid = false;
+ }
- mvpp2_prs_hw_inv(priv, tid);
- priv->prs_shadow[tid].valid = false;
+ spin_unlock_bh(&priv->prs_spinlock);
}
/* Remove all existing VID filters on this port */
@@ -2045,6 +2072,8 @@ void mvpp2_prs_vid_remove_all(struct mvpp2_port *port)
struct mvpp2 *priv = port->priv;
int tid;
+ spin_lock_bh(&priv->prs_spinlock);
+
for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id);
tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) {
if (priv->prs_shadow[tid].valid) {
@@ -2052,6 +2081,8 @@ void mvpp2_prs_vid_remove_all(struct mvpp2_port *port)
priv->prs_shadow[tid].valid = false;
}
}
+
+ spin_unlock_bh(&priv->prs_spinlock);
}
/* Remove VID filering entry for this port */
@@ -2060,10 +2091,14 @@ void mvpp2_prs_vid_disable_filtering(struct mvpp2_port *port)
unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id);
struct mvpp2 *priv = port->priv;
+ spin_lock_bh(&priv->prs_spinlock);
+
/* Invalidate the guard entry */
mvpp2_prs_hw_inv(priv, tid);
priv->prs_shadow[tid].valid = false;
+
+ spin_unlock_bh(&priv->prs_spinlock);
}
/* Add guard entry that drops packets when no VID is matched on this port */
@@ -2079,6 +2114,8 @@ void mvpp2_prs_vid_enable_filtering(struct mvpp2_port *port)
memset(&pe, 0, sizeof(pe));
+ spin_lock_bh(&priv->prs_spinlock);
+
pe.index = tid;
reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id));
@@ -2111,6 +2148,8 @@ void mvpp2_prs_vid_enable_filtering(struct mvpp2_port *port)
/* Update shadow table */
mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
mvpp2_prs_hw_write(priv, &pe);
+
+ spin_unlock_bh(&priv->prs_spinlock);
}
/* Parser default initialization */
@@ -2118,6 +2157,20 @@ int mvpp2_prs_default_init(struct platform_device *pdev, struct mvpp2 *priv)
{
int err, index, i;
+ priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE,
+ sizeof(*priv->prs_shadow),
+ GFP_KERNEL);
+ if (!priv->prs_shadow)
+ return -ENOMEM;
+
+ priv->prs_double_vlans = devm_kcalloc(&pdev->dev, sizeof(bool),
+ MVPP2_PRS_DBL_VLANS_MAX,
+ GFP_KERNEL);
+ if (!priv->prs_double_vlans)
+ return -ENOMEM;
+
+ spin_lock_bh(&priv->prs_spinlock);
+
/* Enable tcam table */
mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);
@@ -2136,12 +2189,6 @@ int mvpp2_prs_default_init(struct platform_device *pdev, struct mvpp2 *priv)
for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
mvpp2_prs_hw_inv(priv, index);
- priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE,
- sizeof(*priv->prs_shadow),
- GFP_KERNEL);
- if (!priv->prs_shadow)
- return -ENOMEM;
-
/* Always start from lookup = 0 */
for (index = 0; index < MVPP2_MAX_PORTS; index++)
mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
@@ -2158,26 +2205,13 @@ int mvpp2_prs_default_init(struct platform_device *pdev, struct mvpp2 *priv)
mvpp2_prs_vid_init(priv);
err = mvpp2_prs_etype_init(priv);
- if (err)
- return err;
-
- err = mvpp2_prs_vlan_init(pdev, priv);
- if (err)
- return err;
-
- err = mvpp2_prs_pppoe_init(priv);
- if (err)
- return err;
-
- err = mvpp2_prs_ip6_init(priv);
- if (err)
- return err;
-
- err = mvpp2_prs_ip4_init(priv);
- if (err)
- return err;
+ err = err ? : mvpp2_prs_vlan_init(pdev, priv);
+ err = err ? : mvpp2_prs_pppoe_init(priv);
+ err = err ? : mvpp2_prs_ip6_init(priv);
+ err = err ? : mvpp2_prs_ip4_init(priv);
- return 0;
+ spin_unlock_bh(&priv->prs_spinlock);
+ return err;
}
/* Compare MAC DA with tcam entry data */
@@ -2217,7 +2251,7 @@ mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
(priv->prs_shadow[tid].udf != udf_type))
continue;
- mvpp2_prs_init_from_hw(priv, &pe, tid);
+ __mvpp2_prs_init_from_hw(priv, &pe, tid);
entry_pmap = mvpp2_prs_tcam_port_map_get(&pe);
if (mvpp2_prs_mac_range_equals(&pe, da, mask) &&
@@ -2229,7 +2263,8 @@ mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
}
/* Update parser's mac da entry */
-int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add)
+static int __mvpp2_prs_mac_da_accept(struct mvpp2_port *port,
+ const u8 *da, bool add)
{
unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
struct mvpp2 *priv = port->priv;
@@ -2261,7 +2296,7 @@ int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add)
/* Mask all ports */
mvpp2_prs_tcam_port_map_set(&pe, 0);
} else {
- mvpp2_prs_init_from_hw(priv, &pe, tid);
+ __mvpp2_prs_init_from_hw(priv, &pe, tid);
}
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
@@ -2317,6 +2352,17 @@ int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add)
return 0;
}
+int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add)
+{
+ int err;
+
+ spin_lock_bh(&port->priv->prs_spinlock);
+ err = __mvpp2_prs_mac_da_accept(port, da, add);
+ spin_unlock_bh(&port->priv->prs_spinlock);
+
+ return err;
+}
+
int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da)
{
struct mvpp2_port *port = netdev_priv(dev);
@@ -2345,6 +2391,8 @@ void mvpp2_prs_mac_del_all(struct mvpp2_port *port)
unsigned long pmap;
int index, tid;
+ spin_lock_bh(&priv->prs_spinlock);
+
for (tid = MVPP2_PE_MAC_RANGE_START;
tid <= MVPP2_PE_MAC_RANGE_END; tid++) {
unsigned char da[ETH_ALEN], da_mask[ETH_ALEN];
@@ -2354,7 +2402,7 @@ void mvpp2_prs_mac_del_all(struct mvpp2_port *port)
(priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF))
continue;
- mvpp2_prs_init_from_hw(priv, &pe, tid);
+ __mvpp2_prs_init_from_hw(priv, &pe, tid);
pmap = mvpp2_prs_tcam_port_map_get(&pe);
@@ -2375,14 +2423,17 @@ void mvpp2_prs_mac_del_all(struct mvpp2_port *port)
continue;
/* Remove entry from TCAM */
- mvpp2_prs_mac_da_accept(port, da, false);
+ __mvpp2_prs_mac_da_accept(port, da, false);
}
+
+ spin_unlock_bh(&priv->prs_spinlock);
}
int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
{
switch (type) {
case MVPP2_TAG_TYPE_EDSA:
+ spin_lock_bh(&priv->prs_spinlock);
/* Add port to EDSA entries */
mvpp2_prs_dsa_tag_set(priv, port, true,
MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
@@ -2393,9 +2444,11 @@ int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
mvpp2_prs_dsa_tag_set(priv, port, false,
MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
+ spin_unlock_bh(&priv->prs_spinlock);
break;
case MVPP2_TAG_TYPE_DSA:
+ spin_lock_bh(&priv->prs_spinlock);
/* Add port to DSA entries */
mvpp2_prs_dsa_tag_set(priv, port, true,
MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
@@ -2406,10 +2459,12 @@ int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
mvpp2_prs_dsa_tag_set(priv, port, false,
MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
+ spin_unlock_bh(&priv->prs_spinlock);
break;
case MVPP2_TAG_TYPE_MH:
case MVPP2_TAG_TYPE_NONE:
+ spin_lock_bh(&priv->prs_spinlock);
/* Remove port form EDSA and DSA entries */
mvpp2_prs_dsa_tag_set(priv, port, false,
MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
@@ -2419,6 +2474,7 @@ int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
mvpp2_prs_dsa_tag_set(priv, port, false,
MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
+ spin_unlock_bh(&priv->prs_spinlock);
break;
default:
@@ -2437,11 +2493,15 @@ int mvpp2_prs_add_flow(struct mvpp2 *priv, int flow, u32 ri, u32 ri_mask)
memset(&pe, 0, sizeof(pe));
+ spin_lock_bh(&priv->prs_spinlock);
+
tid = mvpp2_prs_tcam_first_free(priv,
MVPP2_PE_LAST_FREE_TID,
MVPP2_PE_FIRST_FREE_TID);
- if (tid < 0)
+ if (tid < 0) {
+ spin_unlock_bh(&priv->prs_spinlock);
return tid;
+ }
pe.index = tid;
@@ -2461,6 +2521,7 @@ int mvpp2_prs_add_flow(struct mvpp2 *priv, int flow, u32 ri, u32 ri_mask)
mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
mvpp2_prs_hw_write(priv, &pe);
+ spin_unlock_bh(&priv->prs_spinlock);
return 0;
}
@@ -2472,6 +2533,8 @@ int mvpp2_prs_def_flow(struct mvpp2_port *port)
memset(&pe, 0, sizeof(pe));
+ spin_lock_bh(&port->priv->prs_spinlock);
+
tid = mvpp2_prs_flow_find(port->priv, port->id);
/* Such entry not exist */
@@ -2480,8 +2543,10 @@ int mvpp2_prs_def_flow(struct mvpp2_port *port)
tid = mvpp2_prs_tcam_first_free(port->priv,
MVPP2_PE_LAST_FREE_TID,
MVPP2_PE_FIRST_FREE_TID);
- if (tid < 0)
+ if (tid < 0) {
+ spin_unlock_bh(&port->priv->prs_spinlock);
return tid;
+ }
pe.index = tid;
@@ -2492,13 +2557,14 @@ int mvpp2_prs_def_flow(struct mvpp2_port *port)
/* Update shadow table */
mvpp2_prs_shadow_set(port->priv, pe.index, MVPP2_PRS_LU_FLOWS);
} else {
- mvpp2_prs_init_from_hw(port->priv, &pe, tid);
+ __mvpp2_prs_init_from_hw(port->priv, &pe, tid);
}
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
mvpp2_prs_tcam_port_map_set(&pe, (1 << port->id));
mvpp2_prs_hw_write(port->priv, &pe);
+ spin_unlock_bh(&port->priv->prs_spinlock);
return 0;
}
@@ -2509,11 +2575,14 @@ int mvpp2_prs_hits(struct mvpp2 *priv, int index)
if (index > MVPP2_PRS_TCAM_SRAM_SIZE)
return -EINVAL;
+ spin_lock_bh(&priv->prs_spinlock);
+
mvpp2_write(priv, MVPP2_PRS_TCAM_HIT_IDX_REG, index);
val = mvpp2_read(priv, MVPP2_PRS_TCAM_HIT_CNT_REG);
val &= MVPP2_PRS_TCAM_HIT_CNT_MASK;
+ spin_unlock_bh(&priv->prs_spinlock);
return val;
}
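The recurring pattern in the mvpp2_prs.c hunks is a lock-wrapper split: the exported entry points take priv->prs_spinlock with spin_lock_bh() and call a double-underscore helper, while the helper itself only documents the requirement with lockdep_assert_held(), so internal callers that already hold the lock can reuse it without double locking. A minimal sketch of that pattern with hypothetical names (my_priv, tbl_lock, __my_table_update):

static int __my_table_update(struct my_priv *priv, int idx)
{
	lockdep_assert_held(&priv->tbl_lock);	/* caller must hold the lock */

	/* ... modify the shared table and its shadow copy ... */
	return 0;
}

int my_table_update(struct my_priv *priv, int idx)
{
	int err;

	spin_lock_bh(&priv->tbl_lock);
	err = __my_table_update(priv, idx);
	spin_unlock_bh(&priv->tbl_lock);

	return err;
}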
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c b/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c
index 4f4d58189118..a88c006ea65b 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c
@@ -150,17 +150,14 @@ octep_get_ethtool_stats(struct net_device *netdev,
iface_rx_stats,
iface_tx_stats);
- for (q = 0; q < oct->num_oqs; q++) {
- struct octep_iq *iq = oct->iq[q];
- struct octep_oq *oq = oct->oq[q];
-
- tx_packets += iq->stats.instr_completed;
- tx_bytes += iq->stats.bytes_sent;
- tx_busy_errors += iq->stats.tx_busy;
-
- rx_packets += oq->stats.packets;
- rx_bytes += oq->stats.bytes;
- rx_alloc_errors += oq->stats.alloc_failures;
+ for (q = 0; q < OCTEP_MAX_QUEUES; q++) {
+ tx_packets += oct->stats_iq[q].instr_completed;
+ tx_bytes += oct->stats_iq[q].bytes_sent;
+ tx_busy_errors += oct->stats_iq[q].tx_busy;
+
+ rx_packets += oct->stats_oq[q].packets;
+ rx_bytes += oct->stats_oq[q].bytes;
+ rx_alloc_errors += oct->stats_oq[q].alloc_failures;
}
i = 0;
data[i++] = rx_packets;
@@ -198,22 +195,18 @@ octep_get_ethtool_stats(struct net_device *netdev,
data[i++] = iface_rx_stats->err_pkts;
/* Per Tx Queue stats */
- for (q = 0; q < oct->num_iqs; q++) {
- struct octep_iq *iq = oct->iq[q];
-
- data[i++] = iq->stats.instr_posted;
- data[i++] = iq->stats.instr_completed;
- data[i++] = iq->stats.bytes_sent;
- data[i++] = iq->stats.tx_busy;
+ for (q = 0; q < OCTEP_MAX_QUEUES; q++) {
+ data[i++] = oct->stats_iq[q].instr_posted;
+ data[i++] = oct->stats_iq[q].instr_completed;
+ data[i++] = oct->stats_iq[q].bytes_sent;
+ data[i++] = oct->stats_iq[q].tx_busy;
}
/* Per Rx Queue stats */
- for (q = 0; q < oct->num_oqs; q++) {
- struct octep_oq *oq = oct->oq[q];
-
- data[i++] = oq->stats.packets;
- data[i++] = oq->stats.bytes;
- data[i++] = oq->stats.alloc_failures;
+ for (q = 0; q < OCTEP_MAX_QUEUES; q++) {
+ data[i++] = oct->stats_oq[q].packets;
+ data[i++] = oct->stats_oq[q].bytes;
+ data[i++] = oct->stats_oq[q].alloc_failures;
}
}
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
index 549436efc204..24499bb36c00 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
@@ -822,7 +822,7 @@ static inline int octep_iq_full_check(struct octep_iq *iq)
if (unlikely(IQ_INSTR_SPACE(iq) >
OCTEP_WAKE_QUEUE_THRESHOLD)) {
netif_start_subqueue(iq->netdev, iq->q_no);
- iq->stats.restart_cnt++;
+ iq->stats->restart_cnt++;
return 0;
}
@@ -960,7 +960,7 @@ static netdev_tx_t octep_start_xmit(struct sk_buff *skb,
wmb();
/* Ring Doorbell to notify the NIC of new packets */
writel(iq->fill_cnt, iq->doorbell_reg);
- iq->stats.instr_posted += iq->fill_cnt;
+ iq->stats->instr_posted += iq->fill_cnt;
iq->fill_cnt = 0;
return NETDEV_TX_OK;
@@ -991,37 +991,24 @@ dma_map_err:
static void octep_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
{
- u64 tx_packets, tx_bytes, rx_packets, rx_bytes;
struct octep_device *oct = netdev_priv(netdev);
+ u64 tx_packets, tx_bytes, rx_packets, rx_bytes;
int q;
- if (netif_running(netdev))
- octep_ctrl_net_get_if_stats(oct,
- OCTEP_CTRL_NET_INVALID_VFID,
- &oct->iface_rx_stats,
- &oct->iface_tx_stats);
-
tx_packets = 0;
tx_bytes = 0;
rx_packets = 0;
rx_bytes = 0;
- for (q = 0; q < oct->num_oqs; q++) {
- struct octep_iq *iq = oct->iq[q];
- struct octep_oq *oq = oct->oq[q];
-
- tx_packets += iq->stats.instr_completed;
- tx_bytes += iq->stats.bytes_sent;
- rx_packets += oq->stats.packets;
- rx_bytes += oq->stats.bytes;
+ for (q = 0; q < OCTEP_MAX_QUEUES; q++) {
+ tx_packets += oct->stats_iq[q].instr_completed;
+ tx_bytes += oct->stats_iq[q].bytes_sent;
+ rx_packets += oct->stats_oq[q].packets;
+ rx_bytes += oct->stats_oq[q].bytes;
}
stats->tx_packets = tx_packets;
stats->tx_bytes = tx_bytes;
stats->rx_packets = rx_packets;
stats->rx_bytes = rx_bytes;
- stats->multicast = oct->iface_rx_stats.mcast_pkts;
- stats->rx_errors = oct->iface_rx_stats.err_pkts;
- stats->collisions = oct->iface_tx_stats.xscol;
- stats->tx_fifo_errors = oct->iface_tx_stats.undflw;
}
/**
@@ -1137,6 +1124,43 @@ static int octep_set_features(struct net_device *dev, netdev_features_t features
return err;
}
+static int octep_get_vf_config(struct net_device *dev, int vf,
+ struct ifla_vf_info *ivi)
+{
+ struct octep_device *oct = netdev_priv(dev);
+
+ ivi->vf = vf;
+ ether_addr_copy(ivi->mac, oct->vf_info[vf].mac_addr);
+ ivi->spoofchk = true;
+ ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
+ ivi->trusted = false;
+
+ return 0;
+}
+
+static int octep_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
+{
+ struct octep_device *oct = netdev_priv(dev);
+ int err;
+
+ if (!is_valid_ether_addr(mac)) {
+ dev_err(&oct->pdev->dev, "Invalid MAC Address %pM\n", mac);
+ return -EADDRNOTAVAIL;
+ }
+
+ dev_dbg(&oct->pdev->dev, "set vf-%d mac to %pM\n", vf, mac);
+ ether_addr_copy(oct->vf_info[vf].mac_addr, mac);
+ oct->vf_info[vf].flags |= OCTEON_PFVF_FLAG_MAC_SET_BY_PF;
+
+ err = octep_ctrl_net_set_mac_addr(oct, vf, mac, true);
+ if (err)
+ dev_err(&oct->pdev->dev,
+ "Set VF%d MAC address failed via host control Mbox\n",
+ vf);
+
+ return err;
+}
+
static const struct net_device_ops octep_netdev_ops = {
.ndo_open = octep_open,
.ndo_stop = octep_stop,
@@ -1146,6 +1170,8 @@ static const struct net_device_ops octep_netdev_ops = {
.ndo_set_mac_address = octep_set_mac,
.ndo_change_mtu = octep_change_mtu,
.ndo_set_features = octep_set_features,
+ .ndo_get_vf_config = octep_get_vf_config,
+ .ndo_set_vf_mac = octep_set_vf_mac
};
/**
@@ -1197,7 +1223,7 @@ static void octep_hb_timeout_task(struct work_struct *work)
miss_cnt);
rtnl_lock();
if (netif_running(oct->netdev))
- octep_stop(oct->netdev);
+ dev_close(oct->netdev);
rtnl_unlock();
}
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.h b/drivers/net/ethernet/marvell/octeon_ep/octep_main.h
index fee59e0e0138..81ac4267811c 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.h
@@ -220,6 +220,7 @@ struct octep_iface_link_info {
/* The Octeon VF device specific info data structure.*/
struct octep_pfvf_info {
u8 mac_addr[ETH_ALEN];
+ u32 flags;
u32 mbox_version;
};
@@ -257,11 +258,17 @@ struct octep_device {
/* Pointers to Octeon Tx queues */
struct octep_iq *iq[OCTEP_MAX_IQ];
+ /* Per iq stats */
+ struct octep_iq_stats stats_iq[OCTEP_MAX_IQ];
+
/* Rx queues (OQ: Output Queue) */
u16 num_oqs;
/* Pointers to Octeon Rx queues */
struct octep_oq *oq[OCTEP_MAX_OQ];
+ /* Per oq stats */
+ struct octep_oq_stats stats_oq[OCTEP_MAX_OQ];
+
/* Hardware port number of the PCIe interface */
u16 pcie_port;
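The octeon_ep changes above move the per-queue Tx/Rx counters out of the queue structures and into device-owned stats_iq[]/stats_oq[] arrays; each queue only keeps a pointer into its slot. Readers such as ndo_get_stats64 and the ethtool stats callback can then walk the device arrays without dereferencing queue structures that may be freed or re-created. A minimal standalone sketch of this layout (illustrative names only, not the driver code):

#include <stdio.h>
#include <stdlib.h>

#define MAX_QUEUES 4

struct iq_stats { unsigned long long instr_completed, bytes_sent; };

struct iq {
    int q_no;
    struct iq_stats *stats;     /* points into dev->stats_iq[] */
};

struct dev {
    struct iq *iq[MAX_QUEUES];
    struct iq_stats stats_iq[MAX_QUEUES];   /* owned by the device, survives queue teardown */
};

static struct iq *setup_iq(struct dev *d, int q_no)
{
    struct iq *q = calloc(1, sizeof(*q));

    if (!q)
        return NULL;
    q->q_no = q_no;
    q->stats = &d->stats_iq[q_no];
    d->iq[q_no] = q;
    return q;
}

int main(void)
{
    struct dev d = { 0 };
    struct iq *q = setup_iq(&d, 0);

    if (!q)
        return 1;

    q->stats->instr_completed += 10;    /* datapath updates go through the queue */

    free(q);                            /* queue is torn down ... */
    d.iq[0] = NULL;

    /* ... but the counters a stats reader walks are still valid */
    printf("tx completed: %llu\n", d.stats_iq[0].instr_completed);
    return 0;
}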
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c
index e6eb98d70f3c..ebecdd29f3bd 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c
@@ -156,12 +156,23 @@ static void octep_pfvf_set_mac_addr(struct octep_device *oct, u32 vf_id,
{
int err;
+ if (oct->vf_info[vf_id].flags & OCTEON_PFVF_FLAG_MAC_SET_BY_PF) {
+ dev_err(&oct->pdev->dev,
+ "VF%d attempted to override administrative set MAC address\n",
+ vf_id);
+ rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK;
+ return;
+ }
+
err = octep_ctrl_net_set_mac_addr(oct, vf_id, cmd.s_set_mac.mac_addr, true);
if (err) {
rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK;
- dev_err(&oct->pdev->dev, "Set VF MAC address failed via host control Mbox\n");
+ dev_err(&oct->pdev->dev, "Set VF%d MAC address failed via host control Mbox\n",
+ vf_id);
return;
}
+
+ ether_addr_copy(oct->vf_info[vf_id].mac_addr, cmd.s_set_mac.mac_addr);
rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK;
}
@@ -171,10 +182,18 @@ static void octep_pfvf_get_mac_addr(struct octep_device *oct, u32 vf_id,
{
int err;
+ if (oct->vf_info[vf_id].flags & OCTEON_PFVF_FLAG_MAC_SET_BY_PF) {
+ dev_dbg(&oct->pdev->dev, "VF%d MAC address set by PF\n", vf_id);
+ ether_addr_copy(rsp->s_set_mac.mac_addr,
+ oct->vf_info[vf_id].mac_addr);
+ rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK;
+ return;
+ }
err = octep_ctrl_net_get_mac_addr(oct, vf_id, rsp->s_set_mac.mac_addr);
if (err) {
rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK;
- dev_err(&oct->pdev->dev, "Get VF MAC address failed via host control Mbox\n");
+ dev_err(&oct->pdev->dev, "Get VF%d MAC address failed via host control Mbox\n",
+ vf_id);
return;
}
rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK;
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.h b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.h
index 0dc6eead292a..386a095a99bc 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.h
@@ -8,8 +8,6 @@
#ifndef _OCTEP_PFVF_MBOX_H_
#define _OCTEP_PFVF_MBOX_H_
-/* VF flags */
-#define OCTEON_PFVF_FLAG_MAC_SET_BY_PF BIT_ULL(0) /* PF has set VF MAC address */
#define OCTEON_SDP_16K_HW_FRS 16380UL
#define OCTEON_SDP_64K_HW_FRS 65531UL
@@ -23,6 +21,10 @@ enum octep_pfvf_mbox_version {
#define OCTEP_PFVF_MBOX_VERSION_CURRENT OCTEP_PFVF_MBOX_VERSION_V2
+/* VF flags */
+/* PF has set VF MAC address */
+#define OCTEON_PFVF_FLAG_MAC_SET_BY_PF BIT(0)
+
enum octep_pfvf_mbox_opcode {
OCTEP_PFVF_MBOX_CMD_VERSION,
OCTEP_PFVF_MBOX_CMD_SET_MTU,
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
index 8af75cb37c3e..82b6b19e76b4 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
@@ -87,7 +87,7 @@ static int octep_oq_refill(struct octep_device *oct, struct octep_oq *oq)
page = dev_alloc_page();
if (unlikely(!page)) {
dev_err(oq->dev, "refill: rx buffer alloc failed\n");
- oq->stats.alloc_failures++;
+ oq->stats->alloc_failures++;
break;
}
@@ -98,7 +98,7 @@ static int octep_oq_refill(struct octep_device *oct, struct octep_oq *oq)
"OQ-%d buffer refill: DMA mapping error!\n",
oq->q_no);
put_page(page);
- oq->stats.alloc_failures++;
+ oq->stats->alloc_failures++;
break;
}
oq->buff_info[refill_idx].page = page;
@@ -134,6 +134,7 @@ static int octep_setup_oq(struct octep_device *oct, int q_no)
oq->netdev = oct->netdev;
oq->dev = &oct->pdev->dev;
oq->q_no = q_no;
+ oq->stats = &oct->stats_oq[q_no];
oq->max_count = CFG_GET_OQ_NUM_DESC(oct->conf);
oq->ring_size_mask = oq->max_count - 1;
oq->buffer_size = CFG_GET_OQ_BUF_SIZE(oct->conf);
@@ -443,7 +444,7 @@ static int __octep_oq_process_rx(struct octep_device *oct,
if (!skb) {
octep_oq_drop_rx(oq, buff_info,
&read_idx, &desc_used);
- oq->stats.alloc_failures++;
+ oq->stats->alloc_failures++;
continue;
}
skb_reserve(skb, data_offset);
@@ -494,8 +495,8 @@ static int __octep_oq_process_rx(struct octep_device *oct,
oq->host_read_idx = read_idx;
oq->refill_count += desc_used;
- oq->stats.packets += pkt;
- oq->stats.bytes += rx_bytes;
+ oq->stats->packets += pkt;
+ oq->stats->bytes += rx_bytes;
return pkt;
}
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h
index 3b08e2d560dc..b4696c93d0e6 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h
@@ -186,8 +186,8 @@ struct octep_oq {
*/
u8 __iomem *pkts_sent_reg;
- /* Statistics for this OQ. */
- struct octep_oq_stats stats;
+ /* Pointer to statistics for this OQ. */
+ struct octep_oq_stats *stats;
/* Packets pending to be processed */
u32 pkts_pending;
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c
index 06851b78aa28..08ee90013fef 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c
@@ -81,9 +81,9 @@ int octep_iq_process_completions(struct octep_iq *iq, u16 budget)
}
iq->pkts_processed += compl_pkts;
- iq->stats.instr_completed += compl_pkts;
- iq->stats.bytes_sent += compl_bytes;
- iq->stats.sgentry_sent += compl_sg;
+ iq->stats->instr_completed += compl_pkts;
+ iq->stats->bytes_sent += compl_bytes;
+ iq->stats->sgentry_sent += compl_sg;
iq->flush_index = fi;
netdev_tx_completed_queue(iq->netdev_q, compl_pkts, compl_bytes);
@@ -187,6 +187,7 @@ static int octep_setup_iq(struct octep_device *oct, int q_no)
iq->netdev = oct->netdev;
iq->dev = &oct->pdev->dev;
iq->q_no = q_no;
+ iq->stats = &oct->stats_iq[q_no];
iq->max_count = CFG_GET_IQ_NUM_DESC(oct->conf);
iq->ring_size_mask = iq->max_count - 1;
iq->fill_threshold = CFG_GET_IQ_DB_MIN(oct->conf);
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h
index 875a2c34091f..58fb39dda977 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h
@@ -170,8 +170,8 @@ struct octep_iq {
*/
u16 flush_index;
- /* Statistics for this input queue. */
- struct octep_iq_stats stats;
+ /* Pointer to statistics for this input queue. */
+ struct octep_iq_stats *stats;
/* Pointer to the Virtual Base addr of the input ring. */
struct octep_tx_desc_hw *desc_ring;
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c
index 7b21439a315f..d60441928ba9 100644
--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c
@@ -114,12 +114,9 @@ static void octep_vf_get_ethtool_stats(struct net_device *netdev,
iface_tx_stats = &oct->iface_tx_stats;
iface_rx_stats = &oct->iface_rx_stats;
- for (q = 0; q < oct->num_oqs; q++) {
- struct octep_vf_iq *iq = oct->iq[q];
- struct octep_vf_oq *oq = oct->oq[q];
-
- tx_busy_errors += iq->stats.tx_busy;
- rx_alloc_errors += oq->stats.alloc_failures;
+ for (q = 0; q < OCTEP_VF_MAX_QUEUES; q++) {
+ tx_busy_errors += oct->stats_iq[q].tx_busy;
+ rx_alloc_errors += oct->stats_oq[q].alloc_failures;
}
i = 0;
data[i++] = rx_alloc_errors;
@@ -134,22 +131,18 @@ static void octep_vf_get_ethtool_stats(struct net_device *netdev,
data[i++] = iface_rx_stats->dropped_octets_fifo_full;
/* Per Tx Queue stats */
- for (q = 0; q < oct->num_iqs; q++) {
- struct octep_vf_iq *iq = oct->iq[q];
-
- data[i++] = iq->stats.instr_posted;
- data[i++] = iq->stats.instr_completed;
- data[i++] = iq->stats.bytes_sent;
- data[i++] = iq->stats.tx_busy;
+ for (q = 0; q < OCTEP_VF_MAX_QUEUES; q++) {
+ data[i++] = oct->stats_iq[q].instr_posted;
+ data[i++] = oct->stats_iq[q].instr_completed;
+ data[i++] = oct->stats_iq[q].bytes_sent;
+ data[i++] = oct->stats_iq[q].tx_busy;
}
/* Per Rx Queue stats */
for (q = 0; q < oct->num_oqs; q++) {
- struct octep_vf_oq *oq = oct->oq[q];
-
- data[i++] = oq->stats.packets;
- data[i++] = oq->stats.bytes;
- data[i++] = oq->stats.alloc_failures;
+ data[i++] = oct->stats_oq[q].packets;
+ data[i++] = oct->stats_oq[q].bytes;
+ data[i++] = oct->stats_oq[q].alloc_failures;
}
}
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
index 7e6771c9cdbb..420c3f4cf741 100644
--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
@@ -18,8 +18,6 @@
#include "octep_vf_config.h"
#include "octep_vf_main.h"
-struct workqueue_struct *octep_vf_wq;
-
/* Supported Devices */
static const struct pci_device_id octep_vf_pci_id_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN93_VF)},
@@ -574,7 +572,7 @@ static int octep_vf_iq_full_check(struct octep_vf_iq *iq)
* caused queues to get re-enabled after
* being stopped
*/
- iq->stats.restart_cnt++;
+ iq->stats->restart_cnt++;
fallthrough;
case 1: /* Queue left enabled, since IQ is not yet full*/
return 0;
@@ -731,7 +729,7 @@ ring_dbell:
/* Flush the hw descriptors before writing to doorbell */
smp_wmb();
writel(iq->fill_cnt, iq->doorbell_reg);
- iq->stats.instr_posted += iq->fill_cnt;
+ iq->stats->instr_posted += iq->fill_cnt;
iq->fill_cnt = 0;
return NETDEV_TX_OK;
}
@@ -786,27 +784,16 @@ static void octep_vf_get_stats64(struct net_device *netdev,
tx_bytes = 0;
rx_packets = 0;
rx_bytes = 0;
- for (q = 0; q < oct->num_oqs; q++) {
- struct octep_vf_iq *iq = oct->iq[q];
- struct octep_vf_oq *oq = oct->oq[q];
-
- tx_packets += iq->stats.instr_completed;
- tx_bytes += iq->stats.bytes_sent;
- rx_packets += oq->stats.packets;
- rx_bytes += oq->stats.bytes;
+ for (q = 0; q < OCTEP_VF_MAX_QUEUES; q++) {
+ tx_packets += oct->stats_iq[q].instr_completed;
+ tx_bytes += oct->stats_iq[q].bytes_sent;
+ rx_packets += oct->stats_oq[q].packets;
+ rx_bytes += oct->stats_oq[q].bytes;
}
stats->tx_packets = tx_packets;
stats->tx_bytes = tx_bytes;
stats->rx_packets = rx_packets;
stats->rx_bytes = rx_bytes;
- if (!octep_vf_get_if_stats(oct)) {
- stats->multicast = oct->iface_rx_stats.mcast_pkts;
- stats->rx_errors = oct->iface_rx_stats.err_pkts;
- stats->rx_dropped = oct->iface_rx_stats.dropped_pkts_fifo_full +
- oct->iface_rx_stats.err_pkts;
- stats->rx_missed_errors = oct->iface_rx_stats.dropped_pkts_fifo_full;
- stats->tx_dropped = oct->iface_tx_stats.dropped;
- }
}
/**
@@ -846,7 +833,9 @@ static void octep_vf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
struct octep_vf_device *oct = netdev_priv(netdev);
netdev_hold(netdev, NULL, GFP_ATOMIC);
- schedule_work(&oct->tx_timeout_task);
+ if (!schedule_work(&oct->tx_timeout_task))
+ netdev_put(netdev, NULL);
+
}
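The octep_vf_tx_timeout() change above pairs netdev_hold() with the return value of schedule_work(): if the work item was already queued, the just-taken reference is dropped immediately so it cannot be leaked. A standalone analogue of that pattern, using plain counters instead of real netdev reference tracking (illustrative only):

#include <stdbool.h>
#include <stdio.h>

static int refcount;
static bool work_pending;

static void hold(void) { refcount++; }
static void put(void)  { refcount--; }

/* mimics schedule_work(): returns false if the item is already queued */
static bool schedule_once(void)
{
    if (work_pending)
        return false;
    work_pending = true;
    return true;
}

static void tx_timeout(void)
{
    hold();
    if (!schedule_once())
        put();      /* already queued: drop the reference taken above */
}

int main(void)
{
    tx_timeout();
    tx_timeout();   /* second timeout fires before the work has run */
    printf("references held for the queued work: %d\n", refcount);
    return 0;
}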
static int octep_vf_set_mac(struct net_device *netdev, void *p)
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h
index 5769f62545cd..b9f13506f462 100644
--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h
@@ -246,11 +246,17 @@ struct octep_vf_device {
/* Pointers to Octeon Tx queues */
struct octep_vf_iq *iq[OCTEP_VF_MAX_IQ];
+ /* Per iq stats */
+ struct octep_vf_iq_stats stats_iq[OCTEP_VF_MAX_IQ];
+
/* Rx queues (OQ: Output Queue) */
u16 num_oqs;
/* Pointers to Octeon Rx queues */
struct octep_vf_oq *oq[OCTEP_VF_MAX_OQ];
+ /* Per oq stats */
+ struct octep_vf_oq_stats stats_oq[OCTEP_VF_MAX_OQ];
+
/* Hardware port number of the PCIe interface */
u16 pcie_port;
@@ -314,8 +320,6 @@ static inline u16 OCTEP_VF_MINOR_REV(struct octep_vf_device *oct)
#define octep_vf_read_csr64(octep_vf_dev, reg_off) \
readq((octep_vf_dev)->mmio.hw_addr + (reg_off))
-extern struct workqueue_struct *octep_vf_wq;
-
int octep_vf_device_setup(struct octep_vf_device *oct);
int octep_vf_setup_iqs(struct octep_vf_device *oct);
void octep_vf_free_iqs(struct octep_vf_device *oct);
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c
index 82821bc28634..d70c8be3cfc4 100644
--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c
@@ -87,7 +87,7 @@ static int octep_vf_oq_refill(struct octep_vf_device *oct, struct octep_vf_oq *o
page = dev_alloc_page();
if (unlikely(!page)) {
dev_err(oq->dev, "refill: rx buffer alloc failed\n");
- oq->stats.alloc_failures++;
+ oq->stats->alloc_failures++;
break;
}
@@ -98,7 +98,7 @@ static int octep_vf_oq_refill(struct octep_vf_device *oct, struct octep_vf_oq *o
"OQ-%d buffer refill: DMA mapping error!\n",
oq->q_no);
put_page(page);
- oq->stats.alloc_failures++;
+ oq->stats->alloc_failures++;
break;
}
oq->buff_info[refill_idx].page = page;
@@ -134,6 +134,7 @@ static int octep_vf_setup_oq(struct octep_vf_device *oct, int q_no)
oq->netdev = oct->netdev;
oq->dev = &oct->pdev->dev;
oq->q_no = q_no;
+ oq->stats = &oct->stats_oq[q_no];
oq->max_count = CFG_GET_OQ_NUM_DESC(oct->conf);
oq->ring_size_mask = oq->max_count - 1;
oq->buffer_size = CFG_GET_OQ_BUF_SIZE(oct->conf);
@@ -458,8 +459,8 @@ static int __octep_vf_oq_process_rx(struct octep_vf_device *oct,
oq->host_read_idx = read_idx;
oq->refill_count += desc_used;
- oq->stats.packets += pkt;
- oq->stats.bytes += rx_bytes;
+ oq->stats->packets += pkt;
+ oq->stats->bytes += rx_bytes;
return pkt;
}
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.h
index fe46838b5200..9e296b7d7e34 100644
--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.h
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.h
@@ -187,7 +187,7 @@ struct octep_vf_oq {
u8 __iomem *pkts_sent_reg;
/* Statistics for this OQ. */
- struct octep_vf_oq_stats stats;
+ struct octep_vf_oq_stats *stats;
/* Packets pending to be processed */
u32 pkts_pending;
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.c
index 47a5c054fdb6..8180e5ce3d7e 100644
--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.c
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.c
@@ -82,9 +82,9 @@ int octep_vf_iq_process_completions(struct octep_vf_iq *iq, u16 budget)
}
iq->pkts_processed += compl_pkts;
- iq->stats.instr_completed += compl_pkts;
- iq->stats.bytes_sent += compl_bytes;
- iq->stats.sgentry_sent += compl_sg;
+ iq->stats->instr_completed += compl_pkts;
+ iq->stats->bytes_sent += compl_bytes;
+ iq->stats->sgentry_sent += compl_sg;
iq->flush_index = fi;
netif_subqueue_completed_wake(iq->netdev, iq->q_no, compl_pkts,
@@ -186,6 +186,7 @@ static int octep_vf_setup_iq(struct octep_vf_device *oct, int q_no)
iq->netdev = oct->netdev;
iq->dev = &oct->pdev->dev;
iq->q_no = q_no;
+ iq->stats = &oct->stats_iq[q_no];
iq->max_count = CFG_GET_IQ_NUM_DESC(oct->conf);
iq->ring_size_mask = iq->max_count - 1;
iq->fill_threshold = CFG_GET_IQ_DB_MIN(oct->conf);
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.h
index f338b975103c..1cede90e3a5f 100644
--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.h
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.h
@@ -129,7 +129,7 @@ struct octep_vf_iq {
u16 flush_index;
/* Statistics for this input queue. */
- struct octep_vf_iq_stats stats;
+ struct octep_vf_iq_stats *stats;
/* Pointer to the Virtual Base addr of the input ring. */
struct octep_vf_tx_desc_hw *desc_ring;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 8216f843a7cd..971993586fb4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -66,8 +66,18 @@ static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool en);
/* Supported devices */
static const struct pci_device_id cgx_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_CGX) },
- { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM) },
- { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10KB_RPM) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM,
+ PCI_ANY_ID, PCI_SUBSYS_DEVID_CN10K_A) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM,
+ PCI_ANY_ID, PCI_SUBSYS_DEVID_CNF10K_A) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM,
+ PCI_ANY_ID, PCI_SUBSYS_DEVID_CNF10K_B) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10KB_RPM,
+ PCI_ANY_ID, PCI_SUBSYS_DEVID_CN10K_B) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10KB_RPM,
+ PCI_ANY_ID, PCI_SUBSYS_DEVID_CN20KA) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10KB_RPM,
+ PCI_ANY_ID, PCI_SUBSYS_DEVID_CNF20KA) },
{ 0, } /* end of table */
};
@@ -707,6 +717,11 @@ int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat)
if (!is_lmac_valid(cgx, lmac_id))
return -ENODEV;
+
+ /* pass lmac as 0 for CGX_CMR_RX_STAT9-12 */
+ if (idx >= CGX_RX_STAT_GLOBAL_INDEX)
+ lmac_id = 0;
+
*rx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_STAT0 + (idx * 8));
return 0;
}
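In cgx_get_rx_stats() above, RX counters from CGX_RX_STAT_GLOBAL_INDEX onwards are per-CGX rather than per-LMAC, so the LMAC number is forced to 0 before the register offset is formed. A standalone sketch of that index handling; the constants and register spacing below are illustrative, not the hardware values:

#include <stdio.h>

#define RX_STAT_GLOBAL_INDEX 9          /* illustrative, like CGX_RX_STAT_GLOBAL_INDEX */
#define RX_STAT0_OFFSET      0x070ULL   /* illustrative register offset */
#define LMAC_STRIDE          0x1000ULL  /* illustrative per-LMAC spacing */

static unsigned long long rx_stat_reg(int lmac_id, int idx)
{
    if (idx >= RX_STAT_GLOBAL_INDEX)    /* STAT9..12 are per-CGX: ignore the LMAC */
        lmac_id = 0;

    return lmac_id * LMAC_STRIDE + RX_STAT0_OFFSET + idx * 8ULL;
}

int main(void)
{
    printf("lmac1/idx3 -> %#llx, lmac1/idx10 -> %#llx\n",
           rx_stat_reg(1, 3), rx_stat_reg(1, 10));
    return 0;
}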
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h
index 5d84386ed22d..406c59100a35 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h
@@ -159,6 +159,7 @@ enum nix_scheduler {
#define SDP_HW_MIN_FRS 16
#define CN10K_LMAC_LINK_MAX_FRS 16380 /* 16k - FCS */
#define CN10K_LBK_LINK_MAX_FRS 65535 /* 64k */
+#define SDP_LINK_CREDIT 0x320202
/* NIX RX action operation*/
#define NIX_RX_ACTIONOP_DROP (0x0ull)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
index 1e5aa5397504..7d21905deed8 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
@@ -188,14 +188,13 @@ int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid)
{
unsigned long timeout = jiffies + msecs_to_jiffies(MBOX_RSP_TIMEOUT);
struct otx2_mbox_dev *mdev = &mbox->dev[devid];
- struct device *sender = &mbox->pdev->dev;
while (!time_after(jiffies, timeout)) {
if (mdev->num_msgs == mdev->msgs_acked)
return 0;
usleep_range(800, 1000);
}
- dev_dbg(sender, "timed out while waiting for rsp\n");
+ trace_otx2_msg_wait_rsp(mbox->pdev);
return -EIO;
}
EXPORT_SYMBOL(otx2_mbox_wait_for_rsp);
@@ -219,6 +218,7 @@ static void otx2_mbox_msg_send_data(struct otx2_mbox *mbox, int devid, u64 data)
struct otx2_mbox_dev *mdev = &mbox->dev[devid];
struct mbox_hdr *tx_hdr, *rx_hdr;
void *hw_mbase = mdev->hwbase;
+ struct mbox_msghdr *msg;
u64 intr_val;
tx_hdr = hw_mbase + mbox->tx_start;
@@ -251,7 +251,10 @@ static void otx2_mbox_msg_send_data(struct otx2_mbox *mbox, int devid, u64 data)
tx_hdr->num_msgs = mdev->num_msgs;
rx_hdr->num_msgs = 0;
- trace_otx2_msg_send(mbox->pdev, tx_hdr->num_msgs, tx_hdr->msg_size);
+ msg = (struct mbox_msghdr *)(hw_mbase + mbox->tx_start + msgs_offset);
+
+ trace_otx2_msg_send(mbox->pdev, tx_hdr->num_msgs, tx_hdr->msg_size,
+ msg->id, msg->pcifunc);
spin_unlock(&mdev->mbox_lock);
@@ -445,6 +448,14 @@ const char *otx2_mbox_id2name(u16 id)
#define M(_name, _id, _1, _2, _3) case _id: return # _name;
MBOX_MESSAGES
#undef M
+
+#define M(_name, _id, _1, _2, _3) case _id: return # _name;
+ MBOX_UP_CGX_MESSAGES
+#undef M
+
+#define M(_name, _id, _1, _2, _3) case _id: return # _name;
+ MBOX_UP_CPT_MESSAGES
+#undef M
default:
return "INVALID ID";
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index 62c07407eb94..a213b2663583 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -313,6 +313,10 @@ M(NIX_BANDPROF_FREE, 0x801e, nix_bandprof_free, nix_bandprof_free_req, \
msg_rsp) \
M(NIX_BANDPROF_GET_HWINFO, 0x801f, nix_bandprof_get_hwinfo, msg_req, \
nix_bandprof_get_hwinfo_rsp) \
+M(NIX_CPT_BP_ENABLE, 0x8020, nix_cpt_bp_enable, nix_bp_cfg_req, \
+ nix_bp_cfg_rsp) \
+M(NIX_CPT_BP_DISABLE, 0x8021, nix_cpt_bp_disable, nix_bp_cfg_req, \
+ msg_rsp) \
M(NIX_READ_INLINE_IPSEC_CFG, 0x8023, nix_read_inline_ipsec_cfg, \
msg_req, nix_inline_ipsec_cfg) \
M(NIX_MCAST_GRP_CREATE, 0x802b, nix_mcast_grp_create, nix_mcast_grp_create_req, \
@@ -520,6 +524,8 @@ struct get_hw_cap_rsp {
u8 nix_fixed_txschq_mapping; /* Schq mapping fixed or flexible */
u8 nix_shaping; /* Is shaping and coloring supported */
u8 npc_hash_extract; /* Is hash extract supported */
+#define HW_CAP_MACSEC BIT_ULL(1)
+ u64 hw_caps;
};
/* CGX mbox message formats */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
index d39d86e694cc..0277d226293e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
@@ -143,6 +143,8 @@ static int mcs_notify_pfvf(struct mcs_intr_event *event, struct rvu *rvu)
otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pf);
+ otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pf);
+
mutex_unlock(&rvu->mbox_lock);
return 0;
@@ -925,7 +927,6 @@ void rvu_mcs_exit(struct rvu *rvu)
if (!rvu->mcs_intr_wq)
return;
- flush_workqueue(rvu->mcs_intr_wq);
destroy_workqueue(rvu->mcs_intr_wq);
rvu->mcs_intr_wq = NULL;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
index bcc96eed2481..66749b3649c1 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
@@ -545,8 +545,7 @@ static int ptp_probe(struct pci_dev *pdev,
spin_lock_init(&ptp->ptp_lock);
if (cn10k_ptp_errata(ptp)) {
ptp->read_ptp_tstmp = &read_ptp_tstmp_sec_nsec;
- hrtimer_init(&ptp->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- ptp->hrtimer.function = ptp_reset_thresh;
+ hrtimer_setup(&ptp->hrtimer, ptp_reset_thresh, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
} else {
ptp->read_ptp_tstmp = &read_ptp_tstmp_nsec;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index cd0d7b7774f1..a8025f0486c9 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -1393,8 +1393,6 @@ static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype)
if (blkaddr < 0)
return;
- if (blktype == BLKTYPE_NIX)
- rvu_nix_reset_mac(pfvf, pcifunc);
block = &hw->block[blkaddr];
@@ -1407,6 +1405,10 @@ static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype)
if (lf < 0) /* This should never happen */
continue;
+ if (blktype == BLKTYPE_NIX) {
+ rvu_nix_reset_mac(pfvf, pcifunc);
+ rvu_npc_clear_ucast_entry(rvu, pcifunc, lf);
+ }
/* Disable the LF */
rvu_write64(rvu, blkaddr, block->lfcfg_reg |
(lf << block->lfshift), 0x00ULL);
@@ -2031,6 +2033,9 @@ int rvu_mbox_handler_get_hw_cap(struct rvu *rvu, struct msg_req *req,
rsp->nix_shaping = hw->cap.nix_shaping;
rsp->npc_hash_extract = hw->cap.npc_hash_extract;
+ if (rvu->mcs_blk_cnt)
+ rsp->hw_caps = HW_CAP_MACSEC;
+
return 0;
}
@@ -2173,7 +2178,7 @@ static int rvu_process_mbox_msg(struct otx2_mbox *mbox, int devid,
if (rsp && err) \
rsp->hdr.rc = err; \
\
- trace_otx2_msg_process(mbox->pdev, _id, err); \
+ trace_otx2_msg_process(mbox->pdev, _id, err, req->pcifunc); \
return rsp ? err : -ENOMEM; \
}
MBOX_MESSAGES
@@ -2634,7 +2639,7 @@ static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), intr);
rvu_queue_work(&rvu->afvf_wq_info, 64, vfs, intr);
- vfs -= 64;
+ vfs = 64;
}
intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(0));
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index a383b5ef5b2d..48f66292ad5c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -30,6 +30,8 @@
#define PCI_SUBSYS_DEVID_CNF10K_A 0xBA00
#define PCI_SUBSYS_DEVID_CNF10K_B 0xBC00
#define PCI_SUBSYS_DEVID_CN10K_B 0xBD00
+#define PCI_SUBSYS_DEVID_CN20KA 0xC220
+#define PCI_SUBSYS_DEVID_CNF20KA 0xC320
/* PCI BAR nos */
#define PCI_AF_REG_BAR_NUM 0
@@ -967,8 +969,6 @@ void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
bool enable);
void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
int nixlf, u64 chan);
-void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
- bool enable);
void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
u64 chan);
void rvu_npc_enable_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
@@ -994,6 +994,8 @@ void rvu_npc_get_mcam_entry_alloc_info(struct rvu *rvu, u16 pcifunc,
void rvu_npc_get_mcam_counter_alloc_info(struct rvu *rvu, u16 pcifunc,
int blkaddr, int *alloc_cnt,
int *enable_cnt);
+void rvu_npc_clear_ucast_entry(struct rvu *rvu, int pcifunc, int nixlf);
+
bool is_npc_intf_tx(u8 intf);
bool is_npc_intf_rx(u8 intf);
bool is_npc_interface_valid(struct rvu *rvu, u8 intf);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index 992fa0b82e8d..d0331b0e0bfd 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -34,7 +34,7 @@ static struct _req_type __maybe_unused \
return NULL; \
req->hdr.sig = OTX2_MBOX_REQ_SIG; \
req->hdr.id = _id; \
- trace_otx2_msg_alloc(rvu->pdev, _id, sizeof(*req)); \
+ trace_otx2_msg_alloc(rvu->pdev, _id, sizeof(*req), 0); \
return req; \
}
@@ -272,6 +272,8 @@ static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pfid);
+ otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pfid);
+
mutex_unlock(&rvu->mbox_lock);
} while (pfmap);
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
index 7fa98aeb3663..4a3370a40dd8 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
@@ -13,19 +13,26 @@
/* RVU LMTST */
#define LMT_TBL_OP_READ 0
#define LMT_TBL_OP_WRITE 1
-#define LMT_MAP_TABLE_SIZE (128 * 1024)
#define LMT_MAPTBL_ENTRY_SIZE 16
+#define LMT_MAX_VFS 256
+
+#define LMT_MAP_ENTRY_ENA BIT_ULL(20)
+#define LMT_MAP_ENTRY_LINES GENMASK_ULL(18, 16)
/* Function to perform operations (read/write) on lmtst map table */
static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val,
int lmt_tbl_op)
{
void __iomem *lmt_map_base;
- u64 tbl_base;
+ u64 tbl_base, cfg;
+ int pfs, vfs;
tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE);
+ cfg = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CFG);
+ vfs = 1 << (cfg & 0xF);
+ pfs = 1 << ((cfg >> 4) & 0x7);
- lmt_map_base = ioremap_wc(tbl_base, LMT_MAP_TABLE_SIZE);
+ lmt_map_base = ioremap_wc(tbl_base, pfs * vfs * LMT_MAPTBL_ENTRY_SIZE);
if (!lmt_map_base) {
dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n");
return -ENOMEM;
@@ -35,6 +42,13 @@ static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val,
*val = readq(lmt_map_base + index);
} else {
writeq((*val), (lmt_map_base + index));
+
+ cfg = FIELD_PREP(LMT_MAP_ENTRY_ENA, 0x1);
+ /* 2048 LMTLINES */
+ cfg |= FIELD_PREP(LMT_MAP_ENTRY_LINES, 0x6);
+
+ writeq(cfg, (lmt_map_base + (index + 8)));
+
/* Flushing the AP interceptor cache to make APR_LMT_MAP_ENTRY_S
* changes effective. Write 1 for flush and read is being used as a
* barrier and sets up a data dependency. Write to 0 after a write
@@ -52,7 +66,7 @@ static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val,
#define LMT_MAP_TBL_W1_OFF 8
static u32 rvu_get_lmtst_tbl_index(struct rvu *rvu, u16 pcifunc)
{
- return ((rvu_get_pf(pcifunc) * rvu->hw->total_vfs) +
+ return ((rvu_get_pf(pcifunc) * LMT_MAX_VFS) +
(pcifunc & RVU_PFVF_FUNC_MASK)) * LMT_MAPTBL_ENTRY_SIZE;
}
@@ -69,7 +83,7 @@ static int rvu_get_lmtaddr(struct rvu *rvu, u16 pcifunc,
mutex_lock(&rvu->rsrc_lock);
rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_REQ, iova);
- pf = rvu_get_pf(pcifunc) & 0x1F;
+ pf = rvu_get_pf(pcifunc) & RVU_PFVF_PF_MASK;
val = BIT_ULL(63) | BIT_ULL(14) | BIT_ULL(13) | pf << 8 |
((pcifunc & RVU_PFVF_FUNC_MASK) & 0xFF);
rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TXN_REQ, val);
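The rvu_cn10k.c hunks above stop assuming a fixed 128 KB LMT map table: the mapping size is derived from the PF and VF counts encoded in APR_AF_LMT_CFG, and entry offsets are computed against LMT_MAX_VFS. A standalone sketch of that arithmetic with made-up register values (not driver code):

#include <stdio.h>

#define LMT_MAPTBL_ENTRY_SIZE 16
#define LMT_MAX_VFS           256

static unsigned long lmt_table_bytes(unsigned long long cfg)
{
    unsigned int vfs = 1u << (cfg & 0xF);
    unsigned int pfs = 1u << ((cfg >> 4) & 0x7);

    return (unsigned long)pfs * vfs * LMT_MAPTBL_ENTRY_SIZE;
}

static unsigned long lmt_entry_offset(unsigned int pf, unsigned int func)
{
    return ((unsigned long)pf * LMT_MAX_VFS + func) * LMT_MAPTBL_ENTRY_SIZE;
}

int main(void)
{
    /* cfg = 0x78 is a made-up value: 1 << 8 = 256 VFs per PF, 1 << 7 = 128 PFs */
    printf("map table: %lu bytes, PF2/VF3 entry at offset %lu\n",
           lmt_table_bytes(0x78), lmt_entry_offset(2, 3));
    return 0;
}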
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index 148144f5b61d..c827da626471 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -553,6 +553,7 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
u64 lmt_addr, val, tbl_base;
int pf, vf, num_vfs, hw_vfs;
void __iomem *lmt_map_base;
+ int apr_pfs, apr_vfs;
int buf_size = 10240;
size_t off = 0;
int index = 0;
@@ -568,8 +569,12 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
return -ENOMEM;
tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE);
+ val = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CFG);
+ apr_vfs = 1 << (val & 0xF);
+ apr_pfs = 1 << ((val >> 4) & 0x7);
- lmt_map_base = ioremap_wc(tbl_base, 128 * 1024);
+ lmt_map_base = ioremap_wc(tbl_base, apr_pfs * apr_vfs *
+ LMT_MAPTBL_ENTRY_SIZE);
if (!lmt_map_base) {
dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n");
kfree(buf);
@@ -591,7 +596,7 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
off += scnprintf(&buf[off], buf_size - 1 - off, "PF%d \t\t\t",
pf);
- index = pf * rvu->hw->total_vfs * LMT_MAPTBL_ENTRY_SIZE;
+ index = pf * apr_vfs * LMT_MAPTBL_ENTRY_SIZE;
off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%llx\t\t",
(tbl_base + index));
lmt_addr = readq(lmt_map_base + index);
@@ -604,7 +609,7 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
/* Reading num of VFs per PF */
rvu_get_pf_numvfs(rvu, pf, &num_vfs, &hw_vfs);
for (vf = 0; vf < num_vfs; vf++) {
- index = (pf * rvu->hw->total_vfs * 16) +
+ index = (pf * apr_vfs * LMT_MAPTBL_ENTRY_SIZE) +
((vf + 1) * LMT_MAPTBL_ENTRY_SIZE);
off += scnprintf(&buf[off], buf_size - 1 - off,
"PF%d:VF%d \t\t", pf, vf);
@@ -917,19 +922,18 @@ static void print_npa_qsize(struct seq_file *m, struct rvu_pfvf *pfvf)
/* The 'qsize' entry dumps current Aura/Pool context Qsize
* and each context's current enable/disable status in a bitmap.
*/
-static int rvu_dbg_qsize_display(struct seq_file *filp, void *unsused,
+static int rvu_dbg_qsize_display(struct seq_file *s, void *unused,
int blktype)
{
- void (*print_qsize)(struct seq_file *filp,
+ void (*print_qsize)(struct seq_file *s,
struct rvu_pfvf *pfvf) = NULL;
- struct dentry *current_dir;
struct rvu_pfvf *pfvf;
struct rvu *rvu;
int qsize_id;
u16 pcifunc;
int blkaddr;
- rvu = filp->private;
+ rvu = s->private;
switch (blktype) {
case BLKTYPE_NPA:
qsize_id = rvu->rvu_dbg.npa_qsize_id;
@@ -945,32 +949,28 @@ static int rvu_dbg_qsize_display(struct seq_file *filp, void *unsused,
return -EINVAL;
}
- if (blktype == BLKTYPE_NPA) {
+ if (blktype == BLKTYPE_NPA)
blkaddr = BLKADDR_NPA;
- } else {
- current_dir = filp->file->f_path.dentry->d_parent;
- blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
- BLKADDR_NIX1 : BLKADDR_NIX0);
- }
+ else
+ blkaddr = debugfs_get_aux_num(s->file);
if (!rvu_dbg_is_valid_lf(rvu, blkaddr, qsize_id, &pcifunc))
return -EINVAL;
pfvf = rvu_get_pfvf(rvu, pcifunc);
- print_qsize(filp, pfvf);
+ print_qsize(s, pfvf);
return 0;
}
-static ssize_t rvu_dbg_qsize_write(struct file *filp,
+static ssize_t rvu_dbg_qsize_write(struct file *file,
const char __user *buffer, size_t count,
loff_t *ppos, int blktype)
{
char *blk_string = (blktype == BLKTYPE_NPA) ? "npa" : "nix";
- struct seq_file *seqfile = filp->private_data;
+ struct seq_file *seqfile = file->private_data;
char *cmd_buf, *cmd_buf_tmp, *subtoken;
struct rvu *rvu = seqfile->private;
- struct dentry *current_dir;
int blkaddr;
u16 pcifunc;
int ret, lf;
@@ -996,13 +996,10 @@ static ssize_t rvu_dbg_qsize_write(struct file *filp,
goto qsize_write_done;
}
- if (blktype == BLKTYPE_NPA) {
+ if (blktype == BLKTYPE_NPA)
blkaddr = BLKADDR_NPA;
- } else {
- current_dir = filp->f_path.dentry->d_parent;
- blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
- BLKADDR_NIX1 : BLKADDR_NIX0);
- }
+ else
+ blkaddr = debugfs_get_aux_num(file);
if (!rvu_dbg_is_valid_lf(rvu, blkaddr, lf, &pcifunc)) {
ret = -EINVAL;
@@ -2704,8 +2701,8 @@ static void rvu_dbg_nix_init(struct rvu *rvu, int blkaddr)
&rvu_dbg_nix_ndc_tx_hits_miss_fops);
debugfs_create_file("ndc_rx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
&rvu_dbg_nix_ndc_rx_hits_miss_fops);
- debugfs_create_file("qsize", 0600, rvu->rvu_dbg.nix, rvu,
- &rvu_dbg_nix_qsize_fops);
+ debugfs_create_file_aux_num("qsize", 0600, rvu->rvu_dbg.nix, rvu,
+ blkaddr, &rvu_dbg_nix_qsize_fops);
debugfs_create_file("ingress_policer_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
&rvu_dbg_nix_band_prof_ctx_fops);
debugfs_create_file("ingress_policer_rsrc", 0600, rvu->rvu_dbg.nix, nix_hw,
@@ -2854,28 +2851,14 @@ static int cgx_print_stats(struct seq_file *s, int lmac_id)
return err;
}
-static int rvu_dbg_derive_lmacid(struct seq_file *filp, int *lmac_id)
+static int rvu_dbg_derive_lmacid(struct seq_file *s)
{
- struct dentry *current_dir;
- char *buf;
-
- current_dir = filp->file->f_path.dentry->d_parent;
- buf = strrchr(current_dir->d_name.name, 'c');
- if (!buf)
- return -EINVAL;
-
- return kstrtoint(buf + 1, 10, lmac_id);
+ return debugfs_get_aux_num(s->file);
}
-static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused)
+static int rvu_dbg_cgx_stat_display(struct seq_file *s, void *unused)
{
- int lmac_id, err;
-
- err = rvu_dbg_derive_lmacid(filp, &lmac_id);
- if (!err)
- return cgx_print_stats(filp, lmac_id);
-
- return err;
+ return cgx_print_stats(s, rvu_dbg_derive_lmacid(s));
}
RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL);
@@ -2933,15 +2916,9 @@ static int cgx_print_dmac_flt(struct seq_file *s, int lmac_id)
return 0;
}
-static int rvu_dbg_cgx_dmac_flt_display(struct seq_file *filp, void *unused)
+static int rvu_dbg_cgx_dmac_flt_display(struct seq_file *s, void *unused)
{
- int err, lmac_id;
-
- err = rvu_dbg_derive_lmacid(filp, &lmac_id);
- if (!err)
- return cgx_print_dmac_flt(filp, lmac_id);
-
- return err;
+ return cgx_print_dmac_flt(s, rvu_dbg_derive_lmacid(s));
}
RVU_DEBUG_SEQ_FOPS(cgx_dmac_flt, cgx_dmac_flt_display, NULL);
@@ -2980,10 +2957,10 @@ static void rvu_dbg_cgx_init(struct rvu *rvu)
rvu->rvu_dbg.lmac =
debugfs_create_dir(dname, rvu->rvu_dbg.cgx);
- debugfs_create_file("stats", 0600, rvu->rvu_dbg.lmac,
- cgx, &rvu_dbg_cgx_stat_fops);
- debugfs_create_file("mac_filter", 0600,
- rvu->rvu_dbg.lmac, cgx,
+ debugfs_create_file_aux_num("stats", 0600, rvu->rvu_dbg.lmac,
+ cgx, lmac_id, &rvu_dbg_cgx_stat_fops);
+ debugfs_create_file_aux_num("mac_filter", 0600,
+ rvu->rvu_dbg.lmac, cgx, lmac_id,
&rvu_dbg_cgx_dmac_flt_fops);
}
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
index dab4deca893f..27c3a2daaaa9 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
@@ -207,7 +207,7 @@ static void rvu_nix_unregister_interrupts(struct rvu *rvu)
rvu->irq_allocated[offs + NIX_AF_INT_VEC_RVU] = false;
}
- for (i = NIX_AF_INT_VEC_AF_ERR; i < NIX_AF_INT_VEC_CNT; i++)
+ for (i = NIX_AF_INT_VEC_GEN; i < NIX_AF_INT_VEC_CNT; i++)
if (rvu->irq_allocated[offs + i]) {
free_irq(pci_irq_vector(rvu->pdev, offs + i), rvu_dl);
rvu->irq_allocated[offs + i] = false;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 5d5a01dbbca1..613655fcd34f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -569,9 +569,17 @@ void rvu_nix_flr_free_bpids(struct rvu *rvu, u16 pcifunc)
mutex_unlock(&rvu->rsrc_lock);
}
-int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
- struct nix_bp_cfg_req *req,
- struct msg_rsp *rsp)
+static u16 nix_get_channel(u16 chan, bool cpt_link)
+{
+ /* CPT channel for a given link channel is always
+ * assumed to be BIT(11) set in link channel.
+ */
+ return cpt_link ? chan | BIT(11) : chan;
+}
+
+static int nix_bp_disable(struct rvu *rvu,
+ struct nix_bp_cfg_req *req,
+ struct msg_rsp *rsp, bool cpt_link)
{
u16 pcifunc = req->hdr.pcifunc;
int blkaddr, pf, type, err;
@@ -579,6 +587,7 @@ int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
struct rvu_pfvf *pfvf;
struct nix_hw *nix_hw;
struct nix_bp *bp;
+ u16 chan_v;
u64 cfg;
pf = rvu_get_pf(pcifunc);
@@ -589,6 +598,9 @@ int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
if (is_sdp_pfvf(pcifunc))
type = NIX_INTF_TYPE_SDP;
+ if (cpt_link && !rvu->hw->cpt_links)
+ return 0;
+
pfvf = rvu_get_pfvf(rvu, pcifunc);
err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
if (err)
@@ -597,8 +609,9 @@ int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
bp = &nix_hw->bp;
chan_base = pfvf->rx_chan_base + req->chan_base;
for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
- cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
- rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
+ chan_v = nix_get_channel(chan, cpt_link);
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v));
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v),
cfg & ~BIT_ULL(16));
if (type == NIX_INTF_TYPE_LBK) {
@@ -617,6 +630,20 @@ int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
return 0;
}
+int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
+ struct nix_bp_cfg_req *req,
+ struct msg_rsp *rsp)
+{
+ return nix_bp_disable(rvu, req, rsp, false);
+}
+
+int rvu_mbox_handler_nix_cpt_bp_disable(struct rvu *rvu,
+ struct nix_bp_cfg_req *req,
+ struct msg_rsp *rsp)
+{
+ return nix_bp_disable(rvu, req, rsp, true);
+}
+
static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
int type, int chan_id)
{
@@ -696,15 +723,17 @@ static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
return bpid;
}
-int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
- struct nix_bp_cfg_req *req,
- struct nix_bp_cfg_rsp *rsp)
+static int nix_bp_enable(struct rvu *rvu,
+ struct nix_bp_cfg_req *req,
+ struct nix_bp_cfg_rsp *rsp,
+ bool cpt_link)
{
int blkaddr, pf, type, chan_id = 0;
u16 pcifunc = req->hdr.pcifunc;
struct rvu_pfvf *pfvf;
u16 chan_base, chan;
s16 bpid, bpid_base;
+ u16 chan_v;
u64 cfg;
pf = rvu_get_pf(pcifunc);
@@ -717,6 +746,9 @@ int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
type != NIX_INTF_TYPE_SDP)
return 0;
+ if (cpt_link && !rvu->hw->cpt_links)
+ return 0;
+
pfvf = rvu_get_pfvf(rvu, pcifunc);
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
@@ -730,9 +762,11 @@ int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
return -EINVAL;
}
- cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
+ chan_v = nix_get_channel(chan, cpt_link);
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v));
cfg &= ~GENMASK_ULL(8, 0);
- rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v),
cfg | (bpid & GENMASK_ULL(8, 0)) | BIT_ULL(16));
chan_id++;
bpid = rvu_nix_get_bpid(rvu, req, type, chan_id);
@@ -750,6 +784,20 @@ int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
return 0;
}
+int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
+ struct nix_bp_cfg_req *req,
+ struct nix_bp_cfg_rsp *rsp)
+{
+ return nix_bp_enable(rvu, req, rsp, false);
+}
+
+int rvu_mbox_handler_nix_cpt_bp_enable(struct rvu *rvu,
+ struct nix_bp_cfg_req *req,
+ struct nix_bp_cfg_rsp *rsp)
+{
+ return nix_bp_enable(rvu, req, rsp, true);
+}
+
static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
u64 format, bool v4, u64 *fidx)
{
@@ -4672,6 +4720,9 @@ static void nix_link_config(struct rvu *rvu, int blkaddr,
rvu_get_lbk_link_max_frs(rvu, &lbk_max_frs);
rvu_get_lmac_link_max_frs(rvu, &lmac_max_frs);
+ /* Set SDP link credit */
+ rvu_write64(rvu, blkaddr, NIX_AF_SDP_LINK_CREDIT, SDP_LINK_CREDIT);
+
/* Set default min/max packet lengths allowed on NIX Rx links.
*
* With HW reset minlen value of 60byte, HW will treat ARP pkts
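nix_bp_enable() and nix_bp_disable() above share nix_get_channel(): when backpressure is configured for the CPT path, the register writes target the companion channel, which the patch assumes is the link channel with bit 11 set. A standalone sketch of that mapping (illustrative channel value):

#include <stdbool.h>
#include <stdio.h>

static unsigned short nix_channel(unsigned short chan, bool cpt_link)
{
    return cpt_link ? chan | (1u << 11) : chan;
}

int main(void)
{
    printf("chan 0x120 -> nix %#x, cpt %#x\n",
           nix_channel(0x120, false), nix_channel(0x120, true));
    return 0;
}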
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index 821fe242f821..da15bb451178 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -820,24 +820,6 @@ void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
}
-void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
- bool enable)
-{
- struct npc_mcam *mcam = &rvu->hw->mcam;
- int blkaddr, index;
-
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
- if (blkaddr < 0)
- return;
-
- /* Get 'pcifunc' of PF device */
- pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK;
-
- index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf,
- NIXLF_BCAST_ENTRY);
- npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
-}
-
void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
u64 chan)
{
@@ -1125,6 +1107,7 @@ void npc_enadis_default_mce_entry(struct rvu *rvu, u16 pcifunc,
static void npc_enadis_default_entries(struct rvu *rvu, u16 pcifunc,
int nixlf, bool enable)
{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
struct npc_mcam *mcam = &rvu->hw->mcam;
int index, blkaddr;
@@ -1133,9 +1116,12 @@ static void npc_enadis_default_entries(struct rvu *rvu, u16 pcifunc,
return;
/* Ucast MCAM match entry of this PF/VF */
- index = npc_get_nixlf_mcam_index(mcam, pcifunc,
- nixlf, NIXLF_UCAST_ENTRY);
- npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
+ if (npc_is_feature_supported(rvu, BIT_ULL(NPC_DMAC),
+ pfvf->nix_rx_intf)) {
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_UCAST_ENTRY);
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
+ }
/* Nothing to do for VFs, on platforms where pkt replication
* is not supported
@@ -3588,3 +3574,33 @@ int rvu_mbox_handler_npc_mcam_entry_stats(struct rvu *rvu,
return 0;
}
+
+void rvu_npc_clear_ucast_entry(struct rvu *rvu, int pcifunc, int nixlf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_npc_mcam_rule *rule;
+ int ucast_idx, blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ ucast_idx = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_UCAST_ENTRY);
+
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, ucast_idx, false);
+
+ npc_set_mcam_action(rvu, mcam, blkaddr, ucast_idx, 0);
+
+ npc_clear_mcam_entry(rvu, mcam, blkaddr, ucast_idx);
+
+ mutex_lock(&mcam->lock);
+ list_for_each_entry(rule, &mcam->mcam_rules, list) {
+ if (rule->entry == ucast_idx) {
+ list_del(&rule->list);
+ kfree(rule);
+ break;
+ }
+ }
+ mutex_unlock(&mcam->lock);
+}
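rvu_npc_clear_ucast_entry() above not only disables and clears the hardware MCAM entry but also unlinks and frees, under mcam->lock, any software rule still referencing that entry index, so later rule walks cannot find a stale object. A standalone sketch of that list cleanup, using a pthread mutex and illustrative structures (not the driver code):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct rule {
    int entry;
    struct rule *next;
};

static pthread_mutex_t rules_lock = PTHREAD_MUTEX_INITIALIZER;
static struct rule *rules;

static void add_rule(int entry)
{
    struct rule *r = malloc(sizeof(*r));

    if (!r)
        return;
    r->entry = entry;
    pthread_mutex_lock(&rules_lock);
    r->next = rules;
    rules = r;
    pthread_mutex_unlock(&rules_lock);
}

static void clear_entry(int entry)
{
    struct rule **pp, *r;

    /* ... disable and clear the hardware entry here ... */

    pthread_mutex_lock(&rules_lock);
    for (pp = &rules; (r = *pp) != NULL; pp = &r->next) {
        if (r->entry == entry) {
            *pp = r->next;      /* unlink the stale software rule */
            free(r);
            break;
        }
    }
    pthread_mutex_unlock(&rules_lock);
}

int main(void)
{
    add_rule(3);
    add_rule(7);
    clear_entry(3);
    printf("remaining head entry: %d\n", rules ? rules->entry : -1);
    return 0;
}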
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
index da69e454662a..1b765045aa63 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
@@ -1452,23 +1452,21 @@ process_flow:
* hence modify pcifunc accordingly.
*/
- /* AF installing for a PF/VF */
- if (!req->hdr.pcifunc)
+ if (!req->hdr.pcifunc) {
+ /* AF installing for a PF/VF */
target = req->vf;
-
- /* PF installing for its VF */
- if (!from_vf && req->vf && !from_rep_dev) {
+ } else if (!from_vf && req->vf && !from_rep_dev) {
+ /* PF installing for its VF */
target = (req->hdr.pcifunc & ~RVU_PFVF_FUNC_MASK) | req->vf;
pf_set_vfs_mac = req->default_rule &&
(req->features & BIT_ULL(NPC_DMAC));
- }
-
- /* Representor device installing for a representee */
- if (from_rep_dev && req->vf)
+ } else if (from_rep_dev && req->vf) {
+ /* Representor device installing for a representee */
target = req->vf;
- else
+ } else {
/* msg received from PF/VF */
target = req->hdr.pcifunc;
+ }
/* ignore chan_mask in case pf func is not AF, revisit later */
if (!is_pffunc_af(req->hdr.pcifunc))
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c
index 052ae5923e3a..32953cca108c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c
@@ -60,6 +60,8 @@ static int rvu_rep_up_notify(struct rvu *rvu, struct rep_event *event)
otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pf);
+ otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pf);
+
mutex_unlock(&rvu->mbox_lock);
return 0;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c
index 775fd4c35794..19e0d16b12f6 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c
@@ -11,3 +11,5 @@
EXPORT_TRACEPOINT_SYMBOL(otx2_msg_alloc);
EXPORT_TRACEPOINT_SYMBOL(otx2_msg_interrupt);
EXPORT_TRACEPOINT_SYMBOL(otx2_msg_process);
+EXPORT_TRACEPOINT_SYMBOL(otx2_msg_status);
+EXPORT_TRACEPOINT_SYMBOL(otx2_parse_dump);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h
index 5704520f9b02..4cd0fc4b0d20 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h
@@ -18,33 +18,42 @@
#include "mbox.h"
TRACE_EVENT(otx2_msg_alloc,
- TP_PROTO(const struct pci_dev *pdev, u16 id, u64 size),
- TP_ARGS(pdev, id, size),
+ TP_PROTO(const struct pci_dev *pdev, u16 id, u64 size, u16 pcifunc),
+ TP_ARGS(pdev, id, size, pcifunc),
TP_STRUCT__entry(__string(dev, pci_name(pdev))
__field(u16, id)
__field(u64, size)
+ __field(u16, pcifunc)
),
TP_fast_assign(__assign_str(dev);
__entry->id = id;
__entry->size = size;
+ __entry->pcifunc = pcifunc;
),
- TP_printk("[%s] msg:(%s) size:%lld\n", __get_str(dev),
- otx2_mbox_id2name(__entry->id), __entry->size)
+ TP_printk("[%s] msg:(%s) size:%lld pcifunc:0x%x\n", __get_str(dev),
+ otx2_mbox_id2name(__entry->id), __entry->size,
+ __entry->pcifunc)
);
TRACE_EVENT(otx2_msg_send,
- TP_PROTO(const struct pci_dev *pdev, u16 num_msgs, u64 msg_size),
- TP_ARGS(pdev, num_msgs, msg_size),
+ TP_PROTO(const struct pci_dev *pdev, u16 num_msgs, u64 msg_size,
+ u16 id, u16 pcifunc),
+ TP_ARGS(pdev, num_msgs, msg_size, id, pcifunc),
TP_STRUCT__entry(__string(dev, pci_name(pdev))
__field(u16, num_msgs)
__field(u64, msg_size)
+ __field(u16, id)
+ __field(u16, pcifunc)
),
TP_fast_assign(__assign_str(dev);
__entry->num_msgs = num_msgs;
__entry->msg_size = msg_size;
+ __entry->id = id;
+ __entry->pcifunc = pcifunc;
),
- TP_printk("[%s] sent %d msg(s) of size:%lld\n", __get_str(dev),
- __entry->num_msgs, __entry->msg_size)
+ TP_printk("[%s] sent %d msg(s) of size:%lld msg:(%s) pcifunc:0x%x\n",
+ __get_str(dev), __entry->num_msgs, __entry->msg_size,
+ otx2_mbox_id2name(__entry->id), __entry->pcifunc)
);
TRACE_EVENT(otx2_msg_check,
@@ -81,18 +90,73 @@ TRACE_EVENT(otx2_msg_interrupt,
);
TRACE_EVENT(otx2_msg_process,
- TP_PROTO(const struct pci_dev *pdev, u16 id, int err),
- TP_ARGS(pdev, id, err),
+ TP_PROTO(const struct pci_dev *pdev, u16 id, int err, u16 pcifunc),
+ TP_ARGS(pdev, id, err, pcifunc),
TP_STRUCT__entry(__string(dev, pci_name(pdev))
__field(u16, id)
__field(int, err)
+ __field(u16, pcifunc)
),
TP_fast_assign(__assign_str(dev);
__entry->id = id;
__entry->err = err;
+ __entry->pcifunc = pcifunc;
+ ),
+ TP_printk("[%s] msg:(%s) error:%d pcifunc:0x%x\n", __get_str(dev),
+ otx2_mbox_id2name(__entry->id),
+ __entry->err, __entry->pcifunc)
+);
+
+TRACE_EVENT(otx2_msg_wait_rsp,
+ TP_PROTO(const struct pci_dev *pdev),
+ TP_ARGS(pdev),
+ TP_STRUCT__entry(__string(dev, pci_name(pdev))
+ ),
+ TP_fast_assign(__assign_str(dev)
+ ),
+ TP_printk("[%s] timed out while waiting for response\n",
+ __get_str(dev))
+);
+
+TRACE_EVENT(otx2_msg_status,
+ TP_PROTO(const struct pci_dev *pdev, const char *msg, u16 num_msgs),
+ TP_ARGS(pdev, msg, num_msgs),
+ TP_STRUCT__entry(__string(dev, pci_name(pdev))
+ __string(str, msg)
+ __field(u16, num_msgs)
+ ),
+ TP_fast_assign(__assign_str(dev);
+ __assign_str(str);
+ __entry->num_msgs = num_msgs;
+ ),
+ TP_printk("[%s] %s num_msgs:%d\n", __get_str(dev),
+ __get_str(str), __entry->num_msgs)
+);
+
+TRACE_EVENT(otx2_parse_dump,
+ TP_PROTO(const struct pci_dev *pdev, char *msg, u64 *word),
+ TP_ARGS(pdev, msg, word),
+ TP_STRUCT__entry(__string(dev, pci_name(pdev))
+ __string(str, msg)
+ __field(u64, w0)
+ __field(u64, w1)
+ __field(u64, w2)
+ __field(u64, w3)
+ __field(u64, w4)
+ __field(u64, w5)
+ ),
+ TP_fast_assign(__assign_str(dev);
+ __assign_str(str);
+ __entry->w0 = *(word + 0);
+ __entry->w1 = *(word + 1);
+ __entry->w2 = *(word + 2);
+ __entry->w3 = *(word + 3);
+ __entry->w4 = *(word + 4);
+ __entry->w5 = *(word + 5);
),
- TP_printk("[%s] msg:(%s) error:%d\n", __get_str(dev),
- otx2_mbox_id2name(__entry->id), __entry->err)
+ TP_printk("[%s] nix parse %s W0:%#llx W1:%#llx W2:%#llx W3:%#llx W4:%#llx W5:%#llx\n",
+ __get_str(dev), __get_str(str), __entry->w0, __entry->w1, __entry->w2,
+ __entry->w3, __entry->w4, __entry->w5)
);
#endif /* __RVU_TRACE_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
index dbc971266865..69e0778f9ac1 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
@@ -9,11 +9,12 @@ obj-$(CONFIG_RVU_ESWITCH) += rvu_rep.o
rvu_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \
otx2_flows.o otx2_tc.o cn10k.o otx2_dmac_flt.o \
- otx2_devlink.o qos_sq.o qos.o
+ otx2_devlink.o qos_sq.o qos.o otx2_xsk.o
rvu_nicvf-y := otx2_vf.o
rvu_rep-y := rep.o
rvu_nicpf-$(CONFIG_DCB) += otx2_dcbnl.o
rvu_nicpf-$(CONFIG_MACSEC) += cn10k_macsec.o
+rvu_nicpf-$(CONFIG_XFRM_OFFLOAD) += cn10k_ipsec.o
ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
index a15cc86635d6..7f6a435ac680 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
@@ -112,9 +112,12 @@ int cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
struct otx2_nic *pfvf = dev;
int cnt = cq->pool_ptrs;
u64 ptrs[NPA_MAX_BURST];
+ struct otx2_pool *pool;
dma_addr_t bufptr;
int num_ptrs = 1;
+ pool = &pfvf->qset.pool[cq->cq_idx];
+
/* Refill pool with new buffers */
while (cq->pool_ptrs) {
if (otx2_alloc_buffer(pfvf, cq, &bufptr)) {
@@ -124,7 +127,9 @@ int cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
break;
}
cq->pool_ptrs--;
- ptrs[num_ptrs] = (u64)bufptr + OTX2_HEAD_ROOM;
+ ptrs[num_ptrs] = pool->xsk_pool ?
+ (u64)bufptr : (u64)bufptr + OTX2_HEAD_ROOM;
+
num_ptrs++;
if (num_ptrs == NPA_MAX_BURST || cq->pool_ptrs == 0) {
__cn10k_aura_freeptr(pfvf, cq->cq_idx, ptrs,
@@ -352,9 +357,12 @@ int cn10k_free_matchall_ipolicer(struct otx2_nic *pfvf)
mutex_lock(&pfvf->mbox.lock);
/* Remove RQ's policer mapping */
- for (qidx = 0; qidx < hw->rx_queues; qidx++)
- cn10k_map_unmap_rq_policer(pfvf, qidx,
- hw->matchall_ipolicer, false);
+ for (qidx = 0; qidx < hw->rx_queues; qidx++) {
+ rc = cn10k_map_unmap_rq_policer(pfvf, qidx, hw->matchall_ipolicer, false);
+ if (rc)
+ dev_warn(pfvf->dev, "Failed to unmap RQ %d's policer (error %d).",
+ qidx, rc);
+ }
rc = cn10k_free_leaf_profile(pfvf, hw->matchall_ipolicer);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
new file mode 100644
index 000000000000..a6500e3673f2
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
@@ -0,0 +1,1041 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell IPSEC offload driver
+ *
+ * Copyright (C) 2024 Marvell.
+ */
+
+#include <net/xfrm.h>
+#include <linux/netdevice.h>
+#include <linux/bitfield.h>
+#include <crypto/aead.h>
+#include <crypto/gcm.h>
+
+#include "otx2_common.h"
+#include "otx2_struct.h"
+#include "cn10k_ipsec.h"
+
+static bool is_dev_support_ipsec_offload(struct pci_dev *pdev)
+{
+ return is_dev_cn10ka_b0(pdev) || is_dev_cn10kb(pdev);
+}
+
+static bool cn10k_cpt_device_set_inuse(struct otx2_nic *pf)
+{
+ enum cn10k_cpt_hw_state_e state;
+
+ while (true) {
+ state = atomic_cmpxchg(&pf->ipsec.cpt_state,
+ CN10K_CPT_HW_AVAILABLE,
+ CN10K_CPT_HW_IN_USE);
+ if (state == CN10K_CPT_HW_AVAILABLE)
+ return true;
+ if (state == CN10K_CPT_HW_UNAVAILABLE)
+ return false;
+
+ mdelay(1);
+ }
+}
+
+static void cn10k_cpt_device_set_available(struct otx2_nic *pf)
+{
+ atomic_set(&pf->ipsec.cpt_state, CN10K_CPT_HW_AVAILABLE);
+}
+
+static void cn10k_cpt_device_set_unavailable(struct otx2_nic *pf)
+{
+ atomic_set(&pf->ipsec.cpt_state, CN10K_CPT_HW_UNAVAILABLE);
+}
+
+static int cn10k_outb_cptlf_attach(struct otx2_nic *pf)
+{
+ struct rsrc_attach *attach;
+ int ret = -ENOMEM;
+
+ mutex_lock(&pf->mbox.lock);
+ /* Get memory to put this msg */
+ attach = otx2_mbox_alloc_msg_attach_resources(&pf->mbox);
+ if (!attach)
+ goto unlock;
+
+ attach->cptlfs = true;
+ attach->modify = true;
+
+ /* Send attach request to AF */
+ ret = otx2_sync_mbox_msg(&pf->mbox);
+
+unlock:
+ mutex_unlock(&pf->mbox.lock);
+ return ret;
+}
+
+static int cn10k_outb_cptlf_detach(struct otx2_nic *pf)
+{
+ struct rsrc_detach *detach;
+ int ret = -ENOMEM;
+
+ mutex_lock(&pf->mbox.lock);
+ detach = otx2_mbox_alloc_msg_detach_resources(&pf->mbox);
+ if (!detach)
+ goto unlock;
+
+ detach->partial = true;
+ detach->cptlfs = true;
+
+ /* Send detach request to AF */
+ ret = otx2_sync_mbox_msg(&pf->mbox);
+
+unlock:
+ mutex_unlock(&pf->mbox.lock);
+ return ret;
+}
+
+static int cn10k_outb_cptlf_alloc(struct otx2_nic *pf)
+{
+ struct cpt_lf_alloc_req_msg *req;
+ int ret = -ENOMEM;
+
+ mutex_lock(&pf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cpt_lf_alloc(&pf->mbox);
+ if (!req)
+ goto unlock;
+
+ /* PF function */
+ req->nix_pf_func = pf->pcifunc;
+ /* Enable SE-IE Engine Group */
+ req->eng_grpmsk = 1 << CN10K_DEF_CPT_IPSEC_EGRP;
+
+ ret = otx2_sync_mbox_msg(&pf->mbox);
+
+unlock:
+ mutex_unlock(&pf->mbox.lock);
+ return ret;
+}
+
+static void cn10k_outb_cptlf_free(struct otx2_nic *pf)
+{
+ mutex_lock(&pf->mbox.lock);
+ otx2_mbox_alloc_msg_cpt_lf_free(&pf->mbox);
+ otx2_sync_mbox_msg(&pf->mbox);
+ mutex_unlock(&pf->mbox.lock);
+}
+
+static int cn10k_outb_cptlf_config(struct otx2_nic *pf)
+{
+ struct cpt_inline_ipsec_cfg_msg *req;
+ int ret = -ENOMEM;
+
+ mutex_lock(&pf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cpt_inline_ipsec_cfg(&pf->mbox);
+ if (!req)
+ goto unlock;
+
+ req->dir = CPT_INLINE_OUTBOUND;
+ req->enable = 1;
+ req->nix_pf_func = pf->pcifunc;
+ ret = otx2_sync_mbox_msg(&pf->mbox);
+unlock:
+ mutex_unlock(&pf->mbox.lock);
+ return ret;
+}
+
+static void cn10k_outb_cptlf_iq_enable(struct otx2_nic *pf)
+{
+ u64 reg_val;
+
+ /* Set Execution Enable of instruction queue */
+ reg_val = otx2_read64(pf, CN10K_CPT_LF_INPROG);
+ reg_val |= BIT_ULL(16);
+ otx2_write64(pf, CN10K_CPT_LF_INPROG, reg_val);
+
+ /* Set iqueue's enqueuing */
+ reg_val = otx2_read64(pf, CN10K_CPT_LF_CTL);
+ reg_val |= BIT_ULL(0);
+ otx2_write64(pf, CN10K_CPT_LF_CTL, reg_val);
+}
+
+static void cn10k_outb_cptlf_iq_disable(struct otx2_nic *pf)
+{
+ u32 inflight, grb_cnt, gwb_cnt;
+ u32 nq_ptr, dq_ptr;
+ int timeout = 20;
+ u64 reg_val;
+ int cnt;
+
+ /* Disable instructions enqueuing */
+ otx2_write64(pf, CN10K_CPT_LF_CTL, 0ull);
+
+ /* Wait for the instruction queue to drain, i.e. until the
+ * CPT_LF_INPROG.INFLIGHT count reads zero.
+ */
+ do {
+ reg_val = otx2_read64(pf, CN10K_CPT_LF_INPROG);
+ inflight = FIELD_GET(CPT_LF_INPROG_INFLIGHT, reg_val);
+ if (!inflight)
+ break;
+
+ usleep_range(10000, 20000);
+ if (timeout-- < 0) {
+ netdev_err(pf->netdev, "Timeout to cleanup CPT IQ\n");
+ break;
+ }
+ } while (1);
+
+ /* Disable executions in the LF's queue,
+ * the queue should be empty at this point
+ */
+ reg_val &= ~BIT_ULL(16);
+ otx2_write64(pf, CN10K_CPT_LF_INPROG, reg_val);
+
+ /* Wait for the queue's enqueue (NQ) and dequeue (DQ) pointers to match */
+ cnt = 0;
+ do {
+ reg_val = otx2_read64(pf, CN10K_CPT_LF_INPROG);
+ if (reg_val & BIT_ULL(31))
+ cnt = 0;
+ else
+ cnt++;
+ reg_val = otx2_read64(pf, CN10K_CPT_LF_Q_GRP_PTR);
+ nq_ptr = FIELD_GET(CPT_LF_Q_GRP_PTR_NQ_PTR, reg_val);
+ dq_ptr = FIELD_GET(CPT_LF_Q_GRP_PTR_DQ_PTR, reg_val);
+ } while ((cnt < 10) && (nq_ptr != dq_ptr));
+
+ cnt = 0;
+ do {
+ reg_val = otx2_read64(pf, CN10K_CPT_LF_INPROG);
+ inflight = FIELD_GET(CPT_LF_INPROG_INFLIGHT, reg_val);
+ grb_cnt = FIELD_GET(CPT_LF_INPROG_GRB_CNT, reg_val);
+ gwb_cnt = FIELD_GET(CPT_LF_INPROG_GWB_CNT, reg_val);
+ if (inflight == 0 && gwb_cnt < 40 &&
+ (grb_cnt == 0 || grb_cnt == 40))
+ cnt++;
+ else
+ cnt = 0;
+ } while (cnt < 10);
+}
+
+/* Allocate memory for CPT outbound Instruction queue.
+ * Instruction queue memory format is:
+ * -----------------------------
+ * | Instruction Group memory |
+ * | (CPT_LF_Q_SIZE[SIZE_DIV40] |
+ * | x 16 Bytes) |
+ * | |
+ * ----------------------------- <-- CPT_LF_Q_BASE[ADDR]
+ * | Flow Control (128 Bytes) |
+ * | |
+ * -----------------------------
+ * | Instruction Memory |
+ * | (CPT_LF_Q_SIZE[SIZE_DIV40] |
+ * | x 40 x 64 Bytes) |
+ * | |
+ * -----------------------------
+ */
+static int cn10k_outb_cptlf_iq_alloc(struct otx2_nic *pf)
+{
+ struct cn10k_cpt_inst_queue *iq = &pf->ipsec.iq;
+
+ iq->size = CN10K_CPT_INST_QLEN_BYTES + CN10K_CPT_Q_FC_LEN +
+ CN10K_CPT_INST_GRP_QLEN_BYTES + OTX2_ALIGN;
+
+ iq->real_vaddr = dma_alloc_coherent(pf->dev, iq->size,
+ &iq->real_dma_addr, GFP_KERNEL);
+ if (!iq->real_vaddr)
+ return -ENOMEM;
+
+ /* iq->vaddr/dma_addr points to Flow Control location */
+ iq->vaddr = iq->real_vaddr + CN10K_CPT_INST_GRP_QLEN_BYTES;
+ iq->dma_addr = iq->real_dma_addr + CN10K_CPT_INST_GRP_QLEN_BYTES;
+
+ /* Align pointers */
+ iq->vaddr = PTR_ALIGN(iq->vaddr, OTX2_ALIGN);
+ iq->dma_addr = PTR_ALIGN(iq->dma_addr, OTX2_ALIGN);
+ return 0;
+}
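+
+/* Rough sizing sketch (illustrative, not part of this patch; assumes
+ * OTX2_ALIGN is 128): with CN10K_CPT_INST_QLEN = 8200, SIZE_DIV40 is 205,
+ * so the instruction memory is 205 * 40 * 64 + 320 * 64 = 545280 bytes,
+ * the group queue is (205 + 8) * 16 = 3408 bytes and the flow-control
+ * block is 128 bytes, giving iq->size = 545280 + 128 + 3408 + 128 =
+ * 548944 bytes per LF.
+ */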
+
+static void cn10k_outb_cptlf_iq_free(struct otx2_nic *pf)
+{
+ struct cn10k_cpt_inst_queue *iq = &pf->ipsec.iq;
+
+ if (iq->real_vaddr)
+ dma_free_coherent(pf->dev, iq->size, iq->real_vaddr,
+ iq->real_dma_addr);
+
+ iq->real_vaddr = NULL;
+ iq->vaddr = NULL;
+}
+
+static int cn10k_outb_cptlf_iq_init(struct otx2_nic *pf)
+{
+ u64 reg_val;
+ int ret;
+
+ /* Allocate Memory for CPT IQ */
+ ret = cn10k_outb_cptlf_iq_alloc(pf);
+ if (ret)
+ return ret;
+
+ /* Disable IQ */
+ cn10k_outb_cptlf_iq_disable(pf);
+
+ /* Set IQ base address */
+ otx2_write64(pf, CN10K_CPT_LF_Q_BASE, pf->ipsec.iq.dma_addr);
+
+ /* Set IQ size */
+ reg_val = FIELD_PREP(CPT_LF_Q_SIZE_DIV40, CN10K_CPT_SIZE_DIV40 +
+ CN10K_CPT_EXTRA_SIZE_DIV40);
+ otx2_write64(pf, CN10K_CPT_LF_Q_SIZE, reg_val);
+
+ return 0;
+}
+
+static int cn10k_outb_cptlf_init(struct otx2_nic *pf)
+{
+ int ret;
+
+ /* Initialize CPTLF Instruction Queue (IQ) */
+ ret = cn10k_outb_cptlf_iq_init(pf);
+ if (ret)
+ return ret;
+
+ /* Configure CPTLF for outbound ipsec offload */
+ ret = cn10k_outb_cptlf_config(pf);
+ if (ret)
+ goto iq_clean;
+
+ /* Enable CPTLF IQ */
+ cn10k_outb_cptlf_iq_enable(pf);
+ return 0;
+iq_clean:
+ cn10k_outb_cptlf_iq_free(pf);
+ return ret;
+}
+
+static int cn10k_outb_cpt_init(struct net_device *netdev)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ int ret;
+
+ /* Attach a CPT LF for outbound ipsec offload */
+ ret = cn10k_outb_cptlf_attach(pf);
+ if (ret)
+ return ret;
+
+ /* Allocate a CPT LF for outbound ipsec offload */
+ ret = cn10k_outb_cptlf_alloc(pf);
+ if (ret)
+ goto detach;
+
+ /* Initialize the CPTLF for outbound ipsec offload */
+ ret = cn10k_outb_cptlf_init(pf);
+ if (ret)
+ goto lf_free;
+
+ pf->ipsec.io_addr = (__force u64)otx2_get_regaddr(pf,
+ CN10K_CPT_LF_NQX(0));
+
+ /* Set ipsec offload enabled for this device */
+ pf->flags |= OTX2_FLAG_IPSEC_OFFLOAD_ENABLED;
+
+ cn10k_cpt_device_set_available(pf);
+ return 0;
+
+lf_free:
+ cn10k_outb_cptlf_free(pf);
+detach:
+ cn10k_outb_cptlf_detach(pf);
+ return ret;
+}
+
+static int cn10k_outb_cpt_clean(struct otx2_nic *pf)
+{
+ int ret;
+
+ if (!cn10k_cpt_device_set_inuse(pf)) {
+ netdev_err(pf->netdev, "CPT LF device unavailable\n");
+ return -ENODEV;
+ }
+
+ /* Set ipsec offload disabled for this device */
+ pf->flags &= ~OTX2_FLAG_IPSEC_OFFLOAD_ENABLED;
+
+ /* Disable CPTLF Instruction Queue (IQ) */
+ cn10k_outb_cptlf_iq_disable(pf);
+
+ /* Set IQ base address and size to 0 */
+ otx2_write64(pf, CN10K_CPT_LF_Q_BASE, 0);
+ otx2_write64(pf, CN10K_CPT_LF_Q_SIZE, 0);
+
+ /* Free CPTLF IQ */
+ cn10k_outb_cptlf_iq_free(pf);
+
+ /* Free and detach CPT LF */
+ cn10k_outb_cptlf_free(pf);
+ ret = cn10k_outb_cptlf_detach(pf);
+ if (ret)
+ netdev_err(pf->netdev, "Failed to detach CPT LF\n");
+
+ cn10k_cpt_device_set_unavailable(pf);
+ return ret;
+}
+
+static void cn10k_cpt_inst_flush(struct otx2_nic *pf, struct cpt_inst_s *inst,
+ u64 size)
+{
+ struct otx2_lmt_info *lmt_info;
+ u64 val = 0, tar_addr = 0;
+
+ lmt_info = per_cpu_ptr(pf->hw.lmt_info, smp_processor_id());
+ /* FIXME: val[0:10] LMT_ID.
+ * [12:15] no of LMTST - 1 in the burst.
+ * [19:63] data size of each LMTST in the burst except first.
+ */
+ val = (lmt_info->lmt_id & 0x7FF);
+ /* Target address for LMTST flush tells HW how many 128bit
+ * words are present.
+ * tar_addr[6:4] size of first LMTST - 1 in units of 128b.
+ */
+ tar_addr |= pf->ipsec.io_addr | (((size / 16) - 1) & 0x7) << 4;
+ dma_wmb();
+ memcpy((u64 *)lmt_info->lmt_addr, inst, size);
+ cn10k_lmt_flush(val, tar_addr);
+}
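+
+/* Illustrative encoding for the single-LMTST case used above (a sketch,
+ * not from this patch): with size = sizeof(struct cpt_inst_s) = 64,
+ * (size / 16) - 1 = 3, so tar_addr[6:4] = 3 (four 128-bit words), and
+ * val carries only the LMT_ID since the burst count/size fields stay 0.
+ */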
+
+static int cn10k_wait_for_cpt_response(struct otx2_nic *pf,
+ struct cpt_res_s *res)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(100);
+ u64 *completion_ptr = (u64 *)res;
+
+ do {
+ if (time_after(jiffies, timeout)) {
+ netdev_err(pf->netdev, "CPT response timeout\n");
+ return -EBUSY;
+ }
+ } while ((READ_ONCE(*completion_ptr) & CN10K_CPT_COMP_E_MASK) ==
+ CN10K_CPT_COMP_E_NOTDONE);
+
+ if (!(res->compcode == CN10K_CPT_COMP_E_GOOD ||
+ res->compcode == CN10K_CPT_COMP_E_WARN) || res->uc_compcode) {
+ netdev_err(pf->netdev, "compcode=%x doneint=%x\n",
+ res->compcode, res->doneint);
+ netdev_err(pf->netdev, "uc_compcode=%x uc_info=%llx esn=%llx\n",
+ res->uc_compcode, (u64)res->uc_info, res->esn);
+ }
+ return 0;
+}
+
+static int cn10k_outb_write_sa(struct otx2_nic *pf, struct qmem *sa_info)
+{
+ dma_addr_t res_iova, dptr_iova, sa_iova;
+ struct cn10k_tx_sa_s *sa_dptr;
+ struct cpt_inst_s inst = {};
+ struct cpt_res_s *res;
+ u32 sa_size, off;
+ u64 *sptr, *dptr;
+ u64 reg_val;
+ int ret;
+
+ sa_iova = sa_info->iova;
+ if (!sa_iova)
+ return -EINVAL;
+
+ res = dma_alloc_coherent(pf->dev, sizeof(struct cpt_res_s),
+ &res_iova, GFP_ATOMIC);
+ if (!res)
+ return -ENOMEM;
+
+ sa_size = sizeof(struct cn10k_tx_sa_s);
+ sa_dptr = dma_alloc_coherent(pf->dev, sa_size, &dptr_iova, GFP_ATOMIC);
+ if (!sa_dptr) {
+ dma_free_coherent(pf->dev, sizeof(struct cpt_res_s), res,
+ res_iova);
+ return -ENOMEM;
+ }
+
+ sptr = (__force u64 *)sa_info->base;
+ dptr = (__force u64 *)sa_dptr;
+ for (off = 0; off < (sa_size / 8); off++)
+ *(dptr + off) = (__force u64)cpu_to_be64(*(sptr + off));
+
+ res->compcode = CN10K_CPT_COMP_E_NOTDONE;
+ inst.res_addr = res_iova;
+ inst.dptr = (u64)dptr_iova;
+ inst.param2 = sa_size >> 3;
+ inst.dlen = sa_size;
+ inst.opcode_major = CN10K_IPSEC_MAJOR_OP_WRITE_SA;
+ inst.opcode_minor = CN10K_IPSEC_MINOR_OP_WRITE_SA;
+ inst.cptr = sa_iova;
+ inst.ctx_val = 1;
+ inst.egrp = CN10K_DEF_CPT_IPSEC_EGRP;
+
+ /* Check if CPT-LF available */
+ if (!cn10k_cpt_device_set_inuse(pf)) {
+ ret = -ENODEV;
+ goto free_mem;
+ }
+
+ cn10k_cpt_inst_flush(pf, &inst, sizeof(struct cpt_inst_s));
+ dma_wmb();
+ ret = cn10k_wait_for_cpt_response(pf, res);
+ if (ret)
+ goto set_available;
+
+ /* Trigger CTX flush to write dirty data back to DRAM */
+ reg_val = FIELD_PREP(CPT_LF_CTX_FLUSH, sa_iova >> 7);
+ otx2_write64(pf, CN10K_CPT_LF_CTX_FLUSH, reg_val);
+
+set_available:
+ cn10k_cpt_device_set_available(pf);
+free_mem:
+ dma_free_coherent(pf->dev, sa_size, sa_dptr, dptr_iova);
+ dma_free_coherent(pf->dev, sizeof(struct cpt_res_s), res, res_iova);
+ return ret;
+}
+
+static int cn10k_ipsec_get_hw_ctx_offset(void)
+{
+ /* Offset of the hardware context within the SA, in 8-byte words */
+ return (offsetof(struct cn10k_tx_sa_s, hw_ctx) / sizeof(u64)) & 0x7F;
+}
+
+static int cn10k_ipsec_get_ctx_push_size(void)
+{
+ /* Context push size is rounded up to a multiple of 8 bytes */
+ return (roundup(offsetof(struct cn10k_tx_sa_s, hw_ctx), 8) / 8) & 0x7F;
+}
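+
+/* Worked example (derived from the cn10k_tx_sa_s layout in cn10k_ipsec.h,
+ * not stated in the patch): hw_ctx starts at word W31, i.e. byte offset
+ * 248, so cn10k_ipsec_get_hw_ctx_offset() returns 31 and, since 248 is
+ * already a multiple of 8, cn10k_ipsec_get_ctx_push_size() returns 31 too.
+ */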
+
+static int cn10k_ipsec_get_aes_key_len(int key_len)
+{
+ /* key_len is aes key length in bytes */
+ switch (key_len) {
+ case 16:
+ return CN10K_IPSEC_SA_AES_KEY_LEN_128;
+ case 24:
+ return CN10K_IPSEC_SA_AES_KEY_LEN_192;
+ default:
+ return CN10K_IPSEC_SA_AES_KEY_LEN_256;
+ }
+}
+
+static void cn10k_outb_prepare_sa(struct xfrm_state *x,
+ struct cn10k_tx_sa_s *sa_entry)
+{
+ int key_len = (x->aead->alg_key_len + 7) / 8;
+ struct net_device *netdev = x->xso.dev;
+ u8 *key = x->aead->alg_key;
+ struct otx2_nic *pf;
+ u32 *tmp_salt;
+ u64 *tmp_key;
+ int idx;
+
+ memset(sa_entry, 0, sizeof(struct cn10k_tx_sa_s));
+
+ /* Context size, in units of 128 bytes (rounded up) */
+ pf = netdev_priv(netdev);
+ sa_entry->ctx_size = (pf->ipsec.sa_size / OTX2_ALIGN) & 0xF;
+ sa_entry->hw_ctx_off = cn10k_ipsec_get_hw_ctx_offset();
+ sa_entry->ctx_push_size = cn10k_ipsec_get_ctx_push_size();
+
+ /* Ucode to skip two words of CPT_CTX_HW_S */
+ sa_entry->ctx_hdr_size = 1;
+
+ /* Allow Atomic operation (AOP) */
+ sa_entry->aop_valid = 1;
+
+ /* Outbound ESP in TRANSPORT/TUNNEL mode with AES-GCM encryption */
+ sa_entry->sa_dir = CN10K_IPSEC_SA_DIR_OUTB;
+ sa_entry->ipsec_protocol = CN10K_IPSEC_SA_IPSEC_PROTO_ESP;
+ sa_entry->enc_type = CN10K_IPSEC_SA_ENCAP_TYPE_AES_GCM;
+ sa_entry->iv_src = CN10K_IPSEC_SA_IV_SRC_PACKET;
+ if (x->props.mode == XFRM_MODE_TUNNEL)
+ sa_entry->ipsec_mode = CN10K_IPSEC_SA_IPSEC_MODE_TUNNEL;
+ else
+ sa_entry->ipsec_mode = CN10K_IPSEC_SA_IPSEC_MODE_TRANSPORT;
+
+ /* Last 4 bytes are salt */
+ key_len -= 4;
+ sa_entry->aes_key_len = cn10k_ipsec_get_aes_key_len(key_len);
+ memcpy(sa_entry->cipher_key, key, key_len);
+ tmp_key = (u64 *)sa_entry->cipher_key;
+
+ for (idx = 0; idx < key_len / 8; idx++)
+ tmp_key[idx] = (__force u64)cpu_to_be64(tmp_key[idx]);
+
+ memcpy(&sa_entry->iv_gcm_salt, key + key_len, 4);
+ tmp_salt = (u32 *)&sa_entry->iv_gcm_salt;
+ *tmp_salt = (__force u32)cpu_to_be32(*tmp_salt);
+
+ /* Write SA context data to memory before enabling */
+ wmb();
+
+ /* Enable SA */
+ sa_entry->sa_valid = 1;
+}
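+
+/* Example key split (illustrative, not from this patch): for
+ * rfc4106(gcm(aes)) with alg_key_len = 160 bits, key_len starts at
+ * 20 bytes; the last 4 bytes become the GCM salt, leaving a 16-byte
+ * AES-128 cipher key, so aes_key_len is CN10K_IPSEC_SA_AES_KEY_LEN_128.
+ */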
+
+static int cn10k_ipsec_validate_state(struct xfrm_state *x,
+ struct netlink_ext_ack *extack)
+{
+ if (x->props.aalgo != SADB_AALG_NONE) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload authenticated xfrm states");
+ return -EINVAL;
+ }
+ if (x->props.ealgo != SADB_X_EALG_AES_GCM_ICV16) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only AES-GCM-ICV16 xfrm state may be offloaded");
+ return -EINVAL;
+ }
+ if (x->props.calgo != SADB_X_CALG_NONE) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload compressed xfrm states");
+ return -EINVAL;
+ }
+ if (x->props.flags & XFRM_STATE_ESN) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload ESN xfrm states");
+ return -EINVAL;
+ }
+ if (x->props.family != AF_INET && x->props.family != AF_INET6) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only IPv4/v6 xfrm states may be offloaded");
+ return -EINVAL;
+ }
+ if (x->xso.type != XFRM_DEV_OFFLOAD_CRYPTO) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload other than crypto-mode");
+ return -EINVAL;
+ }
+ if (x->props.mode != XFRM_MODE_TRANSPORT &&
+ x->props.mode != XFRM_MODE_TUNNEL) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only tunnel/transport xfrm states may be offloaded");
+ return -EINVAL;
+ }
+ if (x->id.proto != IPPROTO_ESP) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only ESP xfrm state may be offloaded");
+ return -EINVAL;
+ }
+ if (x->encap) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Encapsulated xfrm state may not be offloaded");
+ return -EINVAL;
+ }
+ if (!x->aead) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload xfrm states without aead");
+ return -EINVAL;
+ }
+
+ if (x->aead->alg_icv_len != 128) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload xfrm states with AEAD ICV length other than 128bit");
+ return -EINVAL;
+ }
+ if (x->aead->alg_key_len != 128 + 32 &&
+ x->aead->alg_key_len != 192 + 32 &&
+ x->aead->alg_key_len != 256 + 32) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload xfrm states with AEAD key length other than 128/192/256bit");
+ return -EINVAL;
+ }
+ if (x->tfcpad) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload xfrm states with tfc padding");
+ return -EINVAL;
+ }
+ if (!x->geniv) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload xfrm states without geniv");
+ return -EINVAL;
+ }
+ if (strcmp(x->geniv, "seqiv")) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload xfrm states with geniv other than seqiv");
+ return -EINVAL;
+ }
+ return 0;
+}
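+
+/* An xfrm state that passes the checks above could look like the
+ * following (illustrative iproute2 invocation, not part of this patch;
+ * addresses, SPI and key are placeholders):
+ *
+ *   ip xfrm state add src 10.0.0.1 dst 10.0.0.2 proto esp spi 0x100 \
+ *      mode transport aead 'rfc4106(gcm(aes))' 0x<20-byte-hex-key> 128 \
+ *      offload dev eth0 dir out
+ */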
+
+static int cn10k_ipsec_inb_add_state(struct xfrm_state *x,
+ struct netlink_ext_ack *extack)
+{
+ NL_SET_ERR_MSG_MOD(extack, "xfrm inbound offload not supported");
+ return -EOPNOTSUPP;
+}
+
+static int cn10k_ipsec_outb_add_state(struct net_device *dev,
+ struct xfrm_state *x,
+ struct netlink_ext_ack *extack)
+{
+ struct cn10k_tx_sa_s *sa_entry;
+ struct qmem *sa_info;
+ struct otx2_nic *pf;
+ int err;
+
+ err = cn10k_ipsec_validate_state(x, extack);
+ if (err)
+ return err;
+
+ pf = netdev_priv(dev);
+
+ err = qmem_alloc(pf->dev, &sa_info, pf->ipsec.sa_size, OTX2_ALIGN);
+ if (err)
+ return err;
+
+ sa_entry = (struct cn10k_tx_sa_s *)sa_info->base;
+ cn10k_outb_prepare_sa(x, sa_entry);
+
+ err = cn10k_outb_write_sa(pf, sa_info);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Error writing outbound SA");
+ qmem_free(pf->dev, sa_info);
+ return err;
+ }
+
+ x->xso.offload_handle = (unsigned long)sa_info;
+ /* Enable static branch when first SA setup */
+ if (!pf->ipsec.outb_sa_count)
+ static_branch_enable(&cn10k_ipsec_sa_enabled);
+ pf->ipsec.outb_sa_count++;
+ return 0;
+}
+
+static int cn10k_ipsec_add_state(struct net_device *dev,
+ struct xfrm_state *x,
+ struct netlink_ext_ack *extack)
+{
+ if (x->xso.dir == XFRM_DEV_OFFLOAD_IN)
+ return cn10k_ipsec_inb_add_state(x, extack);
+ else
+ return cn10k_ipsec_outb_add_state(dev, x, extack);
+}
+
+static void cn10k_ipsec_del_state(struct net_device *dev, struct xfrm_state *x)
+{
+ struct cn10k_tx_sa_s *sa_entry;
+ struct qmem *sa_info;
+ struct otx2_nic *pf;
+ int err;
+
+ if (x->xso.dir == XFRM_DEV_OFFLOAD_IN)
+ return;
+
+ pf = netdev_priv(dev);
+
+ sa_info = (struct qmem *)x->xso.offload_handle;
+ sa_entry = (struct cn10k_tx_sa_s *)sa_info->base;
+ memset(sa_entry, 0, sizeof(struct cn10k_tx_sa_s));
+ /* Disable SA in CPT h/w */
+ sa_entry->ctx_push_size = cn10k_ipsec_get_ctx_push_size();
+ sa_entry->ctx_size = (pf->ipsec.sa_size / OTX2_ALIGN) & 0xF;
+ sa_entry->aop_valid = 1;
+
+ err = cn10k_outb_write_sa(pf, sa_info);
+ if (err)
+ netdev_err(dev, "Error (%d) deleting SA\n", err);
+
+ x->xso.offload_handle = 0;
+ qmem_free(pf->dev, sa_info);
+
+ /* If no more SAs are left, update netdev features for a potential change
+ * in NETIF_F_HW_ESP.
+ */
+ if (!--pf->ipsec.outb_sa_count)
+ queue_work(pf->ipsec.sa_workq, &pf->ipsec.sa_work);
+}
+
+static const struct xfrmdev_ops cn10k_ipsec_xfrmdev_ops = {
+ .xdo_dev_state_add = cn10k_ipsec_add_state,
+ .xdo_dev_state_delete = cn10k_ipsec_del_state,
+};
+
+static void cn10k_ipsec_sa_wq_handler(struct work_struct *work)
+{
+ struct cn10k_ipsec *ipsec = container_of(work, struct cn10k_ipsec,
+ sa_work);
+ struct otx2_nic *pf = container_of(ipsec, struct otx2_nic, ipsec);
+
+ /* Disable static branch when no more SA enabled */
+ static_branch_disable(&cn10k_ipsec_sa_enabled);
+ rtnl_lock();
+ netdev_update_features(pf->netdev);
+ rtnl_unlock();
+}
+
+int cn10k_ipsec_ethtool_init(struct net_device *netdev, bool enable)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+
+ /* IPsec offload supported on cn10k */
+ if (!is_dev_support_ipsec_offload(pf->pdev))
+ return -EOPNOTSUPP;
+
+ /* Initialize CPT for outbound ipsec offload */
+ if (enable)
+ return cn10k_outb_cpt_init(netdev);
+
+ /* Don't do CPT cleanup if SA installed */
+ if (pf->ipsec.outb_sa_count) {
+ netdev_err(pf->netdev, "SA installed on this device\n");
+ return -EBUSY;
+ }
+
+ return cn10k_outb_cpt_clean(pf);
+}
+
+int cn10k_ipsec_init(struct net_device *netdev)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ u32 sa_size;
+
+ if (!is_dev_support_ipsec_offload(pf->pdev))
+ return 0;
+
+ /* Each SA entry is rounded up to a multiple of 128 bytes */
+ sa_size = sizeof(struct cn10k_tx_sa_s) % OTX2_ALIGN ?
+ (sizeof(struct cn10k_tx_sa_s) / OTX2_ALIGN + 1) *
+ OTX2_ALIGN : sizeof(struct cn10k_tx_sa_s);
+ pf->ipsec.sa_size = sa_size;
+
+ INIT_WORK(&pf->ipsec.sa_work, cn10k_ipsec_sa_wq_handler);
+ pf->ipsec.sa_workq = alloc_workqueue("cn10k_ipsec_sa_workq", 0, 0);
+ if (!pf->ipsec.sa_workq) {
+ netdev_err(pf->netdev, "SA alloc workqueue failed\n");
+ return -ENOMEM;
+ }
+
+ /* Set xfrm device ops */
+ netdev->xfrmdev_ops = &cn10k_ipsec_xfrmdev_ops;
+ netdev->hw_features |= NETIF_F_HW_ESP;
+ netdev->hw_enc_features |= NETIF_F_HW_ESP;
+
+ cn10k_cpt_device_set_unavailable(pf);
+ return 0;
+}
+EXPORT_SYMBOL(cn10k_ipsec_init);
+
+void cn10k_ipsec_clean(struct otx2_nic *pf)
+{
+ if (!is_dev_support_ipsec_offload(pf->pdev))
+ return;
+
+ if (!(pf->flags & OTX2_FLAG_IPSEC_OFFLOAD_ENABLED))
+ return;
+
+ if (pf->ipsec.sa_workq) {
+ destroy_workqueue(pf->ipsec.sa_workq);
+ pf->ipsec.sa_workq = NULL;
+ }
+
+ cn10k_outb_cpt_clean(pf);
+}
+EXPORT_SYMBOL(cn10k_ipsec_clean);
+
+static u16 cn10k_ipsec_get_ip_data_len(struct xfrm_state *x,
+ struct sk_buff *skb)
+{
+ struct ipv6hdr *ipv6h;
+ struct iphdr *iph;
+ u8 *src;
+
+ src = (u8 *)skb->data + ETH_HLEN;
+
+ if (x->props.family == AF_INET) {
+ iph = (struct iphdr *)src;
+ return ntohs(iph->tot_len);
+ }
+
+ ipv6h = (struct ipv6hdr *)src;
+ return ntohs(ipv6h->payload_len) + sizeof(struct ipv6hdr);
+}
+
+/* Prepare CPT and NIX SQE scatter/gather subdescriptor structure.
+ * SG of NIX and CPT are same in size.
+ * Layout of a NIX SQE and CPT SG entry:
+ * -----------------------------
+ * | CPT Scatter Gather |
+ * | (SQE SIZE) |
+ * | |
+ * -----------------------------
+ * | NIX SQE |
+ * | (SQE SIZE) |
+ * | |
+ * -----------------------------
+ */
+bool otx2_sqe_add_sg_ipsec(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
+ struct sk_buff *skb, int num_segs, int *offset)
+{
+ struct cpt_sg_s *cpt_sg = NULL;
+ struct nix_sqe_sg_s *sg = NULL;
+ u64 dma_addr, *iova = NULL;
+ u64 *cpt_iova = NULL;
+ u16 *sg_lens = NULL;
+ int seg, len;
+
+ sq->sg[sq->head].num_segs = 0;
+ cpt_sg = (struct cpt_sg_s *)(sq->sqe_base - sq->sqe_size);
+
+ for (seg = 0; seg < num_segs; seg++) {
+ if ((seg % MAX_SEGS_PER_SG) == 0) {
+ sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset);
+ sg->ld_type = NIX_SEND_LDTYPE_LDD;
+ sg->subdc = NIX_SUBDC_SG;
+ sg->segs = 0;
+ sg_lens = (void *)sg;
+ iova = (void *)sg + sizeof(*sg);
+ /* Next subdc always starts at a 16-byte boundary.
+ * So whether sg->segs is 2 or 3, offset += 16 bytes.
+ */
+ if ((num_segs - seg) >= (MAX_SEGS_PER_SG - 1))
+ *offset += sizeof(*sg) + (3 * sizeof(u64));
+ else
+ *offset += sizeof(*sg) + sizeof(u64);
+
+ cpt_sg += (seg / MAX_SEGS_PER_SG) * 4;
+ cpt_iova = (void *)cpt_sg + sizeof(*cpt_sg);
+ }
+ dma_addr = otx2_dma_map_skb_frag(pfvf, skb, seg, &len);
+ if (dma_mapping_error(pfvf->dev, dma_addr))
+ return false;
+
+ sg_lens[seg % MAX_SEGS_PER_SG] = len;
+ sg->segs++;
+ *iova++ = dma_addr;
+ *cpt_iova++ = dma_addr;
+
+ /* Save DMA mapping info for later unmapping */
+ sq->sg[sq->head].dma_addr[seg] = dma_addr;
+ sq->sg[sq->head].size[seg] = len;
+ sq->sg[sq->head].num_segs++;
+
+ *cpt_sg = *(struct cpt_sg_s *)sg;
+ cpt_sg->rsvd_63_50 = 0;
+ }
+
+ sq->sg[sq->head].skb = (u64)skb;
+ return true;
+}
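+
+/* Sketch of the resulting layout (assuming an 8-byte NIX SG header, not
+ * stated in this patch): for a linear skb (num_segs = 1) one nix_sqe_sg_s
+ * plus one IOVA is written and *offset advances by 16 bytes; for 2-3
+ * segments room for three IOVAs is reserved and *offset advances by
+ * 32 bytes. The same addresses are mirrored into the CPT gather list in
+ * the slot one SQE-size before sq->sqe_base, so the CPT engine sees an
+ * identical scatter list.
+ */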
+
+static u16 cn10k_ipsec_get_param1(u8 iv_offset)
+{
+ u16 param1_val;
+
+ /* Set Crypto mode, disable L3/L4 checksum */
+ param1_val = CN10K_IPSEC_INST_PARAM1_DIS_L4_CSUM |
+ CN10K_IPSEC_INST_PARAM1_DIS_L3_CSUM;
+ param1_val |= (u16)iv_offset << CN10K_IPSEC_INST_PARAM1_IV_OFFSET_SHIFT;
+ return param1_val;
+}
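+
+/* Worked value (illustrative): for an IPv4 packet iv_offset is 28, so
+ * param1 = (28 << 8) | DIS_L3_CSUM | DIS_L4_CSUM = 0x1c03.
+ */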
+
+bool cn10k_ipsec_transmit(struct otx2_nic *pf, struct netdev_queue *txq,
+ struct otx2_snd_queue *sq, struct sk_buff *skb,
+ int num_segs, int size)
+{
+ struct cpt_inst_s inst;
+ struct cpt_res_s *res;
+ struct xfrm_state *x;
+ struct qmem *sa_info;
+ dma_addr_t dptr_iova;
+ struct sec_path *sp;
+ u8 encap_offset;
+ u8 auth_offset;
+ u8 gthr_size;
+ u8 iv_offset;
+ u16 dlen;
+
+ /* Check for IPSEC offload enabled */
+ if (!(pf->flags & OTX2_FLAG_IPSEC_OFFLOAD_ENABLED))
+ goto drop;
+
+ sp = skb_sec_path(skb);
+ if (unlikely(!sp->len))
+ goto drop;
+
+ x = xfrm_input_state(skb);
+ if (unlikely(!x))
+ goto drop;
+
+ if (x->props.mode != XFRM_MODE_TRANSPORT &&
+ x->props.mode != XFRM_MODE_TUNNEL)
+ goto drop;
+
+ dlen = cn10k_ipsec_get_ip_data_len(x, skb);
+ if (dlen == 0) {
+ if (netif_msg_tx_err(pf))
+ netdev_err(pf->netdev, "Invalid IP header, ip-length zero\n");
+ goto drop;
+ }
+
+ /* Check for valid SA context */
+ sa_info = (struct qmem *)x->xso.offload_handle;
+ if (!sa_info)
+ goto drop;
+
+ memset(&inst, 0, sizeof(struct cpt_inst_s));
+
+ /* Get authentication offset */
+ if (x->props.family == AF_INET)
+ auth_offset = sizeof(struct iphdr);
+ else
+ auth_offset = sizeof(struct ipv6hdr);
+
+ /* IV offset is after ESP header */
+ iv_offset = auth_offset + sizeof(struct ip_esp_hdr);
+ /* Encap will start after IV */
+ encap_offset = iv_offset + GCM_RFC4106_IV_SIZE;
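+
+ /* Example offsets (illustrative): for IPv4 ESP, auth_offset is
+ * sizeof(struct iphdr) = 20, iv_offset = 20 + 8 (ESP header) = 28
+ * and encap_offset = 28 + 8 (RFC4106 explicit IV) = 36, all relative
+ * to the start of the IP header.
+ */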
+
+ /* CPT Instruction word-1 */
+ res = (struct cpt_res_s *)(sq->cpt_resp->base + (64 * sq->head));
+ res->compcode = 0;
+ inst.res_addr = sq->cpt_resp->iova + (64 * sq->head);
+
+ /* CPT Instruction word-2 */
+ inst.rvu_pf_func = pf->pcifunc;
+
+ /* CPT Instruction word-3:
+ * Set QORD to force CPT_RES_S write completion
+ */
+ inst.qord = 1;
+
+ /* CPT Instruction word-4 */
+ /* inst.dlen should not include ICV length */
+ inst.dlen = dlen + ETH_HLEN - (x->aead->alg_icv_len / 8);
+ inst.opcode_major = CN10K_IPSEC_MAJOR_OP_OUTB_IPSEC;
+ inst.param1 = cn10k_ipsec_get_param1(iv_offset);
+
+ inst.param2 = encap_offset <<
+ CN10K_IPSEC_INST_PARAM2_ENC_DATA_OFFSET_SHIFT;
+ inst.param2 |= (u16)auth_offset <<
+ CN10K_IPSEC_INST_PARAM2_AUTH_DATA_OFFSET_SHIFT;
+
+ /* CPT Instruction word-5 */
+ gthr_size = num_segs / MAX_SEGS_PER_SG;
+ gthr_size = (num_segs % MAX_SEGS_PER_SG) ? gthr_size + 1 : gthr_size;
+
+ gthr_size &= 0xF;
+ dptr_iova = (sq->sqe_ring->iova + (sq->head * (sq->sqe_size * 2)));
+ inst.dptr = dptr_iova | ((u64)gthr_size << 60);
+
+ /* CPT Instruction word-6 */
+ inst.rptr = inst.dptr;
+
+ /* CPT Instruction word-7 */
+ inst.cptr = sa_info->iova;
+ inst.ctx_val = 1;
+ inst.egrp = CN10K_DEF_CPT_IPSEC_EGRP;
+
+ /* CPT Instruction word-0 */
+ inst.nixtxl = (size / 16) - 1;
+ inst.dat_offset = ETH_HLEN;
+ inst.nixtx_offset = sq->sqe_size;
+
+ netdev_tx_sent_queue(txq, skb->len);
+
+ /* Finally Flush the CPT instruction */
+ sq->head++;
+ sq->head &= (sq->sqe_cnt - 1);
+ cn10k_cpt_inst_flush(pf, &inst, sizeof(struct cpt_inst_s));
+ return true;
+drop:
+ dev_kfree_skb_any(skb);
+ return false;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h
new file mode 100644
index 000000000000..9965df0faa3e
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h
@@ -0,0 +1,265 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell IPSEC offload driver
+ *
+ * Copyright (C) 2024 Marvell.
+ */
+
+#ifndef CN10K_IPSEC_H
+#define CN10K_IPSEC_H
+
+#include <linux/types.h>
+
+DECLARE_STATIC_KEY_FALSE(cn10k_ipsec_sa_enabled);
+
+/* CPT instruction size in bytes */
+#define CN10K_CPT_INST_SIZE 64
+
+/* CPT instruction (CPT_INST_S) queue length */
+#define CN10K_CPT_INST_QLEN 8200
+
+/* CPT instruction queue size passed to HW is in units of
+ * 40*CPT_INST_S messages.
+ */
+#define CN10K_CPT_SIZE_DIV40 (CN10K_CPT_INST_QLEN / 40)
+
+/* CPT needs 320 free entries */
+#define CN10K_CPT_INST_QLEN_EXTRA_BYTES (320 * CN10K_CPT_INST_SIZE)
+#define CN10K_CPT_EXTRA_SIZE_DIV40 (320 / 40)
+
+/* CPT instruction queue length in bytes */
+#define CN10K_CPT_INST_QLEN_BYTES \
+ ((CN10K_CPT_SIZE_DIV40 * 40 * CN10K_CPT_INST_SIZE) + \
+ CN10K_CPT_INST_QLEN_EXTRA_BYTES)
+
+/* CPT instruction group queue length in bytes */
+#define CN10K_CPT_INST_GRP_QLEN_BYTES \
+ ((CN10K_CPT_SIZE_DIV40 + CN10K_CPT_EXTRA_SIZE_DIV40) * 16)
+
+/* CPT FC length in bytes */
+#define CN10K_CPT_Q_FC_LEN 128
+
+/* Default CPT engine group for ipsec offload */
+#define CN10K_DEF_CPT_IPSEC_EGRP 1
+
+/* CN10K CPT LF registers */
+#define CPT_LFBASE (BLKTYPE_CPT << RVU_FUNC_BLKADDR_SHIFT)
+#define CN10K_CPT_LF_CTL (CPT_LFBASE | 0x10)
+#define CN10K_CPT_LF_INPROG (CPT_LFBASE | 0x40)
+#define CN10K_CPT_LF_Q_BASE (CPT_LFBASE | 0xf0)
+#define CN10K_CPT_LF_Q_SIZE (CPT_LFBASE | 0x100)
+#define CN10K_CPT_LF_Q_INST_PTR (CPT_LFBASE | 0x110)
+#define CN10K_CPT_LF_Q_GRP_PTR (CPT_LFBASE | 0x120)
+#define CN10K_CPT_LF_NQX(a) (CPT_LFBASE | 0x400 | (a) << 3)
+#define CN10K_CPT_LF_CTX_FLUSH (CPT_LFBASE | 0x510)
+
+/* IPSEC Instruction opcodes */
+#define CN10K_IPSEC_MAJOR_OP_WRITE_SA 0x01UL
+#define CN10K_IPSEC_MINOR_OP_WRITE_SA 0x09UL
+#define CN10K_IPSEC_MAJOR_OP_OUTB_IPSEC 0x2AUL
+
+enum cn10k_cpt_comp_e {
+ CN10K_CPT_COMP_E_NOTDONE = 0x00,
+ CN10K_CPT_COMP_E_GOOD = 0x01,
+ CN10K_CPT_COMP_E_FAULT = 0x02,
+ CN10K_CPT_COMP_E_HWERR = 0x04,
+ CN10K_CPT_COMP_E_INSTERR = 0x05,
+ CN10K_CPT_COMP_E_WARN = 0x06,
+ CN10K_CPT_COMP_E_MASK = 0x3F
+};
+
+struct cn10k_cpt_inst_queue {
+ u8 *vaddr;
+ u8 *real_vaddr;
+ dma_addr_t dma_addr;
+ dma_addr_t real_dma_addr;
+ u32 size;
+};
+
+enum cn10k_cpt_hw_state_e {
+ CN10K_CPT_HW_UNAVAILABLE,
+ CN10K_CPT_HW_AVAILABLE,
+ CN10K_CPT_HW_IN_USE
+};
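+
+/* cpt_state lifecycle (summary of the helpers in cn10k_ipsec.c): the LF
+ * starts UNAVAILABLE, cn10k_outb_cpt_init() marks it AVAILABLE, and users
+ * claim it by atomically moving AVAILABLE -> IN_USE before touching the
+ * instruction queue, restoring AVAILABLE when done.
+ */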
+
+struct cn10k_ipsec {
+ /* Outbound CPT */
+ u64 io_addr;
+ atomic_t cpt_state;
+ struct cn10k_cpt_inst_queue iq;
+
+ /* SA info */
+ u32 sa_size;
+ u32 outb_sa_count;
+ struct work_struct sa_work;
+ struct workqueue_struct *sa_workq;
+};
+
+/* CN10K IPSEC Security Association (SA) */
+/* SA direction */
+#define CN10K_IPSEC_SA_DIR_INB 0
+#define CN10K_IPSEC_SA_DIR_OUTB 1
+/* SA protocol */
+#define CN10K_IPSEC_SA_IPSEC_PROTO_AH 0
+#define CN10K_IPSEC_SA_IPSEC_PROTO_ESP 1
+/* SA Encryption Type */
+#define CN10K_IPSEC_SA_ENCAP_TYPE_AES_GCM 5
+/* SA IPSEC mode Transport/Tunnel */
+#define CN10K_IPSEC_SA_IPSEC_MODE_TRANSPORT 0
+#define CN10K_IPSEC_SA_IPSEC_MODE_TUNNEL 1
+/* SA AES Key Length */
+#define CN10K_IPSEC_SA_AES_KEY_LEN_128 1
+#define CN10K_IPSEC_SA_AES_KEY_LEN_192 2
+#define CN10K_IPSEC_SA_AES_KEY_LEN_256 3
+/* IV Source */
+#define CN10K_IPSEC_SA_IV_SRC_COUNTER 0
+#define CN10K_IPSEC_SA_IV_SRC_PACKET 3
+
+struct cn10k_tx_sa_s {
+ u64 esn_en : 1; /* W0 */
+ u64 rsvd_w0_1_8 : 8;
+ u64 hw_ctx_off : 7;
+ u64 ctx_id : 16;
+ u64 rsvd_w0_32_47 : 16;
+ u64 ctx_push_size : 7;
+ u64 rsvd_w0_55 : 1;
+ u64 ctx_hdr_size : 2;
+ u64 aop_valid : 1;
+ u64 rsvd_w0_59 : 1;
+ u64 ctx_size : 4;
+ u64 w1; /* W1 */
+ u64 sa_valid : 1; /* W2 */
+ u64 sa_dir : 1;
+ u64 rsvd_w2_2_3 : 2;
+ u64 ipsec_mode : 1;
+ u64 ipsec_protocol : 1;
+ u64 aes_key_len : 2;
+ u64 enc_type : 3;
+ u64 rsvd_w2_11_19 : 9;
+ u64 iv_src : 2;
+ u64 rsvd_w2_22_31 : 10;
+ u64 rsvd_w2_32_63 : 32;
+ u64 w3; /* W3 */
+ u8 cipher_key[32]; /* W4 - W7 */
+ u32 rsvd_w8_0_31; /* W8 : IV */
+ u32 iv_gcm_salt;
+ u64 rsvd_w9_w30[22]; /* W9 - W30 */
+ u64 hw_ctx[6]; /* W31 - W36 */
+};
+
+/* CPT instruction parameter-1 */
+#define CN10K_IPSEC_INST_PARAM1_DIS_L4_CSUM 0x1
+#define CN10K_IPSEC_INST_PARAM1_DIS_L3_CSUM 0x2
+#define CN10K_IPSEC_INST_PARAM1_CRYPTO_MODE 0x20
+#define CN10K_IPSEC_INST_PARAM1_IV_OFFSET_SHIFT 8
+
+/* CPT instruction parameter-2 */
+#define CN10K_IPSEC_INST_PARAM2_ENC_DATA_OFFSET_SHIFT 0
+#define CN10K_IPSEC_INST_PARAM2_AUTH_DATA_OFFSET_SHIFT 8
+
+/* CPT Instruction Structure */
+struct cpt_inst_s {
+ u64 nixtxl : 3; /* W0 */
+ u64 doneint : 1;
+ u64 rsvd_w0_4_15 : 12;
+ u64 dat_offset : 8;
+ u64 ext_param1 : 8;
+ u64 nixtx_offset : 20;
+ u64 rsvd_w0_52_63 : 12;
+ u64 res_addr; /* W1 */
+ u64 tag : 32; /* W2 */
+ u64 tt : 2;
+ u64 grp : 10;
+ u64 rsvd_w2_44_47 : 4;
+ u64 rvu_pf_func : 16;
+ u64 qord : 1; /* W3 */
+ u64 rsvd_w3_1_2 : 2;
+ u64 wqe_ptr : 61;
+ u64 dlen : 16; /* W4 */
+ u64 param2 : 16;
+ u64 param1 : 16;
+ u64 opcode_major : 8;
+ u64 opcode_minor : 8;
+ u64 dptr; /* W5 */
+ u64 rptr; /* W6 */
+ u64 cptr : 60; /* W7 */
+ u64 ctx_val : 1;
+ u64 egrp : 3;
+};
+
+/* CPT Instruction Result Structure */
+struct cpt_res_s {
+ u64 compcode : 7; /* W0 */
+ u64 doneint : 1;
+ u64 uc_compcode : 8;
+ u64 uc_info : 48;
+ u64 esn; /* W1 */
+};
+
+/* CPT SG structure */
+struct cpt_sg_s {
+ u64 seg1_size : 16;
+ u64 seg2_size : 16;
+ u64 seg3_size : 16;
+ u64 segs : 2;
+ u64 rsvd_63_50 : 14;
+};
+
+/* CPT LF_INPROG Register */
+#define CPT_LF_INPROG_INFLIGHT GENMASK_ULL(8, 0)
+#define CPT_LF_INPROG_GRB_CNT GENMASK_ULL(39, 32)
+#define CPT_LF_INPROG_GWB_CNT GENMASK_ULL(47, 40)
+
+/* CPT LF_Q_GRP_PTR Register */
+#define CPT_LF_Q_GRP_PTR_DQ_PTR GENMASK_ULL(14, 0)
+#define CPT_LF_Q_GRP_PTR_NQ_PTR GENMASK_ULL(46, 32)
+
+/* CPT LF_Q_BASE Register */
+#define CPT_LF_Q_BASE_ADDR GENMASK_ULL(52, 7)
+
+/* CPT LF_Q_SIZE Register */
+#define CPT_LF_Q_SIZE_DIV40 GENMASK_ULL(14, 0)
+
+/* CPT LF CTX Flush Register */
+#define CPT_LF_CTX_FLUSH GENMASK_ULL(45, 0)
+
+#ifdef CONFIG_XFRM_OFFLOAD
+int cn10k_ipsec_init(struct net_device *netdev);
+void cn10k_ipsec_clean(struct otx2_nic *pf);
+int cn10k_ipsec_ethtool_init(struct net_device *netdev, bool enable);
+bool otx2_sqe_add_sg_ipsec(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
+ struct sk_buff *skb, int num_segs, int *offset);
+bool cn10k_ipsec_transmit(struct otx2_nic *pf, struct netdev_queue *txq,
+ struct otx2_snd_queue *sq, struct sk_buff *skb,
+ int num_segs, int size);
+#else
+static inline __maybe_unused int cn10k_ipsec_init(struct net_device *netdev)
+{
+ return 0;
+}
+
+static inline __maybe_unused void cn10k_ipsec_clean(struct otx2_nic *pf)
+{
+}
+
+static inline __maybe_unused
+int cn10k_ipsec_ethtool_init(struct net_device *netdev, bool enable)
+{
+ return 0;
+}
+
+static inline bool __maybe_unused
+otx2_sqe_add_sg_ipsec(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
+ struct sk_buff *skb, int num_segs, int *offset)
+{
+ return true;
+}
+
+static inline bool __maybe_unused
+cn10k_ipsec_transmit(struct otx2_nic *pf, struct netdev_queue *txq,
+ struct otx2_snd_queue *sq, struct sk_buff *skb,
+ int num_segs, int size)
+{
+ return true;
+}
+#endif
+#endif // CN10K_IPSEC_H
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
index 6cc7a78968fc..4c7e0f345cb5 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
@@ -133,9 +133,7 @@ static const char *rsrc_name(enum mcs_rsrc_type rsrc_type)
return "SA";
default:
return "Unknown";
- };
-
- return "Unknown";
+ }
}
static int cn10k_mcs_alloc_rsrc(struct otx2_nic *pfvf, enum mcs_direction dir,
@@ -533,7 +531,8 @@ static int cn10k_mcs_write_tx_secy(struct otx2_nic *pfvf,
if (sw_tx_sc->encrypt)
sectag_tci |= (MCS_TCI_E | MCS_TCI_C);
- policy = FIELD_PREP(MCS_TX_SECY_PLCY_MTU, secy->netdev->mtu);
+ policy = FIELD_PREP(MCS_TX_SECY_PLCY_MTU,
+ pfvf->netdev->mtu + OTX2_ETH_HLEN);
/* Write SecTag excluding AN bits(1..0) */
policy |= FIELD_PREP(MCS_TX_SECY_PLCY_ST_TCI, sectag_tci >> 2);
policy |= FIELD_PREP(MCS_TX_SECY_PLCY_ST_OFFSET, tag_offset);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index 523ecb798a7a..6b5c9536d26d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -10,11 +10,19 @@
#include <net/page_pool/helpers.h>
#include <net/tso.h>
#include <linux/bitfield.h>
+#include <linux/dcbnl.h>
+#include <net/xfrm.h>
#include "otx2_reg.h"
#include "otx2_common.h"
#include "otx2_struct.h"
#include "cn10k.h"
+#include "otx2_xsk.h"
+
+static bool otx2_is_pfc_enabled(struct otx2_nic *pfvf)
+{
+ return IS_ENABLED(CONFIG_DCB) && !!pfvf->pfc_en;
+}
static void otx2_nix_rq_op_stats(struct queue_stats *stats,
struct otx2_nic *pfvf, int qidx)
@@ -323,6 +331,10 @@ int otx2_set_rss_table(struct otx2_nic *pfvf, int ctx_id)
rss_ctx = rss->rss_ctx[ctx_id];
/* Get memory to put this msg */
for (idx = 0; idx < rss->rss_size; idx++) {
+ /* Ignore the queue if AF_XDP zero copy is enabled */
+ if (test_bit(rss_ctx->ind_tbl[idx], pfvf->af_xdp_zc_qidx))
+ continue;
+
aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
if (!aq) {
/* The shared memory buffer can be full.
@@ -542,10 +554,13 @@ static int otx2_alloc_pool_buf(struct otx2_nic *pfvf, struct otx2_pool *pool,
}
static int __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
- dma_addr_t *dma)
+ dma_addr_t *dma, int qidx, int idx)
{
u8 *buf;
+ if (pool->xsk_pool)
+ return otx2_xsk_pool_alloc_buf(pfvf, pool, dma, idx);
+
if (pool->page_pool)
return otx2_alloc_pool_buf(pfvf, pool, dma);
@@ -564,12 +579,12 @@ static int __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
}
int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
- dma_addr_t *dma)
+ dma_addr_t *dma, int qidx, int idx)
{
int ret;
local_bh_disable();
- ret = __otx2_alloc_rbuf(pfvf, pool, dma);
+ ret = __otx2_alloc_rbuf(pfvf, pool, dma, qidx, idx);
local_bh_enable();
return ret;
}
@@ -577,7 +592,8 @@ int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
int otx2_alloc_buffer(struct otx2_nic *pfvf, struct otx2_cq_queue *cq,
dma_addr_t *dma)
{
- if (unlikely(__otx2_alloc_rbuf(pfvf, cq->rbpool, dma)))
+ if (unlikely(__otx2_alloc_rbuf(pfvf, cq->rbpool, dma,
+ cq->cq_idx, cq->pool_ptrs - 1)))
return -ENOMEM;
return 0;
}
@@ -877,7 +893,7 @@ void otx2_sqb_flush(struct otx2_nic *pfvf)
#define RQ_PASS_LVL_AURA (255 - ((95 * 256) / 100)) /* RED when 95% is full */
#define RQ_DROP_LVL_AURA (255 - ((99 * 256) / 100)) /* Drop when 99% is full */
-static int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura)
+int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura)
{
struct otx2_qset *qset = &pfvf->qset;
struct nix_aq_enq_req *aq;
@@ -964,6 +980,29 @@ int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
if (err)
return err;
+ /* Allocate memory for NIX SQE (which includes NIX SG) and CPT SG.
+ * SG of NIX and CPT are same in size. Allocate memory for CPT SG
+ * same as NIX SQE for base address alignment.
+ * Layout of a NIX SQE and CPT SG entry:
+ * -----------------------------
+ * | CPT Scatter Gather |
+ * | (SQE SIZE) |
+ * | |
+ * -----------------------------
+ * | NIX SQE |
+ * | (SQE SIZE) |
+ * | |
+ * -----------------------------
+ */
+ err = qmem_alloc(pfvf->dev, &sq->sqe_ring, qset->sqe_cnt,
+ sq->sqe_size * 2);
+ if (err)
+ return err;
+
+ err = qmem_alloc(pfvf->dev, &sq->cpt_resp, qset->sqe_cnt, 64);
+ if (err)
+ return err;
+
if (qidx < pfvf->hw.tx_queues) {
err = qmem_alloc(pfvf->dev, &sq->tso_hdrs, qset->sqe_cnt,
TSO_HEADER_SIZE);
@@ -998,6 +1037,10 @@ int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
sq->stats.bytes = 0;
sq->stats.pkts = 0;
+ /* Attach XSK_BUFF_POOL to XDP queue */
+ if (qidx > pfvf->hw.xdp_queues)
+ otx2_attach_xsk_buff(pfvf, sq, (qidx - pfvf->hw.xdp_queues));
+
chan_offset = qidx % pfvf->hw.tx_chan_cnt;
err = pfvf->hw_ops->sq_aq_init(pfvf, qidx, chan_offset, sqb_aura);
@@ -1011,12 +1054,13 @@ int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
}
-static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
+int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
{
struct otx2_qset *qset = &pfvf->qset;
int err, pool_id, non_xdp_queues;
struct nix_aq_enq_req *aq;
struct otx2_cq_queue *cq;
+ struct otx2_pool *pool;
cq = &qset->cq[qidx];
cq->cq_idx = qidx;
@@ -1025,8 +1069,20 @@ static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
cq->cq_type = CQ_RX;
cq->cint_idx = qidx;
cq->cqe_cnt = qset->rqe_cnt;
- if (pfvf->xdp_prog)
+ if (pfvf->xdp_prog) {
xdp_rxq_info_reg(&cq->xdp_rxq, pfvf->netdev, qidx, 0);
+ pool = &qset->pool[qidx];
+ if (pool->xsk_pool) {
+ xdp_rxq_info_reg_mem_model(&cq->xdp_rxq,
+ MEM_TYPE_XSK_BUFF_POOL,
+ NULL);
+ xsk_pool_set_rxq_info(pool->xsk_pool, &cq->xdp_rxq);
+ } else if (pool->page_pool) {
+ xdp_rxq_info_reg_mem_model(&cq->xdp_rxq,
+ MEM_TYPE_PAGE_POOL,
+ pool->page_pool);
+ }
+ }
} else if (qidx < non_xdp_queues) {
cq->cq_type = CQ_TX;
cq->cint_idx = qidx - pfvf->hw.rx_queues;
@@ -1245,9 +1301,10 @@ void otx2_free_bufs(struct otx2_nic *pfvf, struct otx2_pool *pool,
pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
page = virt_to_head_page(phys_to_virt(pa));
-
if (pool->page_pool) {
page_pool_put_full_page(pool->page_pool, page, true);
+ } else if (pool->xsk_pool) {
+ /* Note: No way of identifying xdp_buff */
} else {
dma_unmap_page_attrs(pfvf->dev, iova, size,
DMA_FROM_DEVICE,
@@ -1262,6 +1319,7 @@ void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type)
int pool_id, pool_start = 0, pool_end = 0, size = 0;
struct otx2_pool *pool;
u64 iova;
+ int idx;
if (type == AURA_NIX_SQ) {
pool_start = otx2_get_pool_idx(pfvf, type, 0);
@@ -1276,16 +1334,21 @@ void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type)
/* Free SQB and RQB pointers from the aura pool */
for (pool_id = pool_start; pool_id < pool_end; pool_id++) {
- iova = otx2_aura_allocptr(pfvf, pool_id);
pool = &pfvf->qset.pool[pool_id];
+ iova = otx2_aura_allocptr(pfvf, pool_id);
while (iova) {
if (type == AURA_NIX_RQ)
iova -= OTX2_HEAD_ROOM;
-
otx2_free_bufs(pfvf, pool, iova, size);
-
iova = otx2_aura_allocptr(pfvf, pool_id);
}
+
+ for (idx = 0; idx < pool->xdp_cnt; idx++) {
+ if (!pool->xdp[idx])
+ continue;
+
+ xsk_buff_free(pool->xdp[idx]);
+ }
}
}
@@ -1302,7 +1365,8 @@ void otx2_aura_pool_free(struct otx2_nic *pfvf)
qmem_free(pfvf->dev, pool->stack);
qmem_free(pfvf->dev, pool->fc_addr);
page_pool_destroy(pool->page_pool);
- pool->page_pool = NULL;
+ devm_kfree(pfvf->dev, pool->xdp);
+ pool->xsk_pool = NULL;
}
devm_kfree(pfvf->dev, pfvf->qset.pool);
pfvf->qset.pool = NULL;
@@ -1389,6 +1453,7 @@ int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
int stack_pages, int numptrs, int buf_size, int type)
{
struct page_pool_params pp_params = { 0 };
+ struct xsk_buff_pool *xsk_pool;
struct npa_aq_enq_req *aq;
struct otx2_pool *pool;
int err;
@@ -1432,21 +1497,35 @@ int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
aq->ctype = NPA_AQ_CTYPE_POOL;
aq->op = NPA_AQ_INSTOP_INIT;
- if (type != AURA_NIX_RQ) {
- pool->page_pool = NULL;
+ if (type != AURA_NIX_RQ)
+ return 0;
+
+ if (!test_bit(pool_id, pfvf->af_xdp_zc_qidx)) {
+ pp_params.order = get_order(buf_size);
+ pp_params.flags = PP_FLAG_DMA_MAP;
+ pp_params.pool_size = min(OTX2_PAGE_POOL_SZ, numptrs);
+ pp_params.nid = NUMA_NO_NODE;
+ pp_params.dev = pfvf->dev;
+ pp_params.dma_dir = DMA_FROM_DEVICE;
+ pool->page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(pool->page_pool)) {
+ netdev_err(pfvf->netdev, "Creation of page pool failed\n");
+ return PTR_ERR(pool->page_pool);
+ }
return 0;
}
- pp_params.order = get_order(buf_size);
- pp_params.flags = PP_FLAG_DMA_MAP;
- pp_params.pool_size = min(OTX2_PAGE_POOL_SZ, numptrs);
- pp_params.nid = NUMA_NO_NODE;
- pp_params.dev = pfvf->dev;
- pp_params.dma_dir = DMA_FROM_DEVICE;
- pool->page_pool = page_pool_create(&pp_params);
- if (IS_ERR(pool->page_pool)) {
- netdev_err(pfvf->netdev, "Creation of page pool failed\n");
- return PTR_ERR(pool->page_pool);
+ /* Set XSK pool to support AF_XDP zero-copy */
+ xsk_pool = xsk_get_pool_from_qid(pfvf->netdev, pool_id);
+ if (xsk_pool) {
+ pool->xsk_pool = xsk_pool;
+ pool->xdp_cnt = numptrs;
+ pool->xdp = devm_kcalloc(pfvf->dev,
+ numptrs, sizeof(struct xdp_buff *), GFP_KERNEL);
+ if (!pool->xdp) {
+ netdev_err(pfvf->netdev, "Allocation of xdp buffer array failed\n");
+ return -ENOMEM;
+ }
}
return 0;
@@ -1507,9 +1586,18 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
}
for (ptr = 0; ptr < num_sqbs; ptr++) {
- err = otx2_alloc_rbuf(pfvf, pool, &bufptr);
- if (err)
+ err = otx2_alloc_rbuf(pfvf, pool, &bufptr, pool_id, ptr);
+ if (err) {
+ if (pool->xsk_pool) {
+ ptr--;
+ while (ptr >= 0) {
+ xsk_buff_free(pool->xdp[ptr]);
+ ptr--;
+ }
+ }
goto err_mem;
+ }
+
pfvf->hw_ops->aura_freeptr(pfvf, pool_id, bufptr);
sq->sqb_ptrs[sq->sqb_count++] = (u64)bufptr;
}
@@ -1559,11 +1647,19 @@ int otx2_rq_aura_pool_init(struct otx2_nic *pfvf)
/* Allocate pointers and free them to aura/pool */
for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) {
pool = &pfvf->qset.pool[pool_id];
+
for (ptr = 0; ptr < num_ptrs; ptr++) {
- err = otx2_alloc_rbuf(pfvf, pool, &bufptr);
- if (err)
+ err = otx2_alloc_rbuf(pfvf, pool, &bufptr, pool_id, ptr);
+ if (err) {
+ if (pool->xsk_pool) {
+ while (ptr)
+ xsk_buff_free(pool->xdp[--ptr]);
+ }
return -ENOMEM;
+ }
+
pfvf->hw_ops->aura_freeptr(pfvf, pool_id,
+ pool->xsk_pool ? bufptr :
bufptr + OTX2_HEAD_ROOM);
}
}
@@ -1722,18 +1818,43 @@ int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable)
return -ENOMEM;
req->chan_base = 0;
-#ifdef CONFIG_DCB
- req->chan_cnt = pfvf->pfc_en ? IEEE_8021QAZ_MAX_TCS : 1;
- req->bpid_per_chan = pfvf->pfc_en ? 1 : 0;
-#else
- req->chan_cnt = 1;
- req->bpid_per_chan = 0;
-#endif
+ if (otx2_is_pfc_enabled(pfvf)) {
+ req->chan_cnt = IEEE_8021QAZ_MAX_TCS;
+ req->bpid_per_chan = 1;
+ } else {
+ req->chan_cnt = pfvf->hw.rx_chan_cnt;
+ req->bpid_per_chan = 0;
+ }
return otx2_sync_mbox_msg(&pfvf->mbox);
}
EXPORT_SYMBOL(otx2_nix_config_bp);
+int otx2_nix_cpt_config_bp(struct otx2_nic *pfvf, bool enable)
+{
+ struct nix_bp_cfg_req *req;
+
+ if (enable)
+ req = otx2_mbox_alloc_msg_nix_cpt_bp_enable(&pfvf->mbox);
+ else
+ req = otx2_mbox_alloc_msg_nix_cpt_bp_disable(&pfvf->mbox);
+
+ if (!req)
+ return -ENOMEM;
+
+ req->chan_base = 0;
+ if (otx2_is_pfc_enabled(pfvf)) {
+ req->chan_cnt = IEEE_8021QAZ_MAX_TCS;
+ req->bpid_per_chan = 1;
+ } else {
+ req->chan_cnt = pfvf->hw.rx_chan_cnt;
+ req->bpid_per_chan = 0;
+ }
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+}
+EXPORT_SYMBOL(otx2_nix_cpt_config_bp);
+
/* Mbox message handlers */
void mbox_handler_cgx_stats(struct otx2_nic *pfvf,
struct cgx_stats_rsp *rsp)
@@ -1934,6 +2055,43 @@ int otx2_handle_ntuple_tc_features(struct net_device *netdev, netdev_features_t
}
EXPORT_SYMBOL(otx2_handle_ntuple_tc_features);
+int otx2_set_hw_capabilities(struct otx2_nic *pfvf)
+{
+ struct mbox *mbox = &pfvf->mbox;
+ struct otx2_hw *hw = &pfvf->hw;
+ struct get_hw_cap_rsp *rsp;
+ struct msg_req *req;
+ int ret = -ENOMEM;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_get_hw_cap(mbox);
+ if (!req)
+ goto fail;
+
+ ret = otx2_sync_mbox_msg(mbox);
+ if (ret)
+ goto fail;
+
+ rsp = (struct get_hw_cap_rsp *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
+ 0, &req->hdr);
+ if (IS_ERR(rsp)) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ if (rsp->hw_caps & HW_CAP_MACSEC)
+ __set_bit(CN10K_HW_MACSEC, &hw->cap_flag);
+
+ mutex_unlock(&mbox->lock);
+
+ return 0;
+fail:
+ dev_err(pfvf->dev, "Cannot get MACSEC capability from AF\n");
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
#define M(_name, _id, _fn_name, _req_type, _rsp_type) \
int __weak \
otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf, \
@@ -1947,3 +2105,48 @@ EXPORT_SYMBOL(otx2_mbox_up_handler_ ## _fn_name);
MBOX_UP_CGX_MESSAGES
MBOX_UP_MCS_MESSAGES
#undef M
+
+dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf,
+ struct sk_buff *skb, int seg, int *len)
+{
+ enum dma_data_direction dir = DMA_TO_DEVICE;
+ const skb_frag_t *frag;
+ struct page *page;
+ int offset;
+
+ /* Crypto hardware needs write permission for ipsec crypto offload */
+ if (unlikely(xfrm_offload(skb))) {
+ dir = DMA_BIDIRECTIONAL;
+ skb = skb_unshare(skb, GFP_ATOMIC);
+ }
+
+ /* First segment is always skb->data */
+ if (!seg) {
+ page = virt_to_page(skb->data);
+ offset = offset_in_page(skb->data);
+ *len = skb_headlen(skb);
+ } else {
+ frag = &skb_shinfo(skb)->frags[seg - 1];
+ page = skb_frag_page(frag);
+ offset = skb_frag_off(frag);
+ *len = skb_frag_size(frag);
+ }
+ return otx2_dma_map_page(pfvf, page, offset, *len, dir);
+}
+
+void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg)
+{
+ enum dma_data_direction dir = DMA_TO_DEVICE;
+ struct sk_buff *skb = NULL;
+ int seg;
+
+ skb = (struct sk_buff *)sg->skb;
+ if (unlikely(xfrm_offload(skb)))
+ dir = DMA_BIDIRECTIONAL;
+
+ for (seg = 0; seg < sg->num_segs; seg++) {
+ otx2_dma_unmap_page(pfvf, sg->dma_addr[seg],
+ sg->size[seg], dir);
+ }
+ sg->num_segs = 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 566848663fea..ca0e6ab12ceb 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -21,6 +21,7 @@
#include <linux/time64.h>
#include <linux/dim.h>
#include <uapi/linux/if_macsec.h>
+#include <net/page_pool/helpers.h>
#include <mbox.h>
#include <npc.h>
@@ -30,6 +31,7 @@
#include <rvu_trace.h>
#include "qos.h"
#include "rep.h"
+#include "cn10k_ipsec.h"
/* IPv4 flag more fragment bit */
#define IPV4_FLAG_MORE 0x20
@@ -40,6 +42,7 @@
#define PCI_DEVID_OCTEONTX2_RVU_AFVF 0xA0F8
#define PCI_SUBSYS_DEVID_96XX_RVU_PFVF 0xB200
+#define PCI_SUBSYS_DEVID_CN10K_A_RVU_PFVF 0xB900
#define PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF 0xBD00
#define PCI_DEVID_OCTEONTX2_SDP_REP 0xA0F7
@@ -55,6 +58,9 @@
#define NIX_PF_PFC_PRIO_MAX 8
#endif
+/* Number of segments per SG structure */
+#define MAX_SEGS_PER_SG 3
+
enum arua_mapped_qtypes {
AURA_NIX_RQ,
AURA_NIX_SQ,
@@ -123,6 +129,12 @@ enum otx2_errcodes_re {
ERRCODE_IL4_CSUM = 0x22,
};
+enum otx2_xdp_action {
+ OTX2_XDP_TX = BIT(0),
+ OTX2_XDP_REDIRECT = BIT(1),
+ OTX2_AF_XDP_FRAME = BIT(2),
+};
+
struct otx2_dev_stats {
u64 rx_bytes;
u64 rx_frames;
@@ -344,6 +356,7 @@ struct otx2_flow_config {
struct list_head flow_list_tc;
u8 ucast_flt_cnt;
bool ntuple;
+ u16 ntuple_cnt;
};
struct dev_hw_ops {
@@ -448,6 +461,7 @@ struct otx2_nic {
#define OTX2_FLAG_TC_MARK_ENABLED BIT_ULL(17)
#define OTX2_FLAG_REP_MODE_ENABLED BIT_ULL(18)
#define OTX2_FLAG_PORT_UP BIT_ULL(19)
+#define OTX2_FLAG_IPSEC_OFFLOAD_ENABLED BIT_ULL(20)
u64 flags;
u64 *cq_op_addr;
@@ -499,9 +513,9 @@ struct otx2_nic {
/* Devlink */
struct otx2_devlink *dl;
-#ifdef CONFIG_DCB
/* PFC */
u8 pfc_en;
+#ifdef CONFIG_DCB
u8 *queue_to_pfc_map;
u16 pfc_schq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
bool pfc_alloc_status[NIX_PF_PFC_PRIO_MAX];
@@ -522,6 +536,11 @@ struct otx2_nic {
u16 rep_pf_map[RVU_MAX_REP];
u16 esw_mode;
#endif
+
+ /* Inline ipsec */
+ struct cn10k_ipsec ipsec;
+ /* af_xdp zero-copy */
+ unsigned long *af_xdp_zc_qidx;
};
static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
@@ -572,6 +591,15 @@ static inline bool is_dev_cn10kb(struct pci_dev *pdev)
return pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF;
}
+static inline bool is_dev_cn10ka_b0(struct pci_dev *pdev)
+{
+ if (pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A_RVU_PFVF &&
+ (pdev->revision & 0xFF) == 0x54)
+ return true;
+
+ return false;
+}
+
static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
{
struct otx2_hw *hw = &pfvf->hw;
@@ -604,9 +632,6 @@ static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
__set_bit(CN10K_PTP_ONESTEP, &hw->cap_flag);
__set_bit(QOS_CIR_PIR_SUPPORT, &hw->cap_flag);
}
-
- if (is_dev_cn10kb(pfvf->pdev))
- __set_bit(CN10K_HW_MACSEC, &hw->cap_flag);
}
/* Register read/write APIs */
@@ -621,6 +646,9 @@ static inline void __iomem *otx2_get_regaddr(struct otx2_nic *nic, u64 offset)
case BLKTYPE_NPA:
blkaddr = BLKADDR_NPA;
break;
+ case BLKTYPE_CPT:
+ blkaddr = BLKADDR_CPT0;
+ break;
default:
blkaddr = BLKADDR_RVUM;
break;
@@ -841,6 +869,7 @@ static struct _req_type __maybe_unused \
*otx2_mbox_alloc_msg_ ## _fn_name(struct mbox *mbox) \
{ \
struct _req_type *req; \
+ u16 pcifunc = mbox->pfvf->pcifunc; \
\
req = (struct _req_type *)otx2_mbox_alloc_msg_rsp( \
&mbox->mbox, 0, sizeof(struct _req_type), \
@@ -849,7 +878,8 @@ static struct _req_type __maybe_unused \
return NULL; \
req->hdr.sig = OTX2_MBOX_REQ_SIG; \
req->hdr.id = _id; \
- trace_otx2_msg_alloc(mbox->mbox.pdev, _id, sizeof(*req)); \
+ req->hdr.pcifunc = pcifunc; \
+ trace_otx2_msg_alloc(mbox->mbox.pdev, _id, sizeof(*req), pcifunc); \
return req; \
}
@@ -981,10 +1011,11 @@ void otx2_txschq_free_one(struct otx2_nic *pfvf, u16 lvl, u16 schq);
void otx2_free_pending_sqe(struct otx2_nic *pfvf);
void otx2_sqb_flush(struct otx2_nic *pfvf);
int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
- dma_addr_t *dma);
+ dma_addr_t *dma, int qidx, int idx);
int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable);
void otx2_ctx_disable(struct mbox *mbox, int type, bool npa);
int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable);
+int otx2_nix_cpt_config_bp(struct otx2_nic *pfvf, bool enable);
void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, int qidx);
void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura);
@@ -1010,6 +1041,9 @@ void otx2_pfaf_mbox_destroy(struct otx2_nic *pf);
void otx2_disable_mbox_intr(struct otx2_nic *pf);
void otx2_disable_napi(struct otx2_nic *pf);
irqreturn_t otx2_cq_intr_handler(int irq, void *cq_irq);
+int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura);
+int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx);
+int otx2_set_hw_capabilities(struct otx2_nic *pfvf);
/* RSS configuration APIs*/
int otx2_rss_init(struct otx2_nic *pfvf);
@@ -1072,7 +1106,10 @@ int otx2_del_macfilter(struct net_device *netdev, const u8 *mac);
int otx2_add_macfilter(struct net_device *netdev, const u8 *mac);
int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable);
int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf);
-bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx);
+bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, struct xdp_frame *xdpf,
+ u64 iova, int len, u16 qidx, u16 flags);
+void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, struct xdp_frame *xdpf,
+ u64 dma_addr, int len, int *offset, u16 flags);
u16 otx2_get_max_mtu(struct otx2_nic *pfvf);
int otx2_handle_ntuple_tc_features(struct net_device *netdev,
netdev_features_t features);
@@ -1149,4 +1186,9 @@ static inline int mcam_entry_cmp(const void *a, const void *b)
{
return *(u16 *)a - *(u16 *)b;
}
+
+dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf,
+ struct sk_buff *skb, int seg, int *len);
+void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg);
+int otx2_read_free_sqe(struct otx2_nic *pfvf, u16 qidx);
#endif /* OTX2_COMMON_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
index 294fba58b670..f110dfa42360 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
@@ -435,6 +435,9 @@ process_pfc:
return err;
}
+ /* Default disable backpressure on NIX-CPT */
+ otx2_nix_cpt_config_bp(pfvf, false);
+
/* Request Per channel Bpids */
if (pfc->pfc_en)
otx2_nix_config_bp(pfvf, true);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
index 33ec9a7f7c03..e13ae5484c19 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
@@ -41,6 +41,7 @@ static int otx2_dl_mcam_count_set(struct devlink *devlink, u32 id,
if (!pfvf->flow_cfg)
return 0;
+ pfvf->flow_cfg->ntuple_cnt = ctx->val.vu16;
otx2_alloc_mcam_entries(pfvf, ctx->val.vu16);
return 0;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index 2d53dc77ef1e..45b8c9230184 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -315,7 +315,7 @@ static void otx2_get_pauseparam(struct net_device *netdev,
struct otx2_nic *pfvf = netdev_priv(netdev);
struct cgx_pause_frm_cfg *req, *rsp;
- if (is_otx2_lbkvf(pfvf->pdev))
+ if (is_otx2_lbkvf(pfvf->pdev) || is_otx2_sdp_rep(pfvf->pdev))
return;
mutex_lock(&pfvf->mbox.lock);
@@ -347,7 +347,7 @@ static int otx2_set_pauseparam(struct net_device *netdev,
if (pause->autoneg)
return -EOPNOTSUPP;
- if (is_otx2_lbkvf(pfvf->pdev))
+ if (is_otx2_lbkvf(pfvf->pdev) || is_otx2_sdp_rep(pfvf->pdev))
return -EOPNOTSUPP;
if (pause->rx_pause)
@@ -910,8 +910,12 @@ static int otx2_get_rxfh(struct net_device *dev,
return -ENOENT;
if (indir) {
- for (idx = 0; idx < rss->rss_size; idx++)
+ for (idx = 0; idx < rss->rss_size; idx++) {
+ /* Skip if the rx queue is AF_XDP zero-copy enabled */
+ if (test_bit(rss_ctx->ind_tbl[idx], pfvf->af_xdp_zc_qidx))
+ continue;
indir[idx] = rss_ctx->ind_tbl[idx];
+ }
}
if (rxfh->key)
memcpy(rxfh->key, rss->key, sizeof(rss->key));
@@ -937,8 +941,8 @@ static u32 otx2_get_link(struct net_device *netdev)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
- /* LBK link is internal and always UP */
- if (is_otx2_lbkvf(pfvf->pdev))
+ /* LBK and SDP links are internal and always UP */
+ if (is_otx2_lbkvf(pfvf->pdev) || is_otx2_sdp_rep(pfvf->pdev))
return 1;
return pfvf->linfo.link_up;
}
@@ -1409,7 +1413,7 @@ static int otx2vf_get_link_ksettings(struct net_device *netdev,
{
struct otx2_nic *pfvf = netdev_priv(netdev);
- if (is_otx2_lbkvf(pfvf->pdev)) {
+ if (is_otx2_lbkvf(pfvf->pdev) || is_otx2_sdp_rep(pfvf->pdev)) {
cmd->base.duplex = DUPLEX_FULL;
cmd->base.speed = SPEED_100000;
} else {
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
index 47bfd1fb37d4..64c6d9162ef6 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
@@ -247,7 +247,7 @@ int otx2_mcam_entry_init(struct otx2_nic *pfvf)
mutex_unlock(&pfvf->mbox.lock);
/* Allocate entries for Ntuple filters */
- count = otx2_alloc_mcam_entries(pfvf, OTX2_DEFAULT_FLOWCOUNT);
+ count = otx2_alloc_mcam_entries(pfvf, flow_cfg->ntuple_cnt);
if (count <= 0) {
otx2_clear_ntuple_flow_info(pfvf, flow_cfg);
return 0;
@@ -307,6 +307,7 @@ int otx2_mcam_flow_init(struct otx2_nic *pf)
INIT_LIST_HEAD(&pf->flow_cfg->flow_list_tc);
pf->flow_cfg->ucast_flt_cnt = OTX2_DEFAULT_UNICAST_FLOWS;
+ pf->flow_cfg->ntuple_cnt = OTX2_DEFAULT_FLOWCOUNT;
/* Allocate bare minimum number of MCAM entries needed for
* unicast and ntuple filters.
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index e310f99b1736..db7c466fdc39 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -26,6 +26,8 @@
#include "cn10k.h"
#include "qos.h"
#include <rvu_trace.h>
+#include "cn10k_ipsec.h"
+#include "otx2_xsk.h"
#define DRV_NAME "rvu_nicpf"
#define DRV_STRING "Marvell RVU NIC Physical Function Driver"
@@ -463,6 +465,9 @@ static void otx2_pfvf_mbox_handler(struct work_struct *work)
offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
+ trace_otx2_msg_status(pf->pdev, "PF-VF down queue handler(forwarding)",
+ vf_mbox->num_msgs);
+
for (id = 0; id < vf_mbox->num_msgs; id++) {
msg = (struct mbox_msghdr *)(mdev->mbase + mbox->rx_start +
offset);
@@ -471,7 +476,7 @@ static void otx2_pfvf_mbox_handler(struct work_struct *work)
goto inval_msg;
/* Set VF's number in each of the msg */
- msg->pcifunc &= RVU_PFVF_FUNC_MASK;
+ msg->pcifunc &= ~RVU_PFVF_FUNC_MASK;
msg->pcifunc |= (vf_idx + 1) & RVU_PFVF_FUNC_MASK;
offset = msg->next_msgoff;
}
@@ -501,6 +506,9 @@ static void otx2_pfvf_mbox_up_handler(struct work_struct *work)
offset = mbox->rx_start + ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
+ trace_otx2_msg_status(pf->pdev, "PF-VF up queue handler(response)",
+ vf_mbox->up_num_msgs);
+
for (id = 0; id < vf_mbox->up_num_msgs; id++) {
msg = mdev->mbase + offset;
@@ -817,6 +825,9 @@ static void otx2_pfaf_mbox_handler(struct work_struct *work)
offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
pf = af_mbox->pfvf;
+ trace_otx2_msg_status(pf->pdev, "PF-AF down queue handler(response)",
+ num_msgs);
+
for (id = 0; id < num_msgs; id++) {
msg = (struct mbox_msghdr *)(mdev->mbase + offset);
otx2_process_pfaf_mbox_msg(pf, msg);
@@ -972,6 +983,9 @@ static void otx2_pfaf_mbox_up_handler(struct work_struct *work)
offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
+ trace_otx2_msg_status(pf->pdev, "PF-AF up queue handler(notification)",
+ num_msgs);
+
for (id = 0; id < num_msgs; id++) {
msg = (struct mbox_msghdr *)(mdev->mbase + offset);
@@ -1021,6 +1035,9 @@ static irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq)
trace_otx2_msg_interrupt(pf->pdev, "UP message from AF to PF",
BIT_ULL(0));
+
+ trace_otx2_msg_status(pf->pdev, "PF-AF up work queued(interrupt)",
+ hdr->num_msgs);
}
if (mbox_data & MBOX_DOWN_MSG) {
@@ -1037,6 +1054,9 @@ static irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq)
trace_otx2_msg_interrupt(pf->pdev, "DOWN reply from AF to PF",
BIT_ULL(0));
+
+ trace_otx2_msg_status(pf->pdev, "PF-AF down work queued(interrupt)",
+ hdr->num_msgs);
}
return IRQ_HANDLED;
@@ -1484,6 +1504,8 @@ static void otx2_free_sq_res(struct otx2_nic *pf)
if (!sq->sqe)
continue;
qmem_free(pf->dev, sq->sqe);
+ qmem_free(pf->dev, sq->sqe_ring);
+ qmem_free(pf->dev, sq->cpt_resp);
qmem_free(pf->dev, sq->tso_hdrs);
kfree(sq->sg);
kfree(sq->sqb_ptrs);
@@ -1551,6 +1573,9 @@ int otx2_init_hw_resources(struct otx2_nic *pf)
if (err)
goto err_free_npa_lf;
+ /* Disable backpressure on the NIX-CPT link by default */
+ otx2_nix_cpt_config_bp(pf, false);
+
/* Enable backpressure for CGX mapped PF/VFs */
if (!is_otx2_lbkvf(pf->pdev))
otx2_nix_config_bp(pf, true);
@@ -1656,9 +1681,7 @@ void otx2_free_hw_resources(struct otx2_nic *pf)
struct nix_lf_free_req *free_req;
struct mbox *mbox = &pf->mbox;
struct otx2_cq_queue *cq;
- struct otx2_pool *pool;
struct msg_req *req;
- int pool_id;
int qidx;
/* Ensure all SQE are processed */
@@ -1699,13 +1722,6 @@ void otx2_free_hw_resources(struct otx2_nic *pf)
/* Free RQ buffer pointers*/
otx2_free_aura_ptr(pf, AURA_NIX_RQ);
- for (qidx = 0; qidx < pf->hw.rx_queues; qidx++) {
- pool_id = otx2_get_pool_idx(pf, AURA_NIX_RQ, qidx);
- pool = &pf->qset.pool[pool_id];
- page_pool_destroy(pool->page_pool);
- pool->page_pool = NULL;
- }
-
otx2_free_cq_res(pf);
/* Free all ingress bandwidth profiles allocated */
@@ -2273,6 +2289,10 @@ static int otx2_set_features(struct net_device *netdev,
return otx2_enable_rxvlan(pf,
features & NETIF_F_HW_VLAN_CTAG_RX);
+ if (changed & NETIF_F_HW_ESP)
+ return cn10k_ipsec_ethtool_init(netdev,
+ features & NETIF_F_HW_ESP);
+
return otx2_handle_ntuple_tc_features(netdev, features);
}
@@ -2681,7 +2701,6 @@ static int otx2_get_vf_config(struct net_device *netdev, int vf,
static int otx2_xdp_xmit_tx(struct otx2_nic *pf, struct xdp_frame *xdpf,
int qidx)
{
- struct page *page;
u64 dma_addr;
int err = 0;
@@ -2691,11 +2710,11 @@ static int otx2_xdp_xmit_tx(struct otx2_nic *pf, struct xdp_frame *xdpf,
if (dma_mapping_error(pf->dev, dma_addr))
return -ENOMEM;
- err = otx2_xdp_sq_append_pkt(pf, dma_addr, xdpf->len, qidx);
+ err = otx2_xdp_sq_append_pkt(pf, xdpf, dma_addr, xdpf->len,
+ qidx, OTX2_XDP_REDIRECT);
if (!err) {
otx2_dma_unmap_page(pf, dma_addr, xdpf->len, DMA_TO_DEVICE);
- page = virt_to_page(xdpf->data);
- put_page(page);
+ xdp_return_frame(xdpf);
return -ENOMEM;
}
return 0;
@@ -2779,6 +2798,8 @@ static int otx2_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
switch (xdp->command) {
case XDP_SETUP_PROG:
return otx2_xdp_setup(pf, xdp->prog);
+ case XDP_SETUP_XSK_POOL:
+ return otx2_xsk_pool_setup(pf, xdp->xsk.pool, xdp->xsk.queue_id);
default:
return -EINVAL;
}
@@ -2856,6 +2877,7 @@ static const struct net_device_ops otx2_netdev_ops = {
.ndo_set_vf_vlan = otx2_set_vf_vlan,
.ndo_get_vf_config = otx2_get_vf_config,
.ndo_bpf = otx2_xdp,
+ .ndo_xsk_wakeup = otx2_xsk_wakeup,
.ndo_xdp_xmit = otx2_xdp_xmit,
.ndo_setup_tc = otx2_setup_tc,
.ndo_set_vf_trust = otx2_ndo_set_vf_trust,
@@ -3044,7 +3066,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return err;
}
- err = pci_request_regions(pdev, DRV_NAME);
+ err = pcim_request_all_regions(pdev, DRV_NAME);
if (err) {
dev_err(dev, "PCI request regions failed 0x%x\n", err);
return err;
@@ -3053,7 +3075,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
if (err) {
dev_err(dev, "DMA mask config failed, abort\n");
- goto err_release_regions;
+ return err;
}
pci_set_master(pdev);
@@ -3063,10 +3085,8 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
qos_txqs = min_t(int, qcount, OTX2_QOS_MAX_LEAF_NODES);
netdev = alloc_etherdev_mqs(sizeof(*pf), qcount + qos_txqs, qcount);
- if (!netdev) {
- err = -ENOMEM;
- goto err_release_regions;
- }
+ if (!netdev)
+ return -ENOMEM;
pci_set_drvdata(pdev, netdev);
SET_NETDEV_DEV(netdev, &pdev->dev);
@@ -3124,6 +3144,8 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_ptp_destroy;
+ otx2_set_hw_capabilities(pf);
+
err = cn10k_mcs_init(pf);
if (err)
goto err_del_mcam_entries;
@@ -3162,10 +3184,14 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* reset CGX/RPM MAC stats */
otx2_reset_mac_stats(pf);
+ err = cn10k_ipsec_init(netdev);
+ if (err)
+ goto err_mcs_free;
+
err = register_netdev(netdev);
if (err) {
dev_err(dev, "Failed to register netdevice\n");
- goto err_mcs_free;
+ goto err_ipsec_clean;
}
err = otx2_wq_init(pf);
@@ -3190,22 +3216,36 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* Enable link notifications */
otx2_cgx_config_linkevents(pf, true);
+ pf->af_xdp_zc_qidx = bitmap_zalloc(qcount, GFP_KERNEL);
+ if (!pf->af_xdp_zc_qidx) {
+ err = -ENOMEM;
+ goto err_sriov_cleannup;
+ }
+
#ifdef CONFIG_DCB
err = otx2_dcbnl_set_ops(netdev);
if (err)
- goto err_pf_sriov_init;
+ goto err_free_zc_bmap;
#endif
otx2_qos_init(pf, qos_txqs);
return 0;
+#ifdef CONFIG_DCB
+err_free_zc_bmap:
+ bitmap_free(pf->af_xdp_zc_qidx);
+#endif
+err_sriov_cleannup:
+ otx2_sriov_vfcfg_cleanup(pf);
err_pf_sriov_init:
otx2_shutdown_tc(pf);
err_mcam_flow_del:
otx2_mcam_flow_del(pf);
err_unreg_netdev:
unregister_netdev(netdev);
+err_ipsec_clean:
+ cn10k_ipsec_clean(pf);
err_mcs_free:
cn10k_mcs_free(pf);
err_del_mcam_entries:
@@ -3224,8 +3264,6 @@ err_detach_rsrc:
err_free_netdev:
pci_set_drvdata(pdev, NULL);
free_netdev(netdev);
-err_release_regions:
- pci_release_regions(pdev);
return err;
}
@@ -3267,6 +3305,7 @@ static void otx2_vf_link_event_task(struct work_struct *work)
req = (struct cgx_link_info_msg *)msghdr;
req->hdr.id = MBOX_MSG_CGX_LINK_EVENT;
req->hdr.sig = OTX2_MBOX_REQ_SIG;
+ req->hdr.pcifunc = pf->pcifunc;
memcpy(&req->link_info, &pf->linfo, sizeof(req->link_info));
otx2_mbox_wait_for_zero(&pf->mbox_pfvf[0].mbox_up, vf_idx);
@@ -3403,6 +3442,7 @@ static void otx2_remove(struct pci_dev *pdev)
otx2_unregister_dl(pf);
unregister_netdev(netdev);
+ cn10k_ipsec_clean(pf);
cn10k_mcs_free(pf);
otx2_sriov_disable(pf->pdev);
otx2_sriov_vfcfg_cleanup(pf);
@@ -3424,8 +3464,6 @@ static void otx2_remove(struct pci_dev *pdev)
pci_free_irq_vectors(pf->pdev);
pci_set_drvdata(pdev, NULL);
free_netdev(netdev);
-
- pci_release_regions(pdev);
}
static struct pci_driver otx2_pf_driver = {
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index 04bc06a80e23..99ace381cc78 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -11,6 +11,8 @@
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <net/ip6_checksum.h>
+#include <net/xfrm.h>
+#include <net/xdp.h>
#include "otx2_reg.h"
#include "otx2_common.h"
@@ -18,6 +20,7 @@
#include "otx2_txrx.h"
#include "otx2_ptp.h"
#include "cn10k.h"
+#include "otx2_xsk.h"
#define CQE_ADDR(CQ, idx) ((CQ)->cqe_base + ((CQ)->cqe_size * (idx)))
#define PTP_PORT 0x13F
@@ -26,11 +29,30 @@
*/
#define PTP_SYNC_SEC_OFFSET 34
+DEFINE_STATIC_KEY_FALSE(cn10k_ipsec_sa_enabled);
+
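+/* Number of free SQEs between the consumer head and the producer head */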
+static int otx2_get_free_sqe(struct otx2_snd_queue *sq)
+{
+ return (sq->cons_head - sq->head - 1 + sq->sqe_cnt)
+ & (sq->sqe_cnt - 1);
+}
+
static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
struct bpf_prog *prog,
struct nix_cqe_rx_s *cqe,
struct otx2_cq_queue *cq,
- bool *need_xdp_flush);
+ u32 *metasize, bool *need_xdp_flush);
+
+static void otx2_sq_set_sqe_base(struct otx2_snd_queue *sq,
+ struct sk_buff *skb)
+{
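+ /* IPsec offload packets build their SQE in the second half of the
+ * double-wide sqe_ring slot; all other packets use the regular SQE memory.
+ */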
+ if (static_branch_unlikely(&cn10k_ipsec_sa_enabled) &&
+ (xfrm_offload(skb)))
+ sq->sqe_base = sq->sqe_ring->base + sq->sqe_size +
+ (sq->head * (sq->sqe_size * 2));
+ else
+ sq->sqe_base = sq->sqe->base;
+}
static int otx2_nix_cq_op_status(struct otx2_nic *pfvf,
struct otx2_cq_queue *cq)
@@ -80,54 +102,24 @@ static unsigned int frag_num(unsigned int i)
#endif
}
-static dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf,
- struct sk_buff *skb, int seg, int *len)
-{
- const skb_frag_t *frag;
- struct page *page;
- int offset;
-
- /* First segment is always skb->data */
- if (!seg) {
- page = virt_to_page(skb->data);
- offset = offset_in_page(skb->data);
- *len = skb_headlen(skb);
- } else {
- frag = &skb_shinfo(skb)->frags[seg - 1];
- page = skb_frag_page(frag);
- offset = skb_frag_off(frag);
- *len = skb_frag_size(frag);
- }
- return otx2_dma_map_page(pfvf, page, offset, *len, DMA_TO_DEVICE);
-}
-
-static void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg)
-{
- int seg;
-
- for (seg = 0; seg < sg->num_segs; seg++) {
- otx2_dma_unmap_page(pfvf, sg->dma_addr[seg],
- sg->size[seg], DMA_TO_DEVICE);
- }
- sg->num_segs = 0;
-}
-
static void otx2_xdp_snd_pkt_handler(struct otx2_nic *pfvf,
struct otx2_snd_queue *sq,
- struct nix_cqe_tx_s *cqe)
+ struct nix_cqe_tx_s *cqe,
+ int *xsk_frames)
{
struct nix_send_comp_s *snd_comp = &cqe->comp;
struct sg_list *sg;
- struct page *page;
- u64 pa;
sg = &sq->sg[snd_comp->sqe_id];
+ if (sg->flags & OTX2_AF_XDP_FRAME) {
+ (*xsk_frames)++;
+ return;
+ }
- pa = otx2_iova_to_phys(pfvf->iommu_domain, sg->dma_addr[0]);
- otx2_dma_unmap_page(pfvf, sg->dma_addr[0],
- sg->size[0], DMA_TO_DEVICE);
- page = virt_to_page(phys_to_virt(pa));
- put_page(page);
+ if (sg->flags & OTX2_XDP_REDIRECT)
+ otx2_dma_unmap_page(pfvf, sg->dma_addr[0], sg->size[0], DMA_TO_DEVICE);
+ xdp_return_frame((struct xdp_frame *)sg->skb);
+ sg->skb = (u64)NULL;
}
static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
@@ -343,18 +335,24 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
struct nix_rx_parse_s *parse = &cqe->parse;
struct nix_rx_sg_s *sg = &cqe->sg;
struct sk_buff *skb = NULL;
+ u64 *word = (u64 *)parse;
void *end, *start;
+ u32 metasize = 0;
u64 *seg_addr;
u16 *seg_size;
int seg;
if (unlikely(parse->errlev || parse->errcode)) {
- if (otx2_check_rcv_errors(pfvf, cqe, cq->cq_idx))
+ if (otx2_check_rcv_errors(pfvf, cqe, cq->cq_idx)) {
+ trace_otx2_parse_dump(pfvf->pdev, "Err:", word);
return;
+ }
}
+ trace_otx2_parse_dump(pfvf->pdev, "", word);
if (pfvf->xdp_prog)
- if (otx2_xdp_rcv_pkt_handler(pfvf, pfvf->xdp_prog, cqe, cq, need_xdp_flush))
+ if (otx2_xdp_rcv_pkt_handler(pfvf, pfvf->xdp_prog, cqe, cq,
+ &metasize, need_xdp_flush))
return;
skb = napi_get_frags(napi);
@@ -386,6 +384,8 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
skb->mark = parse->match_id;
skb_mark_for_recycle(skb);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
napi_gro_frags(napi);
}
@@ -449,6 +449,18 @@ int otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
return cnt - cq->pool_ptrs;
}
+static void otx2_zc_submit_pkts(struct otx2_nic *pfvf, struct xsk_buff_pool *xsk_pool,
+ int *xsk_frames, int qidx, int budget)
+{
+ if (*xsk_frames)
+ xsk_tx_completed(xsk_pool, *xsk_frames);
+
+ if (xsk_uses_need_wakeup(xsk_pool))
+ xsk_set_tx_need_wakeup(xsk_pool);
+
+ otx2_zc_napi_handler(pfvf, xsk_pool, qidx, budget);
+}
+
static int otx2_tx_napi_handler(struct otx2_nic *pfvf,
struct otx2_cq_queue *cq, int budget)
{
@@ -457,16 +469,22 @@ static int otx2_tx_napi_handler(struct otx2_nic *pfvf,
struct nix_cqe_tx_s *cqe;
struct net_device *ndev;
int processed_cqe = 0;
+ int xsk_frames = 0;
+
+ qidx = cq->cq_idx - pfvf->hw.rx_queues;
+ sq = &pfvf->qset.sq[qidx];
if (cq->pend_cqe >= budget)
goto process_cqe;
- if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
+ if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe) {
+ if (sq->xsk_pool)
+ otx2_zc_submit_pkts(pfvf, sq->xsk_pool, &xsk_frames,
+ qidx, budget);
return 0;
+ }
process_cqe:
- qidx = cq->cq_idx - pfvf->hw.rx_queues;
- sq = &pfvf->qset.sq[qidx];
while (likely(processed_cqe < budget) && cq->pend_cqe) {
cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq);
@@ -476,10 +494,8 @@ process_cqe:
break;
}
- qidx = cq->cq_idx - pfvf->hw.rx_queues;
-
if (cq->cq_type == CQ_XDP)
- otx2_xdp_snd_pkt_handler(pfvf, sq, cqe);
+ otx2_xdp_snd_pkt_handler(pfvf, sq, cqe, &xsk_frames);
else
otx2_snd_pkt_handler(pfvf, cq, &pfvf->qset.sq[qidx],
cqe, budget, &tx_pkts, &tx_bytes);
@@ -520,6 +536,10 @@ process_cqe:
netif_carrier_ok(ndev))
netif_tx_wake_queue(txq);
}
+
+ if (sq->xsk_pool)
+ otx2_zc_submit_pkts(pfvf, sq->xsk_pool, &xsk_frames, qidx, budget);
+
return 0;
}
@@ -545,9 +565,10 @@ static void otx2_adjust_adaptive_coalese(struct otx2_nic *pfvf, struct otx2_cq_p
int otx2_napi_handler(struct napi_struct *napi, int budget)
{
struct otx2_cq_queue *rx_cq = NULL;
+ struct otx2_cq_queue *cq = NULL;
+ struct otx2_pool *pool = NULL;
struct otx2_cq_poll *cq_poll;
int workdone = 0, cq_idx, i;
- struct otx2_cq_queue *cq;
struct otx2_qset *qset;
struct otx2_nic *pfvf;
int filled_cnt = -1;
@@ -572,6 +593,7 @@ int otx2_napi_handler(struct napi_struct *napi, int budget)
if (rx_cq && rx_cq->pool_ptrs)
filled_cnt = pfvf->hw_ops->refill_pool_ptrs(pfvf, rx_cq);
+
/* Clear the IRQ */
otx2_write64(pfvf, NIX_LF_CINTX_INT(cq_poll->cint_idx), BIT_ULL(0));
@@ -584,20 +606,31 @@ int otx2_napi_handler(struct napi_struct *napi, int budget)
if (pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED)
otx2_adjust_adaptive_coalese(pfvf, cq_poll);
+ if (likely(cq))
+ pool = &pfvf->qset.pool[cq->cq_idx];
+
if (unlikely(!filled_cnt)) {
struct refill_work *work;
struct delayed_work *dwork;
- work = &pfvf->refill_wrk[cq->cq_idx];
- dwork = &work->pool_refill_work;
- /* Schedule a task if no other task is running */
- if (!cq->refill_task_sched) {
- work->napi = napi;
- cq->refill_task_sched = true;
- schedule_delayed_work(dwork,
- msecs_to_jiffies(100));
+ if (likely(cq)) {
+ work = &pfvf->refill_wrk[cq->cq_idx];
+ dwork = &work->pool_refill_work;
+ /* Schedule a task if no other task is running */
+ if (!cq->refill_task_sched) {
+ work->napi = napi;
+ cq->refill_task_sched = true;
+ schedule_delayed_work(dwork,
+ msecs_to_jiffies(100));
+ }
+ /* Set the need-wakeup flag since buffers could not be refilled */
+ if (pool->xsk_pool)
+ xsk_set_rx_need_wakeup(pool->xsk_pool);
}
} else {
+ /* Clear the need-wakeup flag since buffers were refilled successfully */
+ if (pool && pool->xsk_pool)
+ xsk_clear_rx_need_wakeup(pool->xsk_pool);
/* Re-enable interrupts */
otx2_write64(pfvf,
NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx),
@@ -625,7 +658,6 @@ void otx2_sqe_flush(void *dev, struct otx2_snd_queue *sq,
sq->head &= (sq->sqe_cnt - 1);
}
-#define MAX_SEGS_PER_SG 3
/* Add SQE scatter/gather subdescriptor structure */
static bool otx2_sqe_add_sg(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
struct sk_buff *skb, int num_segs, int *offset)
@@ -1161,11 +1193,12 @@ bool otx2_sq_append_skb(void *dev, struct netdev_queue *txq,
int offset, num_segs, free_desc;
struct nix_sqe_hdr_s *sqe_hdr;
struct otx2_nic *pfvf = dev;
+ bool ret;
/* Check if there is enough room between producer
* and consumer index.
*/
- free_desc = (sq->cons_head - sq->head - 1 + sq->sqe_cnt) & (sq->sqe_cnt - 1);
+ free_desc = otx2_get_free_sqe(sq);
if (free_desc < sq->sqe_thresh)
return false;
@@ -1177,6 +1210,7 @@ bool otx2_sq_append_skb(void *dev, struct netdev_queue *txq,
/* If SKB doesn't fit in a single SQE, linearize it.
* TODO: Consider adding JUMP descriptor instead.
*/
+
if (unlikely(num_segs > OTX2_MAX_FRAGS_IN_SQE)) {
if (__skb_linearize(skb)) {
dev_kfree_skb_any(skb);
@@ -1196,6 +1230,9 @@ bool otx2_sq_append_skb(void *dev, struct netdev_queue *txq,
return true;
}
+ /* Set sqe base address */
+ otx2_sq_set_sqe_base(sq, skb);
+
/* Set SQE's SEND_HDR.
* Do not clear the first 64bit as it contains constant info.
*/
@@ -1208,7 +1245,13 @@ bool otx2_sq_append_skb(void *dev, struct netdev_queue *txq,
otx2_sqe_add_ext(pfvf, sq, skb, &offset);
/* Add SG subdesc with data frags */
- if (!otx2_sqe_add_sg(pfvf, sq, skb, num_segs, &offset)) {
+ if (static_branch_unlikely(&cn10k_ipsec_sa_enabled) &&
+ (xfrm_offload(skb)))
+ ret = otx2_sqe_add_sg_ipsec(pfvf, sq, skb, num_segs, &offset);
+ else
+ ret = otx2_sqe_add_sg(pfvf, sq, skb, num_segs, &offset);
+
+ if (!ret) {
otx2_dma_unmap_skb_frags(pfvf, &sq->sg[sq->head]);
return false;
}
@@ -1217,11 +1260,15 @@ bool otx2_sq_append_skb(void *dev, struct netdev_queue *txq,
sqe_hdr->sizem1 = (offset / 16) - 1;
+ if (static_branch_unlikely(&cn10k_ipsec_sa_enabled) &&
+ (xfrm_offload(skb)))
+ return cn10k_ipsec_transmit(pfvf, txq, sq, skb, num_segs,
+ offset);
+
netdev_tx_sent_queue(txq, skb->len);
/* Flush SQE to HW */
pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);
-
return true;
}
EXPORT_SYMBOL(otx2_sq_append_skb);
@@ -1234,15 +1281,19 @@ void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, int q
u16 pool_id;
u64 iova;
- if (pfvf->xdp_prog)
+ pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_RQ, qidx);
+ pool = &pfvf->qset.pool[pool_id];
+
+ if (pfvf->xdp_prog) {
+ if (pool->page_pool)
+ xdp_rxq_info_unreg_mem_model(&cq->xdp_rxq);
+
xdp_rxq_info_unreg(&cq->xdp_rxq);
+ }
if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
return;
- pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_RQ, qidx);
- pool = &pfvf->qset.pool[pool_id];
-
while (cq->pend_cqe) {
cqe = (struct nix_cqe_rx_s *)otx2_get_next_cqe(cq);
processed_cqe++;
@@ -1363,8 +1414,8 @@ void otx2_free_pending_sqe(struct otx2_nic *pfvf)
}
}
-static void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, u64 dma_addr,
- int len, int *offset)
+void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, struct xdp_frame *xdpf,
+ u64 dma_addr, int len, int *offset, u16 flags)
{
struct nix_sqe_sg_s *sg = NULL;
u64 *iova = NULL;
@@ -1381,16 +1432,34 @@ static void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, u64 dma_addr,
sq->sg[sq->head].dma_addr[0] = dma_addr;
sq->sg[sq->head].size[0] = len;
sq->sg[sq->head].num_segs = 1;
+ sq->sg[sq->head].flags = flags;
+ sq->sg[sq->head].skb = (u64)xdpf;
+}
+
+int otx2_read_free_sqe(struct otx2_nic *pfvf, u16 qidx)
+{
+ struct otx2_snd_queue *sq;
+ int free_sqe;
+
+ sq = &pfvf->qset.sq[qidx];
+ free_sqe = otx2_get_free_sqe(sq);
+ if (free_sqe < sq->sqe_thresh) {
+ netdev_warn(pfvf->netdev, "No free sqe for Send queue%d\n", qidx);
+ return 0;
+ }
+
+ return free_sqe - sq->sqe_thresh;
}
-bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx)
+bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, struct xdp_frame *xdpf,
+ u64 iova, int len, u16 qidx, u16 flags)
{
struct nix_sqe_hdr_s *sqe_hdr;
struct otx2_snd_queue *sq;
int offset, free_sqe;
sq = &pfvf->qset.sq[qidx];
- free_sqe = (sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb;
+ free_sqe = otx2_get_free_sqe(sq);
if (free_sqe < sq->sqe_thresh)
return false;
@@ -1409,7 +1478,7 @@ bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx)
offset = sizeof(*sqe_hdr);
- otx2_xdp_sqe_add_sg(sq, iova, len, &offset);
+ otx2_xdp_sqe_add_sg(sq, xdpf, iova, len, &offset, flags);
sqe_hdr->sizem1 = (offset / 16) - 1;
pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);
@@ -1420,16 +1489,30 @@ static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
struct bpf_prog *prog,
struct nix_cqe_rx_s *cqe,
struct otx2_cq_queue *cq,
- bool *need_xdp_flush)
+ u32 *metasize, bool *need_xdp_flush)
{
+ struct xdp_buff xdp, *xsk_buff = NULL;
unsigned char *hard_start;
+ struct otx2_pool *pool;
+ struct xdp_frame *xdpf;
int qidx = cq->cq_idx;
- struct xdp_buff xdp;
struct page *page;
u64 iova, pa;
u32 act;
int err;
+ pool = &pfvf->qset.pool[qidx];
+
+ if (pool->xsk_pool) {
+ xsk_buff = pool->xdp[--cq->rbpool->xdp_top];
+ if (!xsk_buff)
+ return false;
+
+ xsk_buff->data_end = xsk_buff->data + cqe->sg.seg_size;
+ act = bpf_prog_run_xdp(prog, xsk_buff);
+ goto handle_xdp_verdict;
+ }
+
iova = cqe->sg.seg_addr - OTX2_HEAD_ROOM;
pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
page = virt_to_page(phys_to_virt(pa));
@@ -1438,41 +1521,63 @@ static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
hard_start = (unsigned char *)phys_to_virt(pa);
xdp_prepare_buff(&xdp, hard_start, OTX2_HEAD_ROOM,
- cqe->sg.seg_size, false);
+ cqe->sg.seg_size, true);
act = bpf_prog_run_xdp(prog, &xdp);
+handle_xdp_verdict:
switch (act) {
case XDP_PASS:
+ *metasize = xdp.data - xdp.data_meta;
break;
case XDP_TX:
qidx += pfvf->hw.tx_queues;
cq->pool_ptrs++;
- return otx2_xdp_sq_append_pkt(pfvf, iova,
- cqe->sg.seg_size, qidx);
+ xdpf = xdp_convert_buff_to_frame(&xdp);
+ return otx2_xdp_sq_append_pkt(pfvf, xdpf,
+ cqe->sg.seg_addr,
+ cqe->sg.seg_size,
+ qidx, OTX2_XDP_TX);
case XDP_REDIRECT:
cq->pool_ptrs++;
- err = xdp_do_redirect(pfvf->netdev, &xdp, prog);
+ if (xsk_buff) {
+ err = xdp_do_redirect(pfvf->netdev, xsk_buff, prog);
+ if (!err) {
+ *need_xdp_flush = true;
+ return true;
+ }
+ return false;
+ }
- otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
- DMA_FROM_DEVICE);
+ err = xdp_do_redirect(pfvf->netdev, &xdp, prog);
if (!err) {
*need_xdp_flush = true;
return true;
}
- put_page(page);
+
+ otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
+ DMA_FROM_DEVICE);
+ xdpf = xdp_convert_buff_to_frame(&xdp);
+ xdp_return_frame(xdpf);
break;
default:
bpf_warn_invalid_xdp_action(pfvf->netdev, prog, act);
- break;
+ fallthrough;
case XDP_ABORTED:
- trace_xdp_exception(pfvf->netdev, prog, act);
- break;
+ if (act == XDP_ABORTED)
+ trace_xdp_exception(pfvf->netdev, prog, act);
+ fallthrough;
case XDP_DROP:
- otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
- DMA_FROM_DEVICE);
- put_page(page);
cq->pool_ptrs++;
+ if (xsk_buff) {
+ xsk_buff_free(xsk_buff);
+ } else if (page->pp) {
+ page_pool_recycle_direct(pool->page_pool, page);
+ } else {
+ otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
+ DMA_FROM_DEVICE);
+ put_page(page);
+ }
return true;
}
return false;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
index e1db5f961877..acf259d72008 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
@@ -12,6 +12,7 @@
#include <linux/iommu.h>
#include <linux/if_vlan.h>
#include <net/xdp.h>
+#include <net/xdp_sock_drv.h>
#define LBK_CHAN_BASE 0x000
#define SDP_CHAN_BASE 0x700
@@ -76,6 +77,7 @@ struct otx2_rcv_queue {
struct sg_list {
u16 num_segs;
+ u16 flags;
u64 skb;
u64 size[OTX2_MAX_FRAGS_IN_SQE];
u64 dma_addr[OTX2_MAX_FRAGS_IN_SQE];
@@ -101,6 +103,11 @@ struct otx2_snd_queue {
struct queue_stats stats;
u16 sqb_count;
u64 *sqb_ptrs;
+ /* SQE ring and CPT response queue for Inline IPSEC */
+ struct qmem *sqe_ring;
+ struct qmem *cpt_resp;
+ /* Buffer pool for af_xdp zero-copy */
+ struct xsk_buff_pool *xsk_pool;
} ____cacheline_aligned_in_smp;
enum cq_type {
@@ -124,7 +131,11 @@ struct otx2_pool {
struct qmem *stack;
struct qmem *fc_addr;
struct page_pool *page_pool;
+ struct xsk_buff_pool *xsk_pool;
+ struct xdp_buff **xdp;
+ u16 xdp_cnt;
u16 rbsize;
+ u16 xdp_top;
};
struct otx2_cq_queue {
@@ -141,6 +152,7 @@ struct otx2_cq_queue {
void *cqe_base;
struct qmem *cqe;
struct otx2_pool *rbpool;
+ bool xsk_zc_en;
struct xdp_rxq_info xdp_rxq;
} ____cacheline_aligned_in_smp;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index 839fc77c11b2..8a8b598bd389 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -14,6 +14,7 @@
#include "otx2_reg.h"
#include "otx2_ptp.h"
#include "cn10k.h"
+#include "cn10k_ipsec.h"
#define DRV_NAME "rvu_nicvf"
#define DRV_STRING "Marvell RVU NIC Virtual Function Driver"
@@ -135,7 +136,7 @@ static int otx2vf_process_mbox_msg_up(struct otx2_nic *vf,
rsp->hdr.id = MBOX_MSG_CGX_LINK_EVENT;
rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
- rsp->hdr.pcifunc = 0;
+ rsp->hdr.pcifunc = req->pcifunc;
rsp->hdr.rc = 0;
err = otx2_mbox_up_handler_cgx_link_event(
vf, (struct cgx_link_info_msg *)req, rsp);
@@ -547,7 +548,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return err;
}
- err = pci_request_regions(pdev, DRV_NAME);
+ err = pcim_request_all_regions(pdev, DRV_NAME);
if (err) {
dev_err(dev, "PCI request regions failed 0x%x\n", err);
return err;
@@ -556,7 +557,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
if (err) {
dev_err(dev, "DMA mask config failed, abort\n");
- goto err_release_regions;
+ return err;
}
pci_set_master(pdev);
@@ -564,10 +565,8 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
qcount = num_online_cpus();
qos_txqs = min_t(int, qcount, OTX2_QOS_MAX_LEAF_NODES);
netdev = alloc_etherdev_mqs(sizeof(*vf), qcount + qos_txqs, qcount);
- if (!netdev) {
- err = -ENOMEM;
- goto err_release_regions;
- }
+ if (!netdev)
+ return -ENOMEM;
pci_set_drvdata(pdev, netdev);
SET_NETDEV_DEV(netdev, &pdev->dev);
@@ -693,10 +692,14 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pdev->bus->number, n);
}
+ err = cn10k_ipsec_init(netdev);
+ if (err)
+ goto err_ptp_destroy;
+
err = register_netdev(netdev);
if (err) {
dev_err(dev, "Failed to register netdevice\n");
- goto err_ptp_destroy;
+ goto err_ipsec_clean;
}
err = otx2_vf_wq_init(vf);
@@ -717,19 +720,36 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_shutdown_tc;
+ vf->af_xdp_zc_qidx = bitmap_zalloc(qcount, GFP_KERNEL);
+ if (!vf->af_xdp_zc_qidx) {
+ err = -ENOMEM;
+ goto err_unreg_devlink;
+ }
+
#ifdef CONFIG_DCB
- err = otx2_dcbnl_set_ops(netdev);
- if (err)
- goto err_shutdown_tc;
+ /* Priority flow control is not supported for LBK and SDP VFs */
+ if (!(is_otx2_lbkvf(vf->pdev) || is_otx2_sdp_rep(vf->pdev))) {
+ err = otx2_dcbnl_set_ops(netdev);
+ if (err)
+ goto err_free_zc_bmap;
+ }
#endif
otx2_qos_init(vf, qos_txqs);
return 0;
+#ifdef CONFIG_DCB
+err_free_zc_bmap:
+ bitmap_free(vf->af_xdp_zc_qidx);
+#endif
+err_unreg_devlink:
+ otx2_unregister_dl(vf);
err_shutdown_tc:
otx2_shutdown_tc(vf);
err_unreg_netdev:
unregister_netdev(netdev);
+err_ipsec_clean:
+ cn10k_ipsec_clean(vf);
err_ptp_destroy:
otx2_ptp_destroy(vf);
err_detach_rsrc:
@@ -746,8 +766,6 @@ err_free_irq_vectors:
err_free_netdev:
pci_set_drvdata(pdev, NULL);
free_netdev(netdev);
-err_release_regions:
- pci_release_regions(pdev);
return err;
}
@@ -782,6 +800,7 @@ static void otx2vf_remove(struct pci_dev *pdev)
unregister_netdev(netdev);
if (vf->otx2_wq)
destroy_workqueue(vf->otx2_wq);
+ cn10k_ipsec_clean(vf);
otx2_ptp_destroy(vf);
otx2_mcam_flow_del(vf);
otx2_shutdown_tc(vf);
@@ -795,8 +814,6 @@ static void otx2vf_remove(struct pci_dev *pdev)
pci_free_irq_vectors(vf->pdev);
pci_set_drvdata(pdev, NULL);
free_netdev(netdev);
-
- pci_release_regions(pdev);
}
static struct pci_driver otx2vf_driver = {
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.c
new file mode 100644
index 000000000000..b328aae23d73
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.c
@@ -0,0 +1,245 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Ethernet driver
+ *
+ * Copyright (C) 2024 Marvell.
+ *
+ */
+
+#include <linux/bpf_trace.h>
+#include <linux/stringify.h>
+#include <net/xdp_sock_drv.h>
+#include <net/xdp.h>
+
+#include "otx2_common.h"
+#include "otx2_struct.h"
+#include "otx2_xsk.h"
+
+int otx2_xsk_pool_alloc_buf(struct otx2_nic *pfvf, struct otx2_pool *pool,
+ dma_addr_t *dma, int idx)
+{
+ struct xdp_buff *xdp;
+ int delta;
+
+ xdp = xsk_buff_alloc(pool->xsk_pool);
+ if (!xdp)
+ return -ENOMEM;
+
+ pool->xdp[pool->xdp_top++] = xdp;
+ *dma = OTX2_DATA_ALIGN(xsk_buff_xdp_get_dma(xdp));
+ /* Adjust xdp->data for unaligned addresses */
+ delta = *dma - xsk_buff_xdp_get_dma(xdp);
+ xdp->data += delta;
+
+ return 0;
+}
+
+static int otx2_xsk_ctx_disable(struct otx2_nic *pfvf, u16 qidx, int aura_id)
+{
+ struct nix_cn10k_aq_enq_req *cn10k_rq_aq;
+ struct npa_aq_enq_req *aura_aq;
+ struct npa_aq_enq_req *pool_aq;
+ struct nix_aq_enq_req *rq_aq;
+
+ if (test_bit(CN10K_LMTST, &pfvf->hw.cap_flag)) {
+ cn10k_rq_aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox);
+ if (!cn10k_rq_aq)
+ return -ENOMEM;
+ cn10k_rq_aq->qidx = qidx;
+ cn10k_rq_aq->rq.ena = 0;
+ cn10k_rq_aq->rq_mask.ena = 1;
+ cn10k_rq_aq->ctype = NIX_AQ_CTYPE_RQ;
+ cn10k_rq_aq->op = NIX_AQ_INSTOP_WRITE;
+ } else {
+ rq_aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
+ if (!rq_aq)
+ return -ENOMEM;
+ rq_aq->qidx = qidx;
+ rq_aq->sq.ena = 0;
+ rq_aq->sq_mask.ena = 1;
+ rq_aq->ctype = NIX_AQ_CTYPE_RQ;
+ rq_aq->op = NIX_AQ_INSTOP_WRITE;
+ }
+
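+ /* Also disable the aura and pool contexts backing this queue */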
+ aura_aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
+ if (!aura_aq)
+ goto fail;
+
+ aura_aq->aura_id = aura_id;
+ aura_aq->aura.ena = 0;
+ aura_aq->aura_mask.ena = 1;
+ aura_aq->ctype = NPA_AQ_CTYPE_AURA;
+ aura_aq->op = NPA_AQ_INSTOP_WRITE;
+
+ pool_aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
+ if (!pool_aq)
+ goto fail;
+
+ pool_aq->aura_id = aura_id;
+ pool_aq->pool.ena = 0;
+ pool_aq->pool_mask.ena = 1;
+
+ pool_aq->ctype = NPA_AQ_CTYPE_POOL;
+ pool_aq->op = NPA_AQ_INSTOP_WRITE;
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+
+fail:
+ otx2_mbox_reset(&pfvf->mbox.mbox, 0);
+ return -ENOMEM;
+}
+
+static void otx2_clean_up_rq(struct otx2_nic *pfvf, int qidx)
+{
+ struct otx2_qset *qset = &pfvf->qset;
+ struct otx2_cq_queue *cq;
+ struct otx2_pool *pool;
+ u64 iova;
+
+ /* If the interface DOWN flag is set, the queues are already freed */
+ if (pfvf->flags & OTX2_FLAG_INTF_DOWN)
+ return;
+
+ cq = &qset->cq[qidx];
+ if (cq)
+ otx2_cleanup_rx_cqes(pfvf, cq, qidx);
+
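+ /* Drain and free any buffers still queued in the aura */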
+ pool = &pfvf->qset.pool[qidx];
+ iova = otx2_aura_allocptr(pfvf, qidx);
+ while (iova) {
+ iova -= OTX2_HEAD_ROOM;
+ otx2_free_bufs(pfvf, pool, iova, pfvf->rbsize);
+ iova = otx2_aura_allocptr(pfvf, qidx);
+ }
+
+ mutex_lock(&pfvf->mbox.lock);
+ otx2_xsk_ctx_disable(pfvf, qidx, qidx);
+ mutex_unlock(&pfvf->mbox.lock);
+}
+
+int otx2_xsk_pool_enable(struct otx2_nic *pf, struct xsk_buff_pool *pool, u16 qidx)
+{
+ u16 rx_queues = pf->hw.rx_queues;
+ u16 tx_queues = pf->hw.tx_queues;
+ int err;
+
+ if (qidx >= rx_queues || qidx >= tx_queues)
+ return -EINVAL;
+
+ err = xsk_pool_dma_map(pool, pf->dev, DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
+ if (err)
+ return err;
+
+ set_bit(qidx, pf->af_xdp_zc_qidx);
+ otx2_clean_up_rq(pf, qidx);
+ /* Reconfigure RSS table as 'qidx' cannot be part of RSS now */
+ otx2_set_rss_table(pf, DEFAULT_RSS_CONTEXT_GROUP);
+ /* Kick start the NAPI context so that receiving will start */
+ return otx2_xsk_wakeup(pf->netdev, qidx, XDP_WAKEUP_RX);
+}
+
+int otx2_xsk_pool_disable(struct otx2_nic *pf, u16 qidx)
+{
+ struct net_device *netdev = pf->netdev;
+ struct xsk_buff_pool *pool;
+ struct otx2_snd_queue *sq;
+
+ pool = xsk_get_pool_from_qid(netdev, qidx);
+ if (!pool)
+ return -EINVAL;
+
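+ /* Detach the pool from the XDP send queue before draining the RQ */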
+ sq = &pf->qset.sq[qidx + pf->hw.tx_queues];
+ sq->xsk_pool = NULL;
+ otx2_clean_up_rq(pf, qidx);
+ clear_bit(qidx, pf->af_xdp_zc_qidx);
+ xsk_pool_dma_unmap(pool, DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
+ /* Reconfigure RSS table as 'qidx' needs to be part of RSS again */
+ otx2_set_rss_table(pf, DEFAULT_RSS_CONTEXT_GROUP);
+
+ return 0;
+}
+
+int otx2_xsk_pool_setup(struct otx2_nic *pf, struct xsk_buff_pool *pool, u16 qidx)
+{
+ if (pool)
+ return otx2_xsk_pool_enable(pf, pool, qidx);
+
+ return otx2_xsk_pool_disable(pf, qidx);
+}
+
+int otx2_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
+{
+ struct otx2_nic *pf = netdev_priv(dev);
+ struct otx2_cq_poll *cq_poll = NULL;
+ struct otx2_qset *qset = &pf->qset;
+
+ if (pf->flags & OTX2_FLAG_INTF_DOWN)
+ return -ENETDOWN;
+
+ if (queue_id >= pf->hw.rx_queues || queue_id >= pf->hw.tx_queues)
+ return -EINVAL;
+
+ cq_poll = &qset->napi[queue_id];
+ if (!cq_poll)
+ return -EINVAL;
+
+ /* Trigger interrupt */
+ if (!napi_if_scheduled_mark_missed(&cq_poll->napi)) {
+ otx2_write64(pf, NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx), BIT_ULL(0));
+ otx2_write64(pf, NIX_LF_CINTX_INT_W1S(cq_poll->cint_idx), BIT_ULL(0));
+ }
+
+ return 0;
+}
+
+void otx2_attach_xsk_buff(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, int qidx)
+{
+ if (test_bit(qidx, pfvf->af_xdp_zc_qidx))
+ sq->xsk_pool = xsk_get_pool_from_qid(pfvf->netdev, qidx);
+}
+
+static void otx2_xsk_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len,
+ u16 qidx)
+{
+ struct nix_sqe_hdr_s *sqe_hdr;
+ struct otx2_snd_queue *sq;
+ int offset;
+
+ sq = &pfvf->qset.sq[qidx];
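+ /* Clear the SQE except for the first 8 bytes, which hold constant header fields */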
+ memset(sq->sqe_base + 8, 0, sq->sqe_size - 8);
+
+ sqe_hdr = (struct nix_sqe_hdr_s *)(sq->sqe_base);
+
+ if (!sqe_hdr->total) {
+ sqe_hdr->aura = sq->aura_id;
+ sqe_hdr->df = 1;
+ sqe_hdr->sq = qidx;
+ sqe_hdr->pnc = 1;
+ }
+ sqe_hdr->total = len;
+ sqe_hdr->sqe_id = sq->head;
+
+ offset = sizeof(*sqe_hdr);
+
+ otx2_xdp_sqe_add_sg(sq, NULL, iova, len, &offset, OTX2_AF_XDP_FRAME);
+ sqe_hdr->sizem1 = (offset / 16) - 1;
+ pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);
+}
+
+void otx2_zc_napi_handler(struct otx2_nic *pfvf, struct xsk_buff_pool *pool,
+ int queue, int budget)
+{
+ struct xdp_desc *xdp_desc = pool->tx_descs;
+ int i, batch;
+
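+ /* Transmit at most as many descriptors as there are free SQEs */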
+ budget = min(budget, otx2_read_free_sqe(pfvf, queue));
+ batch = xsk_tx_peek_release_desc_batch(pool, budget);
+ if (!batch)
+ return;
+
+ for (i = 0; i < batch; i++) {
+ dma_addr_t dma_addr;
+
+ dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc[i].addr);
+ otx2_xsk_sq_append_pkt(pfvf, dma_addr, xdp_desc[i].len, queue);
+ }
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.h
new file mode 100644
index 000000000000..8047fafee8fe
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU Ethernet driver
+ *
+ * Copyright (C) 2024 Marvell.
+ *
+ */
+
+#ifndef OTX2_XSK_H
+#define OTX2_XSK_H
+
+struct otx2_nic;
+struct xsk_buff_pool;
+
+int otx2_xsk_pool_setup(struct otx2_nic *pf, struct xsk_buff_pool *pool, u16 qid);
+int otx2_xsk_pool_enable(struct otx2_nic *pf, struct xsk_buff_pool *pool, u16 qid);
+int otx2_xsk_pool_disable(struct otx2_nic *pf, u16 qid);
+int otx2_xsk_pool_alloc_buf(struct otx2_nic *pfvf, struct otx2_pool *pool,
+ dma_addr_t *dma, int idx);
+int otx2_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags);
+void otx2_zc_napi_handler(struct otx2_nic *pfvf, struct xsk_buff_pool *pool,
+ int queue, int budget);
+void otx2_attach_xsk_buff(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, int qidx);
+
+#endif /* OTX2_XSK_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
index 0f844c14485a..5765bac119f0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
@@ -165,6 +165,11 @@ static void __otx2_qos_txschq_cfg(struct otx2_nic *pfvf,
otx2_config_sched_shaping(pfvf, node, cfg, &num_regs);
} else if (level == NIX_TXSCH_LVL_TL2) {
+ /* configure parent txschq */
+ cfg->reg[num_regs] = NIX_AF_TL2X_PARENT(node->schq);
+ cfg->regval[num_regs] = (u64)hw->tx_link << 16;
+ num_regs++;
+
/* configure link cfg */
if (level == pfvf->qos.link_cfg_lvl) {
cfg->reg[num_regs] = NIX_AF_TL3_TL2X_LINKX_CFG(node->schq, hw->tx_link);
@@ -1633,6 +1638,7 @@ static int otx2_qos_leaf_del_last(struct otx2_nic *pfvf, u16 classid, bool force
if (!node->is_static)
dwrr_del_node = true;
+ WRITE_ONCE(node->qid, OTX2_QOS_QID_INNER);
/* destroy the leaf node */
otx2_qos_disable_sq(pfvf, qid);
otx2_qos_destroy_node(pfvf, node);
@@ -1677,9 +1683,6 @@ static int otx2_qos_leaf_del_last(struct otx2_nic *pfvf, u16 classid, bool force
}
kfree(new_cfg);
- /* update tx_real_queues */
- otx2_qos_update_tx_netdev_queues(pfvf);
-
return 0;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c b/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c
index 9d887bfc3108..58d572ce08ef 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c
@@ -82,7 +82,7 @@ static int otx2_qos_sq_aura_pool_init(struct otx2_nic *pfvf, int qidx)
}
for (ptr = 0; ptr < num_sqbs; ptr++) {
- err = otx2_alloc_rbuf(pfvf, pool, &bufptr);
+ err = otx2_alloc_rbuf(pfvf, pool, &bufptr, pool_id, ptr);
if (err)
goto sqb_free;
pfvf->hw_ops->aura_freeptr(pfvf, pool_id, bufptr);
@@ -256,6 +256,26 @@ out:
return err;
}
+static int otx2_qos_nix_npa_ndc_sync(struct otx2_nic *pfvf)
+{
+ struct ndc_sync_op *req;
+ int rc;
+
+ mutex_lock(&pfvf->mbox.lock);
+
+ req = otx2_mbox_alloc_msg_ndc_sync_op(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->nix_lf_tx_sync = true;
+ req->npa_lf_sync = true;
+ rc = otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+ return rc;
+}
+
void otx2_qos_disable_sq(struct otx2_nic *pfvf, int qidx)
{
struct otx2_qset *qset = &pfvf->qset;
@@ -285,6 +305,8 @@ void otx2_qos_disable_sq(struct otx2_nic *pfvf, int qidx)
otx2_qos_sqb_flush(pfvf, sq_idx);
otx2_smq_flush(pfvf, otx2_get_smq_idx(pfvf, sq_idx));
+ /* NIX/NPA NDC sync */
+ otx2_qos_nix_npa_ndc_sync(pfvf);
otx2_cleanup_tx_cqes(pfvf, cq);
mutex_lock(&pfvf->mbox.lock);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/rep.c b/drivers/net/ethernet/marvell/octeontx2/nic/rep.c
index 232b10740c13..2cd3da3b6843 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/rep.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/rep.c
@@ -67,6 +67,8 @@ static int rvu_rep_mcam_flow_init(struct rep_dev *rep)
rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
(&priv->mbox.mbox, 0, &req->hdr);
+ if (IS_ERR(rsp))
+ goto exit;
for (ent = 0; ent < rsp->count; ent++)
rep->flow_cfg->flow_ent[ent + allocated] = rsp->entry_list[ent];
@@ -680,14 +682,17 @@ int rvu_rep_create(struct otx2_nic *priv, struct netlink_ext_ack *extack)
ndev->features |= ndev->hw_features;
eth_hw_addr_random(ndev);
err = rvu_rep_devlink_port_register(rep);
- if (err)
+ if (err) {
+ free_netdev(ndev);
goto exit;
+ }
SET_NETDEV_DEVLINK_PORT(ndev, &rep->dl_port);
err = register_netdev(ndev);
if (err) {
NL_SET_ERR_MSG_MOD(extack,
"PFVF representor registration failed");
+ rvu_rep_devlink_port_unregister(rep);
free_netdev(ndev);
goto exit;
}
@@ -760,7 +765,7 @@ static int rvu_rep_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return err;
}
- err = pci_request_regions(pdev, DRV_NAME);
+ err = pcim_request_all_regions(pdev, DRV_NAME);
if (err) {
dev_err(dev, "PCI request regions failed 0x%x\n", err);
return err;
@@ -769,7 +774,7 @@ static int rvu_rep_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
if (err) {
dev_err(dev, "DMA mask config failed, abort\n");
- goto err_release_regions;
+ goto err_set_drv_data;
}
pci_set_master(pdev);
@@ -777,7 +782,7 @@ static int rvu_rep_probe(struct pci_dev *pdev, const struct pci_device_id *id)
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv) {
err = -ENOMEM;
- goto err_release_regions;
+ goto err_set_drv_data;
}
pci_set_drvdata(pdev, priv);
@@ -794,7 +799,7 @@ static int rvu_rep_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = otx2_init_rsrc(pdev, priv);
if (err)
- goto err_release_regions;
+ goto err_set_drv_data;
priv->iommu_domain = iommu_get_domain_for_dev(dev);
@@ -817,9 +822,8 @@ err_detach_rsrc:
otx2_disable_mbox_intr(priv);
otx2_pfaf_mbox_destroy(priv);
pci_free_irq_vectors(pdev);
-err_release_regions:
+err_set_drv_data:
pci_set_drvdata(pdev, NULL);
- pci_release_regions(pdev);
return err;
}
@@ -839,7 +843,6 @@ static void rvu_rep_remove(struct pci_dev *pdev)
otx2_pfaf_mbox_destroy(priv);
pci_free_irq_vectors(priv->pdev);
pci_set_drvdata(pdev, NULL);
- pci_release_regions(pdev);
}
static struct pci_driver rvu_rep_driver = {
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_counter.c b/drivers/net/ethernet/marvell/prestera/prestera_counter.c
index 4cd53a2dae46..634f4543c1d7 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_counter.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_counter.c
@@ -336,8 +336,7 @@ prestera_counter_block_get_by_idx(struct prestera_counter *counter, u32 idx)
static void prestera_counter_stats_work(struct work_struct *work)
{
- struct delayed_work *dl_work =
- container_of(work, struct delayed_work, work);
+ struct delayed_work *dl_work = to_delayed_work(work);
struct prestera_counter *counter =
container_of(dl_work, struct prestera_counter, stats_dw);
struct prestera_counter_block *block;
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c
index 22ca6ee9665e..71ffb55d1fc4 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_main.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c
@@ -280,6 +280,7 @@ prestera_mac_select_pcs(struct phylink_config *config,
}
static void prestera_pcs_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
struct prestera_port *port = container_of(pcs, struct prestera_port,
@@ -395,7 +396,6 @@ static int prestera_port_sfp_bind(struct prestera_port *port)
continue;
port->phylink_pcs.ops = &prestera_pcs_ops;
- port->phylink_pcs.neg_mode = true;
port->phy_config.dev = &port->dev->dev;
port->phy_config.type = PHYLINK_NETDEV;
@@ -634,7 +634,7 @@ static int prestera_port_create(struct prestera_switch *sw, u32 id)
goto err_dl_port_register;
dev->features |= NETIF_F_HW_TC;
- dev->netns_local = true;
+ dev->netns_immutable = true;
dev->netdev_ops = &prestera_netdev_ops;
dev->ethtool_ops = &prestera_ethtool_ops;
SET_NETDEV_DEV(dev, sw->dev->dev);
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_pci.c b/drivers/net/ethernet/marvell/prestera/prestera_pci.c
index 35857dc19542..c45d108b2f6d 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_pci.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_pci.c
@@ -845,9 +845,9 @@ static int prestera_pci_probe(struct pci_dev *pdev,
goto err_pci_enable_device;
}
- err = pci_request_regions(pdev, driver_name);
+ err = pcim_request_all_regions(pdev, driver_name);
if (err) {
- dev_err(&pdev->dev, "pci_request_regions failed\n");
+ dev_err(&pdev->dev, "pcim_request_all_regions failed\n");
goto err_pci_request_regions;
}
@@ -938,7 +938,6 @@ err_pci_dev_alloc:
err_pp_ioremap:
err_mem_ioremap:
err_dma_mask:
- pci_release_regions(pdev);
err_pci_request_regions:
err_pci_enable_device:
return err;
@@ -953,7 +952,6 @@ static void prestera_pci_remove(struct pci_dev *pdev)
pci_free_irq_vectors(pdev);
destroy_workqueue(fw->wq);
prestera_fw_uninit(fw);
- pci_release_regions(pdev);
}
static const struct pci_device_id prestera_pci_devices[] = {
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 2bf426cea6dd..e4cfdc8bc055 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -353,7 +353,7 @@ static void rxq_refill(struct net_device *dev)
static inline void rxq_refill_timer_wrapper(struct timer_list *t)
{
- struct pxa168_eth_private *pep = from_timer(pep, t, timeout);
+ struct pxa168_eth_private *pep = timer_container_of(pep, t, timeout);
napi_schedule(&pep->napi);
}
@@ -1175,7 +1175,7 @@ static int pxa168_eth_stop(struct net_device *dev)
/* Write to ICR to clear interrupts. */
wrl(pep, INT_W_CLEAR, 0);
napi_disable(&pep->napi);
- del_timer_sync(&pep->timeout);
+ timer_delete_sync(&pep->timeout);
netif_carrier_off(dev);
free_irq(dev->irq, dev);
rxq_deinit(dev);
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 25bf6ec44289..05349a0b2db1 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -1494,7 +1494,7 @@ static int xm_check_link(struct net_device *dev)
*/
static void xm_link_timer(struct timer_list *t)
{
- struct skge_port *skge = from_timer(skge, t, link_timer);
+ struct skge_port *skge = timer_container_of(skge, t, link_timer);
struct net_device *dev = skge->netdev;
struct skge_hw *hw = skge->hw;
int port = skge->port;
@@ -2662,7 +2662,7 @@ static int skge_down(struct net_device *dev)
netif_tx_disable(dev);
if (is_genesis(hw) && hw->phy_type == SK_PHY_XMAC)
- del_timer_sync(&skge->link_timer);
+ timer_delete_sync(&skge->link_timer);
napi_disable(&skge->napi);
netif_carrier_off(dev);
@@ -3742,10 +3742,7 @@ static int skge_device_event(struct notifier_block *unused,
skge = netdev_priv(dev);
switch (event) {
case NETDEV_CHANGENAME:
- if (skge->debugfs)
- skge->debugfs = debugfs_rename(skge_debug,
- skge->debugfs,
- skge_debug, dev->name);
+ debugfs_change_name(skge->debugfs, "%s", dev->name);
break;
case NETDEV_GOING_DOWN:
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 3914cd9210d4..3831f533b9db 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -130,6 +130,7 @@ static const struct pci_device_id sky2_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436C) }, /* 88E8072 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436D) }, /* 88E8055 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4370) }, /* 88E8075 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4373) }, /* 88E8075 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4380) }, /* 88E8057 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4381) }, /* 88E8059 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4382) }, /* 88E8079 */
@@ -2960,7 +2961,7 @@ static int sky2_rx_hung(struct net_device *dev)
static void sky2_watchdog(struct timer_list *t)
{
- struct sky2_hw *hw = from_timer(hw, t, watchdog_timer);
+ struct sky2_hw *hw = timer_container_of(hw, t, watchdog_timer);
/* Check for lost IRQ once a second */
if (sky2_read32(hw, B0_ISRC)) {
@@ -4493,10 +4494,7 @@ static int sky2_device_event(struct notifier_block *unused,
switch (event) {
case NETDEV_CHANGENAME:
- if (sky2->debugfs) {
- sky2->debugfs = debugfs_rename(sky2_debug, sky2->debugfs,
- sky2_debug, dev->name);
- }
+ debugfs_change_name(sky2->debugfs, "%s", dev->name);
break;
case NETDEV_GOING_DOWN:
@@ -5054,7 +5052,7 @@ static int sky2_suspend(struct device *dev)
if (!hw)
return 0;
- del_timer_sync(&hw->watchdog_timer);
+ timer_delete_sync(&hw->watchdog_timer);
cancel_work_sync(&hw->restart_work);
rtnl_lock();
diff --git a/drivers/net/ethernet/mediatek/Kconfig b/drivers/net/ethernet/mediatek/Kconfig
index 95c4405b7d7b..7bfd3f230ff5 100644
--- a/drivers/net/ethernet/mediatek/Kconfig
+++ b/drivers/net/ethernet/mediatek/Kconfig
@@ -7,14 +7,6 @@ config NET_VENDOR_MEDIATEK
if NET_VENDOR_MEDIATEK
-config NET_AIROHA
- tristate "Airoha SoC Gigabit Ethernet support"
- depends on NET_DSA || !NET_DSA
- select PAGE_POOL
- help
- This driver supports the gigabit ethernet MACs in the
- Airoha SoC family.
-
config NET_MEDIATEK_SOC_WED
depends on ARCH_MEDIATEK || COMPILE_TEST
def_bool NET_MEDIATEK_SOC != n
diff --git a/drivers/net/ethernet/mediatek/Makefile b/drivers/net/ethernet/mediatek/Makefile
index ddbb7f4a516c..03e008fbc859 100644
--- a/drivers/net/ethernet/mediatek/Makefile
+++ b/drivers/net/ethernet/mediatek/Makefile
@@ -11,4 +11,3 @@ mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_debugfs.o
endif
obj-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_ops.o
obj-$(CONFIG_NET_MEDIATEK_STAR_EMAC) += mtk_star_emac.o
-obj-$(CONFIG_NET_AIROHA) += airoha_eth.o
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_path.c b/drivers/net/ethernet/mediatek/mtk_eth_path.c
index 7c27a19c4d8f..b4c01e2878f6 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_path.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_path.c
@@ -14,7 +14,7 @@
struct mtk_eth_muxc {
const char *name;
- int cap_bit;
+ u64 cap_bit;
int (*set_path)(struct mtk_eth *eth, u64 path);
};
@@ -31,6 +31,8 @@ static const char *mtk_eth_path_name(u64 path)
return "gmac2_rgmii";
case MTK_ETH_PATH_GMAC2_SGMII:
return "gmac2_sgmii";
+ case MTK_ETH_PATH_GMAC2_2P5GPHY:
+ return "gmac2_2p5gphy";
case MTK_ETH_PATH_GMAC2_GEPHY:
return "gmac2_gephy";
case MTK_ETH_PATH_GDM1_ESW:
@@ -127,6 +129,29 @@ static int set_mux_u3_gmac2_to_qphy(struct mtk_eth *eth, u64 path)
return 0;
}
+static int set_mux_gmac2_to_2p5gphy(struct mtk_eth *eth, u64 path)
+{
+ int ret;
+
+ if (path == MTK_ETH_PATH_GMAC2_2P5GPHY) {
+ ret = regmap_clear_bits(eth->ethsys, ETHSYS_SYSCFG0,
+ SYSCFG0_SGMII_GMAC2_V2);
+ if (ret)
+ return ret;
+
+ /* Setup mux to 2p5g PHY */
+ ret = regmap_clear_bits(eth->infra, TOP_MISC_NETSYS_PCS_MUX,
+ MUX_G2_USXGMII_SEL);
+ if (ret)
+ return ret;
+
+ dev_dbg(eth->dev, "path %s in %s updated\n",
+ mtk_eth_path_name(path), __func__);
+ }
+
+ return 0;
+}
+
static int set_mux_gmac1_gmac2_to_sgmii_rgmii(struct mtk_eth *eth, u64 path)
{
unsigned int val = 0;
@@ -210,6 +235,10 @@ static const struct mtk_eth_muxc mtk_eth_muxc[] = {
.cap_bit = MTK_ETH_MUX_U3_GMAC2_TO_QPHY,
.set_path = set_mux_u3_gmac2_to_qphy,
}, {
+ .name = "mux_gmac2_to_2p5gphy",
+ .cap_bit = MTK_ETH_MUX_GMAC2_TO_2P5GPHY,
+ .set_path = set_mux_gmac2_to_2p5gphy,
+ }, {
.name = "mux_gmac1_gmac2_to_sgmii_rgmii",
.cap_bit = MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII,
.set_path = set_mux_gmac1_gmac2_to_sgmii_rgmii,
@@ -260,6 +289,20 @@ int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id)
return mtk_eth_mux_setup(eth, path);
}
+int mtk_gmac_2p5gphy_path_setup(struct mtk_eth *eth, int mac_id)
+{
+ u64 path = 0;
+
+ if (mac_id == MTK_GMAC2_ID)
+ path = MTK_ETH_PATH_GMAC2_2P5GPHY;
+
+ if (!path)
+ return -EINVAL;
+
+ /* Setup proper MUXes along the path */
+ return mtk_eth_mux_setup(eth, path);
+}
+
int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id)
{
u64 path = 0;
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 53485142938c..b38e4f2de674 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -269,12 +269,8 @@ static const char * const mtk_clks_source_name[] = {
"ethwarp_wocpu2",
"ethwarp_wocpu1",
"ethwarp_wocpu0",
- "top_usxgmii0_sel",
- "top_usxgmii1_sel",
"top_sgm0_sel",
"top_sgm1_sel",
- "top_xfi_phy0_xtal_sel",
- "top_xfi_phy1_xtal_sel",
"top_eth_gmii_sel",
"top_eth_refck_50m_sel",
"top_eth_sys_200m_sel",
@@ -507,7 +503,7 @@ static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
static void mtk_setup_bridge_switch(struct mtk_eth *eth)
{
/* Force Port1 XGMAC Link Up */
- mtk_m32(eth, 0, MTK_XGMAC_FORCE_LINK(MTK_GMAC1_ID),
+ mtk_m32(eth, 0, MTK_XGMAC_FORCE_MODE(MTK_GMAC1_ID),
MTK_XGMAC_STS(MTK_GMAC1_ID));
/* Adjust GSW bridge IPG to 11 */
@@ -536,6 +532,26 @@ static struct phylink_pcs *mtk_mac_select_pcs(struct phylink_config *config,
return NULL;
}
+static int mtk_mac_prepare(struct phylink_config *config, unsigned int mode,
+ phy_interface_t iface)
+{
+ struct mtk_mac *mac = container_of(config, struct mtk_mac,
+ phylink_config);
+ struct mtk_eth *eth = mac->hw;
+
+ if (mtk_interface_mode_is_xgmii(eth, iface) &&
+ mac->id != MTK_GMAC1_ID) {
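+ /* Disable XGMAC TX/RX and switch to forced mode until the link is brought up */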
+ mtk_m32(mac->hw, XMAC_MCR_TRX_DISABLE,
+ XMAC_MCR_TRX_DISABLE, MTK_XMAC_MCR(mac->id));
+
+ mtk_m32(mac->hw, MTK_XGMAC_FORCE_MODE(mac->id) |
+ MTK_XGMAC_FORCE_LINK(mac->id),
+ MTK_XGMAC_FORCE_MODE(mac->id), MTK_XGMAC_STS(mac->id));
+ }
+
+ return 0;
+}
+
static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
const struct phylink_link_state *state)
{
@@ -577,6 +593,12 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
}
break;
case PHY_INTERFACE_MODE_INTERNAL:
+ if (mac->id == MTK_GMAC2_ID &&
+ MTK_HAS_CAPS(eth->soc->caps, MTK_2P5GPHY)) {
+ err = mtk_gmac_2p5gphy_path_setup(eth, mac->id);
+ if (err)
+ goto init_err;
+ }
break;
default:
goto err_phy;
@@ -648,12 +670,12 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
}
/* Setup gmac */
- if (mtk_is_netsys_v3_or_greater(eth) &&
- mac->interface == PHY_INTERFACE_MODE_INTERNAL) {
+ if (mtk_interface_mode_is_xgmii(eth, state->interface)) {
mtk_w32(mac->hw, MTK_GDMA_XGDM_SEL, MTK_GDMA_EG_CTRL(mac->id));
mtk_w32(mac->hw, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(mac->id));
- mtk_setup_bridge_switch(eth);
+ if (mac->id == MTK_GMAC1_ID)
+ mtk_setup_bridge_switch(eth);
}
return;
@@ -700,10 +722,19 @@ static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
{
struct mtk_mac *mac = container_of(config, struct mtk_mac,
phylink_config);
- u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
- mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN | MAC_MCR_FORCE_LINK);
- mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
+ if (!mtk_interface_mode_is_xgmii(mac->hw, interface)) {
+ /* GMAC modes */
+ mtk_m32(mac->hw,
+ MAC_MCR_TX_EN | MAC_MCR_RX_EN | MAC_MCR_FORCE_LINK, 0,
+ MTK_MAC_MCR(mac->id));
+ } else if (mac->id != MTK_GMAC1_ID) {
+ /* XGMAC except for built-in switch */
+ mtk_m32(mac->hw, XMAC_MCR_TRX_DISABLE, XMAC_MCR_TRX_DISABLE,
+ MTK_XMAC_MCR(mac->id));
+ mtk_m32(mac->hw, MTK_XGMAC_FORCE_LINK(mac->id), 0,
+ MTK_XGMAC_STS(mac->id));
+ }
}
static void mtk_set_queue_speed(struct mtk_eth *eth, unsigned int idx,
@@ -734,7 +765,7 @@ static void mtk_set_queue_speed(struct mtk_eth *eth, unsigned int idx,
case SPEED_100:
val |= MTK_QTX_SCH_MAX_RATE_EN |
FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 103) |
- FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 3);
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 3) |
FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
break;
case SPEED_1000:
@@ -757,13 +788,13 @@ static void mtk_set_queue_speed(struct mtk_eth *eth, unsigned int idx,
case SPEED_100:
val |= MTK_QTX_SCH_MAX_RATE_EN |
FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
- FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5);
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) |
FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
break;
case SPEED_1000:
val |= MTK_QTX_SCH_MAX_RATE_EN |
- FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 10) |
- FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 6) |
FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 10);
break;
default:
@@ -775,13 +806,12 @@ static void mtk_set_queue_speed(struct mtk_eth *eth, unsigned int idx,
mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs);
}
-static void mtk_mac_link_up(struct phylink_config *config,
- struct phy_device *phy,
- unsigned int mode, phy_interface_t interface,
- int speed, int duplex, bool tx_pause, bool rx_pause)
+static void mtk_gdm_mac_link_up(struct mtk_mac *mac,
+ struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex, bool tx_pause,
+ bool rx_pause)
{
- struct mtk_mac *mac = container_of(config, struct mtk_mac,
- phylink_config);
u32 mcr;
mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
@@ -815,32 +845,145 @@ static void mtk_mac_link_up(struct phylink_config *config,
mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
}
+static void mtk_xgdm_mac_link_up(struct mtk_mac *mac,
+ struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex, bool tx_pause,
+ bool rx_pause)
+{
+ u32 mcr;
+
+ if (mac->id == MTK_GMAC1_ID)
+ return;
+
+ /* Eliminate the interference (before link-up) caused by PHY noise */
+ mtk_m32(mac->hw, XMAC_LOGIC_RST, 0, MTK_XMAC_LOGIC_RST(mac->id));
+ mdelay(20);
+ mtk_m32(mac->hw, XMAC_GLB_CNTCLR, XMAC_GLB_CNTCLR,
+ MTK_XMAC_CNT_CTRL(mac->id));
+
+ mtk_m32(mac->hw, MTK_XGMAC_FORCE_LINK(mac->id),
+ MTK_XGMAC_FORCE_LINK(mac->id), MTK_XGMAC_STS(mac->id));
+
+ mcr = mtk_r32(mac->hw, MTK_XMAC_MCR(mac->id));
+ mcr &= ~(XMAC_MCR_FORCE_TX_FC | XMAC_MCR_FORCE_RX_FC |
+ XMAC_MCR_TRX_DISABLE);
+ /* Configure pause modes -
+ * phylink will avoid these for half duplex
+ */
+ if (tx_pause)
+ mcr |= XMAC_MCR_FORCE_TX_FC;
+ if (rx_pause)
+ mcr |= XMAC_MCR_FORCE_RX_FC;
+
+ mtk_w32(mac->hw, mcr, MTK_XMAC_MCR(mac->id));
+}
+
+static void mtk_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex, bool tx_pause, bool rx_pause)
+{
+ struct mtk_mac *mac = container_of(config, struct mtk_mac,
+ phylink_config);
+
+ if (mtk_interface_mode_is_xgmii(mac->hw, interface))
+ mtk_xgdm_mac_link_up(mac, phy, mode, interface, speed, duplex,
+ tx_pause, rx_pause);
+ else
+ mtk_gdm_mac_link_up(mac, phy, mode, interface, speed, duplex,
+ tx_pause, rx_pause);
+}
+
+static void mtk_mac_disable_tx_lpi(struct phylink_config *config)
+{
+ struct mtk_mac *mac = container_of(config, struct mtk_mac,
+ phylink_config);
+ struct mtk_eth *eth = mac->hw;
+
+ mtk_m32(eth, MAC_MCR_EEE100M | MAC_MCR_EEE1G, 0, MTK_MAC_MCR(mac->id));
+}
+
+static int mtk_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
+ bool tx_clk_stop)
+{
+ struct mtk_mac *mac = container_of(config, struct mtk_mac,
+ phylink_config);
+ struct mtk_eth *eth = mac->hw;
+ u32 val;
+
+ if (mtk_interface_mode_is_xgmii(eth, mac->interface))
+ return -EOPNOTSUPP;
+
+ /* Tx idle timer in ms */
+ timer = DIV_ROUND_UP(timer, 1000);
+
+ /* If the timer is zero, then set LPI_MODE, which allows the
+ * system to enter LPI mode immediately rather than waiting for
+ * the LPI threshold.
+ */
+ if (!timer)
+ val = MAC_EEE_LPI_MODE;
+ else if (FIELD_FIT(MAC_EEE_LPI_TXIDLE_THD, timer))
+ val = FIELD_PREP(MAC_EEE_LPI_TXIDLE_THD, timer);
+ else
+ val = MAC_EEE_LPI_TXIDLE_THD;
+
+ if (tx_clk_stop)
+ val |= MAC_EEE_CKG_TXIDLE;
+
+ /* PHY Wake-up time. This field does not have a reset value, so use the
+ * reset values from MT7531 (36us for 100M and 17us for 1000M).
+ */
+ val |= FIELD_PREP(MAC_EEE_WAKEUP_TIME_1000, 17) |
+ FIELD_PREP(MAC_EEE_WAKEUP_TIME_100, 36);
+
+ mtk_w32(eth, val, MTK_MAC_EEECR(mac->id));
+ mtk_m32(eth, 0, MAC_MCR_EEE100M | MAC_MCR_EEE1G, MTK_MAC_MCR(mac->id));
+
+ return 0;
+}
+
static const struct phylink_mac_ops mtk_phylink_ops = {
+ .mac_prepare = mtk_mac_prepare,
.mac_select_pcs = mtk_mac_select_pcs,
.mac_config = mtk_mac_config,
.mac_finish = mtk_mac_finish,
.mac_link_down = mtk_mac_link_down,
.mac_link_up = mtk_mac_link_up,
+ .mac_disable_tx_lpi = mtk_mac_disable_tx_lpi,
+ .mac_enable_tx_lpi = mtk_mac_enable_tx_lpi,
};
+static void mtk_mdio_config(struct mtk_eth *eth)
+{
+ u32 val;
+
+ /* Configure MDC Divider */
+ val = FIELD_PREP(PPSC_MDC_CFG, eth->mdc_divider);
+
+ /* Configure MDC Turbo Mode */
+ if (mtk_is_netsys_v3_or_greater(eth))
+ mtk_m32(eth, 0, MISC_MDC_TURBO, MTK_MAC_MISC_V3);
+ else
+ val |= PPSC_MDC_TURBO;
+
+ mtk_m32(eth, PPSC_MDC_CFG, val, MTK_PPSC);
+}
+
static int mtk_mdio_init(struct mtk_eth *eth)
{
- unsigned int max_clk = 2500000, divider;
+ unsigned int max_clk = 2500000;
struct device_node *mii_np;
int ret;
u32 val;
- mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
+ mii_np = of_get_available_child_by_name(eth->dev->of_node, "mdio-bus");
if (!mii_np) {
dev_err(eth->dev, "no %s child node found", "mdio-bus");
return -ENODEV;
}
- if (!of_device_is_available(mii_np)) {
- ret = -ENODEV;
- goto err_put_node;
- }
-
eth->mii_bus = devm_mdiobus_alloc(eth->dev);
if (!eth->mii_bus) {
ret = -ENOMEM;
@@ -865,20 +1008,9 @@ static int mtk_mdio_init(struct mtk_eth *eth)
}
max_clk = val;
}
- divider = min_t(unsigned int, DIV_ROUND_UP(MDC_MAX_FREQ, max_clk), 63);
-
- /* Configure MDC Turbo Mode */
- if (mtk_is_netsys_v3_or_greater(eth))
- mtk_m32(eth, 0, MISC_MDC_TURBO, MTK_MAC_MISC_V3);
-
- /* Configure MDC Divider */
- val = FIELD_PREP(PPSC_MDC_CFG, divider);
- if (!mtk_is_netsys_v3_or_greater(eth))
- val |= PPSC_MDC_TURBO;
- mtk_m32(eth, PPSC_MDC_CFG, val, MTK_PPSC);
-
- dev_dbg(eth->dev, "MDC is running on %d Hz\n", MDC_MAX_FREQ / divider);
-
+ eth->mdc_divider = min_t(unsigned int, DIV_ROUND_UP(MDC_MAX_FREQ, max_clk), 63);
+ mtk_mdio_config(eth);
+ dev_dbg(eth->dev, "MDC is running on %d Hz\n", MDC_MAX_FREQ / eth->mdc_divider);
ret = of_mdiobus_register(eth->mii_bus, mii_np);
err_put_node:
@@ -2079,7 +2211,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
if (ring->page_pool) {
struct page *page = virt_to_head_page(data);
struct xdp_buff xdp;
- u32 ret;
+ u32 ret, metasize;
new_data = mtk_page_pool_get_buff(ring->page_pool,
&dma_addr,
@@ -2095,7 +2227,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
xdp_init_buff(&xdp, PAGE_SIZE, &ring->xdp_q);
xdp_prepare_buff(&xdp, data, MTK_PP_HEADROOM, pktlen,
- false);
+ true);
xdp_buff_clear_frags_flag(&xdp);
ret = mtk_xdp_run(eth, ring, &xdp, netdev);
@@ -2115,6 +2247,9 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
skb_reserve(skb, xdp.data - xdp.data_hard_start);
skb_put(skb, xdp.data_end - xdp.data);
+ metasize = xdp.data - xdp.data_meta;
+ if (metasize)
+ skb_metadata_set(skb, metasize);
skb_mark_for_recycle(skb);
} else {
if (ring->frag_size <= PAGE_SIZE)
@@ -2201,14 +2336,18 @@ skip_rx:
ring->data[idx] = new_data;
rxd->rxd1 = (unsigned int)dma_addr;
release_desc:
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) {
+ if (unlikely(dma_addr == DMA_MAPPING_ERROR))
+ addr64 = FIELD_GET(RX_DMA_ADDR64_MASK,
+ rxd->rxd2);
+ else
+ addr64 = RX_DMA_PREP_ADDR64(dma_addr);
+ }
+
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
rxd->rxd2 = RX_DMA_LSO;
else
- rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
-
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA) &&
- likely(dma_addr != DMA_MAPPING_ERROR))
- rxd->rxd2 |= RX_DMA_PREP_ADDR64(dma_addr);
+ rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size) | addr64;
ring->calc_idx = idx;
done++;
@@ -3135,11 +3274,19 @@ static int mtk_dma_init(struct mtk_eth *eth)
static void mtk_dma_free(struct mtk_eth *eth)
{
const struct mtk_soc_data *soc = eth->soc;
- int i;
+ int i, j, txqs = 1;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+ txqs = MTK_QDMA_NUM_QUEUES;
+
+ for (i = 0; i < MTK_MAX_DEVS; i++) {
+ if (!eth->netdev[i])
+ continue;
+
+ for (j = 0; j < txqs; j++)
+ netdev_tx_reset_subqueue(eth->netdev[i], j);
+ }
- for (i = 0; i < MTK_MAX_DEVS; i++)
- if (eth->netdev[i])
- netdev_reset_queue(eth->netdev[i]);
if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && eth->scratch_ring) {
dma_free_coherent(eth->dma_dev,
MTK_QDMA_RING_SIZE * soc->tx.desc_size,
@@ -3269,7 +3416,7 @@ static int mtk_start_dma(struct mtk_eth *eth)
if (mtk_is_netsys_v2_or_greater(eth))
val |= MTK_MUTLI_CNT | MTK_RESV_BUF |
MTK_WCOMP_EN | MTK_DMAD_WR_WDONE |
- MTK_CHK_DDONE_EN | MTK_LEAKY_BUCKET_EN;
+ MTK_CHK_DDONE_EN;
else
val |= MTK_RX_BT_32DWORDS;
mtk_w32(eth, val, reg_map->qdma.glo_cfg);
@@ -3414,9 +3561,6 @@ static int mtk_open(struct net_device *dev)
}
mtk_gdm_config(eth, target_mac->id, gdm_config);
}
- /* Reset and enable PSE */
- mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
- mtk_w32(eth, 0, MTK_RST_GL);
napi_enable(&eth->tx_napi);
napi_enable(&eth->rx_napi);
@@ -3928,6 +4072,10 @@ static int mtk_hw_init(struct mtk_eth *eth, bool reset)
else
mtk_hw_reset(eth);
+ /* No MT7628/88 support yet */
+ if (reset && !MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
+ mtk_mdio_config(eth);
+
if (mtk_is_netsys_v3_or_greater(eth)) {
/* Set FE to PDMAv2 if necessary */
val = mtk_r32(eth, MTK_FE_GLO_MISC);
@@ -3988,11 +4136,27 @@ static int mtk_hw_init(struct mtk_eth *eth, bool reset)
mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
if (mtk_is_netsys_v3_or_greater(eth)) {
- /* PSE should not drop port1, port8 and port9 packets */
- mtk_w32(eth, 0x00000302, PSE_DROP_CFG);
+ /* PSE dummy page mechanism */
+ mtk_w32(eth, PSE_DUMMY_WORK_GDM(1) | PSE_DUMMY_WORK_GDM(2) |
+ PSE_DUMMY_WORK_GDM(3) | DUMMY_PAGE_THR, PSE_DUMY_REQ);
+
+ /* PSE free buffer drop threshold */
+ mtk_w32(eth, 0x00600009, PSE_IQ_REV(8));
+
+ /* PSE should not drop port8, port9 and port13 packets from
+ * WDMA Tx
+ */
+ mtk_w32(eth, 0x00002300, PSE_DROP_CFG);
+
+ /* PSE should drop packets to port8, port9 and port13 on WDMA Rx
+ * ring full
+ */
+ mtk_w32(eth, 0x00002300, PSE_PPE_DROP(0));
+ mtk_w32(eth, 0x00002300, PSE_PPE_DROP(1));
+ mtk_w32(eth, 0x00002300, PSE_PPE_DROP(2));
/* GDM and CDM Threshold */
- mtk_w32(eth, 0x00000707, MTK_CDMW0_THRES);
+ mtk_w32(eth, 0x08000707, MTK_CDMW0_THRES);
mtk_w32(eth, 0x00000077, MTK_CDMW1_THRES);
/* Disable GDM1 RX CRC stripping */
@@ -4009,7 +4173,7 @@ static int mtk_hw_init(struct mtk_eth *eth, bool reset)
mtk_w32(eth, 0x00000300, PSE_DROP_CFG);
/* PSE should drop packets to port 8/9 on WDMA Rx ring full */
- mtk_w32(eth, 0x00000300, PSE_PPE0_DROP);
+ mtk_w32(eth, 0x00000300, PSE_PPE_DROP(0));
/* PSE Free Queue Flow Control */
mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2);
@@ -4474,6 +4638,20 @@ static int mtk_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
return phylink_ethtool_set_pauseparam(mac->phylink, pause);
}
+static int mtk_get_eee(struct net_device *dev, struct ethtool_keee *eee)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+
+ return phylink_ethtool_get_eee(mac->phylink, eee);
+}
+
+static int mtk_set_eee(struct net_device *dev, struct ethtool_keee *eee)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+
+ return phylink_ethtool_set_eee(mac->phylink, eee);
+}
+
static u16 mtk_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev)
{
@@ -4506,6 +4684,8 @@ static const struct ethtool_ops mtk_ethtool_ops = {
.set_pauseparam = mtk_set_pauseparam,
.get_rxnfc = mtk_get_rxnfc,
.set_rxnfc = mtk_set_rxnfc,
+ .get_eee = mtk_get_eee,
+ .set_eee = mtk_set_eee,
};
static const struct net_device_ops mtk_netdev_ops = {
@@ -4615,6 +4795,9 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
mac->phylink_config.type = PHYLINK_NETDEV;
mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
+ mac->phylink_config.lpi_capabilities = MAC_100FD | MAC_1000FD |
+ MAC_2500FD;
+ mac->phylink_config.lpi_timer_default = 1000;
/* MT7623 gmac0 is now missing its speed-specific PLL configuration
* in its .mac_config method (since state->speed is not valid there.
@@ -4653,7 +4836,7 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
}
if (mtk_is_netsys_v3_or_greater(mac->hw) &&
- MTK_HAS_CAPS(mac->hw->soc->caps, MTK_ESW_BIT) &&
+ MTK_HAS_CAPS(mac->hw->soc->caps, MTK_ESW) &&
id == MTK_GMAC1_ID) {
mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE |
MAC_SYM_PAUSE |
@@ -4673,6 +4856,11 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
mac->phylink = phylink;
+ if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_2P5GPHY) &&
+ id == MTK_GMAC2_ID)
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ mac->phylink_config.supported_interfaces);
+
SET_NETDEV_DEV(eth->netdev[id], eth->dev);
eth->netdev[id]->watchdog_timeo = 5 * HZ;
eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 0d5225f1d3ee..6f72a8c8ae1e 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -151,7 +151,15 @@
#define PSE_FQFC_CFG1 0x100
#define PSE_FQFC_CFG2 0x104
#define PSE_DROP_CFG 0x108
-#define PSE_PPE0_DROP 0x110
+#define PSE_PPE_DROP(x) (0x110 + ((x) * 0x4))
+
+/* PSE Last FreeQ Page Request Control */
+#define PSE_DUMY_REQ 0x10C
+/* PSE_DUMY_REQ is not a typo; the register is also named this way in
+ * MediaTek's datasheet.
+ */
+#define PSE_DUMMY_WORK_GDM(x) BIT(16 + (x))
+#define DUMMY_PAGE_THR 0x1
/* PSE Input Queue Reservation Register */
#define PSE_IQ_REV(x) (0x140 + (((x) - 1) << 2))
@@ -423,7 +431,8 @@
/* XMAC status registers */
#define MTK_XGMAC_STS(x) (((x) == MTK_GMAC3_ID) ? 0x1001C : 0x1000C)
-#define MTK_XGMAC_FORCE_LINK(x) (((x) == MTK_GMAC2_ID) ? BIT(31) : BIT(15))
+#define MTK_XGMAC_FORCE_MODE(x) (((x) == MTK_GMAC2_ID) ? BIT(31) : BIT(15))
+#define MTK_XGMAC_FORCE_LINK(x) (((x) == MTK_GMAC2_ID) ? BIT(27) : BIT(11))
#define MTK_USXGMII_PCS_LINK BIT(8)
#define MTK_XGMAC_RX_FC BIT(5)
#define MTK_XGMAC_TX_FC BIT(4)
@@ -453,6 +462,8 @@
#define MAC_MCR_RX_FIFO_CLR_DIS BIT(12)
#define MAC_MCR_BACKOFF_EN BIT(9)
#define MAC_MCR_BACKPR_EN BIT(8)
+#define MAC_MCR_EEE1G BIT(7)
+#define MAC_MCR_EEE100M BIT(6)
#define MAC_MCR_FORCE_RX_FC BIT(5)
#define MAC_MCR_FORCE_TX_FC BIT(4)
#define MAC_MCR_SPEED_1000 BIT(3)
@@ -461,6 +472,15 @@
#define MAC_MCR_FORCE_LINK BIT(0)
#define MAC_MCR_FORCE_LINK_DOWN (MAC_MCR_FORCE_MODE)
+/* Mac EEE control registers */
+#define MTK_MAC_EEECR(x) (0x10104 + (x * 0x100))
+#define MAC_EEE_WAKEUP_TIME_1000 GENMASK(31, 24)
+#define MAC_EEE_WAKEUP_TIME_100 GENMASK(23, 16)
+#define MAC_EEE_LPI_TXIDLE_THD GENMASK(15, 8)
+#define MAC_EEE_CKG_TXIDLE BIT(3)
+#define MAC_EEE_CKG_RXLPI BIT(2)
+#define MAC_EEE_LPI_MODE BIT(0)
+
/* Mac status registers */
#define MTK_MAC_MSR(x) (0x10108 + (x * 0x100))
#define MAC_MSR_EEE1G BIT(7)
@@ -505,6 +525,21 @@
#define INTF_MODE_RGMII_1000 (TRGMII_MODE | TRGMII_CENTRAL_ALIGNED)
#define INTF_MODE_RGMII_10_100 0
+/* XFI Mac control registers */
+#define MTK_XMAC_BASE(x) (0x12000 + (((x) - 1) * 0x1000))
+#define MTK_XMAC_MCR(x) (MTK_XMAC_BASE(x))
+#define XMAC_MCR_TRX_DISABLE 0xf
+#define XMAC_MCR_FORCE_TX_FC BIT(5)
+#define XMAC_MCR_FORCE_RX_FC BIT(4)
+
+/* XFI Mac logic reset registers */
+#define MTK_XMAC_LOGIC_RST(x) (MTK_XMAC_BASE(x) + 0x10)
+#define XMAC_LOGIC_RST BIT(0)
+
+/* XFI Mac count global control */
+#define MTK_XMAC_CNT_CTRL(x) (MTK_XMAC_BASE(x) + 0x100)
+#define XMAC_GLB_CNTCLR BIT(0)
+
/* GPIO port control registers for GMAC 2*/
#define GPIO_OD33_CTRL8 0x4c0
#define GPIO_BIAS_CTRL 0xed0
@@ -568,6 +603,10 @@
#define GEPHY_MAC_SEL BIT(1)
/* Top misc registers */
+#define TOP_MISC_NETSYS_PCS_MUX 0x0
+#define NETSYS_PCS_MUX_MASK GENMASK(1, 0)
+#define MUX_G2_USXGMII_SEL BIT(1)
+
#define USB_PHY_SWITCH_REG 0x218
#define QPHY_SEL_MASK GENMASK(1, 0)
#define SGMII_QPHY_SEL 0x2
@@ -932,6 +971,7 @@ enum mkt_eth_capabilities {
MTK_RGMII_BIT = 0,
MTK_TRGMII_BIT,
MTK_SGMII_BIT,
+ MTK_2P5GPHY_BIT,
MTK_ESW_BIT,
MTK_GEPHY_BIT,
MTK_MUX_BIT,
@@ -952,6 +992,7 @@ enum mkt_eth_capabilities {
MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT,
MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY_BIT,
MTK_ETH_MUX_U3_GMAC2_TO_QPHY_BIT,
+ MTK_ETH_MUX_GMAC2_TO_2P5GPHY_BIT,
MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII_BIT,
MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII_BIT,
@@ -961,6 +1002,7 @@ enum mkt_eth_capabilities {
MTK_ETH_PATH_GMAC1_SGMII_BIT,
MTK_ETH_PATH_GMAC2_RGMII_BIT,
MTK_ETH_PATH_GMAC2_SGMII_BIT,
+ MTK_ETH_PATH_GMAC2_2P5GPHY_BIT,
MTK_ETH_PATH_GMAC2_GEPHY_BIT,
MTK_ETH_PATH_GDM1_ESW_BIT,
};
@@ -969,6 +1011,7 @@ enum mkt_eth_capabilities {
#define MTK_RGMII BIT_ULL(MTK_RGMII_BIT)
#define MTK_TRGMII BIT_ULL(MTK_TRGMII_BIT)
#define MTK_SGMII BIT_ULL(MTK_SGMII_BIT)
+#define MTK_2P5GPHY BIT_ULL(MTK_2P5GPHY_BIT)
#define MTK_ESW BIT_ULL(MTK_ESW_BIT)
#define MTK_GEPHY BIT_ULL(MTK_GEPHY_BIT)
#define MTK_MUX BIT_ULL(MTK_MUX_BIT)
@@ -991,6 +1034,8 @@ enum mkt_eth_capabilities {
BIT_ULL(MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY_BIT)
#define MTK_ETH_MUX_U3_GMAC2_TO_QPHY \
BIT_ULL(MTK_ETH_MUX_U3_GMAC2_TO_QPHY_BIT)
+#define MTK_ETH_MUX_GMAC2_TO_2P5GPHY \
+ BIT_ULL(MTK_ETH_MUX_GMAC2_TO_2P5GPHY_BIT)
#define MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII \
BIT_ULL(MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII_BIT)
#define MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII \
@@ -1002,6 +1047,7 @@ enum mkt_eth_capabilities {
#define MTK_ETH_PATH_GMAC1_SGMII BIT_ULL(MTK_ETH_PATH_GMAC1_SGMII_BIT)
#define MTK_ETH_PATH_GMAC2_RGMII BIT_ULL(MTK_ETH_PATH_GMAC2_RGMII_BIT)
#define MTK_ETH_PATH_GMAC2_SGMII BIT_ULL(MTK_ETH_PATH_GMAC2_SGMII_BIT)
+#define MTK_ETH_PATH_GMAC2_2P5GPHY BIT_ULL(MTK_ETH_PATH_GMAC2_2P5GPHY_BIT)
#define MTK_ETH_PATH_GMAC2_GEPHY BIT_ULL(MTK_ETH_PATH_GMAC2_GEPHY_BIT)
#define MTK_ETH_PATH_GDM1_ESW BIT_ULL(MTK_ETH_PATH_GDM1_ESW_BIT)
@@ -1011,6 +1057,7 @@ enum mkt_eth_capabilities {
#define MTK_GMAC2_RGMII (MTK_ETH_PATH_GMAC2_RGMII | MTK_RGMII)
#define MTK_GMAC2_SGMII (MTK_ETH_PATH_GMAC2_SGMII | MTK_SGMII)
#define MTK_GMAC2_GEPHY (MTK_ETH_PATH_GMAC2_GEPHY | MTK_GEPHY)
+#define MTK_GMAC2_2P5GPHY (MTK_ETH_PATH_GMAC2_2P5GPHY | MTK_2P5GPHY)
#define MTK_GDM1_ESW (MTK_ETH_PATH_GDM1_ESW | MTK_ESW)
/* MUXes present on SoCs */
@@ -1030,6 +1077,10 @@ enum mkt_eth_capabilities {
(MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII | MTK_MUX | \
MTK_SHARED_SGMII)
+/* 2: GMAC2 -> 2P5GPHY */
+#define MTK_MUX_GMAC2_TO_2P5GPHY \
+ (MTK_ETH_MUX_GMAC2_TO_2P5GPHY | MTK_MUX | MTK_INFRA)
+
/* 0: GMACx -> GEPHY, 1: GMACx -> SGMII where x is 1 or 2 */
#define MTK_MUX_GMAC12_TO_GEPHY_SGMII \
(MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII | MTK_MUX)
@@ -1065,8 +1116,9 @@ enum mkt_eth_capabilities {
MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA | \
MTK_RSTCTRL_PPE1 | MTK_SRAM)
-#define MT7988_CAPS (MTK_36BIT_DMA | MTK_GDM1_ESW | MTK_QDMA | \
- MTK_RSTCTRL_PPE1 | MTK_RSTCTRL_PPE2 | MTK_SRAM)
+#define MT7988_CAPS (MTK_36BIT_DMA | MTK_GDM1_ESW | MTK_GMAC2_2P5GPHY | \
+ MTK_MUX_GMAC2_TO_2P5GPHY | MTK_QDMA | MTK_RSTCTRL_PPE1 | \
+ MTK_RSTCTRL_PPE2 | MTK_SRAM)
struct mtk_tx_dma_desc_info {
dma_addr_t addr;
@@ -1126,7 +1178,7 @@ struct mtk_reg_map {
};
/* struct mtk_eth_data - This is the structure holding all differences
- * among various plaforms
+ * among various platforms
* @reg_map Soc register map.
* @ana_rgc3: The offset for register ANA_RGC3 related to
* sgmiisys syscon
@@ -1226,7 +1278,7 @@ struct mtk_soc_data {
* @mii_bus: If there is a bus we need to create an instance for it
* @pending_work: The workqueue used to reset the dma ring
* @state: Initialization and runtime state of the device
- * @soc: Holding specific data among vaious SoCs
+ * @soc: Holding specific data among various SoCs
*/
struct mtk_eth {
@@ -1260,6 +1312,7 @@ struct mtk_eth {
struct clk *clks[MTK_CLK_MAX];
struct mii_bus *mii_bus;
+ unsigned int mdc_divider;
struct work_struct pending_work;
unsigned long state;
@@ -1417,6 +1470,23 @@ static inline u32 mtk_get_ib2_multicast_mask(struct mtk_eth *eth)
return MTK_FOE_IB2_MULTICAST;
}
+static inline bool mtk_interface_mode_is_xgmii(struct mtk_eth *eth,
+ phy_interface_t interface)
+{
+ if (!mtk_is_netsys_v3_or_greater(eth))
+ return false;
+
+ switch (interface) {
+ case PHY_INTERFACE_MODE_INTERNAL:
+ case PHY_INTERFACE_MODE_USXGMII:
+ case PHY_INTERFACE_MODE_10GBASER:
+ case PHY_INTERFACE_MODE_5GBASER:
+ return true;
+ default:
+ return false;
+ }
+}
+
/* read the hardware status register */
void mtk_stats_update_mac(struct mtk_mac *mac);
@@ -1425,6 +1495,7 @@ u32 mtk_r32(struct mtk_eth *eth, unsigned reg);
u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned int reg);
int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id);
+int mtk_gmac_2p5gphy_path_setup(struct mtk_eth *eth, int mac_id);
int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id);
int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id);
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
index f20bb390df3a..c855fb799ce1 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
@@ -34,8 +34,10 @@ struct mtk_flow_data {
u16 vlan_in;
struct {
- u16 id;
- __be16 proto;
+ struct {
+ u16 id;
+ __be16 proto;
+ } vlans[2];
u8 num;
} vlan;
struct {
@@ -349,18 +351,19 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f,
case FLOW_ACTION_CSUM:
break;
case FLOW_ACTION_VLAN_PUSH:
- if (data.vlan.num == 1 ||
+ if (data.vlan.num + data.pppoe.num == 2 ||
act->vlan.proto != htons(ETH_P_8021Q))
return -EOPNOTSUPP;
- data.vlan.id = act->vlan.vid;
- data.vlan.proto = act->vlan.proto;
+ data.vlan.vlans[data.vlan.num].id = act->vlan.vid;
+ data.vlan.vlans[data.vlan.num].proto = act->vlan.proto;
data.vlan.num++;
break;
case FLOW_ACTION_VLAN_POP:
break;
case FLOW_ACTION_PPPOE_PUSH:
- if (data.pppoe.num == 1)
+ if (data.pppoe.num == 1 ||
+ data.vlan.num == 2)
return -EOPNOTSUPP;
data.pppoe.sid = act->pppoe.sid;
@@ -450,12 +453,9 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f,
if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
foe.bridge.vlan = data.vlan_in;
- if (data.vlan.num == 1) {
- if (data.vlan.proto != htons(ETH_P_8021Q))
- return -EOPNOTSUPP;
+ for (i = 0; i < data.vlan.num; i++)
+ mtk_foe_entry_set_vlan(eth, &foe, data.vlan.vlans[i].id);
- mtk_foe_entry_set_vlan(eth, &foe, data.vlan.id);
- }
if (data.pppoe.num == 1)
mtk_foe_entry_set_pppoe(eth, &foe, data.pppoe.sid);
diff --git a/drivers/net/ethernet/mediatek/mtk_star_emac.c b/drivers/net/ethernet/mediatek/mtk_star_emac.c
index 25989c79c92e..b83886a41121 100644
--- a/drivers/net/ethernet/mediatek/mtk_star_emac.c
+++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c
@@ -1163,6 +1163,7 @@ static int mtk_star_tx_poll(struct napi_struct *napi, int budget)
struct net_device *ndev = priv->ndev;
unsigned int head = ring->head;
unsigned int entry = ring->tail;
+ unsigned long flags;
while (entry != head && count < (MTK_STAR_RING_NUM_DESCS - 1)) {
ret = mtk_star_tx_complete_one(priv);
@@ -1182,9 +1183,9 @@ static int mtk_star_tx_poll(struct napi_struct *napi, int budget)
netif_wake_queue(ndev);
if (napi_complete(napi)) {
- spin_lock(&priv->lock);
+ spin_lock_irqsave(&priv->lock, flags);
mtk_star_enable_dma_irq(priv, false, true);
- spin_unlock(&priv->lock);
+ spin_unlock_irqrestore(&priv->lock, flags);
}
return 0;
@@ -1341,16 +1342,16 @@ push_new_skb:
static int mtk_star_rx_poll(struct napi_struct *napi, int budget)
{
struct mtk_star_priv *priv;
+ unsigned long flags;
int work_done = 0;
priv = container_of(napi, struct mtk_star_priv, rx_napi);
work_done = mtk_star_rx(priv, budget);
- if (work_done < budget) {
- napi_complete_done(napi, work_done);
- spin_lock(&priv->lock);
+ if (work_done < budget && napi_complete_done(napi, work_done)) {
+ spin_lock_irqsave(&priv->lock, flags);
mtk_star_enable_dma_irq(priv, true, false);
- spin_unlock(&priv->lock);
+ spin_unlock_irqrestore(&priv->lock, flags);
}
return work_done;
@@ -1427,15 +1428,10 @@ static int mtk_star_mdio_init(struct net_device *ndev)
of_node = dev->of_node;
- mdio_node = of_get_child_by_name(of_node, "mdio");
+ mdio_node = of_get_available_child_by_name(of_node, "mdio");
if (!mdio_node)
return -ENODEV;
- if (!of_device_is_available(mdio_node)) {
- ret = -ENODEV;
- goto out_put_node;
- }
-
priv->mii = devm_mdiobus_alloc(dev);
if (!priv->mii) {
ret = -ENOMEM;
@@ -1467,6 +1463,8 @@ static __maybe_unused int mtk_star_suspend(struct device *dev)
if (netif_running(ndev))
mtk_star_disable(ndev);
+ netif_device_detach(ndev);
+
clk_bulk_disable_unprepare(MTK_STAR_NCLKS, priv->clks);
return 0;
@@ -1491,6 +1489,8 @@ static __maybe_unused int mtk_star_resume(struct device *dev)
clk_bulk_disable_unprepare(MTK_STAR_NCLKS, priv->clks);
}
+ netif_device_attach(ndev);
+
return ret;
}
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
index e212a4ba9275..351dd152f4f3 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -2000,7 +2000,7 @@ mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask)
if (mtk_wed_is_v3_or_greater(dev->hw))
wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_TKID_ALI_EN);
- /* initail tx interrupt trigger */
+ /* initial tx interrupt trigger */
wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX,
MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN |
MTK_WED_WPDMA_INT_CTRL_TX0_DONE_CLR |
@@ -2011,7 +2011,7 @@ mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask)
FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX1_DONE_TRIG,
dev->wlan.tx_tbit[1]));
- /* initail txfree interrupt trigger */
+ /* initial txfree interrupt trigger */
wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX_FREE,
MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_EN |
MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_CLR |
diff --git a/drivers/net/ethernet/mellanox/mlx4/Kconfig b/drivers/net/ethernet/mellanox/mlx4/Kconfig
index 825e05fb8607..0b1cb340206f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx4/Kconfig
@@ -7,6 +7,7 @@ config MLX4_EN
tristate "Mellanox Technologies 1/10/40Gbit Ethernet support"
depends on PCI && NETDEVICES && ETHERNET && INET
depends on PTP_1588_CLOCK_OPTIONAL
+ select PAGE_POOL
select MLX4_CORE
help
This driver supports Mellanox Technologies ConnectX Ethernet
diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c
index b330020dc0d6..07b061a97a6e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx4/alloc.c
@@ -526,28 +526,6 @@ out:
return res;
}
-u32 mlx4_zone_free_entries(struct mlx4_zone_allocator *zones, u32 uid, u32 obj, u32 count)
-{
- struct mlx4_zone_entry *zone;
- int res = 0;
-
- spin_lock(&zones->lock);
-
- zone = __mlx4_find_zone_by_uid(zones, uid);
-
- if (NULL == zone) {
- res = -1;
- goto out;
- }
-
- __mlx4_free_from_zone(zone, obj, count);
-
-out:
- spin_unlock(&zones->lock);
-
- return res;
-}
-
u32 mlx4_zone_free_entries_unique(struct mlx4_zone_allocator *zones, u32 obj, u32 count)
{
struct mlx4_zone_entry *zone;
@@ -682,9 +660,9 @@ static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device)
}
static int mlx4_alloc_db_from_pgdir(struct mlx4_db_pgdir *pgdir,
- struct mlx4_db *db, int order)
+ struct mlx4_db *db, unsigned int order)
{
- int o;
+ unsigned int o;
int i;
for (o = order; o <= 1; ++o) {
@@ -712,7 +690,7 @@ found:
return 0;
}
-int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order)
+int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, unsigned int order)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_db_pgdir *pgdir;
diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c
index 0d8a362c2673..edcc6f662618 100644
--- a/drivers/net/ethernet/mellanox/mlx4/catas.c
+++ b/drivers/net/ethernet/mellanox/mlx4/catas.c
@@ -236,7 +236,7 @@ static void dump_err_buf(struct mlx4_dev *dev)
static void poll_catas(struct timer_list *t)
{
- struct mlx4_priv *priv = from_timer(priv, t, catas_err.timer);
+ struct mlx4_priv *priv = timer_container_of(priv, t, catas_err.timer);
struct mlx4_dev *dev = &priv->dev;
u32 slave_read;
@@ -305,7 +305,7 @@ void mlx4_stop_catas_poll(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
- del_timer_sync(&priv->catas_err.timer);
+ timer_delete_sync(&priv->catas_err.timer);
if (priv->catas_err.map) {
iounmap(priv->catas_err.map);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
index cd754cd76bde..d73a2044dc26 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
@@ -249,7 +249,7 @@ static const struct ptp_clock_info mlx4_en_ptp_clock_info = {
static u32 freq_to_shift(u16 freq)
{
u32 freq_khz = freq * 1000;
- u64 max_val_cycles = freq_khz * 1000 * MLX4_EN_WRAP_AROUND_SEC;
+ u64 max_val_cycles = freq_khz * 1000ULL * MLX4_EN_WRAP_AROUND_SEC;
u64 max_val_cycles_rounded = 1ULL << fls64(max_val_cycles - 1);
/* calculate max possible multiplier in order to fit in 64bit */
u64 max_mul = div64_u64(ULLONG_MAX, max_val_cycles_rounded);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index cd17a3f4faf8..a68cd3f0304c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -1897,6 +1897,7 @@ static int mlx4_en_get_ts_info(struct net_device *dev,
if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) {
info->so_timestamping |=
SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 15c57e9517e9..b33285d755b9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -48,60 +48,43 @@
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ip6_checksum.h>
#endif
+#include <net/page_pool/helpers.h>
#include "mlx4_en.h"
-static int mlx4_alloc_page(struct mlx4_en_priv *priv,
- struct mlx4_en_rx_alloc *frag,
- gfp_t gfp)
-{
- struct page *page;
- dma_addr_t dma;
-
- page = alloc_page(gfp);
- if (unlikely(!page))
- return -ENOMEM;
- dma = dma_map_page(priv->ddev, page, 0, PAGE_SIZE, priv->dma_dir);
- if (unlikely(dma_mapping_error(priv->ddev, dma))) {
- __free_page(page);
- return -ENOMEM;
- }
- frag->page = page;
- frag->dma = dma;
- frag->page_offset = priv->rx_headroom;
- return 0;
-}
-
static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring,
struct mlx4_en_rx_desc *rx_desc,
struct mlx4_en_rx_alloc *frags,
gfp_t gfp)
{
+ dma_addr_t dma;
int i;
for (i = 0; i < priv->num_frags; i++, frags++) {
if (!frags->page) {
- if (mlx4_alloc_page(priv, frags, gfp)) {
+ frags->page = page_pool_alloc_pages(ring->pp, gfp);
+ if (!frags->page) {
ring->alloc_fail++;
return -ENOMEM;
}
+ page_pool_fragment_page(frags->page, 1);
+ frags->page_offset = priv->rx_headroom;
+
ring->rx_alloc_pages++;
}
- rx_desc->data[i].addr = cpu_to_be64(frags->dma +
- frags->page_offset);
+ dma = page_pool_get_dma_addr(frags->page);
+ rx_desc->data[i].addr = cpu_to_be64(dma + frags->page_offset);
}
return 0;
}
static void mlx4_en_free_frag(const struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_ring *ring,
struct mlx4_en_rx_alloc *frag)
{
- if (frag->page) {
- dma_unmap_page(priv->ddev, frag->dma,
- PAGE_SIZE, priv->dma_dir);
- __free_page(frag->page);
- }
+ if (frag->page)
+ page_pool_put_full_page(ring->pp, frag->page, false);
/* We need to clear all fields, otherwise a change of priv->log_rx_info
 * could lead to seeing garbage later in frag->page.
*/
@@ -141,18 +124,6 @@ static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
(index << ring->log_stride);
struct mlx4_en_rx_alloc *frags = ring->rx_info +
(index << priv->log_rx_info);
- if (likely(ring->page_cache.index > 0)) {
- /* XDP uses a single page per frame */
- if (!frags->page) {
- ring->page_cache.index--;
- frags->page = ring->page_cache.buf[ring->page_cache.index].page;
- frags->dma = ring->page_cache.buf[ring->page_cache.index].dma;
- }
- frags->page_offset = XDP_PACKET_HEADROOM;
- rx_desc->data[0].addr = cpu_to_be64(frags->dma +
- XDP_PACKET_HEADROOM);
- return 0;
- }
return mlx4_en_alloc_frags(priv, ring, rx_desc, frags, gfp);
}
@@ -178,7 +149,7 @@ static void mlx4_en_free_rx_desc(const struct mlx4_en_priv *priv,
frags = ring->rx_info + (index << priv->log_rx_info);
for (nr = 0; nr < priv->num_frags; nr++) {
en_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
- mlx4_en_free_frag(priv, frags + nr);
+ mlx4_en_free_frag(priv, ring, frags + nr);
}
}
@@ -268,6 +239,7 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
u32 size, u16 stride, int node, int queue_index)
{
struct mlx4_en_dev *mdev = priv->mdev;
+ struct page_pool_params pp = {};
struct mlx4_en_rx_ring *ring;
int err = -ENOMEM;
int tmp;
@@ -286,9 +258,26 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
ring->log_stride = ffs(ring->stride) - 1;
ring->buf_size = ring->size * ring->stride + TXBB_SIZE;
- if (xdp_rxq_info_reg(&ring->xdp_rxq, priv->dev, queue_index, 0) < 0)
+ pp.flags = PP_FLAG_DMA_MAP;
+ pp.pool_size = size * DIV_ROUND_UP(priv->rx_skb_size, PAGE_SIZE);
+ pp.nid = node;
+ pp.napi = &priv->rx_cq[queue_index]->napi;
+ pp.netdev = priv->dev;
+ pp.dev = &mdev->dev->persist->pdev->dev;
+ pp.dma_dir = priv->dma_dir;
+
+ ring->pp = page_pool_create(&pp);
+ if (!ring->pp)
goto err_ring;
+ if (xdp_rxq_info_reg(&ring->xdp_rxq, priv->dev, queue_index, 0) < 0)
+ goto err_pp;
+
+ err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, MEM_TYPE_PAGE_POOL,
+ ring->pp);
+ if (err)
+ goto err_xdp_info;
+
tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
sizeof(struct mlx4_en_rx_alloc));
ring->rx_info = kvzalloc_node(tmp, GFP_KERNEL, node);
@@ -319,6 +308,8 @@ err_info:
ring->rx_info = NULL;
err_xdp_info:
xdp_rxq_info_unreg(&ring->xdp_rxq);
+err_pp:
+ page_pool_destroy(ring->pp);
err_ring:
kfree(ring);
*pring = NULL;
@@ -409,26 +400,6 @@ void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv)
}
}
-/* When the rx ring is running in page-per-packet mode, a released frame can go
- * directly into a small cache, to avoid unmapping or touching the page
- * allocator. In bpf prog performance scenarios, buffers are either forwarded
- * or dropped, never converted to skbs, so every page can come directly from
- * this cache when it is sized to be a multiple of the napi budget.
- */
-bool mlx4_en_rx_recycle(struct mlx4_en_rx_ring *ring,
- struct mlx4_en_rx_alloc *frame)
-{
- struct mlx4_en_page_cache *cache = &ring->page_cache;
-
- if (cache->index >= MLX4_EN_CACHE_SIZE)
- return false;
-
- cache->buf[cache->index].page = frame->page;
- cache->buf[cache->index].dma = frame->dma;
- cache->index++;
- return true;
-}
-
void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring **pring,
u32 size, u16 stride)
@@ -445,6 +416,7 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
xdp_rxq_info_unreg(&ring->xdp_rxq);
mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
kvfree(ring->rx_info);
+ page_pool_destroy(ring->pp);
ring->rx_info = NULL;
kfree(ring);
*pring = NULL;
@@ -453,14 +425,6 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring)
{
- int i;
-
- for (i = 0; i < ring->page_cache.index; i++) {
- dma_unmap_page(priv->ddev, ring->page_cache.buf[i].dma,
- PAGE_SIZE, priv->dma_dir);
- put_page(ring->page_cache.buf[i].page);
- }
- ring->page_cache.index = 0;
mlx4_en_free_rx_buf(priv, ring);
if (ring->stride <= TXBB_SIZE)
ring->buf -= TXBB_SIZE;
@@ -487,7 +451,7 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
if (unlikely(!page))
goto fail;
- dma = frags->dma;
+ dma = page_pool_get_dma_addr(page);
dma_sync_single_range_for_cpu(priv->ddev, dma, frags->page_offset,
frag_size, priv->dma_dir);
@@ -498,6 +462,7 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
if (frag_info->frag_stride == PAGE_SIZE / 2) {
frags->page_offset ^= PAGE_SIZE / 2;
release = page_count(page) != 1 ||
+ atomic_long_read(&page->pp_ref_count) != 1 ||
page_is_pfmemalloc(page) ||
page_to_nid(page) != numa_mem_id();
} else if (!priv->rx_headroom) {
@@ -511,10 +476,9 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
release = frags->page_offset + frag_info->frag_size > PAGE_SIZE;
}
if (release) {
- dma_unmap_page(priv->ddev, dma, PAGE_SIZE, priv->dma_dir);
frags->page = NULL;
} else {
- page_ref_inc(page);
+ page_pool_ref_page(page);
}
nr++;
@@ -784,7 +748,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
/* Get pointer to first fragment since we haven't
* skb yet and cast it to ethhdr struct
*/
- dma = frags[0].dma + frags[0].page_offset;
+ dma = page_pool_get_dma_addr(frags[0].page);
+ dma += frags[0].page_offset;
dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh),
DMA_FROM_DEVICE);
@@ -823,7 +788,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
void *orig_data;
u32 act;
- dma = frags[0].dma + frags[0].page_offset;
+ dma = page_pool_get_dma_addr(frags[0].page);
+ dma += frags[0].page_offset;
dma_sync_single_for_cpu(priv->ddev, dma,
priv->frag_info[0].frag_size,
DMA_FROM_DEVICE);
@@ -886,6 +852,7 @@ xdp_drop_no_cnt:
skb = napi_get_frags(&cq->napi);
if (unlikely(!skb))
goto next;
+ skb_mark_for_recycle(skb);
if (unlikely(ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL)) {
u64 timestamp = mlx4_en_get_cqe_ts(cqe);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 1ddb11cb25f9..87f35bcbeff8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -44,6 +44,7 @@
#include <linux/ipv6.h>
#include <linux/indirect_call_wrapper.h>
#include <net/ipv6.h>
+#include <net/page_pool/helpers.h>
#include "mlx4_en.h"
@@ -350,16 +351,10 @@ u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv,
int napi_mode)
{
struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
- struct mlx4_en_rx_alloc frame = {
- .page = tx_info->page,
- .dma = tx_info->map0_dma,
- };
-
- if (!napi_mode || !mlx4_en_rx_recycle(ring->recycle_ring, &frame)) {
- dma_unmap_page(priv->ddev, tx_info->map0_dma,
- PAGE_SIZE, priv->dma_dir);
- put_page(tx_info->page);
- }
+ struct page_pool *pool = ring->recycle_ring->pp;
+
+ /* Note that napi_mode = 0 means ndo_close() path, not budget = 0 */
+ page_pool_put_full_page(pool, tx_info->page, !!napi_mode);
return tx_info->nr_txbb;
}
@@ -450,6 +445,8 @@ int mlx4_en_process_tx_cq(struct net_device *dev,
if (unlikely(!priv->port_up))
return 0;
+ if (unlikely(!napi_budget) && cq->type == TX_XDP)
+ return 0;
netdev_txq_bql_complete_prefetchw(ring->tx_queue);
@@ -1194,7 +1191,7 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring,
tx_desc = ring->buf + (index << LOG_TXBB_SIZE);
data = &tx_desc->data;
- dma = frame->dma;
+ dma = page_pool_get_dma_addr(frame->page);
tx_info->page = frame->page;
frame->page = NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index d7d856d1758a..b213094ea30f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -1478,12 +1478,6 @@ void mlx4_zone_allocator_destroy(struct mlx4_zone_allocator *zone_alloc);
u32 mlx4_zone_alloc_entries(struct mlx4_zone_allocator *zones, u32 uid, int count,
int align, u32 skip_mask, u32 *puid);
-/* Free <count> objects, start from <obj> of the uid <uid> from zone_allocator
- * <zones>.
- */
-u32 mlx4_zone_free_entries(struct mlx4_zone_allocator *zones,
- u32 uid, u32 obj, u32 count);
-
/* If <zones> was allocated with MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP, instead of
* specifying the uid when freeing an object, zone allocator could figure it by
* itself. Other parameters are similar to mlx4_zone_free.
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 28b70dcc652e..ad0d91a75184 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -247,20 +247,11 @@ struct mlx4_en_tx_desc {
struct mlx4_en_rx_alloc {
struct page *page;
- dma_addr_t dma;
u32 page_offset;
};
#define MLX4_EN_CACHE_SIZE (2 * NAPI_POLL_WEIGHT)
-struct mlx4_en_page_cache {
- u32 index;
- struct {
- struct page *page;
- dma_addr_t dma;
- } buf[MLX4_EN_CACHE_SIZE];
-};
-
enum {
MLX4_EN_TX_RING_STATE_RECOVERING,
};
@@ -335,14 +326,14 @@ struct mlx4_en_rx_ring {
u16 stride;
u16 log_stride;
u16 cqn; /* index of port CQ associated with this ring */
+ u8 fcs_del;
u32 prod;
u32 cons;
u32 buf_size;
- u8 fcs_del;
+ struct page_pool *pp;
void *buf;
void *rx_info;
struct bpf_prog __rcu *xdp_prog;
- struct mlx4_en_page_cache page_cache;
unsigned long bytes;
unsigned long packets;
unsigned long csum_ok;
@@ -707,8 +698,6 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring,
struct mlx4_en_priv *priv, unsigned int length,
int tx_ind, bool *doorbell_pending);
void mlx4_en_xmit_doorbell(struct mlx4_en_tx_ring *ring);
-bool mlx4_en_rx_recycle(struct mlx4_en_rx_ring *ring,
- struct mlx4_en_rx_alloc *frame);
int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring **pring,
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index d7444782bfdd..698a5d1f0d7e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -106,7 +106,7 @@ static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
buddy->max_order = max_order;
spin_lock_init(&buddy->lock);
- buddy->bits = kcalloc(buddy->max_order + 1, sizeof(long *),
+ buddy->bits = kcalloc(buddy->max_order + 1, sizeof(*buddy->bits),
GFP_KERNEL);
buddy->num_free = kcalloc(buddy->max_order + 1, sizeof(*buddy->num_free),
GFP_KERNEL);
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 4e43f4a7d246..e3d0b13c1610 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -147,26 +147,6 @@ static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
return err;
}
-int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx)
-{
- struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
- struct mlx4_mac_table *table = &info->mac_table;
- int i;
-
- for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
- if (!table->refs[i])
- continue;
-
- if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
- *idx = i;
- return 0;
- }
- }
-
- return -ENOENT;
-}
-EXPORT_SYMBOL_GPL(mlx4_find_cached_mac);
-
static bool mlx4_need_mf_bond(struct mlx4_dev *dev)
{
int i, num_eth_ports = 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index ea6070180c96..6ec7d6e0181d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -31,6 +31,7 @@ config MLX5_CORE_EN
bool "Mellanox 5th generation network adapters (ConnectX series) Ethernet support"
depends on NETDEVICES && ETHERNET && INET && PCI && MLX5_CORE
select PAGE_POOL
+ select PAGE_POOL_STATS
select DIMLIB
help
Ethernet support in Mellanox Technologies ConnectX-4 NIC.
@@ -80,8 +81,8 @@ config MLX5_BRIDGE
default y
help
mlx5 ConnectX offloads support for Ethernet Bridging (BRIDGE).
- Enable adding representors of mlx5 uplink and VF ports to Bridge and
- offloading rules for traffic between such ports. Supports VLANs (trunk and
+ Enable offloading FDB rules from a bridge device containing
+ representors of mlx5 uplink and VF ports. Supports VLANs (trunk and
access modes).
config MLX5_CLS_ACT
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index be3d0876c521..d292e6a9e22c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -17,7 +17,7 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
fs_counters.o fs_ft_pool.o rl.o lag/debugfs.o lag/lag.o dev.o events.o wq.o lib/gid.o \
lib/devcom.o lib/pci_vsc.o lib/dm.o lib/fs_ttc.o diag/fs_tracepoint.o \
diag/fw_tracer.o diag/crdump.o devlink.o diag/rsc_dump.o diag/reporter_vnic.o \
- fw_reset.o qos.o lib/tout.o lib/aso.o wc.o
+ fw_reset.o qos.o lib/tout.o lib/aso.o wc.o fs_pool.o
#
# Netdev basic
@@ -60,6 +60,7 @@ mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en/tc/act/act.o en/tc/act/drop.o en/tc/a
ifneq ($(CONFIG_MLX5_TC_CT),)
mlx5_core-y += en/tc_ct.o en/tc/ct_fs_dmfs.o
mlx5_core-$(CONFIG_MLX5_SW_STEERING) += en/tc/ct_fs_smfs.o
+ mlx5_core-$(CONFIG_MLX5_HW_STEERING) += en/tc/ct_fs_hmfs.o
endif
mlx5_core-$(CONFIG_MLX5_TC_SAMPLE) += en/tc/sample.o
@@ -123,6 +124,7 @@ mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/sws/dr_domain.o \
steering/sws/dr_ste_v0.o \
steering/sws/dr_ste_v1.o \
steering/sws/dr_ste_v2.o \
+ steering/sws/dr_ste_v3.o \
steering/sws/dr_cmd.o \
steering/sws/dr_fw.o \
steering/sws/dr_action.o \
@@ -150,8 +152,10 @@ mlx5_core-$(CONFIG_MLX5_HW_STEERING) += steering/hws/cmd.o \
steering/hws/bwc.o \
steering/hws/debug.o \
steering/hws/vport.o \
- steering/hws/bwc_complex.o
-
+ steering/hws/bwc_complex.o \
+ steering/hws/fs_hws_pools.o \
+ steering/hws/fs_hws.o \
+ steering/hws/action_ste_pool.o
#
# SF device
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 6bd8a18e3af3..b1aeea7c4a91 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -94,6 +94,11 @@ static u16 in_to_opcode(void *in)
return MLX5_GET(mbox_in, in, opcode);
}
+static u16 in_to_uid(void *in)
+{
+ return MLX5_GET(mbox_in, in, uid);
+}
+
/* Returns true for opcodes that might be triggered very frequently and throttle
* the command interface. Limit their command slots usage.
*/
@@ -823,7 +828,7 @@ static void cmd_status_print(struct mlx5_core_dev *dev, void *in, void *out)
opcode = in_to_opcode(in);
op_mod = MLX5_GET(mbox_in, in, op_mod);
- uid = MLX5_GET(mbox_in, in, uid);
+ uid = in_to_uid(in);
status = MLX5_GET(mbox_out, out, status);
if (!uid && opcode != MLX5_CMD_OP_DESTROY_MKEY &&
@@ -922,8 +927,7 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force
static void cb_timeout_handler(struct work_struct *work)
{
- struct delayed_work *dwork = container_of(work, struct delayed_work,
- work);
+ struct delayed_work *dwork = to_delayed_work(work);
struct mlx5_cmd_work_ent *ent = container_of(dwork,
struct mlx5_cmd_work_ent,
cb_timeout_work);
@@ -1013,6 +1017,7 @@ static void cmd_work_handler(struct work_struct *work)
complete(&ent->done);
}
up(&cmd->vars.sem);
+ complete(&ent->slotted);
return;
}
} else {
@@ -1870,6 +1875,17 @@ static int is_manage_pages(void *in)
return in_to_opcode(in) == MLX5_CMD_OP_MANAGE_PAGES;
}
+static bool mlx5_has_privileged_uid(struct mlx5_core_dev *dev)
+{
+ return !xa_empty(&dev->cmd.vars.privileged_uids);
+}
+
+static bool mlx5_cmd_is_privileged_uid(struct mlx5_core_dev *dev,
+ u16 uid)
+{
+ return !!xa_load(&dev->cmd.vars.privileged_uids, uid);
+}
+
/* Notes:
* 1. Callback functions may not sleep
 * 2. Page queue commands do not support asynchronous completion
@@ -1880,7 +1896,9 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
{
struct mlx5_cmd_msg *inb, *outb;
u16 opcode = in_to_opcode(in);
- bool throttle_op;
+ bool throttle_locked = false;
+ bool unpriv_locked = false;
+ u16 uid = in_to_uid(in);
int pages_queue;
gfp_t gfp;
u8 token;
@@ -1889,12 +1907,17 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, opcode))
return -ENXIO;
- throttle_op = mlx5_cmd_is_throttle_opcode(opcode);
- if (throttle_op) {
- if (callback) {
- if (down_trylock(&dev->cmd.vars.throttle_sem))
- return -EBUSY;
- } else {
+ if (!callback) {
+ /* The semaphore is already held for callback commands. It was
+ * acquired in mlx5_cmd_exec_cb()
+ */
+ if (uid && mlx5_has_privileged_uid(dev)) {
+ if (!mlx5_cmd_is_privileged_uid(dev, uid)) {
+ unpriv_locked = true;
+ down(&dev->cmd.vars.unprivileged_sem);
+ }
+ } else if (mlx5_cmd_is_throttle_opcode(opcode)) {
+ throttle_locked = true;
down(&dev->cmd.vars.throttle_sem);
}
}
@@ -1940,8 +1963,11 @@ out_out:
out_in:
free_msg(dev, inb);
out_up:
- if (throttle_op)
+ if (throttle_locked)
up(&dev->cmd.vars.throttle_sem);
+ if (unpriv_locked)
+ up(&dev->cmd.vars.unprivileged_sem);
+
return err;
}
@@ -2103,18 +2129,22 @@ static void mlx5_cmd_exec_cb_handler(int status, void *_work)
struct mlx5_async_work *work = _work;
struct mlx5_async_ctx *ctx;
struct mlx5_core_dev *dev;
- u16 opcode;
+ bool throttle_locked;
+ bool unpriv_locked;
ctx = work->ctx;
dev = ctx->dev;
- opcode = work->opcode;
+ throttle_locked = work->throttle_locked;
+ unpriv_locked = work->unpriv_locked;
status = cmd_status_err(dev, status, work->opcode, work->op_mod, work->out);
work->user_callback(status, work);
/* Can't access "work" from this point on. It could have been freed in
* the callback.
*/
- if (mlx5_cmd_is_throttle_opcode(opcode))
+ if (throttle_locked)
up(&dev->cmd.vars.throttle_sem);
+ if (unpriv_locked)
+ up(&dev->cmd.vars.unprivileged_sem);
if (atomic_dec_and_test(&ctx->num_inflight))
complete(&ctx->inflight_done);
}
@@ -2123,6 +2153,8 @@ int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
void *out, int out_size, mlx5_async_cbk_t callback,
struct mlx5_async_work *work)
{
+ struct mlx5_core_dev *dev = ctx->dev;
+ u16 uid;
int ret;
work->ctx = ctx;
@@ -2130,11 +2162,43 @@ int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
work->opcode = in_to_opcode(in);
work->op_mod = MLX5_GET(mbox_in, in, op_mod);
work->out = out;
+ work->throttle_locked = false;
+ work->unpriv_locked = false;
+ uid = in_to_uid(in);
+
if (WARN_ON(!atomic_inc_not_zero(&ctx->num_inflight)))
return -EIO;
- ret = cmd_exec(ctx->dev, in, in_size, out, out_size,
+
+ if (uid && mlx5_has_privileged_uid(dev)) {
+ if (!mlx5_cmd_is_privileged_uid(dev, uid)) {
+ if (down_trylock(&dev->cmd.vars.unprivileged_sem)) {
+ ret = -EBUSY;
+ goto dec_num_inflight;
+ }
+ work->unpriv_locked = true;
+ }
+ } else if (mlx5_cmd_is_throttle_opcode(in_to_opcode(in))) {
+ if (down_trylock(&dev->cmd.vars.throttle_sem)) {
+ ret = -EBUSY;
+ goto dec_num_inflight;
+ }
+ work->throttle_locked = true;
+ }
+
+ ret = cmd_exec(dev, in, in_size, out, out_size,
mlx5_cmd_exec_cb_handler, work, false);
- if (ret && atomic_dec_and_test(&ctx->num_inflight))
+ if (ret)
+ goto sem_up;
+
+ return 0;
+
+sem_up:
+ if (work->throttle_locked)
+ up(&dev->cmd.vars.throttle_sem);
+ if (work->unpriv_locked)
+ up(&dev->cmd.vars.unprivileged_sem);
+dec_num_inflight:
+ if (atomic_dec_and_test(&ctx->num_inflight))
complete(&ctx->inflight_done);
return ret;
@@ -2370,10 +2434,16 @@ int mlx5_cmd_enable(struct mlx5_core_dev *dev)
sema_init(&cmd->vars.sem, cmd->vars.max_reg_cmds);
sema_init(&cmd->vars.pages_sem, 1);
sema_init(&cmd->vars.throttle_sem, DIV_ROUND_UP(cmd->vars.max_reg_cmds, 2));
+ sema_init(&cmd->vars.unprivileged_sem,
+ DIV_ROUND_UP(cmd->vars.max_reg_cmds, 2));
+
+ xa_init(&cmd->vars.privileged_uids);
cmd->pool = dma_pool_create("mlx5_cmd", mlx5_core_dma_dev(dev), size, align, 0);
- if (!cmd->pool)
- return -ENOMEM;
+ if (!cmd->pool) {
+ err = -ENOMEM;
+ goto err_destroy_xa;
+ }
err = alloc_cmd_page(dev, cmd);
if (err)
@@ -2407,6 +2477,8 @@ err_cmd_page:
free_cmd_page(dev, cmd);
err_free_pool:
dma_pool_destroy(cmd->pool);
+err_destroy_xa:
+ xa_destroy(&dev->cmd.vars.privileged_uids);
return err;
}
@@ -2419,6 +2491,7 @@ void mlx5_cmd_disable(struct mlx5_core_dev *dev)
destroy_msg_cache(dev);
free_cmd_page(dev, cmd);
dma_pool_destroy(cmd->pool);
+ xa_destroy(&dev->cmd.vars.privileged_uids);
}
void mlx5_cmd_set_state(struct mlx5_core_dev *dev,
@@ -2426,3 +2499,18 @@ void mlx5_cmd_set_state(struct mlx5_core_dev *dev,
{
dev->cmd.state = cmdif_state;
}
+
+int mlx5_cmd_add_privileged_uid(struct mlx5_core_dev *dev, u16 uid)
+{
+ return xa_insert(&dev->cmd.vars.privileged_uids, uid,
+ xa_mk_value(uid), GFP_KERNEL);
+}
+EXPORT_SYMBOL(mlx5_cmd_add_privileged_uid);
+
+void mlx5_cmd_remove_privileged_uid(struct mlx5_core_dev *dev, u16 uid)
+{
+ void *data = xa_erase(&dev->cmd.vars.privileged_uids, uid);
+
+ WARN(!data, "Privileged UID %u does not exist\n", uid);
+}
+EXPORT_SYMBOL(mlx5_cmd_remove_privileged_uid);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index 9a79674d27f1..891bbbbfbbf1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -228,8 +228,15 @@ enum {
MLX5_INTERFACE_PROTOCOL_VNET,
MLX5_INTERFACE_PROTOCOL_DPLL,
+ MLX5_INTERFACE_PROTOCOL_FWCTL,
};
+static bool is_fwctl_supported(struct mlx5_core_dev *dev)
+{
+ /* fwctl is most useful on PFs; prevent fwctl on SFs for now */
+ return MLX5_CAP_GEN(dev, uctx_cap) && !mlx5_core_is_sf(dev);
+}
+
static const struct mlx5_adev_device {
const char *suffix;
bool (*is_supported)(struct mlx5_core_dev *dev);
@@ -252,6 +259,8 @@ static const struct mlx5_adev_device {
.is_supported = &is_mp_supported },
[MLX5_INTERFACE_PROTOCOL_DPLL] = { .suffix = "dpll",
.is_supported = &is_dpll_supported },
+ [MLX5_INTERFACE_PROTOCOL_FWCTL] = { .suffix = "fwctl",
+ .is_supported = &is_fwctl_supported },
};
int mlx5_adev_idx_alloc(void)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index 98d4306929f3..73cd74644378 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -46,6 +46,9 @@ mlx5_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
u32 running_fw, stored_fw;
int err;
+ if (!mlx5_core_is_pf(dev))
+ return 0;
+
err = devlink_info_version_fixed_put(req, "fw.psid", dev->board_id);
if (err)
return err;
@@ -324,7 +327,8 @@ static const struct devlink_ops mlx5_devlink_ops = {
.rate_node_tx_max_set = mlx5_esw_devlink_rate_node_tx_max_set,
.rate_node_new = mlx5_esw_devlink_rate_node_new,
.rate_node_del = mlx5_esw_devlink_rate_node_del,
- .rate_leaf_parent_set = mlx5_esw_devlink_rate_parent_set,
+ .rate_leaf_parent_set = mlx5_esw_devlink_rate_leaf_parent_set,
+ .rate_node_parent_set = mlx5_esw_devlink_rate_node_parent_set,
#endif
#ifdef CONFIG_MLX5_SF_MANAGER
.port_new = mlx5_devlink_sf_port_new,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
index 9aed29fa4900..d6e736c1fb24 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
@@ -292,7 +292,7 @@ TRACE_EVENT(mlx5_fs_add_rule,
if (rule->dest_attr.type &
MLX5_FLOW_DESTINATION_TYPE_COUNTER)
__entry->counter_id =
- rule->dest_attr.counter_id;
+ mlx5_fc_id(rule->dest_attr.counter);
),
TP_printk("rule=%p fte=%p index=%u sw_action=<%s> [dst] %s\n",
__entry->rule, __entry->fte, __entry->index,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c
index c7216e84ef8c..86253a89c24c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c
@@ -13,6 +13,50 @@ struct mlx5_vnic_diag_stats {
__be64 query_vnic_env_out[MLX5_ST_SZ_QW(query_vnic_env_out)];
};
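+/* Report the vHCA's current ICM allocation, read via the VHCA_ICM_CTRL
+ * access register, as an "icm_consumption" pair in the health fmsg.
+ */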
+static void mlx5_reporter_vnic_diagnose_counter_icm(struct mlx5_core_dev *dev,
+ struct devlink_fmsg *fmsg,
+ u16 vport_num, bool other_vport)
+{
+ u32 out_icm_reg[MLX5_ST_SZ_DW(vhca_icm_ctrl_reg)] = {};
+ u32 in_icm_reg[MLX5_ST_SZ_DW(vhca_icm_ctrl_reg)] = {};
+ u32 out_reg[MLX5_ST_SZ_DW(nic_cap_reg)] = {};
+ u32 in_reg[MLX5_ST_SZ_DW(nic_cap_reg)] = {};
+ u32 cur_alloc_icm;
+ int vhca_icm_ctrl;
+ u16 vhca_id;
+ int err;
+
+ err = mlx5_core_access_reg(dev, in_reg, sizeof(in_reg), out_reg,
+ sizeof(out_reg), MLX5_REG_NIC_CAP, 0, 0);
+ if (err) {
+ mlx5_core_warn(dev, "Reading nic_cap_reg failed. err = %d\n", err);
+ return;
+ }
+ vhca_icm_ctrl = MLX5_GET(nic_cap_reg, out_reg, vhca_icm_ctrl);
+ if (!vhca_icm_ctrl)
+ return;
+
+ MLX5_SET(vhca_icm_ctrl_reg, in_icm_reg, vhca_id_valid, other_vport);
+ if (other_vport) {
+ err = mlx5_vport_get_vhca_id(dev, vport_num, &vhca_id);
+ if (err) {
+ mlx5_core_warn(dev, "vport to vhca_id failed. vport_num = %d, err = %d\n",
+ vport_num, err);
+ return;
+ }
+ MLX5_SET(vhca_icm_ctrl_reg, in_icm_reg, vhca_id, vhca_id);
+ }
+ err = mlx5_core_access_reg(dev, in_icm_reg, sizeof(in_icm_reg),
+ out_icm_reg, sizeof(out_icm_reg),
+ MLX5_REG_VHCA_ICM_CTRL, 0, 0);
+ if (err) {
+ mlx5_core_warn(dev, "Reading vhca_icm_ctrl failed. err = %d\n", err);
+ return;
+ }
+ cur_alloc_icm = MLX5_GET(vhca_icm_ctrl_reg, out_icm_reg, cur_alloc_icm);
+ devlink_fmsg_u32_pair_put(fmsg, "icm_consumption", cur_alloc_icm);
+}
+
void mlx5_reporter_vnic_diagnose_counters(struct mlx5_core_dev *dev,
struct devlink_fmsg *fmsg,
u16 vport_num, bool other_vport)
@@ -59,6 +103,8 @@ void mlx5_reporter_vnic_diagnose_counters(struct mlx5_core_dev *dev,
devlink_fmsg_u64_pair_put(fmsg, "handled_pkt_steering_fail",
VNIC_ENV_GET64(&vnic, handled_pkt_steering_fail));
}
+ if (MLX5_CAP_GEN(dev, nic_cap_reg))
+ mlx5_reporter_vnic_diagnose_counter_icm(dev, fmsg, vport_num, other_vport);
devlink_fmsg_obj_nest_end(fmsg);
devlink_fmsg_pair_nest_end(fmsg);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dpll.c b/drivers/net/ethernet/mellanox/mlx5/core/dpll.c
index 31142f6cc372..1e5522a19483 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dpll.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dpll.c
@@ -242,7 +242,7 @@ static int mlx5_dpll_clock_quality_level_get(const struct dpll_device *dpll,
return 0;
}
errout:
- NL_SET_ERR_MSG_MOD(extack, "Invalid clock quality level obtained from firmware\n");
+ NL_SET_ERR_MSG_MOD(extack, "Invalid clock quality level obtained from firmware");
return -EINVAL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 979fc56205e1..5b0d03b3efe8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -95,8 +95,6 @@ struct page_pool;
#define MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev) \
MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, order_base_2(MLX5E_RX_MAX_HEAD))
-#define MLX5_MPWRQ_MAX_LOG_WQE_SZ 18
-
/* Keep in sync with mlx5e_mpwrq_log_wqe_sz.
* These are theoretical maximums, which can be further restricted by
* capabilities. These values are used for static resource allocations and
@@ -232,16 +230,22 @@ struct mlx5e_rx_wqe_cyc {
DECLARE_FLEX_ARRAY(struct mlx5_wqe_data_seg, data);
};
-struct mlx5e_umr_wqe {
+struct mlx5e_umr_wqe_hdr {
struct mlx5_wqe_ctrl_seg ctrl;
struct mlx5_wqe_umr_ctrl_seg uctrl;
struct mlx5_mkey_seg mkc;
+};
+
+struct mlx5e_umr_wqe {
+ struct mlx5e_umr_wqe_hdr hdr;
union {
DECLARE_FLEX_ARRAY(struct mlx5_mtt, inline_mtts);
DECLARE_FLEX_ARRAY(struct mlx5_klm, inline_klms);
DECLARE_FLEX_ARRAY(struct mlx5_ksm, inline_ksms);
};
};
+static_assert(offsetof(struct mlx5e_umr_wqe, inline_mtts) == sizeof(struct mlx5e_umr_wqe_hdr),
+ "struct members should be included in struct mlx5e_umr_wqe_hdr, not in struct mlx5e_umr_wqe");
enum mlx5e_priv_flag {
MLX5E_PFLAG_RX_CQE_BASED_MODER,
@@ -386,7 +390,6 @@ enum {
MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE,
MLX5E_SQ_STATE_PENDING_XSK_TX,
MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC,
- MLX5E_SQ_STATE_XDP_MULTIBUF,
MLX5E_NUM_SQ_STATES, /* Must be kept last */
};
@@ -395,6 +398,7 @@ struct mlx5e_tx_mpwqe {
struct mlx5e_tx_wqe *wqe;
u32 bytes_count;
u8 ds_count;
+ u8 ds_count_max;
u8 pkt_count;
u8 inline_on;
};
@@ -516,6 +520,12 @@ struct mlx5e_xdpsq {
struct mlx5e_channel *channel;
} ____cacheline_aligned_in_smp;
+struct mlx5e_xdp_buff {
+ struct xdp_buff xdp;
+ struct mlx5_cqe64 *cqe;
+ struct mlx5e_rq *rq;
+};
+
struct mlx5e_ktls_resync_resp;
struct mlx5e_icosq {
@@ -660,7 +670,7 @@ struct mlx5e_rq {
} wqe;
struct {
struct mlx5_wq_ll wq;
- struct mlx5e_umr_wqe umr_wqe;
+ struct mlx5e_umr_wqe_hdr umr_wqe;
struct mlx5e_mpw_info *info;
mlx5e_fp_skb_from_cqe_mpwrq skb_from_cqe_mpwrq;
__be32 umr_mkey_be;
@@ -712,6 +722,7 @@ struct mlx5e_rq {
struct mlx5e_xdpsq *xdpsq;
DECLARE_BITMAP(flags, 8);
struct page_pool *page_pool;
+ struct mlx5e_xdp_buff mxbuf;
/* AF_XDP zero-copy */
struct xsk_buff_pool *xsk_pool;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
index 1e8b7d330701..b5c3a2a9d2a5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
@@ -84,9 +84,9 @@ enum {
MLX5E_ARFS_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1,
#endif
#ifdef CONFIG_MLX5_EN_IPSEC
- MLX5E_ACCEL_FS_POL_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1,
- MLX5E_ACCEL_FS_ESP_FT_LEVEL,
+ MLX5E_ACCEL_FS_ESP_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1,
MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL,
+ MLX5E_ACCEL_FS_POL_FT_LEVEL,
MLX5E_ACCEL_FS_ESP_FT_ROCE_LEVEL,
#endif
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
index 81523825faa2..cb972b2d46e2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
@@ -114,6 +114,7 @@ int mlx5e_health_recover_channels(struct mlx5e_priv *priv)
int err = 0;
rtnl_lock();
+ netdev_lock(priv->netdev);
mutex_lock(&priv->state_lock);
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
@@ -123,6 +124,7 @@ int mlx5e_health_recover_channels(struct mlx5e_priv *priv)
out:
mutex_unlock(&priv->state_lock);
+ netdev_unlock(priv->netdev);
rtnl_unlock();
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index 64b62ed17b07..58ec5e44aa7a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -10,6 +10,9 @@
#include <net/page_pool/types.h>
#include <net/xdp_sock_drv.h>
+#define MLX5_MPWRQ_MAX_LOG_WQE_SZ 18
+#define MLX5_REP_MPWRQ_MAX_LOG_WQE_SZ 17
+
static u8 mlx5e_mpwrq_min_page_shift(struct mlx5_core_dev *mdev)
{
u8 min_page_shift = MLX5_CAP_GEN_2(mdev, log_min_mkey_entity_size);
@@ -103,18 +106,22 @@ u8 mlx5e_mpwrq_log_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift,
enum mlx5e_mpwrq_umr_mode umr_mode)
{
u8 umr_entry_size = mlx5e_mpwrq_umr_entry_size(umr_mode);
- u8 max_pages_per_wqe, max_log_mpwqe_size;
+ u8 max_pages_per_wqe, max_log_wqe_size_calc;
+ u8 max_log_wqe_size_cap;
u16 max_wqe_size;
/* Keep in sync with MLX5_MPWRQ_MAX_PAGES_PER_WQE. */
max_wqe_size = mlx5e_get_max_sq_aligned_wqebbs(mdev) * MLX5_SEND_WQE_BB;
max_pages_per_wqe = ALIGN_DOWN(max_wqe_size - sizeof(struct mlx5e_umr_wqe),
MLX5_UMR_FLEX_ALIGNMENT) / umr_entry_size;
- max_log_mpwqe_size = ilog2(max_pages_per_wqe) + page_shift;
+ max_log_wqe_size_calc = ilog2(max_pages_per_wqe) + page_shift;
+
+ WARN_ON_ONCE(max_log_wqe_size_calc < MLX5E_ORDER2_MAX_PACKET_MTU);
- WARN_ON_ONCE(max_log_mpwqe_size < MLX5E_ORDER2_MAX_PACKET_MTU);
+ max_log_wqe_size_cap = mlx5_core_is_ecpf(mdev) ?
+ MLX5_REP_MPWRQ_MAX_LOG_WQE_SZ : MLX5_MPWRQ_MAX_LOG_WQE_SZ;
- return min_t(u8, max_log_mpwqe_size, MLX5_MPWRQ_MAX_LOG_WQE_SZ);
+ return min_t(u8, max_log_wqe_size_calc, max_log_wqe_size_cap);
}
u8 mlx5e_mpwrq_pages_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift,
@@ -423,7 +430,7 @@ u8 mlx5e_shampo_get_log_pkt_per_rsrv(struct mlx5_core_dev *mdev,
struct mlx5e_params *params)
{
u32 resrv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) *
- PAGE_SIZE;
+ MLX5E_SHAMPO_WQ_BASE_RESRV_SIZE;
return order_base_2(DIV_ROUND_UP(resrv_size, params->sw_mtu));
}
@@ -827,7 +834,8 @@ static u32 mlx5e_shampo_get_log_cq_size(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk)
{
- int rsrv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) * PAGE_SIZE;
+ int rsrv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) *
+ MLX5E_SHAMPO_WQ_BASE_RESRV_SIZE;
u16 num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk));
int pkt_per_rsrv = BIT(mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params));
u8 log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
@@ -1036,7 +1044,8 @@ u32 mlx5e_shampo_hd_per_wqe(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_rq_param *rq_param)
{
- int resv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) * PAGE_SIZE;
+ int resv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) *
+ MLX5E_SHAMPO_WQ_BASE_RESRV_SIZE;
u16 num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, NULL));
int pkt_per_resv = BIT(mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params));
u8 log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, NULL);
@@ -1240,7 +1249,6 @@ void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
mlx5e_build_sq_param_common(mdev, param);
MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE);
- param->is_xdp_mb = !mlx5e_rx_is_linear_skb(mdev, params, xsk);
mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
index 3f8986f9d862..bd5877acc5b1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
@@ -33,7 +33,6 @@ struct mlx5e_sq_param {
struct mlx5_wq_param wq;
bool is_mpw;
bool is_tls;
- bool is_xdp_mb;
u16 stop_room;
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
index 5f6a0605e4ae..6049ccf475bc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
@@ -80,6 +80,7 @@ int mlx5_port_set_eth_ptys(struct mlx5_core_dev *dev, bool an_disable,
int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
{
struct mlx5_port_eth_proto eproto;
+ const struct mlx5_link_info *info;
bool force_legacy = false;
bool ext;
int err;
@@ -94,9 +95,13 @@ int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
if (err)
goto out;
}
- *speed = mlx5_port_ptys2speed(mdev, eproto.oper, force_legacy);
- if (!(*speed))
+ info = mlx5_port_ptys2info(mdev, eproto.oper, force_legacy);
+ if (!info) {
+ *speed = SPEED_UNKNOWN;
err = -EINVAL;
+ goto out;
+ }
+ *speed = info->speed;
out:
return err;
@@ -296,11 +301,16 @@ enum mlx5e_fec_supported_link_mode {
MLX5E_FEC_SUPPORTED_LINK_MODE_200G_2X,
MLX5E_FEC_SUPPORTED_LINK_MODE_400G_4X,
MLX5E_FEC_SUPPORTED_LINK_MODE_800G_8X,
+ MLX5E_FEC_SUPPORTED_LINK_MODE_200G_1X,
+ MLX5E_FEC_SUPPORTED_LINK_MODE_400G_2X,
+ MLX5E_FEC_SUPPORTED_LINK_MODE_800G_4X,
+ MLX5E_FEC_SUPPORTED_LINK_MODE_1600G_8X,
MLX5E_MAX_FEC_SUPPORTED_LINK_MODE,
};
#define MLX5E_FEC_FIRST_50G_PER_LANE_MODE MLX5E_FEC_SUPPORTED_LINK_MODE_50G_1X
#define MLX5E_FEC_FIRST_100G_PER_LANE_MODE MLX5E_FEC_SUPPORTED_LINK_MODE_100G_1X
+#define MLX5E_FEC_FIRST_200G_PER_LANE_MODE MLX5E_FEC_SUPPORTED_LINK_MODE_200G_1X
#define MLX5E_FEC_OVERRIDE_ADMIN_POLICY(buf, policy, write, link) \
do { \
@@ -320,8 +330,10 @@ static bool mlx5e_is_fec_supported_link_mode(struct mlx5_core_dev *dev,
return link_mode < MLX5E_FEC_FIRST_50G_PER_LANE_MODE ||
(link_mode < MLX5E_FEC_FIRST_100G_PER_LANE_MODE &&
MLX5_CAP_PCAM_FEATURE(dev, fec_50G_per_lane_in_pplm)) ||
- (link_mode >= MLX5E_FEC_FIRST_100G_PER_LANE_MODE &&
- MLX5_CAP_PCAM_FEATURE(dev, fec_100G_per_lane_in_pplm));
+ (link_mode < MLX5E_FEC_FIRST_200G_PER_LANE_MODE &&
+ MLX5_CAP_PCAM_FEATURE(dev, fec_100G_per_lane_in_pplm)) ||
+ (link_mode >= MLX5E_FEC_FIRST_200G_PER_LANE_MODE &&
+ MLX5_CAP_PCAM_FEATURE(dev, fec_200G_per_lane_in_pplm));
}
/* get/set FEC admin field for a given speed */
@@ -368,6 +380,18 @@ static int mlx5e_fec_admin_field(u32 *pplm, u16 *fec_policy, bool write,
case MLX5E_FEC_SUPPORTED_LINK_MODE_800G_8X:
MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 800g_8x);
break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_200G_1X:
+ MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 200g_1x);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_400G_2X:
+ MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 400g_2x);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_800G_4X:
+ MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 800g_4x);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_1600G_8X:
+ MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 1600g_8x);
+ break;
default:
return -EINVAL;
}
@@ -421,6 +445,18 @@ static int mlx5e_get_fec_cap_field(u32 *pplm, u16 *fec_cap,
case MLX5E_FEC_SUPPORTED_LINK_MODE_800G_8X:
*fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 800g_8x);
break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_200G_1X:
+ *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 200g_1x);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_400G_2X:
+ *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 400g_2x);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_800G_4X:
+ *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 800g_4x);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_1600G_8X:
+ *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 1600g_8x);
+ break;
default:
return -EINVAL;
}
@@ -494,6 +530,26 @@ out:
return 0;
}
+static u16 mlx5e_remap_fec_conf_mode(enum mlx5e_fec_supported_link_mode link_mode,
+ u16 conf_fec)
+{
+ /* RS FEC in ethtool is originally mapped to MLX5E_FEC_RS_528_514.
+ * For link modes up to 25G per lane, the value is kept.
+ * For 50G or 100G per lane, it is remapped to MLX5E_FEC_RS_544_514.
+ * For 200G per lane, it is remapped to MLX5E_FEC_RS_544_514_INTERLEAVED_QUAD.
+ */
+ if (conf_fec != BIT(MLX5E_FEC_RS_528_514))
+ return conf_fec;
+
+ if (link_mode >= MLX5E_FEC_FIRST_200G_PER_LANE_MODE)
+ return BIT(MLX5E_FEC_RS_544_514_INTERLEAVED_QUAD);
+
+ if (link_mode >= MLX5E_FEC_FIRST_50G_PER_LANE_MODE)
+ return BIT(MLX5E_FEC_RS_544_514);
+
+ return conf_fec;
+}
+
int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u16 fec_policy)
{
bool fec_50g_per_lane = MLX5_CAP_PCAM_FEATURE(dev, fec_50G_per_lane_in_pplm);
@@ -530,14 +586,7 @@ int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u16 fec_policy)
if (!mlx5e_is_fec_supported_link_mode(dev, i))
break;
- /* RS fec in ethtool is mapped to MLX5E_FEC_RS_528_514
- * to link modes up to 25G per lane and to
- * MLX5E_FEC_RS_544_514 in the new link modes based on
- * 50G or 100G per lane
- */
- if (conf_fec == (1 << MLX5E_FEC_RS_528_514) &&
- i >= MLX5E_FEC_FIRST_50G_PER_LANE_MODE)
- conf_fec = (1 << MLX5E_FEC_RS_544_514);
+ conf_fec = mlx5e_remap_fec_conf_mode(i, conf_fec);
mlx5e_get_fec_cap_field(out, &fec_caps, i);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.h b/drivers/net/ethernet/mellanox/mlx5/core/en/port.h
index d1da225f35da..fa2283dd383b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.h
@@ -61,6 +61,7 @@ enum {
MLX5E_FEC_NOFEC,
MLX5E_FEC_FIRECODE,
MLX5E_FEC_RS_528_514,
+ MLX5E_FEC_RS_544_514_INTERLEAVED_QUAD = 4,
MLX5E_FEC_RS_544_514 = 7,
MLX5E_FEC_LLRS_272_257_1 = 9,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
index afd654583b6b..5d0014129a7e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -8,6 +8,7 @@
#include "en/fs_tt_redirect.h"
#include <linux/list.h>
#include <linux/spinlock.h>
+#include <net/netdev_lock.h>
struct mlx5e_ptp_fs {
struct mlx5_flow_handle *l2_rule;
@@ -326,7 +327,7 @@ static int mlx5e_ptp_alloc_txqsq(struct mlx5e_ptp *c, int txq_ix,
int node;
sq->pdev = c->pdev;
- sq->clock = &mdev->clock;
+ sq->clock = mdev->clock;
sq->mkey_be = c->mkey_be;
sq->netdev = c->netdev;
sq->priv = c->priv;
@@ -449,8 +450,22 @@ static void mlx5e_ptpsq_unhealthy_work(struct work_struct *work)
{
struct mlx5e_ptpsq *ptpsq =
container_of(work, struct mlx5e_ptpsq, report_unhealthy_work);
+ struct mlx5e_txqsq *sq = &ptpsq->txqsq;
+
+ /* Recovering the PTP SQ means re-enabling NAPI, which requires the
+ * netdev instance lock. However, SQ closing has to wait for this work
+ * task to finish while also holding the same lock. So either get the
+ * lock or find that the SQ is no longer enabled and thus this work is
+ * not relevant anymore.
+ */
+ while (!netdev_trylock(sq->netdev)) {
+ if (!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state))
+ return;
+ msleep(20);
+ }
mlx5e_reporter_tx_ptpsq_unhealthy(ptpsq);
+ netdev_unlock(sq->netdev);
}
static int mlx5e_ptp_open_txqsq(struct mlx5e_ptp *c, u32 tisn,
@@ -696,7 +711,7 @@ static int mlx5e_init_ptp_rq(struct mlx5e_ptp *c, struct mlx5e_params *params,
rq->pdev = c->pdev;
rq->netdev = priv->netdev;
rq->priv = priv;
- rq->clock = &mdev->clock;
+ rq->clock = mdev->clock;
rq->tstamp = &priv->tstamp;
rq->mdev = mdev;
rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
@@ -892,7 +907,7 @@ int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
if (err)
goto err_free;
- netif_napi_add(netdev, &c->napi, mlx5e_ptp_napi_poll);
+ netif_napi_add_locked(netdev, &c->napi, mlx5e_ptp_napi_poll);
mlx5e_ptp_build_params(c, cparams, params);
@@ -910,7 +925,7 @@ int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
return 0;
err_napi_del:
- netif_napi_del(&c->napi);
+ netif_napi_del_locked(&c->napi);
err_free:
kvfree(cparams);
kvfree(c);
@@ -920,7 +935,7 @@ err_free:
void mlx5e_ptp_close(struct mlx5e_ptp *c)
{
mlx5e_ptp_close_queues(c);
- netif_napi_del(&c->napi);
+ netif_napi_del_locked(&c->napi);
kvfree(c);
}
@@ -929,7 +944,7 @@ void mlx5e_ptp_activate_channel(struct mlx5e_ptp *c)
{
int tc;
- napi_enable(&c->napi);
+ napi_enable_locked(&c->napi);
if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
for (tc = 0; tc < c->num_tc; tc++)
@@ -957,7 +972,7 @@ void mlx5e_ptp_deactivate_channel(struct mlx5e_ptp *c)
mlx5e_deactivate_txqsq(&c->ptpsq[tc].txqsq);
}
- napi_disable(&c->napi);
+ napi_disable_locked(&c->napi);
}
int mlx5e_ptp_get_rqn(struct mlx5e_ptp *c, u32 *rqn)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
index 5d128c5b4529..0f5d7ea8956f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
@@ -48,15 +48,10 @@ mlx5_esw_bridge_lag_rep_get(struct net_device *dev, struct mlx5_eswitch *esw)
struct list_head *iter;
netdev_for_each_lower_dev(dev, lower, iter) {
- struct mlx5_core_dev *mdev;
- struct mlx5e_priv *priv;
-
if (!mlx5e_eswitch_rep(lower))
continue;
- priv = netdev_priv(lower);
- mdev = priv->mdev;
- if (mlx5_lag_is_shared_fdb(mdev) && mlx5_esw_bridge_dev_same_esw(lower, esw))
+ if (mlx5_esw_bridge_dev_same_esw(lower, esw))
return lower;
}
@@ -125,7 +120,7 @@ static bool mlx5_esw_bridge_is_local(struct net_device *dev, struct net_device *
priv = netdev_priv(rep);
mdev = priv->mdev;
if (netif_is_lag_master(dev))
- return mlx5_lag_is_shared_fdb(mdev) && mlx5_lag_is_master(mdev);
+ return mlx5_lag_is_master(mdev);
return true;
}
@@ -455,6 +450,9 @@ static int mlx5_esw_bridge_switchdev_event(struct notifier_block *nb,
if (!rep)
return NOTIFY_DONE;
+ if (netif_is_lag_master(dev) && !mlx5_lag_is_shared_fdb(esw->dev))
+ return NOTIFY_DONE;
+
switch (event) {
case SWITCHDEV_FDB_ADD_TO_BRIDGE:
fdb_info = container_of(info,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
index 25d751eba99b..e75759533ae0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
@@ -317,10 +317,8 @@ mlx5e_rx_reporter_diagnose_common_ptp_config(struct mlx5e_priv *priv, struct mlx
}
static void
-mlx5e_rx_reporter_diagnose_common_config(struct devlink_health_reporter *reporter,
- struct devlink_fmsg *fmsg)
+mlx5e_rx_reporter_diagnose_common_config(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg)
{
- struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
struct mlx5e_rq *generic_rq = &priv->channels.c[0]->rq;
struct mlx5e_ptp *ptp_ch = priv->channels.ptp;
@@ -340,20 +338,100 @@ static void mlx5e_rx_reporter_build_diagnose_output_ptp_rq(struct mlx5e_rq *rq,
devlink_fmsg_obj_nest_end(fmsg);
}
-static int mlx5e_rx_reporter_diagnose(struct devlink_health_reporter *reporter,
- struct devlink_fmsg *fmsg,
- struct netlink_ext_ack *extack)
+static void mlx5e_rx_reporter_diagnose_rx_res_dir_tirns(struct mlx5e_rx_res *rx_res,
+ struct devlink_fmsg *fmsg)
{
- struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
- struct mlx5e_ptp *ptp_ch = priv->channels.ptp;
+ unsigned int max_nch = mlx5e_rx_res_get_max_nch(rx_res);
int i;
- mutex_lock(&priv->state_lock);
+ devlink_fmsg_arr_pair_nest_start(fmsg, "Direct TIRs");
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
- goto unlock;
+ for (i = 0; i < max_nch; i++) {
+ devlink_fmsg_obj_nest_start(fmsg);
+
+ devlink_fmsg_u32_pair_put(fmsg, "ix", i);
+ devlink_fmsg_u32_pair_put(fmsg, "tirn", mlx5e_rx_res_get_tirn_direct(rx_res, i));
+ devlink_fmsg_u32_pair_put(fmsg, "rqtn", mlx5e_rx_res_get_rqtn_direct(rx_res, i));
+
+ devlink_fmsg_obj_nest_end(fmsg);
+ }
+
+ devlink_fmsg_arr_pair_nest_end(fmsg);
+}
+
+static void mlx5e_rx_reporter_diagnose_rx_res_rss_tirn(struct mlx5e_rss *rss, bool inner,
+ struct devlink_fmsg *fmsg)
+{
+ bool found_valid_tir = false;
+ int tt;
+
+ for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
+ if (!mlx5e_rss_valid_tir(rss, tt, inner))
+ continue;
+
+ if (!found_valid_tir) {
+ char *tir_msg = inner ? "Inner TIRs Numbers" : "TIRs Numbers";
+
+ found_valid_tir = true;
+ devlink_fmsg_arr_pair_nest_start(fmsg, tir_msg);
+ }
+
+ devlink_fmsg_obj_nest_start(fmsg);
+ devlink_fmsg_string_pair_put(fmsg, "tt", mlx5_ttc_get_name(tt));
+ devlink_fmsg_u32_pair_put(fmsg, "tirn", mlx5e_rss_get_tirn(rss, tt, inner));
+ devlink_fmsg_obj_nest_end(fmsg);
+ }
+
+ if (found_valid_tir)
+ devlink_fmsg_arr_pair_nest_end(fmsg);
+}
+
+static void mlx5e_rx_reporter_diagnose_rx_res_rss_ix(struct mlx5e_rx_res *rx_res, u32 rss_idx,
+ struct devlink_fmsg *fmsg)
+{
+ struct mlx5e_rss *rss = mlx5e_rx_res_rss_get(rx_res, rss_idx);
+
+ if (!rss)
+ return;
+
+ devlink_fmsg_obj_nest_start(fmsg);
+
+ devlink_fmsg_u32_pair_put(fmsg, "Index", rss_idx);
+ devlink_fmsg_u32_pair_put(fmsg, "rqtn", mlx5e_rss_get_rqtn(rss));
+ mlx5e_rx_reporter_diagnose_rx_res_rss_tirn(rss, false, fmsg);
+ if (mlx5e_rss_get_inner_ft_support(rss))
+ mlx5e_rx_reporter_diagnose_rx_res_rss_tirn(rss, true, fmsg);
+
+ devlink_fmsg_obj_nest_end(fmsg);
+}
+
+static void mlx5e_rx_reporter_diagnose_rx_res_rss(struct mlx5e_rx_res *rx_res,
+ struct devlink_fmsg *fmsg)
+{
+ int rss_ix;
+
+ devlink_fmsg_arr_pair_nest_start(fmsg, "RSS");
+ for (rss_ix = 0; rss_ix < MLX5E_MAX_NUM_RSS; rss_ix++)
+ mlx5e_rx_reporter_diagnose_rx_res_rss_ix(rx_res, rss_ix, fmsg);
+ devlink_fmsg_arr_pair_nest_end(fmsg);
+}
+
+static void mlx5e_rx_reporter_diagnose_rx_res(struct mlx5e_priv *priv,
+ struct devlink_fmsg *fmsg)
+{
+ struct mlx5e_rx_res *rx_res = priv->rx_res;
+
+ mlx5e_health_fmsg_named_obj_nest_start(fmsg, "RX resources");
+ mlx5e_rx_reporter_diagnose_rx_res_dir_tirns(rx_res, fmsg);
+ mlx5e_rx_reporter_diagnose_rx_res_rss(rx_res, fmsg);
+ mlx5e_health_fmsg_named_obj_nest_end(fmsg);
+}
+
+static void mlx5e_rx_reporter_diagnose_rqs(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg)
+{
+ struct mlx5e_ptp *ptp_ch = priv->channels.ptp;
+ int i;
- mlx5e_rx_reporter_diagnose_common_config(reporter, fmsg);
devlink_fmsg_arr_pair_nest_start(fmsg, "RQs");
for (i = 0; i < priv->channels.num; i++) {
@@ -367,7 +445,24 @@ static int mlx5e_rx_reporter_diagnose(struct devlink_health_reporter *reporter,
}
if (ptp_ch && test_bit(MLX5E_PTP_STATE_RX, ptp_ch->state))
mlx5e_rx_reporter_build_diagnose_output_ptp_rq(&ptp_ch->rq, fmsg);
+
devlink_fmsg_arr_pair_nest_end(fmsg);
+}
+
+static int mlx5e_rx_reporter_diagnose(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
+
+ mutex_lock(&priv->state_lock);
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ goto unlock;
+
+ mlx5e_rx_reporter_diagnose_common_config(priv, fmsg);
+ mlx5e_rx_reporter_diagnose_rqs(priv, fmsg);
+ mlx5e_rx_reporter_diagnose_rx_res(priv, fmsg);
unlock:
mutex_unlock(&priv->state_lock);
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
index 09433b91be17..c3bda4612fa9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -16,7 +16,6 @@ static const char * const sq_sw_state_type_name[] = {
[MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE] = "vlan_need_l2_inline",
[MLX5E_SQ_STATE_PENDING_XSK_TX] = "pending_xsk_tx",
[MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC] = "pending_tls_rx_resync",
- [MLX5E_SQ_STATE_XDP_MULTIBUF] = "xdp_multibuf",
};
static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq)
@@ -108,9 +107,7 @@ static int mlx5e_tx_reporter_err_cqe_recover(void *ctx)
mlx5e_reset_txqsq_cc_pc(sq);
sq->stats->recover++;
clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
- rtnl_lock();
mlx5e_activate_txqsq(sq);
- rtnl_unlock();
if (sq->channel)
mlx5e_trigger_napi_icosq(sq->channel);
@@ -184,16 +181,12 @@ static int mlx5e_tx_reporter_ptpsq_unhealthy_recover(void *ctx)
carrier_ok = netif_carrier_ok(netdev);
netif_carrier_off(netdev);
- rtnl_lock();
mlx5e_deactivate_priv_channels(priv);
- rtnl_unlock();
mlx5e_ptp_close(chs->ptp);
err = mlx5e_ptp_open(priv, &chs->params, chs->c[0]->lag_port, &chs->ptp);
- rtnl_lock();
mlx5e_activate_priv_channels(priv);
- rtnl_unlock();
/* return carrier back if needed */
if (carrier_ok)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
index 5f742f896600..74cd111ee320 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
@@ -81,6 +81,11 @@ struct mlx5e_rss {
refcount_t refcnt;
};
+bool mlx5e_rss_get_inner_ft_support(struct mlx5e_rss *rss)
+{
+ return rss->inner_ft_support;
+}
+
void mlx5e_rss_params_indir_modify_actual_size(struct mlx5e_rss *rss, u32 num_channels)
{
rss->indir.actual_table_size = mlx5e_rqt_size(rss->mdev, num_channels);
@@ -156,6 +161,7 @@ static void mlx5e_rss_params_init(struct mlx5e_rss *rss)
{
enum mlx5_traffic_types tt;
+ rss->hash.symmetric = true;
rss->hash.hfunc = ETH_RSS_HASH_TOP;
netdev_rss_key_fill(rss->hash.toeplitz_hash_key,
sizeof(rss->hash.toeplitz_hash_key));
@@ -449,6 +455,16 @@ u32 mlx5e_rss_get_tirn(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
return mlx5e_tir_get_tirn(tir);
}
+u32 mlx5e_rss_get_rqtn(struct mlx5e_rss *rss)
+{
+ return mlx5e_rqt_get_rqtn(&rss->rqt);
+}
+
+bool mlx5e_rss_valid_tir(struct mlx5e_rss *rss, enum mlx5_traffic_types tt, bool inner)
+{
+ return !!rss_get_tir(rss, tt, inner);
+}
+
/* Fill the "tirn" output parameter.
* Create the requested TIR if it's its first usage.
*/
@@ -551,7 +567,7 @@ inner_tir:
return final_err;
}
-int mlx5e_rss_get_rxfh(struct mlx5e_rss *rss, u32 *indir, u8 *key, u8 *hfunc)
+int mlx5e_rss_get_rxfh(struct mlx5e_rss *rss, u32 *indir, u8 *key, u8 *hfunc, bool *symmetric)
{
if (indir)
memcpy(indir, rss->indir.table,
@@ -564,11 +580,14 @@ int mlx5e_rss_get_rxfh(struct mlx5e_rss *rss, u32 *indir, u8 *key, u8 *hfunc)
if (hfunc)
*hfunc = rss->hash.hfunc;
+ if (symmetric)
+ *symmetric = rss->hash.symmetric;
+
return 0;
}
int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir,
- const u8 *key, const u8 *hfunc,
+ const u8 *key, const u8 *hfunc, const bool *symmetric,
u32 *rqns, u32 *vhca_ids, unsigned int num_rqns)
{
bool changed_indir = false;
@@ -608,6 +627,11 @@ int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir,
rss->indir.actual_table_size * sizeof(*rss->indir.table));
}
+ if (symmetric) {
+ rss->hash.symmetric = *symmetric;
+ changed_hash = true;
+ }
+
if (changed_indir && rss->enabled) {
err = mlx5e_rss_apply(rss, rqns, vhca_ids, num_rqns);
if (err) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h
index d0df98963c8d..8ac902190010 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h
@@ -32,8 +32,11 @@ void mlx5e_rss_refcnt_inc(struct mlx5e_rss *rss);
void mlx5e_rss_refcnt_dec(struct mlx5e_rss *rss);
unsigned int mlx5e_rss_refcnt_read(struct mlx5e_rss *rss);
+bool mlx5e_rss_get_inner_ft_support(struct mlx5e_rss *rss);
u32 mlx5e_rss_get_tirn(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
bool inner);
+bool mlx5e_rss_valid_tir(struct mlx5e_rss *rss, enum mlx5_traffic_types tt, bool inner);
+u32 mlx5e_rss_get_rqtn(struct mlx5e_rss *rss);
int mlx5e_rss_obtain_tirn(struct mlx5e_rss *rss,
enum mlx5_traffic_types tt,
const struct mlx5e_packet_merge_param *init_pkt_merge_param,
@@ -44,9 +47,9 @@ void mlx5e_rss_disable(struct mlx5e_rss *rss);
int mlx5e_rss_packet_merge_set_param(struct mlx5e_rss *rss,
struct mlx5e_packet_merge_param *pkt_merge_param);
-int mlx5e_rss_get_rxfh(struct mlx5e_rss *rss, u32 *indir, u8 *key, u8 *hfunc);
+int mlx5e_rss_get_rxfh(struct mlx5e_rss *rss, u32 *indir, u8 *key, u8 *hfunc, bool *symmetric);
int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir,
- const u8 *key, const u8 *hfunc,
+ const u8 *key, const u8 *hfunc, const bool *symmetric,
u32 *rqns, u32 *vhca_ids, unsigned int num_rqns);
struct mlx5e_rss_params_hash mlx5e_rss_get_hash(struct mlx5e_rss *rss);
u8 mlx5e_rss_get_hash_fields(struct mlx5e_rss *rss, enum mlx5_traffic_types tt);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
index a86eade9a9e0..5fcbe47337b0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
@@ -5,8 +5,6 @@
#include "channels.h"
#include "params.h"
-#define MLX5E_MAX_NUM_RSS 16
-
struct mlx5e_rx_res {
struct mlx5_core_dev *mdev; /* primary */
enum mlx5e_rx_res_features features;
@@ -196,7 +194,7 @@ void mlx5e_rx_res_rss_set_indir_uniform(struct mlx5e_rx_res *res, unsigned int n
}
int mlx5e_rx_res_rss_get_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
- u32 *indir, u8 *key, u8 *hfunc)
+ u32 *indir, u8 *key, u8 *hfunc, bool *symmetric)
{
struct mlx5e_rss *rss;
@@ -207,11 +205,12 @@ int mlx5e_rx_res_rss_get_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
if (!rss)
return -ENOENT;
- return mlx5e_rss_get_rxfh(rss, indir, key, hfunc);
+ return mlx5e_rss_get_rxfh(rss, indir, key, hfunc, symmetric);
}
int mlx5e_rx_res_rss_set_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
- const u32 *indir, const u8 *key, const u8 *hfunc)
+ const u32 *indir, const u8 *key, const u8 *hfunc,
+ const bool *symmetric)
{
u32 *vhca_ids = get_vhca_ids(res, 0);
struct mlx5e_rss *rss;
@@ -223,8 +222,8 @@ int mlx5e_rx_res_rss_set_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
if (!rss)
return -ENOENT;
- return mlx5e_rss_set_rxfh(rss, indir, key, hfunc, res->rss_rqns, vhca_ids,
- res->rss_nch);
+ return mlx5e_rss_set_rxfh(rss, indir, key, hfunc, symmetric,
+ res->rss_rqns, vhca_ids, res->rss_nch);
}
int mlx5e_rx_res_rss_get_hash_fields(struct mlx5e_rx_res *res, u32 rss_idx,
@@ -497,6 +496,11 @@ void mlx5e_rx_res_destroy(struct mlx5e_rx_res *res)
mlx5e_rx_res_free(res);
}
+unsigned int mlx5e_rx_res_get_max_nch(struct mlx5e_rx_res *res)
+{
+ return res->max_nch;
+}
+
u32 mlx5e_rx_res_get_tirn_direct(struct mlx5e_rx_res *res, unsigned int ix)
{
return mlx5e_tir_get_tirn(&res->channels[ix].direct_tir);
@@ -522,7 +526,7 @@ u32 mlx5e_rx_res_get_tirn_ptp(struct mlx5e_rx_res *res)
return mlx5e_tir_get_tirn(&res->ptp.tir);
}
-static u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int ix)
+u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int ix)
{
return mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
index 7b1a9f0f1874..3e09d91281af 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
@@ -10,6 +10,8 @@
#include "fs.h"
#include "rss.h"
+#define MLX5E_MAX_NUM_RSS 16
+
struct mlx5e_rx_res;
struct mlx5e_channels;
@@ -34,6 +36,9 @@ u32 mlx5e_rx_res_get_tirn_direct(struct mlx5e_rx_res *res, unsigned int ix);
u32 mlx5e_rx_res_get_tirn_rss(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt);
u32 mlx5e_rx_res_get_tirn_rss_inner(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt);
u32 mlx5e_rx_res_get_tirn_ptp(struct mlx5e_rx_res *res);
+u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int ix);
+unsigned int mlx5e_rx_res_get_max_nch(struct mlx5e_rx_res *res);
+bool mlx5_rx_res_rss_inner_ft_support(struct mlx5e_rx_res *res);
/* Activate/deactivate API */
void mlx5e_rx_res_channels_activate(struct mlx5e_rx_res *res, struct mlx5e_channels *chs);
@@ -44,9 +49,10 @@ void mlx5e_rx_res_xsk_update(struct mlx5e_rx_res *res, struct mlx5e_channels *ch
/* Configuration API */
void mlx5e_rx_res_rss_set_indir_uniform(struct mlx5e_rx_res *res, unsigned int nch);
int mlx5e_rx_res_rss_get_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
- u32 *indir, u8 *key, u8 *hfunc);
+ u32 *indir, u8 *key, u8 *hfunc, bool *symmetric);
int mlx5e_rx_res_rss_set_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
- const u32 *indir, const u8 *key, const u8 *hfunc);
+ const u32 *indir, const u8 *key, const u8 *hfunc,
+ const bool *symmetric);
int mlx5e_rx_res_rss_get_hash_fields(struct mlx5e_rx_res *res, u32 rss_idx,
enum mlx5_traffic_types tt);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
index d6c12d0ea55b..2e528b2c34d6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
@@ -73,11 +73,6 @@ struct mlx5e_tc_act {
bool is_terminating_action;
};
-struct mlx5e_tc_flow_action {
- unsigned int num_entries;
- struct flow_action_entry **entries;
-};
-
extern struct mlx5e_tc_act mlx5e_tc_act_drop;
extern struct mlx5e_tc_act mlx5e_tc_act_trap;
extern struct mlx5e_tc_act mlx5e_tc_act_accept;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c
index feeb41693c17..b6cabe829f19 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c
@@ -5,6 +5,16 @@
#include "en/tc_priv.h"
#include "en/tc_ct.h"
+static bool
+tc_act_can_offload_ct(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ int act_index,
+ struct mlx5_flow_attr *attr)
+{
+ return !((act->ct.action & TCA_CT_ACT_COMMIT) &&
+ flow_action_is_last_entry(parse_state->flow_action, act));
+}
+
static int
tc_act_parse_ct(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
@@ -56,6 +66,7 @@ tc_act_is_missable_ct(const struct flow_action_entry *act)
}
struct mlx5e_tc_act mlx5e_tc_act_ct = {
+ .can_offload = tc_act_can_offload_ct,
.parse_action = tc_act_parse_ct,
.post_parse = tc_act_post_parse_ct,
.is_multi_table_act = tc_act_is_multi_table_act_ct,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h
index 62b3f7ff5562..e5b30801314b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h
@@ -48,4 +48,14 @@ mlx5_ct_fs_smfs_ops_get(void)
}
#endif /* IS_ENABLED(CONFIG_MLX5_SW_STEERING) */
+#if IS_ENABLED(CONFIG_MLX5_HW_STEERING)
+struct mlx5_ct_fs_ops *mlx5_ct_fs_hmfs_ops_get(void);
+#else
+static inline struct mlx5_ct_fs_ops *
+mlx5_ct_fs_hmfs_ops_get(void)
+{
+ return NULL;
+}
+#endif /* IS_ENABLED(CONFIG_MLX5_HW_STEERING) */
+
#endif /* __MLX5_EN_TC_CT_FS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_hmfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_hmfs.c
new file mode 100644
index 000000000000..a4263137fef5
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_hmfs.c
@@ -0,0 +1,292 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. */
+
+#include "en_tc.h"
+#include "en/tc_ct.h"
+#include "en/tc_priv.h"
+#include "en/tc/ct_fs.h"
+#include "fs_core.h"
+#include "steering/hws/fs_hws_pools.h"
+#include "steering/hws/mlx5hws.h"
+#include "steering/hws/table.h"
+
+struct mlx5_ct_fs_hmfs_matcher {
+ struct mlx5hws_bwc_matcher *hws_bwc_matcher;
+ refcount_t ref;
+};
+
+/* We need {ipv4, ipv6} x {tcp, udp, gre} matchers. */
+#define NUM_MATCHERS (2 * 3)
+
+struct mlx5_ct_fs_hmfs {
+ struct mlx5hws_table *ct_tbl;
+ struct mlx5hws_table *ct_nat_tbl;
+ struct mlx5_flow_table *ct_nat;
+ struct mlx5hws_action *fwd_action;
+ struct mlx5hws_action *last_action;
+ struct mlx5hws_context *ctx;
+ struct mutex lock; /* Guards matchers */
+ struct mlx5_ct_fs_hmfs_matcher matchers[NUM_MATCHERS];
+ struct mlx5_ct_fs_hmfs_matcher matchers_nat[NUM_MATCHERS];
+};
+
+struct mlx5_ct_fs_hmfs_rule {
+ struct mlx5_ct_fs_rule fs_rule;
+ struct mlx5hws_bwc_rule *hws_bwc_rule;
+ struct mlx5_ct_fs_hmfs_matcher *hmfs_matcher;
+ struct mlx5_fc *counter;
+};
+
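+/* Map (ipv4, tcp, gre) to a matcher slot: indices 0-2 cover IPv6
+ * {udp, gre, tcp}, indices 3-5 cover IPv4. tcp and gre are mutually
+ * exclusive, so all six combinations fit within NUM_MATCHERS.
+ */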
+static u32 get_matcher_idx(bool ipv4, bool tcp, bool gre)
+{
+ return ipv4 * 3 + tcp * 2 + gre;
+}
+
+static int mlx5_ct_fs_hmfs_init(struct mlx5_ct_fs *fs, struct mlx5_flow_table *ct,
+ struct mlx5_flow_table *ct_nat, struct mlx5_flow_table *post_ct)
+{
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
+ struct mlx5hws_table *ct_tbl, *ct_nat_tbl, *post_ct_tbl;
+ struct mlx5_ct_fs_hmfs *fs_hmfs = mlx5_ct_fs_priv(fs);
+
+ ct_tbl = ct->fs_hws_table.hws_table;
+ ct_nat_tbl = ct_nat->fs_hws_table.hws_table;
+ post_ct_tbl = post_ct->fs_hws_table.hws_table;
+ fs_hmfs->ct_nat = ct_nat;
+
+ if (!ct_tbl || !ct_nat_tbl || !post_ct_tbl) {
+ netdev_warn(fs->netdev, "ct_fs_hmfs: failed to init, missing backing hws tables\n");
+ return -EOPNOTSUPP;
+ }
+
+ netdev_dbg(fs->netdev, "using hmfs steering");
+
+ fs_hmfs->ct_tbl = ct_tbl;
+ fs_hmfs->ct_nat_tbl = ct_nat_tbl;
+ fs_hmfs->ctx = ct_tbl->ctx;
+ mutex_init(&fs_hmfs->lock);
+
+ fs_hmfs->fwd_action = mlx5hws_action_create_dest_table(ct_tbl->ctx, post_ct_tbl, flags);
+ if (!fs_hmfs->fwd_action) {
+ netdev_warn(fs->netdev, "ct_fs_hmfs: failed to create fwd action\n");
+ return -EINVAL;
+ }
+ fs_hmfs->last_action = mlx5hws_action_create_last(ct_tbl->ctx, flags);
+ if (!fs_hmfs->last_action) {
+ netdev_warn(fs->netdev, "ct_fs_hmfs: failed to create last action\n");
+ mlx5hws_action_destroy(fs_hmfs->fwd_action);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void mlx5_ct_fs_hmfs_destroy(struct mlx5_ct_fs *fs)
+{
+ struct mlx5_ct_fs_hmfs *fs_hmfs = mlx5_ct_fs_priv(fs);
+
+ mlx5hws_action_destroy(fs_hmfs->last_action);
+ mlx5hws_action_destroy(fs_hmfs->fwd_action);
+}
+
+static struct mlx5hws_bwc_matcher *
+mlx5_ct_fs_hmfs_matcher_create(struct mlx5_ct_fs *fs, struct mlx5hws_table *tbl,
+ struct mlx5_flow_spec *spec, bool ipv4, bool tcp, bool gre)
+{
+ u8 match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2 | MLX5_MATCH_OUTER_HEADERS;
+ struct mlx5hws_match_parameters mask = {
+ .match_buf = spec->match_criteria,
+ .match_sz = sizeof(spec->match_criteria),
+ };
+ u32 priority = get_matcher_idx(ipv4, tcp, gre); /* Static priority based on params. */
+ struct mlx5hws_bwc_matcher *hws_bwc_matcher;
+
+ hws_bwc_matcher = mlx5hws_bwc_matcher_create(tbl, priority, match_criteria_enable, &mask);
+ if (!hws_bwc_matcher)
+ return ERR_PTR(-EINVAL);
+
+ return hws_bwc_matcher;
+}
+
+static struct mlx5_ct_fs_hmfs_matcher *
+mlx5_ct_fs_hmfs_matcher_get(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec,
+ bool nat, bool ipv4, bool tcp, bool gre)
+{
+ struct mlx5_ct_fs_hmfs *fs_hmfs = mlx5_ct_fs_priv(fs);
+ u32 matcher_idx = get_matcher_idx(ipv4, tcp, gre);
+ struct mlx5_ct_fs_hmfs_matcher *hmfs_matcher;
+ struct mlx5hws_bwc_matcher *hws_bwc_matcher;
+ struct mlx5hws_table *tbl;
+
+ hmfs_matcher = nat ?
+ (fs_hmfs->matchers_nat + matcher_idx) :
+ (fs_hmfs->matchers + matcher_idx);
+
+ if (refcount_inc_not_zero(&hmfs_matcher->ref))
+ return hmfs_matcher;
+
+ mutex_lock(&fs_hmfs->lock);
+
+ /* Retry with the lock held, as the matcher might already have been created by another CPU. */
+ if (refcount_inc_not_zero(&hmfs_matcher->ref))
+ goto out_unlock;
+
+ tbl = nat ? fs_hmfs->ct_nat_tbl : fs_hmfs->ct_tbl;
+
+ hws_bwc_matcher = mlx5_ct_fs_hmfs_matcher_create(fs, tbl, spec, ipv4, tcp, gre);
+ if (IS_ERR(hws_bwc_matcher)) {
+ netdev_warn(fs->netdev,
+ "ct_fs_hmfs: failed to create bwc matcher (nat %d, ipv4 %d, tcp %d, gre %d), err: %ld\n",
+ nat, ipv4, tcp, gre, PTR_ERR(hws_bwc_matcher));
+
+ hmfs_matcher = ERR_CAST(hws_bwc_matcher);
+ goto out_unlock;
+ }
+
+ hmfs_matcher->hws_bwc_matcher = hws_bwc_matcher;
+ refcount_set(&hmfs_matcher->ref, 1);
+
+out_unlock:
+ mutex_unlock(&fs_hmfs->lock);
+ return hmfs_matcher;
+}
+
+static void
+mlx5_ct_fs_hmfs_matcher_put(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_hmfs_matcher *hmfs_matcher)
+{
+ struct mlx5_ct_fs_hmfs *fs_hmfs = mlx5_ct_fs_priv(fs);
+
+ if (!refcount_dec_and_mutex_lock(&hmfs_matcher->ref, &fs_hmfs->lock))
+ return;
+
+ mlx5hws_bwc_matcher_destroy(hmfs_matcher->hws_bwc_matcher);
+ mutex_unlock(&fs_hmfs->lock);
+}
+
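+/* Each CT rule programs four HWS actions: flow counter, modify header,
+ * forward to the post_ct table, and the 'last' action.
+ */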
+#define NUM_CT_HMFS_RULES 4
+
+static void mlx5_ct_fs_hmfs_fill_rule_actions(struct mlx5_ct_fs_hmfs *fs_hmfs,
+ struct mlx5_flow_attr *attr,
+ struct mlx5hws_rule_action *rule_actions)
+{
+ struct mlx5_fs_hws_action *mh_action = &attr->modify_hdr->fs_hws_action;
+
+ memset(rule_actions, 0, NUM_CT_HMFS_RULES * sizeof(*rule_actions));
+ rule_actions[0].action = mlx5_fc_get_hws_action(fs_hmfs->ctx, attr->counter);
+ /* Modify header is special: it may require extra arguments outside the action itself. */
+ if (mh_action->mh_data) {
+ rule_actions[1].modify_header.offset = mh_action->mh_data->offset;
+ rule_actions[1].modify_header.data = mh_action->mh_data->data;
+ }
+ rule_actions[1].action = mh_action->hws_action;
+ rule_actions[2].action = fs_hmfs->fwd_action;
+ rule_actions[3].action = fs_hmfs->last_action;
+}
+
+static struct mlx5_ct_fs_rule *
+mlx5_ct_fs_hmfs_ct_rule_add(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec,
+ struct mlx5_flow_attr *attr, struct flow_rule *flow_rule)
+{
+ struct mlx5hws_rule_action rule_actions[NUM_CT_HMFS_RULES];
+ struct mlx5_ct_fs_hmfs *fs_hmfs = mlx5_ct_fs_priv(fs);
+ struct mlx5hws_match_parameters match_params = {
+ .match_buf = spec->match_value,
+ .match_sz = ARRAY_SIZE(spec->match_value),
+ };
+ struct mlx5_ct_fs_hmfs_matcher *hmfs_matcher;
+ struct mlx5_ct_fs_hmfs_rule *hmfs_rule;
+ bool nat, tcp, ipv4, gre;
+ int err;
+
+ if (!mlx5e_tc_ct_is_valid_flow_rule(fs->netdev, flow_rule))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ hmfs_rule = kzalloc(sizeof(*hmfs_rule), GFP_KERNEL);
+ if (!hmfs_rule)
+ return ERR_PTR(-ENOMEM);
+
+ nat = (attr->ft == fs_hmfs->ct_nat);
+ ipv4 = mlx5e_tc_get_ip_version(spec, true) == 4;
+ tcp = MLX5_GET(fte_match_param, spec->match_value,
+ outer_headers.ip_protocol) == IPPROTO_TCP;
+ gre = MLX5_GET(fte_match_param, spec->match_value,
+ outer_headers.ip_protocol) == IPPROTO_GRE;
+
+ hmfs_matcher = mlx5_ct_fs_hmfs_matcher_get(fs, spec, nat, ipv4, tcp, gre);
+ if (IS_ERR(hmfs_matcher)) {
+ err = PTR_ERR(hmfs_matcher);
+ goto err_free_rule;
+ }
+ hmfs_rule->hmfs_matcher = hmfs_matcher;
+
+ mlx5_ct_fs_hmfs_fill_rule_actions(fs_hmfs, attr, rule_actions);
+ hmfs_rule->counter = attr->counter;
+
+ hmfs_rule->hws_bwc_rule =
+ mlx5hws_bwc_rule_create(hmfs_matcher->hws_bwc_matcher, &match_params,
+ spec->flow_context.flow_source, rule_actions);
+ if (!hmfs_rule->hws_bwc_rule) {
+ err = -EINVAL;
+ goto err_put_matcher;
+ }
+
+ return &hmfs_rule->fs_rule;
+
+err_put_matcher:
+ mlx5_fc_put_hws_action(hmfs_rule->counter);
+ mlx5_ct_fs_hmfs_matcher_put(fs, hmfs_matcher);
+err_free_rule:
+ kfree(hmfs_rule);
+ return ERR_PTR(err);
+}
+
+static void mlx5_ct_fs_hmfs_ct_rule_del(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_rule)
+{
+ struct mlx5_ct_fs_hmfs_rule *hmfs_rule = container_of(fs_rule,
+ struct mlx5_ct_fs_hmfs_rule,
+ fs_rule);
+ mlx5hws_bwc_rule_destroy(hmfs_rule->hws_bwc_rule);
+ mlx5_fc_put_hws_action(hmfs_rule->counter);
+ mlx5_ct_fs_hmfs_matcher_put(fs, hmfs_rule->hmfs_matcher);
+ kfree(hmfs_rule);
+}
+
+static int mlx5_ct_fs_hmfs_ct_rule_update(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_rule,
+ struct mlx5_flow_spec *spec, struct mlx5_flow_attr *attr)
+{
+ struct mlx5_ct_fs_hmfs_rule *hmfs_rule = container_of(fs_rule,
+ struct mlx5_ct_fs_hmfs_rule,
+ fs_rule);
+ struct mlx5hws_rule_action rule_actions[NUM_CT_HMFS_RULES];
+ struct mlx5_ct_fs_hmfs *fs_hmfs = mlx5_ct_fs_priv(fs);
+ int err;
+
+ mlx5_ct_fs_hmfs_fill_rule_actions(fs_hmfs, attr, rule_actions);
+
+ err = mlx5hws_bwc_rule_action_update(hmfs_rule->hws_bwc_rule, rule_actions);
+ if (err) {
+ mlx5_fc_put_hws_action(attr->counter);
+ return err;
+ }
+
+ mlx5_fc_put_hws_action(hmfs_rule->counter);
+ hmfs_rule->counter = attr->counter;
+
+ return 0;
+}
+
+static struct mlx5_ct_fs_ops hmfs_ops = {
+ .ct_rule_add = mlx5_ct_fs_hmfs_ct_rule_add,
+ .ct_rule_del = mlx5_ct_fs_hmfs_ct_rule_del,
+ .ct_rule_update = mlx5_ct_fs_hmfs_ct_rule_update,
+
+ .init = mlx5_ct_fs_hmfs_init,
+ .destroy = mlx5_ct_fs_hmfs_destroy,
+
+ .priv_size = sizeof(struct mlx5_ct_fs_hmfs),
+};
+
+struct mlx5_ct_fs_ops *mlx5_ct_fs_hmfs_ops_get(void)
+{
+ return &hmfs_ops;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c
index 45737d039252..0c97c5899904 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c
@@ -13,7 +13,6 @@
#define INIT_ERR_PREFIX "ct_fs_smfs init failed"
#define ct_dbg(fmt, args...)\
netdev_dbg(fs->netdev, "ct_fs_smfs debug: " fmt "\n", ##args)
-#define MLX5_CT_TCP_FLAGS_MASK cpu_to_be16(be32_to_cpu(TCP_FLAG_RST | TCP_FLAG_FIN) >> 16)
struct mlx5_ct_fs_smfs_matcher {
struct mlx5dr_matcher *dr_matcher;
@@ -220,78 +219,6 @@ mlx5_ct_fs_smfs_destroy(struct mlx5_ct_fs *fs)
mlx5_smfs_action_destroy(fs_smfs->fwd_action);
}
-static inline bool
-mlx5_tc_ct_valid_used_dissector_keys(const u64 used_keys)
-{
-#define DISS_BIT(name) BIT_ULL(FLOW_DISSECTOR_KEY_ ## name)
- const u64 basic_keys = DISS_BIT(BASIC) | DISS_BIT(CONTROL) |
- DISS_BIT(META);
- const u64 ipv4_tcp = basic_keys | DISS_BIT(IPV4_ADDRS) |
- DISS_BIT(PORTS) | DISS_BIT(TCP);
- const u64 ipv6_tcp = basic_keys | DISS_BIT(IPV6_ADDRS) |
- DISS_BIT(PORTS) | DISS_BIT(TCP);
- const u64 ipv4_udp = basic_keys | DISS_BIT(IPV4_ADDRS) |
- DISS_BIT(PORTS);
- const u64 ipv6_udp = basic_keys | DISS_BIT(IPV6_ADDRS) |
- DISS_BIT(PORTS);
- const u64 ipv4_gre = basic_keys | DISS_BIT(IPV4_ADDRS);
- const u64 ipv6_gre = basic_keys | DISS_BIT(IPV6_ADDRS);
-
- return (used_keys == ipv4_tcp || used_keys == ipv4_udp || used_keys == ipv6_tcp ||
- used_keys == ipv6_udp || used_keys == ipv4_gre || used_keys == ipv6_gre);
-}
-
-static bool
-mlx5_ct_fs_smfs_ct_validate_flow_rule(struct mlx5_ct_fs *fs, struct flow_rule *flow_rule)
-{
- struct flow_match_ipv4_addrs ipv4_addrs;
- struct flow_match_ipv6_addrs ipv6_addrs;
- struct flow_match_control control;
- struct flow_match_basic basic;
- struct flow_match_ports ports;
- struct flow_match_tcp tcp;
-
- if (!mlx5_tc_ct_valid_used_dissector_keys(flow_rule->match.dissector->used_keys)) {
- ct_dbg("rule uses unexpected dissectors (0x%016llx)",
- flow_rule->match.dissector->used_keys);
- return false;
- }
-
- flow_rule_match_basic(flow_rule, &basic);
- flow_rule_match_control(flow_rule, &control);
- flow_rule_match_ipv4_addrs(flow_rule, &ipv4_addrs);
- flow_rule_match_ipv6_addrs(flow_rule, &ipv6_addrs);
- if (basic.key->ip_proto != IPPROTO_GRE)
- flow_rule_match_ports(flow_rule, &ports);
- if (basic.key->ip_proto == IPPROTO_TCP)
- flow_rule_match_tcp(flow_rule, &tcp);
-
- if (basic.mask->n_proto != htons(0xFFFF) ||
- (basic.key->n_proto != htons(ETH_P_IP) && basic.key->n_proto != htons(ETH_P_IPV6)) ||
- basic.mask->ip_proto != 0xFF ||
- (basic.key->ip_proto != IPPROTO_UDP && basic.key->ip_proto != IPPROTO_TCP &&
- basic.key->ip_proto != IPPROTO_GRE)) {
- ct_dbg("rule uses unexpected basic match (n_proto 0x%04x/0x%04x, ip_proto 0x%02x/0x%02x)",
- ntohs(basic.key->n_proto), ntohs(basic.mask->n_proto),
- basic.key->ip_proto, basic.mask->ip_proto);
- return false;
- }
-
- if (basic.key->ip_proto != IPPROTO_GRE &&
- (ports.mask->src != htons(0xFFFF) || ports.mask->dst != htons(0xFFFF))) {
- ct_dbg("rule uses ports match (src 0x%04x, dst 0x%04x)",
- ports.mask->src, ports.mask->dst);
- return false;
- }
-
- if (basic.key->ip_proto == IPPROTO_TCP && tcp.mask->flags != MLX5_CT_TCP_FLAGS_MASK) {
- ct_dbg("rule uses unexpected tcp match (flags 0x%02x)", tcp.mask->flags);
- return false;
- }
-
- return true;
-}
-
static struct mlx5_ct_fs_rule *
mlx5_ct_fs_smfs_ct_rule_add(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec,
struct mlx5_flow_attr *attr, struct flow_rule *flow_rule)
@@ -304,7 +231,7 @@ mlx5_ct_fs_smfs_ct_rule_add(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec,
int num_actions = 0, err;
bool nat, tcp, ipv4, gre;
- if (!mlx5_ct_fs_smfs_ct_validate_flow_rule(fs, flow_rule))
+ if (!mlx5e_tc_ct_is_valid_flow_rule(fs->netdev, flow_rule))
return ERR_PTR(-EOPNOTSUPP);
smfs_rule = kzalloc(sizeof(*smfs_rule), GFP_KERNEL);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c
index 8218c892b161..7819fb297280 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c
@@ -593,3 +593,8 @@ mlx5e_tc_meter_get_stats(struct mlx5e_flow_meter_handle *meter,
*drops = packets2;
*lastuse = max_t(u64, lastuse1, lastuse2);
}
+
+int mlx5e_flow_meter_get_base_id(struct mlx5e_flow_meter_handle *meter)
+{
+ return meter->meters_obj->base_id;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h
index 9b795cd106bb..d6afb6556875 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h
@@ -72,4 +72,17 @@ void
mlx5e_tc_meter_get_stats(struct mlx5e_flow_meter_handle *meter,
u64 *bytes, u64 *packets, u64 *drops, u64 *lastuse);
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
+
+int mlx5e_flow_meter_get_base_id(struct mlx5e_flow_meter_handle *meter);
+
+#else /* CONFIG_MLX5_CLS_ACT */
+
+static inline int
+mlx5e_flow_meter_get_base_id(struct mlx5e_flow_meter_handle *meter)
+{
+ return 0;
+}
+#endif /* CONFIG_MLX5_CLS_ACT */
+
#endif /* __MLX5_EN_FLOW_METER_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index a84ebac2f011..81332cd4a582 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -1349,6 +1349,32 @@ mlx5_tc_ct_block_flow_offload_stats(struct mlx5_ct_ft *ft,
return 0;
}
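+/* In legacy (non-switchdev) mode, only accept flows whose ingress device,
+ * when matched on, is this CT instance's own netdev.
+ */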
+static bool
+mlx5_tc_ct_filter_legacy_non_nic_flows(struct mlx5_ct_ft *ft,
+ struct flow_cls_offload *flow)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
+ struct mlx5_tc_ct_priv *ct_priv = ft->ct_priv;
+ struct flow_match_meta match;
+ struct net_device *netdev;
+ bool same_dev = false;
+
+ if (!is_mdev_legacy_mode(ct_priv->dev) ||
+ !flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
+ return true;
+
+ flow_rule_match_meta(rule, &match);
+
+ if (!(match.key->ingress_ifindex & match.mask->ingress_ifindex))
+ return true;
+
+ netdev = dev_get_by_index(&init_net, match.key->ingress_ifindex);
+ same_dev = ct_priv->netdev == netdev;
+ dev_put(netdev);
+
+ return same_dev;
+}
+
static int
mlx5_tc_ct_block_flow_offload(enum tc_setup_type type, void *type_data,
void *cb_priv)
@@ -1361,6 +1387,9 @@ mlx5_tc_ct_block_flow_offload(enum tc_setup_type type, void *type_data,
switch (f->command) {
case FLOW_CLS_REPLACE:
+ if (!mlx5_tc_ct_filter_legacy_non_nic_flows(ft, f))
+ return -EOPNOTSUPP;
+
return mlx5_tc_ct_block_flow_offload_add(ft, f);
case FLOW_CLS_DESTROY:
return mlx5_tc_ct_block_flow_offload_del(ft, f);
@@ -2065,10 +2094,19 @@ mlx5_tc_ct_fs_init(struct mlx5_tc_ct_priv *ct_priv)
struct mlx5_ct_fs_ops *fs_ops = mlx5_ct_fs_dmfs_ops_get();
int err;
- if (ct_priv->ns_type == MLX5_FLOW_NAMESPACE_FDB &&
- ct_priv->dev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_SMFS) {
- ct_dbg("Using SMFS ct flow steering provider");
- fs_ops = mlx5_ct_fs_smfs_ops_get();
+ if (ct_priv->ns_type == MLX5_FLOW_NAMESPACE_FDB) {
+ if (ct_priv->dev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_HMFS) {
+ ct_dbg("Using HMFS ct flow steering provider");
+ fs_ops = mlx5_ct_fs_hmfs_ops_get();
+ } else if (ct_priv->dev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_SMFS) {
+ ct_dbg("Using SMFS ct flow steering provider");
+ fs_ops = mlx5_ct_fs_smfs_ops_get();
+ }
+
+ if (!fs_ops) {
+ ct_dbg("Requested flow steering mode is not enabled.");
+ return -EOPNOTSUPP;
+ }
}
ct_priv->fs = kzalloc(sizeof(*ct_priv->fs) + fs_ops->priv_size, GFP_KERNEL);
@@ -2421,3 +2459,74 @@ out_inc_drop:
atomic_inc(&ct_priv->debugfs.stats.rx_dropped);
return false;
}
+
+static bool mlx5e_tc_ct_valid_used_dissector_keys(const u64 used_keys)
+{
+#define DISS_BIT(name) BIT_ULL(FLOW_DISSECTOR_KEY_ ## name)
+ const u64 basic_keys = DISS_BIT(BASIC) | DISS_BIT(CONTROL) |
+ DISS_BIT(META);
+ const u64 ipv4_tcp = basic_keys | DISS_BIT(IPV4_ADDRS) |
+ DISS_BIT(PORTS) | DISS_BIT(TCP);
+ const u64 ipv6_tcp = basic_keys | DISS_BIT(IPV6_ADDRS) |
+ DISS_BIT(PORTS) | DISS_BIT(TCP);
+ const u64 ipv4_udp = basic_keys | DISS_BIT(IPV4_ADDRS) |
+ DISS_BIT(PORTS);
+ const u64 ipv6_udp = basic_keys | DISS_BIT(IPV6_ADDRS) |
+ DISS_BIT(PORTS);
+ const u64 ipv4_gre = basic_keys | DISS_BIT(IPV4_ADDRS);
+ const u64 ipv6_gre = basic_keys | DISS_BIT(IPV6_ADDRS);
+
+ return (used_keys == ipv4_tcp || used_keys == ipv4_udp || used_keys == ipv6_tcp ||
+ used_keys == ipv6_udp || used_keys == ipv4_gre || used_keys == ipv6_gre);
+}
+
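+/* Check that the flow rule only uses dissector keys and masks that the
+ * CT flow steering providers can offload.
+ */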
+bool mlx5e_tc_ct_is_valid_flow_rule(const struct net_device *dev, struct flow_rule *flow_rule)
+{
+ struct flow_match_ipv4_addrs ipv4_addrs;
+ struct flow_match_ipv6_addrs ipv6_addrs;
+ struct flow_match_control control;
+ struct flow_match_basic basic;
+ struct flow_match_ports ports;
+ struct flow_match_tcp tcp;
+
+ if (!mlx5e_tc_ct_valid_used_dissector_keys(flow_rule->match.dissector->used_keys)) {
+ netdev_dbg(dev, "ct_debug: rule uses unexpected dissectors (0x%016llx)",
+ flow_rule->match.dissector->used_keys);
+ return false;
+ }
+
+ flow_rule_match_basic(flow_rule, &basic);
+ flow_rule_match_control(flow_rule, &control);
+ flow_rule_match_ipv4_addrs(flow_rule, &ipv4_addrs);
+ flow_rule_match_ipv6_addrs(flow_rule, &ipv6_addrs);
+ if (basic.key->ip_proto != IPPROTO_GRE)
+ flow_rule_match_ports(flow_rule, &ports);
+ if (basic.key->ip_proto == IPPROTO_TCP)
+ flow_rule_match_tcp(flow_rule, &tcp);
+
+ if (basic.mask->n_proto != htons(0xFFFF) ||
+ (basic.key->n_proto != htons(ETH_P_IP) && basic.key->n_proto != htons(ETH_P_IPV6)) ||
+ basic.mask->ip_proto != 0xFF ||
+ (basic.key->ip_proto != IPPROTO_UDP && basic.key->ip_proto != IPPROTO_TCP &&
+ basic.key->ip_proto != IPPROTO_GRE)) {
+ netdev_dbg(dev, "ct_debug: rule uses unexpected basic match (n_proto 0x%04x/0x%04x, ip_proto 0x%02x/0x%02x)",
+ ntohs(basic.key->n_proto), ntohs(basic.mask->n_proto),
+ basic.key->ip_proto, basic.mask->ip_proto);
+ return false;
+ }
+
+ if (basic.key->ip_proto != IPPROTO_GRE &&
+ (ports.mask->src != htons(0xFFFF) || ports.mask->dst != htons(0xFFFF))) {
+ netdev_dbg(dev, "ct_debug: rule uses ports match (src 0x%04x, dst 0x%04x)",
+ ports.mask->src, ports.mask->dst);
+ return false;
+ }
+
+ if (basic.key->ip_proto == IPPROTO_TCP && tcp.mask->flags != MLX5_CT_TCP_FLAGS_MASK) {
+ netdev_dbg(dev, "ct_debug: rule uses unexpected tcp match (flags 0x%02x)",
+ tcp.mask->flags);
+ return false;
+ }
+
+ return true;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
index b66c5f98067f..5e9dbdd4a5e9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
@@ -128,6 +128,9 @@ bool
mlx5e_tc_ct_restore_flow(struct mlx5_tc_ct_priv *ct_priv,
struct sk_buff *skb, u8 zone_restore_id);
+#define MLX5_CT_TCP_FLAGS_MASK cpu_to_be16(be32_to_cpu(TCP_FLAG_RST | TCP_FLAG_FIN) >> 16)
+bool mlx5e_tc_ct_is_valid_flow_rule(const struct net_device *dev, struct flow_rule *flow_rule);
+
#else /* CONFIG_MLX5_TC_CT */
static inline struct mlx5_tc_ct_priv *
@@ -202,5 +205,12 @@ mlx5e_tc_ct_restore_flow(struct mlx5_tc_ct_priv *ct_priv,
return false;
}
+static inline bool
+mlx5e_tc_ct_is_valid_flow_rule(const struct net_device *dev,
+ struct flow_rule *flow_rule)
+{
+ return false;
+}
+
#endif /* !IS_ENABLED(CONFIG_MLX5_TC_CT) */
#endif /* __MLX5_EN_TC_CT_H__ */
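As context for the MLX5_CT_TCP_FLAGS_MASK definition added in this hunk: the chain of byte-order conversions is easy to misread. Assuming the standard TCP_FLAG_RST/TCP_FLAG_FIN encodings from include/net/tcp.h (network-order 0x00040000 and 0x00010000, i.e. bits 16..23 of the TCP header's flag word), the macro reduces to cpu_to_be16(0x0005): a __be16 mask covering only the RST and FIN bits of the flow dissector's tcp.flags key. A minimal user-space sketch of the same arithmetic (illustration only, not kernel code):

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

int main(void)
{
	/* Network-order encodings of the RST and FIN bits in the TCP
	 * header's flag word (bits 16..23 of the 4th 32-bit word).
	 */
	uint32_t tcp_flag_rst = htonl(0x00040000);
	uint32_t tcp_flag_fin = htonl(0x00010000);

	/* be32_to_cpu(TCP_FLAG_RST | TCP_FLAG_FIN) >> 16 */
	uint16_t host_flags = (uint16_t)(ntohl(tcp_flag_rst | tcp_flag_fin) >> 16);

	/* cpu_to_be16(...) -> the __be16 mask compared with tcp.mask->flags */
	uint16_t be16_mask = htons(host_flags);

	printf("host-order flags 0x%04x, __be16 mask stores 0x%04x\n",
	       host_flags, ntohs(be16_mask));
	return 0;
}

On a little-endian host the intermediate swaps are real; on big-endian they are no-ops, so the result is the same __be16 value either way.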
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index 721f35e59757..2162d776fe35 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -31,8 +31,7 @@ static void mlx5e_tc_tun_route_attr_cleanup(struct mlx5e_tc_tun_route_attr *attr
{
if (attr->n)
neigh_release(attr->n);
- if (attr->route_dev)
- dev_put(attr->route_dev);
+ dev_put(attr->route_dev);
}
struct mlx5e_tc_tunnel *mlx5e_get_tc_tun(struct net_device *tunnel_dev)
@@ -68,16 +67,14 @@ static int get_route_and_out_devs(struct mlx5e_priv *priv,
* while holding rcu read lock. Take the net_device for correctness
* sake.
*/
- if (uplink_upper)
- dev_hold(uplink_upper);
+ dev_hold(uplink_upper);
rcu_read_unlock();
dst_is_lag_dev = (uplink_upper &&
netif_is_lag_master(uplink_upper) &&
real_dev == uplink_upper &&
mlx5_lag_is_sriov(priv->mdev));
- if (uplink_upper)
- dev_put(uplink_upper);
+ dev_put(uplink_upper);
/* if the egress device isn't on the same HW e-switch or
* it's a LAG device, use the uplink
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
index 878cbdbf5ec8..a0fc76a1bc08 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
@@ -5,6 +5,7 @@
#include <net/nexthop.h>
#include <net/ip_tunnels.h>
#include "tc_tun_encap.h"
+#include "fs_core.h"
#include "en_tc.h"
#include "tc_tun.h"
#include "rep/tc.h"
@@ -24,17 +25,24 @@ static int mlx5e_set_int_port_tunnel(struct mlx5e_priv *priv,
route_dev = dev_get_by_index(dev_net(e->out_dev), e->route_dev_ifindex);
- if (!route_dev || !netif_is_ovs_master(route_dev) ||
- attr->parse_attr->filter_dev == e->out_dev)
+ if (!route_dev || !netif_is_ovs_master(route_dev))
goto out;
+ if (priv->mdev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_DMFS &&
+ mlx5e_eswitch_uplink_rep(attr->parse_attr->filter_dev) &&
+ (attr->esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP)) {
+ mlx5_core_warn(priv->mdev,
+ "Matching on external port with encap + fwd to table actions is not allowed for firmware steering\n");
+ err = -EINVAL;
+ goto out;
+ }
+
err = mlx5e_set_fwd_to_int_port_actions(priv, attr, e->route_dev_ifindex,
MLX5E_TC_INT_PORT_EGRESS,
&attr->action, out_index);
out:
- if (route_dev)
- dev_put(route_dev);
+ dev_put(route_dev);
return err;
}
@@ -744,8 +752,7 @@ static int mlx5e_set_vf_tunnel(struct mlx5_eswitch *esw,
}
out:
- if (route_dev)
- dev_put(route_dev);
+ dev_put(route_dev);
return err;
}
@@ -779,8 +786,7 @@ static int mlx5e_update_vf_tunnel(struct mlx5_eswitch *esw,
mlx5e_tc_match_to_reg_mod_hdr_change(esw->dev, mod_hdr_acts, VPORT_TO_REG, act_id, data);
out:
- if (route_dev)
- dev_put(route_dev);
+ dev_put(route_dev);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
index e4e487c8431b..7a18a469961d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
@@ -140,7 +140,7 @@ static int mlx5e_tc_tun_parse_vxlan_gbp_option(struct mlx5e_priv *priv,
gbp_mask = (u32 *)&enc_opts.mask->data[0];
if (*gbp_mask & ~VXLAN_GBP_MASK) {
- NL_SET_ERR_MSG_FMT_MOD(extack, "Wrong VxLAN GBP mask(0x%08X)\n", *gbp_mask);
+ NL_SET_ERR_MSG_FMT_MOD(extack, "Wrong VxLAN GBP mask(0x%08X)", *gbp_mask);
return -EINVAL;
}
@@ -165,9 +165,6 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
struct flow_match_enc_keyid enc_keyid;
void *misc_c, *misc_v;
- misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
- misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
-
if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID))
return 0;
@@ -182,6 +179,30 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
err = mlx5e_tc_tun_parse_vxlan_gbp_option(priv, spec, f);
if (err)
return err;
+
+ /* We can't mix custom tunnel headers with symbolic ones and we
+ * don't have a symbolic field name for GBP, so we use custom
+ * tunnel headers in this case. We need hardware support to
+ * match on custom tunnel headers, but we already know it's
+ * supported because the previous call successfully checked for
+ * that.
+ */
+ misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ misc_parameters_5);
+ misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ misc_parameters_5);
+
+ /* Shift by 8 to account for the reserved bits in the vxlan
+ * header after the VNI.
+ */
+ MLX5_SET(fte_match_set_misc5, misc_c, tunnel_header_1,
+ be32_to_cpu(enc_keyid.mask->keyid) << 8);
+ MLX5_SET(fte_match_set_misc5, misc_v, tunnel_header_1,
+ be32_to_cpu(enc_keyid.key->keyid) << 8);
+
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_5;
+
+ return 0;
}
/* match on VNI is required */
@@ -195,6 +216,11 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
+ misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ misc_parameters);
+ misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ misc_parameters);
+
MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
be32_to_cpu(enc_keyid.mask->keyid));
MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c
index 11f724ad90db..19499072f67f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c
@@ -124,7 +124,7 @@ void mlx5e_tir_builder_build_rss(struct mlx5e_tir_builder *builder,
const size_t len = MLX5_FLD_SZ_BYTES(tirc, rx_hash_toeplitz_key);
void *rss_key = MLX5_ADDR_OF(tirc, tirc, rx_hash_toeplitz_key);
- MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
+ MLX5_SET(tirc, tirc, rx_hash_symmetric, rss_hash->symmetric);
memcpy(rss_key, rss_hash->toeplitz_hash_key, len);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.h
index 857a84bcd53a..e8df3aaf6562 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.h
@@ -9,6 +9,7 @@
struct mlx5e_rss_params_hash {
u8 hfunc;
u8 toeplitz_hash_key[40];
+ bool symmetric;
};
struct mlx5e_rss_params_traffic_type {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
index 53ca16cb9c41..b5c19396e096 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
@@ -46,7 +46,7 @@ static void mlx5e_init_trap_rq(struct mlx5e_trap *t, struct mlx5e_params *params
rq->pdev = t->pdev;
rq->netdev = priv->netdev;
rq->priv = priv;
- rq->clock = &mdev->clock;
+ rq->clock = mdev->clock;
rq->tstamp = &priv->tstamp;
rq->mdev = mdev;
rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
@@ -149,7 +149,7 @@ static struct mlx5e_trap *mlx5e_open_trap(struct mlx5e_priv *priv)
t->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey);
t->stats = &priv->trap_stats.ch;
- netif_napi_add(netdev, &t->napi, mlx5e_trap_napi_poll);
+ netif_napi_add_locked(netdev, &t->napi, mlx5e_trap_napi_poll);
err = mlx5e_open_trap_rq(priv, t);
if (unlikely(err))
@@ -164,7 +164,7 @@ static struct mlx5e_trap *mlx5e_open_trap(struct mlx5e_priv *priv)
err_close_trap_rq:
mlx5e_close_trap_rq(&t->rq);
err_napi_del:
- netif_napi_del(&t->napi);
+ netif_napi_del_locked(&t->napi);
kvfree(t);
return ERR_PTR(err);
}
@@ -173,13 +173,13 @@ void mlx5e_close_trap(struct mlx5e_trap *trap)
{
mlx5e_tir_destroy(&trap->tir);
mlx5e_close_trap_rq(&trap->rq);
- netif_napi_del(&trap->napi);
+ netif_napi_del_locked(&trap->napi);
kvfree(trap);
}
static void mlx5e_activate_trap(struct mlx5e_trap *trap)
{
- napi_enable(&trap->napi);
+ napi_enable_locked(&trap->napi);
mlx5e_activate_rq(&trap->rq);
mlx5e_trigger_napi_sched(&trap->napi);
}
@@ -189,7 +189,7 @@ void mlx5e_deactivate_trap(struct mlx5e_priv *priv)
struct mlx5e_trap *trap = priv->en_trap;
mlx5e_deactivate_rq(&trap->rq);
- napi_disable(&trap->napi);
+ napi_disable_locked(&trap->napi);
}
static struct mlx5e_trap *mlx5e_add_trap_queue(struct mlx5e_priv *priv)
@@ -285,6 +285,7 @@ int mlx5e_handle_trap_event(struct mlx5e_priv *priv, struct mlx5_trap_ctx *trap_
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
return 0;
+ netdev_lock(priv->netdev);
switch (trap_ctx->action) {
case DEVLINK_TRAP_ACTION_TRAP:
err = mlx5e_handle_action_trap(priv, trap_ctx->id);
@@ -297,6 +298,7 @@ int mlx5e_handle_trap_event(struct mlx5e_priv *priv, struct mlx5_trap_ctx *trap_
trap_ctx->action);
err = -EINVAL;
}
+ netdev_unlock(priv->netdev);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index 5ec468268d1a..e837c21d3d21 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -214,6 +214,19 @@ static inline u16 mlx5e_txqsq_get_next_pi(struct mlx5e_txqsq *sq, u16 size)
return pi;
}
+static inline u16 mlx5e_txqsq_get_next_pi_anysize(struct mlx5e_txqsq *sq,
+ u16 *size)
+{
+ struct mlx5_wq_cyc *wq = &sq->wq;
+ u16 pi, contig_wqebbs;
+
+ pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+ contig_wqebbs = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
+ *size = min_t(u16, contig_wqebbs, sq->max_sq_mpw_wqebbs);
+
+ return pi;
+}
+
void mlx5e_txqsq_wake(struct mlx5e_txqsq *sq);
static inline u16 mlx5e_shampo_get_cqe_header_index(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
@@ -358,9 +371,9 @@ mlx5e_tx_dma_unmap(struct device *pdev, struct mlx5e_sq_dma *dma)
void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq);
-static inline bool mlx5e_tx_mpwqe_is_full(struct mlx5e_tx_mpwqe *session, u8 max_sq_mpw_wqebbs)
+static inline bool mlx5e_tx_mpwqe_is_full(struct mlx5e_tx_mpwqe *session)
{
- return session->ds_count == max_sq_mpw_wqebbs * MLX5_SEND_WQEBB_NUM_DS;
+ return session->ds_count == session->ds_count_max;
}
static inline void mlx5e_rqwq_reset(struct mlx5e_rq *rq)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index 94b291662087..5ce1b463b7a8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -289,9 +289,9 @@ static u64 mlx5e_xsk_fill_timestamp(void *_priv)
ts = get_cqe_ts(priv->cqe);
if (mlx5_is_real_time_rq(priv->cq->mdev) || mlx5_is_real_time_sq(priv->cq->mdev))
- return mlx5_real_time_cyc2time(&priv->cq->mdev->clock, ts);
+ return mlx5_real_time_cyc2time(priv->cq->mdev->clock, ts);
- return mlx5_timecounter_cyc2time(&priv->cq->mdev->clock, ts);
+ return mlx5_timecounter_cyc2time(priv->cq->mdev->clock, ts);
}
static void mlx5e_xsk_request_checksum(u16 csum_start, u16 csum_offset, void *priv)
@@ -390,6 +390,7 @@ static void mlx5e_xdp_mpwqe_session_start(struct mlx5e_xdpsq *sq)
.wqe = wqe,
.bytes_count = 0,
.ds_count = MLX5E_TX_WQE_EMPTY_DS_COUNT,
+ .ds_count_max = sq->max_sq_mpw_wqebbs * MLX5_SEND_WQEBB_NUM_DS,
.pkt_count = 0,
.inline_on = mlx5e_xdp_get_inline_state(sq, session->inline_on),
};
@@ -501,7 +502,7 @@ mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptx
mlx5e_xdp_mpwqe_add_dseg(sq, p, stats);
- if (unlikely(mlx5e_xdp_mpwqe_is_full(session, sq->max_sq_mpw_wqebbs)))
+ if (unlikely(mlx5e_xdp_mpwqe_is_full(session)))
mlx5e_xdp_mpwqe_complete(sq);
stats->xmit++;
@@ -546,6 +547,7 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
bool inline_ok;
bool linear;
u16 pi;
+ int i;
struct mlx5e_xdpsq_stats *stats = sq->stats;
@@ -612,41 +614,33 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND);
- if (test_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state)) {
- int i;
-
- memset(&cseg->trailer, 0, sizeof(cseg->trailer));
- memset(eseg, 0, sizeof(*eseg) - sizeof(eseg->trailer));
-
- eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);
+ memset(&cseg->trailer, 0, sizeof(cseg->trailer));
+ memset(eseg, 0, sizeof(*eseg) - sizeof(eseg->trailer));
- for (i = 0; i < num_frags; i++) {
- skb_frag_t *frag = &xdptxdf->sinfo->frags[i];
- dma_addr_t addr;
+ eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);
- addr = xdptxdf->dma_arr ? xdptxdf->dma_arr[i] :
- page_pool_get_dma_addr(skb_frag_page(frag)) +
- skb_frag_off(frag);
+ for (i = 0; i < num_frags; i++) {
+ skb_frag_t *frag = &xdptxdf->sinfo->frags[i];
+ dma_addr_t addr;
- dseg->addr = cpu_to_be64(addr);
- dseg->byte_count = cpu_to_be32(skb_frag_size(frag));
- dseg->lkey = sq->mkey_be;
- dseg++;
- }
+ addr = xdptxdf->dma_arr ? xdptxdf->dma_arr[i] :
+ page_pool_get_dma_addr(skb_frag_page(frag)) +
+ skb_frag_off(frag);
- cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
+ dseg->addr = cpu_to_be64(addr);
+ dseg->byte_count = cpu_to_be32(skb_frag_size(frag));
+ dseg->lkey = sq->mkey_be;
+ dseg++;
+ }
- sq->db.wqe_info[pi] = (struct mlx5e_xdp_wqe_info) {
- .num_wqebbs = num_wqebbs,
- .num_pkts = 1,
- };
+ cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
- sq->pc += num_wqebbs;
- } else {
- cseg->fm_ce_se = 0;
+ sq->db.wqe_info[pi] = (struct mlx5e_xdp_wqe_info) {
+ .num_wqebbs = num_wqebbs,
+ .num_pkts = 1,
+ };
- sq->pc++;
- }
+ sq->pc += num_wqebbs;
xsk_tx_metadata_request(meta, &mlx5e_xsk_tx_metadata_ops, eseg);
@@ -713,8 +707,8 @@ static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq,
xdpi = mlx5e_xdpi_fifo_pop(xdpi_fifo);
page = xdpi.page.page;
- /* No need to check ((page->pp_magic & ~0x3UL) == PP_SIGNATURE)
- * as we know this is a page_pool page.
+ /* No need to check page_pool_page_is_pp() as we
+ * know this is a page_pool page.
*/
page_pool_recycle_direct(page->pp, page);
} while (++n < num);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
index e054db1e10f8..46ab0a9e8cdd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
@@ -45,12 +45,6 @@
(MLX5E_XDP_INLINE_WQE_MAX_DS_CNT * MLX5_SEND_WQE_DS - \
sizeof(struct mlx5_wqe_inline_seg))
-struct mlx5e_xdp_buff {
- struct xdp_buff xdp;
- struct mlx5_cqe64 *cqe;
- struct mlx5e_rq *rq;
-};
-
/* XDP packets can be transmitted in different ways. On completion, we need to
* distinguish between them to clean up things in a proper way.
*/
@@ -182,13 +176,13 @@ static inline bool mlx5e_xdp_get_inline_state(struct mlx5e_xdpsq *sq, bool cur)
return cur;
}
-static inline bool mlx5e_xdp_mpwqe_is_full(struct mlx5e_tx_mpwqe *session, u8 max_sq_mpw_wqebbs)
+static inline bool mlx5e_xdp_mpwqe_is_full(struct mlx5e_tx_mpwqe *session)
{
if (session->inline_on)
return session->ds_count + MLX5E_XDP_INLINE_WQE_MAX_DS_CNT >
- max_sq_mpw_wqebbs * MLX5_SEND_WQEBB_NUM_DS;
+ session->ds_count_max;
- return mlx5e_tx_mpwqe_is_full(session, max_sq_mpw_wqebbs);
+ return mlx5e_tx_mpwqe_is_full(session);
}
struct mlx5e_xdp_wqe_info {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
index 1b7132fa70de..2b05536d564a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
@@ -123,7 +123,7 @@ int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
bitmap_zero(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
wi->consumed_strides = 0;
- umr_wqe->ctrl.opmod_idx_opcode =
+ umr_wqe->hdr.ctrl.opmod_idx_opcode =
cpu_to_be32((icosq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) | MLX5_OPCODE_UMR);
/* Optimized for speed: keep in sync with mlx5e_mpwrq_umr_entry_size. */
@@ -134,7 +134,7 @@ int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
offset = offset * sizeof(struct mlx5_klm) * 2 / MLX5_OCTWORD;
else if (unlikely(rq->mpwqe.umr_mode == MLX5E_MPWRQ_UMR_MODE_TRIPLE))
offset = offset * sizeof(struct mlx5_ksm) * 4 / MLX5_OCTWORD;
- umr_wqe->uctrl.xlt_offset = cpu_to_be16(offset);
+ umr_wqe->hdr.uctrl.xlt_offset = cpu_to_be16(offset);
icosq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
.wqe_type = MLX5E_ICOSQ_WQE_UMR_RX,
@@ -144,7 +144,7 @@ int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
icosq->pc += rq->mpwqe.umr_wqebbs;
- icosq->doorbell_cseg = &umr_wqe->ctrl;
+ icosq->doorbell_cseg = &umr_wqe->hdr.ctrl;
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
index 9240cfe25d10..d743e823362a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
@@ -72,7 +72,7 @@ static int mlx5e_init_xsk_rq(struct mlx5e_channel *c,
rq->netdev = c->netdev;
rq->priv = c->priv;
rq->tstamp = c->tstamp;
- rq->clock = &mdev->clock;
+ rq->clock = mdev->clock;
rq->icosq = &c->icosq;
rq->ix = c->ix;
rq->channel = c;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index ca92e518be76..77f61cd28a79 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -94,25 +94,14 @@ static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
u32 esn, esn_msb;
u8 overlap;
- switch (x->xso.type) {
- case XFRM_DEV_OFFLOAD_PACKET:
- switch (x->xso.dir) {
- case XFRM_DEV_OFFLOAD_IN:
- esn = x->replay_esn->seq;
- esn_msb = x->replay_esn->seq_hi;
- break;
- case XFRM_DEV_OFFLOAD_OUT:
- esn = x->replay_esn->oseq;
- esn_msb = x->replay_esn->oseq_hi;
- break;
- default:
- WARN_ON(true);
- return false;
- }
- break;
- case XFRM_DEV_OFFLOAD_CRYPTO:
- /* Already parsed by XFRM core */
+ switch (x->xso.dir) {
+ case XFRM_DEV_OFFLOAD_IN:
esn = x->replay_esn->seq;
+ esn_msb = x->replay_esn->seq_hi;
+ break;
+ case XFRM_DEV_OFFLOAD_OUT:
+ esn = x->replay_esn->oseq;
+ esn_msb = x->replay_esn->oseq_hi;
break;
default:
WARN_ON(true);
@@ -121,11 +110,15 @@ static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
overlap = sa_entry->esn_state.overlap;
- if (esn >= x->replay_esn->replay_window)
- seq_bottom = esn - x->replay_esn->replay_window + 1;
+ if (!x->replay_esn->replay_window) {
+ seq_bottom = esn;
+ } else {
+ if (esn >= x->replay_esn->replay_window)
+ seq_bottom = esn - x->replay_esn->replay_window + 1;
- if (x->xso.type == XFRM_DEV_OFFLOAD_CRYPTO)
- esn_msb = xfrm_replay_seqhi(x, htonl(seq_bottom));
+ if (x->xso.type == XFRM_DEV_OFFLOAD_CRYPTO)
+ esn_msb = xfrm_replay_seqhi(x, htonl(seq_bottom));
+ }
if (sa_entry->esn_state.esn_msb)
sa_entry->esn_state.esn = esn;
@@ -266,8 +259,7 @@ static void mlx5e_ipsec_init_macs(struct mlx5e_ipsec_sa_entry *sa_entry,
struct mlx5_accel_esp_xfrm_attrs *attrs)
{
struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
- struct xfrm_state *x = sa_entry->x;
- struct net_device *netdev;
+ struct net_device *netdev = sa_entry->dev;
struct neighbour *n;
u8 addr[ETH_ALEN];
const void *pkey;
@@ -277,19 +269,17 @@ static void mlx5e_ipsec_init_macs(struct mlx5e_ipsec_sa_entry *sa_entry,
attrs->type != XFRM_DEV_OFFLOAD_PACKET)
return;
- netdev = x->xso.real_dev;
-
mlx5_query_mac_address(mdev, addr);
switch (attrs->dir) {
case XFRM_DEV_OFFLOAD_IN:
src = attrs->dmac;
dst = attrs->smac;
- pkey = &attrs->saddr.a4;
+ pkey = &attrs->addrs.saddr.a4;
break;
case XFRM_DEV_OFFLOAD_OUT:
src = attrs->smac;
dst = attrs->dmac;
- pkey = &attrs->daddr.a4;
+ pkey = &attrs->addrs.daddr.a4;
break;
default:
return;
@@ -310,6 +300,16 @@ static void mlx5e_ipsec_init_macs(struct mlx5e_ipsec_sa_entry *sa_entry,
neigh_release(n);
}
+static void mlx5e_ipsec_state_mask(struct mlx5e_ipsec_addr *addrs)
+{
+ /*
+ * State doesn't have subnet prefixes in outer headers.
+ */
+ memset(addrs->smask.m6, 0xFF, sizeof(__be32) * 4);
+ memset(addrs->dmask.m6, 0xFF, sizeof(__be32) * 4);
+}
+
void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
struct mlx5_accel_esp_xfrm_attrs *attrs)
{
@@ -381,9 +381,11 @@ skip_replay_window:
attrs->spi = be32_to_cpu(x->id.spi);
/* source , destination ips */
- memcpy(&attrs->saddr, x->props.saddr.a6, sizeof(attrs->saddr));
- memcpy(&attrs->daddr, x->id.daddr.a6, sizeof(attrs->daddr));
- attrs->family = x->props.family;
+ memcpy(&attrs->addrs.saddr, x->props.saddr.a6,
+ sizeof(attrs->addrs.saddr));
+ memcpy(&attrs->addrs.daddr, x->id.daddr.a6, sizeof(attrs->addrs.daddr));
+ attrs->addrs.family = x->props.family;
+ mlx5e_ipsec_state_mask(&attrs->addrs);
attrs->type = x->xso.type;
attrs->reqid = x->props.reqid;
attrs->upspec.dport = ntohs(x->sel.dport);
@@ -435,7 +437,8 @@ static int mlx5e_xfrm_validate_state(struct mlx5_core_dev *mdev,
}
if (x->encap) {
if (!(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_ESPINUDP)) {
- NL_SET_ERR_MSG_MOD(extack, "Encapsulation is not supported");
+ NL_SET_ERR_MSG_MOD(extack,
+ "Encapsulation is not supported");
return -EINVAL;
}
@@ -686,17 +689,17 @@ static int mlx5e_ipsec_create_dwork(struct mlx5e_ipsec_sa_entry *sa_entry)
return 0;
}
-static int mlx5e_xfrm_add_state(struct xfrm_state *x,
+static int mlx5e_xfrm_add_state(struct net_device *dev,
+ struct xfrm_state *x,
struct netlink_ext_ack *extack)
{
struct mlx5e_ipsec_sa_entry *sa_entry = NULL;
- struct net_device *netdev = x->xso.real_dev;
struct mlx5e_ipsec *ipsec;
struct mlx5e_priv *priv;
gfp_t gfp;
int err;
- priv = netdev_priv(netdev);
+ priv = netdev_priv(dev);
if (!priv->ipsec)
return -EOPNOTSUPP;
@@ -707,6 +710,7 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x,
return -ENOMEM;
sa_entry->x = x;
+ sa_entry->dev = dev;
sa_entry->ipsec = ipsec;
/* Check if this SA is originated from acquire flow temporary SA */
if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ)
@@ -724,6 +728,12 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x,
/* check esn */
if (x->props.flags & XFRM_STATE_ESN)
mlx5e_ipsec_update_esn_state(sa_entry);
+ else
+ /* According to RFC4303, section "3.3.3. Sequence Number Generation",
+ * the first packet sent using a given SA will contain a sequence
+ * number of 1.
+ */
+ sa_entry->esn_state.esn = 1;
mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &sa_entry->attrs);
@@ -768,9 +778,12 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x,
MLX5_IPSEC_RESCHED);
if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET &&
- x->props.mode == XFRM_MODE_TUNNEL)
- xa_set_mark(&ipsec->sadb, sa_entry->ipsec_obj_id,
- MLX5E_IPSEC_TUNNEL_SA);
+ x->props.mode == XFRM_MODE_TUNNEL) {
+ xa_lock_bh(&ipsec->sadb);
+ __xa_set_mark(&ipsec->sadb, sa_entry->ipsec_obj_id,
+ MLX5E_IPSEC_TUNNEL_SA);
+ xa_unlock_bh(&ipsec->sadb);
+ }
out:
x->xso.offload_handle = (unsigned long)sa_entry;
@@ -794,10 +807,9 @@ err_xfrm:
return err;
}
-static void mlx5e_xfrm_del_state(struct xfrm_state *x)
+static void mlx5e_xfrm_del_state(struct net_device *dev, struct xfrm_state *x)
{
struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
- struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
struct mlx5e_ipsec_sa_entry *old;
@@ -806,15 +818,9 @@ static void mlx5e_xfrm_del_state(struct xfrm_state *x)
old = xa_erase_bh(&ipsec->sadb, sa_entry->ipsec_obj_id);
WARN_ON(old != sa_entry);
-
- if (attrs->mode == XFRM_MODE_TUNNEL &&
- attrs->type == XFRM_DEV_OFFLOAD_PACKET)
- /* Make sure that no ARP requests are running in parallel */
- flush_workqueue(ipsec->wq);
-
}
-static void mlx5e_xfrm_free_state(struct xfrm_state *x)
+static void mlx5e_xfrm_free_state(struct net_device *dev, struct xfrm_state *x)
{
struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
@@ -847,8 +853,6 @@ static int mlx5e_ipsec_netevent_event(struct notifier_block *nb,
struct mlx5e_ipsec_sa_entry *sa_entry;
struct mlx5e_ipsec *ipsec;
struct neighbour *n = ptr;
- struct net_device *netdev;
- struct xfrm_state *x;
unsigned long idx;
if (event != NETEVENT_NEIGH_UPDATE || !(n->nud_state & NUD_VALID))
@@ -858,21 +862,19 @@ static int mlx5e_ipsec_netevent_event(struct notifier_block *nb,
xa_for_each_marked(&ipsec->sadb, idx, sa_entry, MLX5E_IPSEC_TUNNEL_SA) {
attrs = &sa_entry->attrs;
- if (attrs->family == AF_INET) {
- if (!neigh_key_eq32(n, &attrs->saddr.a4) &&
- !neigh_key_eq32(n, &attrs->daddr.a4))
+ if (attrs->addrs.family == AF_INET) {
+ if (!neigh_key_eq32(n, &attrs->addrs.saddr.a4) &&
+ !neigh_key_eq32(n, &attrs->addrs.daddr.a4))
continue;
} else {
- if (!neigh_key_eq128(n, &attrs->saddr.a4) &&
- !neigh_key_eq128(n, &attrs->daddr.a4))
+ if (!neigh_key_eq128(n, &attrs->addrs.saddr.a4) &&
+ !neigh_key_eq128(n, &attrs->addrs.daddr.a4))
continue;
}
- x = sa_entry->x;
- netdev = x->xso.real_dev;
data = sa_entry->work->data;
- neigh_ha_snapshot(data->addr, n, netdev);
+ neigh_ha_snapshot(data->addr, n, sa_entry->dev);
queue_work(ipsec->wq, &sa_entry->work->work);
}
@@ -958,21 +960,6 @@ void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv)
priv->ipsec = NULL;
}
-static bool mlx5e_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
-{
- if (x->props.family == AF_INET) {
- /* Offload with IPv4 options is not supported yet */
- if (ip_hdr(skb)->ihl > 5)
- return false;
- } else {
- /* Offload with IPv6 extension headers is not support yet */
- if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
- return false;
- }
-
- return true;
-}
-
static void mlx5e_xfrm_advance_esn_state(struct xfrm_state *x)
{
struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
@@ -1003,8 +990,8 @@ static void mlx5e_xfrm_update_stats(struct xfrm_state *x)
size_t headers;
lockdep_assert(lockdep_is_held(&x->lock) ||
- lockdep_is_held(&dev_net(x->xso.real_dev)->xfrm.xfrm_cfg_mutex) ||
- lockdep_is_held(&dev_net(x->xso.real_dev)->xfrm.xfrm_state_lock));
+ lockdep_is_held(&net->xfrm.xfrm_cfg_mutex) ||
+ lockdep_is_held(&net->xfrm.xfrm_state_lock));
if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ)
return;
@@ -1040,7 +1027,7 @@ static void mlx5e_xfrm_update_stats(struct xfrm_state *x)
* by removing always available headers.
*/
headers = sizeof(struct ethhdr);
- if (sa_entry->attrs.family == AF_INET)
+ if (sa_entry->attrs.addrs.family == AF_INET)
headers += sizeof(struct iphdr);
else
headers += sizeof(struct ipv6hdr);
@@ -1049,6 +1036,43 @@ static void mlx5e_xfrm_update_stats(struct xfrm_state *x)
x->curlft.bytes += success_bytes - headers * success_packets;
}
+static __be32 word_to_mask(int prefix)
+{
+ if (prefix < 0)
+ return 0;
+
+ if (!prefix || prefix > 31)
+ return cpu_to_be32(0xFFFFFFFF);
+
+ return cpu_to_be32(((1U << prefix) - 1) << (32 - prefix));
+}
+
+static void mlx5e_ipsec_policy_mask(struct mlx5e_ipsec_addr *addrs,
+ struct xfrm_selector *sel)
+{
+ int i;
+
+ if (addrs->family == AF_INET) {
+ addrs->smask.m4 = word_to_mask(sel->prefixlen_s);
+ addrs->saddr.a4 &= addrs->smask.m4;
+ addrs->dmask.m4 = word_to_mask(sel->prefixlen_d);
+ addrs->daddr.a4 &= addrs->dmask.m4;
+ return;
+ }
+
+ for (i = 0; i < 4; i++) {
+ if (sel->prefixlen_s != 32 * i)
+ addrs->smask.m6[i] =
+ word_to_mask(sel->prefixlen_s - 32 * i);
+ addrs->saddr.a6[i] &= addrs->smask.m6[i];
+
+ if (sel->prefixlen_d != 32 * i)
+ addrs->dmask.m6[i] =
+ word_to_mask(sel->prefixlen_d - 32 * i);
+ addrs->daddr.a6[i] &= addrs->dmask.m6[i];
+ }
+}
+
static int mlx5e_xfrm_validate_policy(struct mlx5_core_dev *mdev,
struct xfrm_policy *x,
struct netlink_ext_ack *extack)
@@ -1121,9 +1145,10 @@ mlx5e_ipsec_build_accel_pol_attrs(struct mlx5e_ipsec_pol_entry *pol_entry,
sel = &x->selector;
memset(attrs, 0, sizeof(*attrs));
- memcpy(&attrs->saddr, sel->saddr.a6, sizeof(attrs->saddr));
- memcpy(&attrs->daddr, sel->daddr.a6, sizeof(attrs->daddr));
- attrs->family = sel->family;
+ memcpy(&attrs->addrs.saddr, sel->saddr.a6, sizeof(attrs->addrs.saddr));
+ memcpy(&attrs->addrs.daddr, sel->daddr.a6, sizeof(attrs->addrs.daddr));
+ attrs->addrs.family = sel->family;
+ mlx5e_ipsec_policy_mask(&attrs->addrs, sel);
attrs->dir = x->xdo.dir;
attrs->action = x->action;
attrs->type = XFRM_DEV_OFFLOAD_PACKET;
@@ -1139,7 +1164,7 @@ mlx5e_ipsec_build_accel_pol_attrs(struct mlx5e_ipsec_pol_entry *pol_entry,
static int mlx5e_xfrm_add_policy(struct xfrm_policy *x,
struct netlink_ext_ack *extack)
{
- struct net_device *netdev = x->xdo.real_dev;
+ struct net_device *netdev = x->xdo.dev;
struct mlx5e_ipsec_pol_entry *pol_entry;
struct mlx5e_priv *priv;
int err;
@@ -1201,7 +1226,6 @@ static const struct xfrmdev_ops mlx5e_ipsec_xfrmdev_ops = {
.xdo_dev_state_add = mlx5e_xfrm_add_state,
.xdo_dev_state_delete = mlx5e_xfrm_del_state,
.xdo_dev_state_free = mlx5e_xfrm_free_state,
- .xdo_dev_offload_ok = mlx5e_ipsec_offload_ok,
.xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state,
.xdo_dev_state_update_stats = mlx5e_xfrm_update_stats,
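As context for the word_to_mask()/mlx5e_ipsec_policy_mask() helpers added earlier in this file: they turn an xfrm selector prefix length into a per-32-bit-word match mask, splitting IPv6 prefixes across four words; a word whose remaining prefix is negative gets an empty mask, and a word that starts exactly at the prefix boundary is skipped by the "!= 32 * i" guard. A minimal host-order sketch of the conversion (illustration only; the kernel helper additionally wraps the result in cpu_to_be32()):

#include <stdint.h>
#include <stdio.h>

/* Same computation as word_to_mask() above, in host byte order. */
static uint32_t word_to_mask_host(int prefix)
{
	if (prefix < 0)
		return 0;
	if (!prefix || prefix > 31)
		return 0xFFFFFFFFu;
	return ((1U << prefix) - 1) << (32 - prefix);
}

int main(void)
{
	int prefixes[] = { -24, 0, 8, 24, 31, 40 };
	size_t i;

	for (i = 0; i < sizeof(prefixes) / sizeof(prefixes[0]); i++)
		printf("prefix %3d -> 0x%08x\n", prefixes[i],
		       word_to_mask_host(prefixes[i]));
	return 0;
}

For example, a /24 IPv4 selector yields 0xffffff00, while a /40 IPv6 selector yields a full mask for word 0, 0xff000000 for word 1, and empty masks for words 2 and 3.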
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
index 7d943e93cf6d..ffcd0cdeb775 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -76,27 +76,36 @@ struct mlx5_replay_esn {
u8 trigger : 1;
};
-struct mlx5_accel_esp_xfrm_attrs {
- u32 spi;
- u32 mode;
- struct aes_gcm_keymat aes_gcm;
-
+struct mlx5e_ipsec_addr {
union {
__be32 a4;
__be32 a6[4];
} saddr;
-
+ union {
+ __be32 m4;
+ __be32 m6[4];
+ } smask;
union {
__be32 a4;
__be32 a6[4];
} daddr;
+ union {
+ __be32 m4;
+ __be32 m6[4];
+ } dmask;
+ u8 family;
+};
+struct mlx5_accel_esp_xfrm_attrs {
+ u32 spi;
+ u32 mode;
+ struct aes_gcm_keymat aes_gcm;
+ struct mlx5e_ipsec_addr addrs;
struct upspec upspec;
u8 dir : 2;
u8 type : 2;
u8 drop : 1;
u8 encap : 1;
- u8 family;
struct mlx5_replay_esn replay_esn;
u32 authsize;
u32 reqid;
@@ -128,6 +137,7 @@ struct mlx5e_ipsec_hw_stats {
u64 ipsec_rx_bytes;
u64 ipsec_rx_drop_pkts;
u64 ipsec_rx_drop_bytes;
+ u64 ipsec_rx_drop_mismatch_sa_sel;
u64 ipsec_tx_pkts;
u64 ipsec_tx_bytes;
u64 ipsec_tx_drop_pkts;
@@ -184,6 +194,7 @@ struct mlx5e_ipsec_ft {
struct mutex mutex; /* Protect changes to this struct */
struct mlx5_flow_table *pol;
struct mlx5_flow_table *sa;
+ struct mlx5_flow_table *sa_sel;
struct mlx5_flow_table *status;
u32 refcnt;
};
@@ -195,6 +206,8 @@ struct mlx5e_ipsec_drop {
struct mlx5e_ipsec_rule {
struct mlx5_flow_handle *rule;
+ struct mlx5_flow_handle *status_pass;
+ struct mlx5_flow_handle *sa_sel;
struct mlx5_modify_hdr *modify_hdr;
struct mlx5_pkt_reformat *pkt_reformat;
struct mlx5_fc *fc;
@@ -206,6 +219,7 @@ struct mlx5e_ipsec_rule {
struct mlx5e_ipsec_miss {
struct mlx5_flow_group *group;
struct mlx5_flow_handle *rule;
+ struct mlx5_fc *fc;
};
struct mlx5e_ipsec_tx_create_attr {
@@ -260,6 +274,7 @@ struct mlx5e_ipsec_limits {
struct mlx5e_ipsec_sa_entry {
struct mlx5e_ipsec_esn_state esn_state;
struct xfrm_state *x;
+ struct net_device *dev;
struct mlx5e_ipsec *ipsec;
struct mlx5_accel_esp_xfrm_attrs attrs;
void (*set_iv_op)(struct sk_buff *skb, struct xfrm_state *x,
@@ -274,18 +289,8 @@ struct mlx5e_ipsec_sa_entry {
};
struct mlx5_accel_pol_xfrm_attrs {
- union {
- __be32 a4;
- __be32 a6[4];
- } saddr;
-
- union {
- __be32 a4;
- __be32 a6[4];
- } daddr;
-
+ struct mlx5e_ipsec_addr addrs;
struct upspec upspec;
- u8 family;
u8 action;
u8 type : 2;
u8 dir : 2;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index e51b03d4c717..98b6a3a623f9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -16,6 +16,16 @@
#define MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_SIZE 16
#define IPSEC_TUNNEL_DEFAULT_TTL 0x40
+#define MLX5_IPSEC_FS_SA_SELECTOR_MAX_NUM_GROUPS 16
+
+enum {
+ MLX5_IPSEC_ASO_OK,
+ MLX5_IPSEC_ASO_BAD_REPLY,
+
+ /* For crypto offload, set by driver */
+ MLX5_IPSEC_ASO_SW_CRYPTO_OFFLOAD = 0xAA,
+};
+
struct mlx5e_ipsec_fc {
struct mlx5_fc *cnt;
struct mlx5_fc *drop;
@@ -33,6 +43,9 @@ struct mlx5e_ipsec_tx {
};
struct mlx5e_ipsec_status_checks {
+ struct mlx5_flow_group *pass_group;
+ struct mlx5_flow_handle *packet_offload_pass_rule;
+ struct mlx5_flow_handle *crypto_offload_pass_rule;
struct mlx5_flow_group *drop_all_group;
struct mlx5e_ipsec_drop all;
};
@@ -41,10 +54,12 @@ struct mlx5e_ipsec_rx {
struct mlx5e_ipsec_ft ft;
struct mlx5e_ipsec_miss pol;
struct mlx5e_ipsec_miss sa;
- struct mlx5e_ipsec_rule status;
- struct mlx5e_ipsec_status_checks status_drops;
+ struct mlx5e_ipsec_miss sa_sel;
+ struct mlx5e_ipsec_status_checks status_checks;
struct mlx5e_ipsec_fc *fc;
struct mlx5_fs_chains *chains;
+ struct mlx5_flow_table *pol_miss_ft;
+ struct mlx5_flow_handle *pol_miss_rule;
u8 allow_tunnel_mode : 1;
};
@@ -130,11 +145,12 @@ static void ipsec_chains_put_table(struct mlx5_fs_chains *chains, u32 prio)
static struct mlx5_flow_table *ipsec_ft_create(struct mlx5_flow_namespace *ns,
int level, int prio,
+ int num_reserved_entries,
int max_num_groups, u32 flags)
{
struct mlx5_flow_table_attr ft_attr = {};
- ft_attr.autogroup.num_reserved_entries = 1;
+ ft_attr.autogroup.num_reserved_entries = num_reserved_entries;
ft_attr.autogroup.max_num_groups = max_num_groups;
ft_attr.max_fte = NUM_IPSEC_FTE;
ft_attr.level = level;
@@ -147,22 +163,35 @@ static struct mlx5_flow_table *ipsec_ft_create(struct mlx5_flow_namespace *ns,
static void ipsec_rx_status_drop_destroy(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx)
{
- mlx5_del_flow_rules(rx->status_drops.all.rule);
- mlx5_fc_destroy(ipsec->mdev, rx->status_drops.all.fc);
- mlx5_destroy_flow_group(rx->status_drops.drop_all_group);
+ mlx5_del_flow_rules(rx->status_checks.all.rule);
+ mlx5_fc_destroy(ipsec->mdev, rx->status_checks.all.fc);
+ mlx5_destroy_flow_group(rx->status_checks.drop_all_group);
}
static void ipsec_rx_status_pass_destroy(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx)
{
- mlx5_del_flow_rules(rx->status.rule);
+ mlx5_del_flow_rules(rx->status_checks.packet_offload_pass_rule);
+ mlx5_del_flow_rules(rx->status_checks.crypto_offload_pass_rule);
+}
- if (rx != ipsec->rx_esw)
- return;
+static void ipsec_rx_rule_add_match_obj(struct mlx5e_ipsec_sa_entry *sa_entry,
+ struct mlx5e_ipsec_rx *rx,
+ struct mlx5_flow_spec *spec)
+{
+ struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
-#ifdef CONFIG_MLX5_ESWITCH
- mlx5_chains_put_table(esw_chains(ipsec->mdev->priv.eswitch), 0, 1, 0);
-#endif
+ if (rx == ipsec->rx_esw) {
+ mlx5_esw_ipsec_rx_rule_add_match_obj(sa_entry, spec);
+ } else {
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ misc_parameters_2.metadata_reg_c_2);
+ MLX5_SET(fte_match_param, spec->match_value,
+ misc_parameters_2.metadata_reg_c_2,
+ sa_entry->ipsec_obj_id | BIT(31));
+
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
+ }
}
static int rx_add_rule_drop_auth_trailer(struct mlx5e_ipsec_sa_entry *sa_entry,
@@ -194,17 +223,14 @@ static int rx_add_rule_drop_auth_trailer(struct mlx5e_ipsec_sa_entry *sa_entry,
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
flow_act.flags = FLOW_ACT_NO_APPEND;
dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest.counter_id = mlx5_fc_id(flow_counter);
+ dest.counter = flow_counter;
if (rx == ipsec->rx_esw)
spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.ipsec_syndrome);
MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.ipsec_syndrome, 1);
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_2);
- MLX5_SET(fte_match_param, spec->match_value,
- misc_parameters_2.metadata_reg_c_2,
- sa_entry->ipsec_obj_id | BIT(31));
spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
+ ipsec_rx_rule_add_match_obj(sa_entry, rx, spec);
rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -223,7 +249,7 @@ static int rx_add_rule_drop_auth_trailer(struct mlx5e_ipsec_sa_entry *sa_entry,
}
sa_entry->ipsec_rule.trailer.fc = flow_counter;
- dest.counter_id = mlx5_fc_id(flow_counter);
+ dest.counter = flow_counter;
MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.ipsec_syndrome, 2);
rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
@@ -275,16 +301,14 @@ static int rx_add_rule_drop_replay(struct mlx5e_ipsec_sa_entry *sa_entry, struct
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
flow_act.flags = FLOW_ACT_NO_APPEND;
dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest.counter_id = mlx5_fc_id(flow_counter);
+ dest.counter = flow_counter;
if (rx == ipsec->rx_esw)
spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_4);
MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_4, 1);
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_2);
- MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_2,
- sa_entry->ipsec_obj_id | BIT(31));
spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
+ ipsec_rx_rule_add_match_obj(sa_entry, rx, spec);
rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -348,7 +372,7 @@ static int ipsec_rx_status_drop_all_create(struct mlx5e_ipsec *ipsec,
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest.counter_id = mlx5_fc_id(flow_counter);
+ dest.counter = flow_counter;
if (rx == ipsec->rx_esw)
spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
@@ -359,9 +383,9 @@ static int ipsec_rx_status_drop_all_create(struct mlx5e_ipsec *ipsec,
goto err_rule;
}
- rx->status_drops.drop_all_group = g;
- rx->status_drops.all.rule = rule;
- rx->status_drops.all.fc = flow_counter;
+ rx->status_checks.drop_all_group = g;
+ rx->status_checks.all.rule = rule;
+ rx->status_checks.all.fc = flow_counter;
kvfree(flow_group_in);
kvfree(spec);
@@ -377,9 +401,52 @@ err_out:
return err;
}
-static int ipsec_rx_status_pass_create(struct mlx5e_ipsec *ipsec,
- struct mlx5e_ipsec_rx *rx,
- struct mlx5_flow_destination *dest)
+static int ipsec_rx_status_pass_group_create(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_table *ft = rx->ft.status;
+ struct mlx5_flow_group *fg;
+ void *match_criteria;
+ u32 *flow_group_in;
+ int err = 0;
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!flow_group_in)
+ return -ENOMEM;
+
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+ MLX5_MATCH_MISC_PARAMETERS_2);
+ match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
+ match_criteria);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ misc_parameters_2.ipsec_syndrome);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ misc_parameters_2.metadata_reg_c_4);
+
+ MLX5_SET(create_flow_group_in, flow_group_in,
+ start_flow_index, ft->max_fte - 3);
+ MLX5_SET(create_flow_group_in, flow_group_in,
+ end_flow_index, ft->max_fte - 2);
+
+ fg = mlx5_create_flow_group(ft, flow_group_in);
+ if (IS_ERR(fg)) {
+ err = PTR_ERR(fg);
+ mlx5_core_warn(ipsec->mdev,
+ "Failed to create rx status pass flow group, err=%d\n",
+ err);
+ }
+ rx->status_checks.pass_group = fg;
+
+ kvfree(flow_group_in);
+ return err;
+}
+
+static struct mlx5_flow_handle *
+ipsec_rx_status_pass_create(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx,
+ struct mlx5_flow_destination *dest,
+ u8 aso_ok)
{
struct mlx5_flow_act flow_act = {};
struct mlx5_flow_handle *rule;
@@ -388,7 +455,7 @@ static int ipsec_rx_status_pass_create(struct mlx5e_ipsec *ipsec,
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
misc_parameters_2.ipsec_syndrome);
@@ -397,11 +464,11 @@ static int ipsec_rx_status_pass_create(struct mlx5e_ipsec *ipsec,
MLX5_SET(fte_match_param, spec->match_value,
misc_parameters_2.ipsec_syndrome, 0);
MLX5_SET(fte_match_param, spec->match_value,
- misc_parameters_2.metadata_reg_c_4, 0);
+ misc_parameters_2.metadata_reg_c_4, aso_ok);
if (rx == ipsec->rx_esw)
spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
- flow_act.flags = FLOW_ACT_NO_APPEND;
+ flow_act.flags = FLOW_ACT_NO_APPEND | FLOW_ACT_IGNORE_FLOW_LEVEL;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
rule = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 2);
@@ -412,19 +479,19 @@ static int ipsec_rx_status_pass_create(struct mlx5e_ipsec *ipsec,
goto err_rule;
}
- rx->status.rule = rule;
kvfree(spec);
- return 0;
+ return rule;
err_rule:
kvfree(spec);
- return err;
+ return ERR_PTR(err);
}
static void mlx5_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx)
{
ipsec_rx_status_pass_destroy(ipsec, rx);
+ mlx5_destroy_flow_group(rx->status_checks.pass_group);
ipsec_rx_status_drop_destroy(ipsec, rx);
}
@@ -432,19 +499,44 @@ static int mlx5_ipsec_rx_status_create(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx,
struct mlx5_flow_destination *dest)
{
+ struct mlx5_flow_destination pol_dest[2];
+ struct mlx5_flow_handle *rule;
int err;
err = ipsec_rx_status_drop_all_create(ipsec, rx);
if (err)
return err;
- err = ipsec_rx_status_pass_create(ipsec, rx, dest);
+ err = ipsec_rx_status_pass_group_create(ipsec, rx);
if (err)
- goto err_pass_create;
+ goto err_pass_group_create;
+
+ rule = ipsec_rx_status_pass_create(ipsec, rx, dest,
+ MLX5_IPSEC_ASO_SW_CRYPTO_OFFLOAD);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ goto err_crypto_offload_pass_create;
+ }
+ rx->status_checks.crypto_offload_pass_rule = rule;
+
+ pol_dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ pol_dest[0].ft = rx->ft.pol;
+ pol_dest[1] = dest[1];
+ rule = ipsec_rx_status_pass_create(ipsec, rx, pol_dest,
+ MLX5_IPSEC_ASO_OK);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ goto err_packet_offload_pass_create;
+ }
+ rx->status_checks.packet_offload_pass_rule = rule;
return 0;
-err_pass_create:
+err_packet_offload_pass_create:
+ mlx5_del_flow_rules(rx->status_checks.crypto_offload_pass_rule);
+err_crypto_offload_pass_create:
+ mlx5_destroy_flow_group(rx->status_checks.pass_group);
+err_pass_group_create:
ipsec_rx_status_drop_destroy(ipsec, rx);
return err;
}
@@ -493,6 +585,15 @@ out:
return err;
}
+static void ipsec_rx_update_default_dest(struct mlx5e_ipsec_rx *rx,
+ struct mlx5_flow_destination *old_dest,
+ struct mlx5_flow_destination *new_dest)
+{
+ mlx5_modify_rule_destination(rx->pol_miss_rule, new_dest, old_dest);
+ mlx5_modify_rule_destination(rx->status_checks.crypto_offload_pass_rule,
+ new_dest, old_dest);
+}
+
static void handle_ipsec_rx_bringup(struct mlx5e_ipsec *ipsec, u32 family)
{
struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, XFRM_DEV_OFFLOAD_PACKET);
@@ -507,8 +608,7 @@ static void handle_ipsec_rx_bringup(struct mlx5e_ipsec *ipsec, u32 family)
new_dest.ft = mlx5_ipsec_fs_roce_ft_get(ipsec->roce, family);
new_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- mlx5_modify_rule_destination(rx->status.rule, &new_dest, &old_dest);
- mlx5_modify_rule_destination(rx->sa.rule, &new_dest, &old_dest);
+ ipsec_rx_update_default_dest(rx, &old_dest, &new_dest);
}
static void handle_ipsec_rx_cleanup(struct mlx5e_ipsec *ipsec, u32 family)
@@ -520,8 +620,7 @@ static void handle_ipsec_rx_cleanup(struct mlx5e_ipsec *ipsec, u32 family)
old_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
new_dest = mlx5_ttc_get_default_dest(mlx5e_fs_get_ttc(ipsec->fs, false),
family2tt(family));
- mlx5_modify_rule_destination(rx->sa.rule, &new_dest, &old_dest);
- mlx5_modify_rule_destination(rx->status.rule, &new_dest, &old_dest);
+ ipsec_rx_update_default_dest(rx, &old_dest, &new_dest);
mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family, ipsec->mdev);
}
@@ -577,13 +676,8 @@ static void ipsec_rx_ft_disconnect(struct mlx5e_ipsec *ipsec, u32 family)
mlx5_ttc_fwd_default_dest(ttc, family2tt(family));
}
-static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
- struct mlx5e_ipsec_rx *rx, u32 family)
+static void ipsec_rx_policy_destroy(struct mlx5e_ipsec_rx *rx)
{
- /* disconnect */
- if (rx != ipsec->rx_esw)
- ipsec_rx_ft_disconnect(ipsec, family);
-
if (rx->chains) {
ipsec_chains_destroy(rx->chains);
} else {
@@ -592,6 +686,29 @@ static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
mlx5_destroy_flow_table(rx->ft.pol);
}
+ if (rx->pol_miss_rule) {
+ mlx5_del_flow_rules(rx->pol_miss_rule);
+ mlx5_destroy_flow_table(rx->pol_miss_ft);
+ }
+}
+
+static void ipsec_rx_sa_selector_destroy(struct mlx5_core_dev *mdev,
+ struct mlx5e_ipsec_rx *rx)
+{
+ mlx5_del_flow_rules(rx->sa_sel.rule);
+ mlx5_fc_destroy(mdev, rx->sa_sel.fc);
+ rx->sa_sel.fc = NULL;
+ mlx5_destroy_flow_group(rx->sa_sel.group);
+ mlx5_destroy_flow_table(rx->ft.sa_sel);
+}
+
+static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx, u32 family)
+{
+ /* disconnect */
+ if (rx != ipsec->rx_esw)
+ ipsec_rx_ft_disconnect(ipsec, family);
+
mlx5_del_flow_rules(rx->sa.rule);
mlx5_destroy_flow_group(rx->sa.group);
mlx5_destroy_flow_table(rx->ft.sa);
@@ -600,7 +717,17 @@ static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
mlx5_ipsec_rx_status_destroy(ipsec, rx);
mlx5_destroy_flow_table(rx->ft.status);
+ ipsec_rx_sa_selector_destroy(mdev, rx);
+
+ ipsec_rx_policy_destroy(rx);
+
mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family, mdev);
+
+#ifdef CONFIG_MLX5_ESWITCH
+ if (rx == ipsec->rx_esw)
+ mlx5_chains_put_table(esw_chains(ipsec->mdev->priv.eswitch),
+ 0, 1, 0);
+#endif
}
static void ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
@@ -652,6 +779,28 @@ static int ipsec_rx_status_pass_dest_get(struct mlx5e_ipsec *ipsec,
return 0;
}
+static void ipsec_rx_sa_miss_dest_get(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx,
+ struct mlx5e_ipsec_rx_create_attr *attr,
+ struct mlx5_flow_destination *dest,
+ struct mlx5_flow_destination *miss_dest)
+{
+ if (rx == ipsec->rx_esw)
+ *miss_dest = *dest;
+ else
+ *miss_dest =
+ mlx5_ttc_get_default_dest(attr->ttc,
+ family2tt(attr->family));
+}
+
+static void ipsec_rx_default_dest_get(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx,
+ struct mlx5_flow_destination *dest)
+{
+ dest->type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest->ft = rx->pol_miss_ft;
+}
+
static void ipsec_rx_ft_connect(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx,
struct mlx5e_ipsec_rx_create_attr *attr)
@@ -659,15 +808,219 @@ static void ipsec_rx_ft_connect(struct mlx5e_ipsec *ipsec,
struct mlx5_flow_destination dest = {};
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest.ft = rx->ft.pol;
+ dest.ft = rx->ft.sa;
mlx5_ttc_fwd_dest(attr->ttc, family2tt(attr->family), &dest);
}
+static int ipsec_rx_chains_create_miss(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx,
+ struct mlx5e_ipsec_rx_create_attr *attr,
+ struct mlx5_flow_destination *dest)
+{
+ struct mlx5_flow_table_attr ft_attr = {};
+ MLX5_DECLARE_FLOW_ACT(flow_act);
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_table *ft;
+ int err;
+
+ if (rx == ipsec->rx_esw) {
+ /* No need to create miss table for switchdev mode,
+ * just set it to the root chain table.
+ */
+ rx->pol_miss_ft = dest->ft;
+ return 0;
+ }
+
+ ft_attr.max_fte = 1;
+ ft_attr.autogroup.max_num_groups = 1;
+ ft_attr.level = attr->pol_level;
+ ft_attr.prio = attr->prio;
+
+ ft = mlx5_create_auto_grouped_flow_table(attr->ns, &ft_attr);
+ if (IS_ERR(ft))
+ return PTR_ERR(ft);
+
+ rule = mlx5_add_flow_rules(ft, NULL, &flow_act, dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ goto err_rule;
+ }
+
+ rx->pol_miss_ft = ft;
+ rx->pol_miss_rule = rule;
+
+ return 0;
+
+err_rule:
+ mlx5_destroy_flow_table(ft);
+ return err;
+}
+
+static int ipsec_rx_policy_create(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx,
+ struct mlx5e_ipsec_rx_create_attr *attr,
+ struct mlx5_flow_destination *dest)
+{
+ struct mlx5_flow_destination default_dest;
+ struct mlx5_core_dev *mdev = ipsec->mdev;
+ struct mlx5_flow_table *ft;
+ int err;
+
+ err = ipsec_rx_chains_create_miss(ipsec, rx, attr, dest);
+ if (err)
+ return err;
+
+ ipsec_rx_default_dest_get(ipsec, rx, &default_dest);
+
+ if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PRIO) {
+ rx->chains = ipsec_chains_create(mdev,
+ default_dest.ft,
+ attr->chains_ns,
+ attr->prio,
+ attr->sa_level,
+ &rx->ft.pol);
+ if (IS_ERR(rx->chains))
+ err = PTR_ERR(rx->chains);
+ } else {
+ ft = ipsec_ft_create(attr->ns, attr->pol_level,
+ attr->prio, 1, 2, 0);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ goto err_out;
+ }
+ rx->ft.pol = ft;
+
+ err = ipsec_miss_create(mdev, rx->ft.pol, &rx->pol,
+ &default_dest);
+ if (err)
+ mlx5_destroy_flow_table(rx->ft.pol);
+ }
+
+ if (!err)
+ return 0;
+
+err_out:
+ if (rx->pol_miss_rule) {
+ mlx5_del_flow_rules(rx->pol_miss_rule);
+ mlx5_destroy_flow_table(rx->pol_miss_ft);
+ }
+ return err;
+}
+
+static int ipsec_rx_sa_selector_create(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx,
+ struct mlx5e_ipsec_rx_create_attr *attr)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_core_dev *mdev = ipsec->mdev;
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_destination dest;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_group *fg;
+ u32 *flow_group_in;
+ struct mlx5_fc *fc;
+ int err;
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!flow_group_in)
+ return -ENOMEM;
+
+ ft = ipsec_ft_create(attr->ns, attr->status_level, attr->prio, 1,
+ MLX5_IPSEC_FS_SA_SELECTOR_MAX_NUM_GROUPS, 0);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ mlx5_core_err(mdev, "Failed to create RX SA selector flow table, err=%d\n",
+ err);
+ goto err_ft;
+ }
+
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index,
+ ft->max_fte - 1);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
+ ft->max_fte - 1);
+ fg = mlx5_create_flow_group(ft, flow_group_in);
+ if (IS_ERR(fg)) {
+ err = PTR_ERR(fg);
+ mlx5_core_err(mdev, "Failed to create RX SA selector miss group, err=%d\n",
+ err);
+ goto err_fg;
+ }
+
+ fc = mlx5_fc_create(mdev, false);
+ if (IS_ERR(fc)) {
+ err = PTR_ERR(fc);
+ mlx5_core_err(mdev,
+ "Failed to create ipsec RX SA selector miss rule counter, err=%d\n",
+ err);
+ goto err_cnt;
+ }
+
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest.counter = fc;
+ flow_act.action =
+ MLX5_FLOW_CONTEXT_ACTION_COUNT | MLX5_FLOW_CONTEXT_ACTION_DROP;
+
+ rule = mlx5_add_flow_rules(ft, NULL, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev, "Failed to create RX SA selector miss drop rule, err=%d\n",
+ err);
+ goto err_rule;
+ }
+
+ rx->ft.sa_sel = ft;
+ rx->sa_sel.group = fg;
+ rx->sa_sel.fc = fc;
+ rx->sa_sel.rule = rule;
+
+ kvfree(flow_group_in);
+
+ return 0;
+
+err_rule:
+ mlx5_fc_destroy(mdev, fc);
+err_cnt:
+ mlx5_destroy_flow_group(fg);
+err_fg:
+ mlx5_destroy_flow_table(ft);
+err_ft:
+ kvfree(flow_group_in);
+ return err;
+}
+
+/* The decryption processing is as follows:
+ *
+ * +----------+ +-------------+
+ * | | | |
+ * | Kernel <--------------+----------+ policy miss <------------+
+ * | | ^ | | ^
+ * +----^-----+ | +-------------+ |
+ * | crypto |
+ * miss offload ok allow/default
+ * ^ ^ ^
+ * | | packet |
+ * +----+---------+ +----+-------------+ offload ok +------+---+
+ * | | | | (no UPSPEC) | |
+ * | SA (decrypt) +-----> status +--->------->----+ policy |
+ * | | | | | |
+ * +--------------+ ++---------+-------+ +-^----+---+
+ * | | | |
+ * v packet +-->->---+ v
+ * | offload ok match |
+ * fails (with UPSPEC) | block
+ * | | +-------------+-+ |
+ * v v | | miss v
+ * drop +---> SA sel +--------->drop
+ * | |
+ * +---------------+
+ */
+
static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx, u32 family)
{
+ struct mlx5_flow_destination dest[2], miss_dest;
struct mlx5e_ipsec_rx_create_attr attr;
- struct mlx5_flow_destination dest[2];
struct mlx5_flow_table *ft;
u32 flags = 0;
int err;
@@ -678,80 +1031,61 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
if (err)
return err;
- ft = ipsec_ft_create(attr.ns, attr.status_level, attr.prio, 3, 0);
+ ft = ipsec_ft_create(attr.ns, attr.status_level, attr.prio, 3, 4, 0);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
goto err_fs_ft_status;
}
rx->ft.status = ft;
- dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest[1].counter_id = mlx5_fc_id(rx->fc->cnt);
- err = mlx5_ipsec_rx_status_create(ipsec, rx, dest);
+ err = ipsec_rx_sa_selector_create(ipsec, rx, &attr);
if (err)
- goto err_add;
+ goto err_fs_ft_sa_sel;
/* Create FT */
if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_TUNNEL)
rx->allow_tunnel_mode = mlx5_eswitch_block_encap(mdev);
if (rx->allow_tunnel_mode)
flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
- ft = ipsec_ft_create(attr.ns, attr.sa_level, attr.prio, 2, flags);
+ ft = ipsec_ft_create(attr.ns, attr.sa_level, attr.prio, 1, 2, flags);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
goto err_fs_ft;
}
rx->ft.sa = ft;
- err = ipsec_miss_create(mdev, rx->ft.sa, &rx->sa, dest);
+ ipsec_rx_sa_miss_dest_get(ipsec, rx, &attr, &dest[0], &miss_dest);
+ err = ipsec_miss_create(mdev, rx->ft.sa, &rx->sa, &miss_dest);
if (err)
goto err_fs;
- if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PRIO) {
- rx->chains = ipsec_chains_create(mdev, rx->ft.sa,
- attr.chains_ns,
- attr.prio,
- attr.pol_level,
- &rx->ft.pol);
- if (IS_ERR(rx->chains)) {
- err = PTR_ERR(rx->chains);
- goto err_pol_ft;
- }
-
- goto connect;
- }
+ err = ipsec_rx_policy_create(ipsec, rx, &attr, &dest[0]);
+ if (err)
+ goto err_policy;
- ft = ipsec_ft_create(attr.ns, attr.pol_level, attr.prio, 2, 0);
- if (IS_ERR(ft)) {
- err = PTR_ERR(ft);
- goto err_pol_ft;
- }
- rx->ft.pol = ft;
- memset(dest, 0x00, 2 * sizeof(*dest));
- dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest[0].ft = rx->ft.sa;
- err = ipsec_miss_create(mdev, rx->ft.pol, &rx->pol, dest);
+ dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest[1].counter = rx->fc->cnt;
+ err = mlx5_ipsec_rx_status_create(ipsec, rx, dest);
if (err)
- goto err_pol_miss;
+ goto err_add;
-connect:
/* connect */
if (rx != ipsec->rx_esw)
ipsec_rx_ft_connect(ipsec, rx, &attr);
return 0;
-err_pol_miss:
- mlx5_destroy_flow_table(rx->ft.pol);
-err_pol_ft:
+err_add:
+ ipsec_rx_policy_destroy(rx);
+err_policy:
mlx5_del_flow_rules(rx->sa.rule);
mlx5_destroy_flow_group(rx->sa.group);
err_fs:
mlx5_destroy_flow_table(rx->ft.sa);
-err_fs_ft:
if (rx->allow_tunnel_mode)
mlx5_eswitch_unblock_encap(mdev);
- mlx5_ipsec_rx_status_destroy(ipsec, rx);
-err_add:
+err_fs_ft:
+ ipsec_rx_sa_selector_destroy(mdev, rx);
+err_fs_ft_sa_sel:
mlx5_destroy_flow_table(rx->ft.status);
err_fs_ft_status:
mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family, mdev);
@@ -873,7 +1207,7 @@ static int ipsec_counter_rule_tx(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest.counter_id = mlx5_fc_id(tx->fc->cnt);
+ dest.counter = tx->fc->cnt;
fte = mlx5_add_flow_rules(tx->ft.status, spec, &flow_act, &dest, 1);
if (IS_ERR(fte)) {
err = PTR_ERR(fte);
@@ -941,7 +1275,7 @@ static int tx_create(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx,
int err;
ipsec_tx_create_attr_set(ipsec, tx, &attr);
- ft = ipsec_ft_create(tx->ns, attr.cnt_level, attr.prio, 1, 0);
+ ft = ipsec_ft_create(tx->ns, attr.cnt_level, attr.prio, 1, 1, 0);
if (IS_ERR(ft))
return PTR_ERR(ft);
tx->ft.status = ft;
@@ -954,7 +1288,7 @@ static int tx_create(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx,
tx->allow_tunnel_mode = mlx5_eswitch_block_encap(mdev);
if (tx->allow_tunnel_mode)
flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
- ft = ipsec_ft_create(tx->ns, attr.sa_level, attr.prio, 4, flags);
+ ft = ipsec_ft_create(tx->ns, attr.sa_level, attr.prio, 1, 4, flags);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
goto err_sa_ft;
@@ -982,7 +1316,7 @@ static int tx_create(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx,
goto connect_roce;
}
- ft = ipsec_ft_create(tx->ns, attr.pol_level, attr.prio, 2, 0);
+ ft = ipsec_ft_create(tx->ns, attr.pol_level, attr.prio, 1, 2, 0);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
goto err_pol_ft;
@@ -1150,9 +1484,14 @@ static void tx_ft_put_policy(struct mlx5e_ipsec *ipsec, u32 prio, int type)
mutex_unlock(&tx->ft.mutex);
}
-static void setup_fte_addr4(struct mlx5_flow_spec *spec, __be32 *saddr,
- __be32 *daddr)
+static void setup_fte_addr4(struct mlx5_flow_spec *spec,
+ struct mlx5e_ipsec_addr *addrs)
{
+ __be32 *saddr = &addrs->saddr.a4;
+ __be32 *smask = &addrs->smask.m4;
+ __be32 *daddr = &addrs->daddr.a4;
+ __be32 *dmask = &addrs->dmask.m4;
+
if (!*saddr && !*daddr)
return;
@@ -1164,21 +1503,26 @@ static void setup_fte_addr4(struct mlx5_flow_spec *spec, __be32 *saddr,
if (*saddr) {
memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4), saddr, 4);
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
- outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4);
+ memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4), smask, 4);
}
if (*daddr) {
memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4), daddr, 4);
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
- outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
+ memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4), dmask, 4);
}
}
-static void setup_fte_addr6(struct mlx5_flow_spec *spec, __be32 *saddr,
- __be32 *daddr)
+static void setup_fte_addr6(struct mlx5_flow_spec *spec,
+ struct mlx5e_ipsec_addr *addrs)
{
+ __be32 *saddr = addrs->saddr.a6;
+ __be32 *smask = addrs->smask.m6;
+ __be32 *daddr = addrs->daddr.a6;
+ __be32 *dmask = addrs->dmask.m6;
+
if (addr6_all_zero(saddr) && addr6_all_zero(daddr))
return;
@@ -1190,15 +1534,15 @@ static void setup_fte_addr6(struct mlx5_flow_spec *spec, __be32 *saddr,
if (!addr6_all_zero(saddr)) {
memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), saddr, 16);
- memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
- outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), 0xff, 16);
+ memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), smask, 16);
}
if (!addr6_all_zero(daddr)) {
memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), daddr, 16);
- memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
- outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 0xff, 16);
+ memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), dmask, 16);
}
}
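
The two hunks above stop forcing an all-ones pattern (MLX5_SET_TO_ONES()/memset(..., 0xff, ...)) into match_criteria and instead copy the caller-supplied source and destination masks, so a rule can match an address prefix rather than only an exact address. A minimal userspace sketch of the value/mask semantics this relies on (fte_addr_matches() is a hypothetical illustration, not driver code):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* A flow entry matches when the packet bits selected by the mask equal
 * the rule value bits selected by the same mask; an all-ones mask is
 * just the exact-match special case the old code hard-coded.
 */
static bool fte_addr_matches(const uint8_t *pkt, const uint8_t *value,
			     const uint8_t *mask, size_t len)
{
	for (size_t i = 0; i < len; i++)
		if ((pkt[i] & mask[i]) != (value[i] & mask[i]))
			return false;
	return true;
}

With a /24 mask on an IPv4 address, for example, the same entry covers the whole subnet.
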
@@ -1340,7 +1684,8 @@ static int setup_modify_header(struct mlx5e_ipsec *ipsec, int type, u32 val, u8
MLX5_ACTION_TYPE_SET);
MLX5_SET(set_action_in, action[2], field,
MLX5_ACTION_IN_FIELD_METADATA_REG_C_4);
- MLX5_SET(set_action_in, action[2], data, 0);
+ MLX5_SET(set_action_in, action[2], data,
+ MLX5_IPSEC_ASO_SW_CRYPTO_OFFLOAD);
MLX5_SET(set_action_in, action[2], offset, 0);
MLX5_SET(set_action_in, action[2], length, 32);
}
@@ -1387,7 +1732,7 @@ setup_pkt_tunnel_reformat(struct mlx5_core_dev *mdev,
if (attrs->dir == XFRM_DEV_OFFLOAD_OUT) {
bfflen += sizeof(*esp_hdr) + 8;
- switch (attrs->family) {
+ switch (attrs->addrs.family) {
case AF_INET:
bfflen += sizeof(*iphdr);
break;
@@ -1404,7 +1749,7 @@ setup_pkt_tunnel_reformat(struct mlx5_core_dev *mdev,
return -ENOMEM;
eth_hdr = (struct ethhdr *)reformatbf;
- switch (attrs->family) {
+ switch (attrs->addrs.family) {
case AF_INET:
eth_hdr->h_proto = htons(ETH_P_IP);
break;
@@ -1427,11 +1772,11 @@ setup_pkt_tunnel_reformat(struct mlx5_core_dev *mdev,
reformat_params->param_0 = attrs->authsize;
hdr = reformatbf + sizeof(*eth_hdr);
- switch (attrs->family) {
+ switch (attrs->addrs.family) {
case AF_INET:
iphdr = (struct iphdr *)hdr;
- memcpy(&iphdr->saddr, &attrs->saddr.a4, 4);
- memcpy(&iphdr->daddr, &attrs->daddr.a4, 4);
+ memcpy(&iphdr->saddr, &attrs->addrs.saddr.a4, 4);
+ memcpy(&iphdr->daddr, &attrs->addrs.daddr.a4, 4);
iphdr->version = 4;
iphdr->ihl = 5;
iphdr->ttl = IPSEC_TUNNEL_DEFAULT_TTL;
@@ -1440,8 +1785,8 @@ setup_pkt_tunnel_reformat(struct mlx5_core_dev *mdev,
break;
case AF_INET6:
ipv6hdr = (struct ipv6hdr *)hdr;
- memcpy(&ipv6hdr->saddr, &attrs->saddr.a6, 16);
- memcpy(&ipv6hdr->daddr, &attrs->daddr.a6, 16);
+ memcpy(&ipv6hdr->saddr, &attrs->addrs.saddr.a6, 16);
+ memcpy(&ipv6hdr->daddr, &attrs->addrs.daddr.a6, 16);
ipv6hdr->nexthdr = IPPROTO_ESP;
ipv6hdr->version = 6;
ipv6hdr->hop_limit = IPSEC_TUNNEL_DEFAULT_TTL;
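
For tunnel mode the reformat buffer starts with an Ethernet header followed by the outer IP header taken from attrs->addrs, as the hunks above show; the IPv6 branch also sets nexthdr to IPPROTO_ESP. A hedged userspace sketch of the IPv4 layout (the helper name, TTL constant, and the assumption that the protocol field is filled the same way as nexthdr are illustrative, not taken from the driver):

#include <linux/ip.h>
#include <netinet/in.h>
#include <stdint.h>
#include <string.h>

#define TUNNEL_DEFAULT_TTL 0x40	/* stands in for IPSEC_TUNNEL_DEFAULT_TTL */

/* Write a minimal outer IPv4 header for ESP tunnel encapsulation at 'hdr';
 * saddr/daddr are the big-endian tunnel endpoints (attrs->addrs.*.a4).
 */
static void build_outer_ipv4(void *hdr, uint32_t saddr, uint32_t daddr)
{
	struct iphdr *iph = hdr;

	memset(iph, 0, sizeof(*iph));
	memcpy(&iph->saddr, &saddr, 4);
	memcpy(&iph->daddr, &daddr, 4);
	iph->version = 4;
	iph->ihl = 5;			/* 20-byte header, no options */
	iph->ttl = TUNNEL_DEFAULT_TTL;
	iph->protocol = IPPROTO_ESP;	/* assumption: next header is ESP */
}
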
@@ -1475,7 +1820,7 @@ static int get_reformat_type(struct mlx5_accel_esp_xfrm_attrs *attrs)
return MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT_OVER_UDP;
return MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT;
case XFRM_DEV_OFFLOAD_OUT:
- if (attrs->family == AF_INET) {
+ if (attrs->addrs.family == AF_INET) {
if (attrs->encap)
return MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_UDPV4;
return MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV4;
@@ -1576,6 +1921,85 @@ static int setup_pkt_reformat(struct mlx5e_ipsec *ipsec,
return 0;
}
+static int rx_add_rule_sa_selector(struct mlx5e_ipsec_sa_entry *sa_entry,
+ struct mlx5e_ipsec_rx *rx,
+ struct upspec *upspec)
+{
+ struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
+ struct mlx5_core_dev *mdev = ipsec->mdev;
+ struct mlx5_flow_destination dest[2];
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ int err = 0;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ misc_parameters_2.ipsec_syndrome);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ misc_parameters_2.metadata_reg_c_4);
+ MLX5_SET(fte_match_param, spec->match_value,
+ misc_parameters_2.ipsec_syndrome, 0);
+ MLX5_SET(fte_match_param, spec->match_value,
+ misc_parameters_2.metadata_reg_c_4, 0);
+ spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
+
+ ipsec_rx_rule_add_match_obj(sa_entry, rx, spec);
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ flow_act.flags = FLOW_ACT_IGNORE_FLOW_LEVEL;
+ dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest[0].ft = rx->ft.sa_sel;
+ dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest[1].counter = rx->fc->cnt;
+
+ rule = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 2);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev,
+ "Failed to add ipsec rx pass rule, err=%d\n",
+ err);
+ goto err_add_status_pass_rule;
+ }
+
+ sa_entry->ipsec_rule.status_pass = rule;
+
+ MLX5_SET(fte_match_param, spec->match_criteria,
+ misc_parameters_2.ipsec_syndrome, 0);
+ MLX5_SET(fte_match_param, spec->match_criteria,
+ misc_parameters_2.metadata_reg_c_4, 0);
+
+ setup_fte_upper_proto_match(spec, upspec);
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest[0].ft = rx->ft.pol;
+
+ rule = mlx5_add_flow_rules(rx->ft.sa_sel, spec, &flow_act, &dest[0], 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev,
+ "Failed to add ipsec rx sa selector rule, err=%d\n",
+ err);
+ goto err_add_sa_sel_rule;
+ }
+
+ sa_entry->ipsec_rule.sa_sel = rule;
+
+ kvfree(spec);
+ return 0;
+
+err_add_sa_sel_rule:
+ mlx5_del_flow_rules(sa_entry->ipsec_rule.status_pass);
+err_add_status_pass_rule:
+ kvfree(spec);
+ return err;
+}
+
static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
{
struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
@@ -1589,7 +2013,7 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
struct mlx5_fc *counter;
int err = 0;
- rx = rx_ft_get(mdev, ipsec, attrs->family, attrs->type);
+ rx = rx_ft_get(mdev, ipsec, attrs->addrs.family, attrs->type);
if (IS_ERR(rx))
return PTR_ERR(rx);
@@ -1599,16 +2023,15 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
goto err_alloc;
}
- if (attrs->family == AF_INET)
- setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
+ if (attrs->addrs.family == AF_INET)
+ setup_fte_addr4(spec, &attrs->addrs);
else
- setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
+ setup_fte_addr6(spec, &attrs->addrs);
setup_fte_spi(spec, attrs->spi, attrs->encap);
if (!attrs->encap)
setup_fte_esp(spec);
setup_fte_no_frags(spec);
- setup_fte_upper_proto_match(spec, &attrs->upspec);
if (!attrs->drop) {
if (rx != ipsec->rx_esw)
@@ -1649,13 +2072,20 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest[0].ft = rx->ft.status;
dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest[1].counter_id = mlx5_fc_id(counter);
+ dest[1].counter = counter;
rule = mlx5_add_flow_rules(rx->ft.sa, spec, &flow_act, dest, 2);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
mlx5_core_err(mdev, "fail to add RX ipsec rule err=%d\n", err);
goto err_add_flow;
}
+
+ if (attrs->upspec.proto && attrs->type == XFRM_DEV_OFFLOAD_PACKET) {
+ err = rx_add_rule_sa_selector(sa_entry, rx, &attrs->upspec);
+ if (err)
+ goto err_add_sa_sel;
+ }
+
if (attrs->type == XFRM_DEV_OFFLOAD_PACKET)
err = rx_add_rule_drop_replay(sa_entry, rx);
if (err)
@@ -1679,6 +2109,11 @@ err_drop_reason:
mlx5_fc_destroy(mdev, sa_entry->ipsec_rule.replay.fc);
}
err_add_replay:
+ if (sa_entry->ipsec_rule.sa_sel) {
+ mlx5_del_flow_rules(sa_entry->ipsec_rule.sa_sel);
+ mlx5_del_flow_rules(sa_entry->ipsec_rule.status_pass);
+ }
+err_add_sa_sel:
mlx5_del_flow_rules(rule);
err_add_flow:
mlx5_fc_destroy(mdev, counter);
@@ -1691,7 +2126,7 @@ err_pkt_reformat:
err_mod_header:
kvfree(spec);
err_alloc:
- rx_ft_put(ipsec, attrs->family, attrs->type);
+ rx_ft_put(ipsec, attrs->addrs.family, attrs->type);
return err;
}
@@ -1718,23 +2153,21 @@ static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
goto err_alloc;
}
- if (attrs->family == AF_INET)
- setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
- else
- setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
-
setup_fte_no_frags(spec);
setup_fte_upper_proto_match(spec, &attrs->upspec);
switch (attrs->type) {
case XFRM_DEV_OFFLOAD_CRYPTO:
+ if (attrs->addrs.family == AF_INET)
+ setup_fte_addr4(spec, &attrs->addrs);
+ else
+ setup_fte_addr6(spec, &attrs->addrs);
setup_fte_spi(spec, attrs->spi, false);
setup_fte_esp(spec);
setup_fte_reg_a(spec);
break;
case XFRM_DEV_OFFLOAD_PACKET:
- if (attrs->reqid)
- setup_fte_reg_c4(spec, attrs->reqid);
+ setup_fte_reg_c4(spec, attrs->reqid);
err = setup_pkt_reformat(ipsec, attrs, &flow_act);
if (err)
goto err_pkt_reformat;
@@ -1762,7 +2195,7 @@ static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
dest[0].ft = tx->ft.status;
dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest[1].counter_id = mlx5_fc_id(counter);
+ dest[1].counter = counter;
rule = mlx5_add_flow_rules(tx->ft.sa, spec, &flow_act, dest, 2);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -1812,10 +2245,10 @@ static int tx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
}
tx = ipsec_tx(ipsec, attrs->type);
- if (attrs->family == AF_INET)
- setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
+ if (attrs->addrs.family == AF_INET)
+ setup_fte_addr4(spec, &attrs->addrs);
else
- setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
+ setup_fte_addr6(spec, &attrs->addrs);
setup_fte_no_frags(spec);
setup_fte_upper_proto_match(spec, &attrs->upspec);
@@ -1835,7 +2268,7 @@ static int tx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest[dstn].counter_id = mlx5_fc_id(tx->fc->drop);
+ dest[dstn].counter = tx->fc->drop;
dstn++;
break;
default:
@@ -1885,12 +2318,12 @@ static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
struct mlx5e_ipsec_rx *rx;
int err, dstn = 0;
- ft = rx_ft_get_policy(mdev, pol_entry->ipsec, attrs->family, attrs->prio,
- attrs->type);
+ ft = rx_ft_get_policy(mdev, pol_entry->ipsec, attrs->addrs.family,
+ attrs->prio, attrs->type);
if (IS_ERR(ft))
return PTR_ERR(ft);
- rx = ipsec_rx(pol_entry->ipsec, attrs->family, attrs->type);
+ rx = ipsec_rx(pol_entry->ipsec, attrs->addrs.family, attrs->type);
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec) {
@@ -1898,10 +2331,10 @@ static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
goto err_alloc;
}
- if (attrs->family == AF_INET)
- setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
+ if (attrs->addrs.family == AF_INET)
+ setup_fte_addr4(spec, &attrs->addrs);
else
- setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
+ setup_fte_addr6(spec, &attrs->addrs);
setup_fte_no_frags(spec);
setup_fte_upper_proto_match(spec, &attrs->upspec);
@@ -1913,7 +2346,7 @@ static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
case XFRM_POLICY_BLOCK:
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest[dstn].counter_id = mlx5_fc_id(rx->fc->drop);
+ dest[dstn].counter = rx->fc->drop;
dstn++;
break;
default:
@@ -1925,8 +2358,7 @@ static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
flow_act.flags |= FLOW_ACT_NO_APPEND;
if (rx == ipsec->rx_esw && rx->chains)
flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
- dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest[dstn].ft = rx->ft.sa;
+ ipsec_rx_default_dest_get(ipsec, rx, &dest[dstn]);
dstn++;
rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, dstn);
if (IS_ERR(rule)) {
@@ -1942,7 +2374,8 @@ static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
err_action:
kvfree(spec);
err_alloc:
- rx_ft_put_policy(pol_entry->ipsec, attrs->family, attrs->prio, attrs->type);
+ rx_ft_put_policy(pol_entry->ipsec, attrs->addrs.family, attrs->prio,
+ attrs->type);
return err;
}
@@ -2063,6 +2496,7 @@ void mlx5e_accel_ipsec_fs_read_stats(struct mlx5e_priv *priv, void *ipsec_stats)
stats->ipsec_rx_bytes = 0;
stats->ipsec_rx_drop_pkts = 0;
stats->ipsec_rx_drop_bytes = 0;
+ stats->ipsec_rx_drop_mismatch_sa_sel = 0;
stats->ipsec_tx_pkts = 0;
stats->ipsec_tx_bytes = 0;
stats->ipsec_tx_drop_pkts = 0;
@@ -2072,6 +2506,9 @@ void mlx5e_accel_ipsec_fs_read_stats(struct mlx5e_priv *priv, void *ipsec_stats)
mlx5_fc_query(mdev, fc->cnt, &stats->ipsec_rx_pkts, &stats->ipsec_rx_bytes);
mlx5_fc_query(mdev, fc->drop, &stats->ipsec_rx_drop_pkts,
&stats->ipsec_rx_drop_bytes);
+ if (ipsec->rx_ipv4->sa_sel.fc)
+ mlx5_fc_query(mdev, ipsec->rx_ipv4->sa_sel.fc,
+ &stats->ipsec_rx_drop_mismatch_sa_sel, &bytes);
fc = ipsec->tx->fc;
mlx5_fc_query(mdev, fc->cnt, &stats->ipsec_tx_pkts, &stats->ipsec_tx_bytes);
@@ -2100,6 +2537,11 @@ void mlx5e_accel_ipsec_fs_read_stats(struct mlx5e_priv *priv, void *ipsec_stats)
stats->ipsec_tx_drop_pkts += packets;
stats->ipsec_tx_drop_bytes += bytes;
}
+
+ if (ipsec->rx_esw->sa_sel.fc &&
+ !mlx5_fc_query(mdev, ipsec->rx_esw->sa_sel.fc,
+ &packets, &bytes))
+ stats->ipsec_rx_drop_mismatch_sa_sel += packets;
}
}
@@ -2197,12 +2639,18 @@ void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
mlx5_del_flow_rules(ipsec_rule->auth.rule);
mlx5_fc_destroy(mdev, ipsec_rule->auth.fc);
+ if (ipsec_rule->sa_sel) {
+ mlx5_del_flow_rules(ipsec_rule->sa_sel);
+ mlx5_del_flow_rules(ipsec_rule->status_pass);
+ }
+
if (ipsec_rule->replay.rule) {
mlx5_del_flow_rules(ipsec_rule->replay.rule);
mlx5_fc_destroy(mdev, ipsec_rule->replay.fc);
}
mlx5_esw_ipsec_rx_id_mapping_remove(sa_entry);
- rx_ft_put(sa_entry->ipsec, sa_entry->attrs.family, sa_entry->attrs.type);
+ rx_ft_put(sa_entry->ipsec, sa_entry->attrs.addrs.family,
+ sa_entry->attrs.type);
}
int mlx5e_accel_ipsec_fs_add_pol(struct mlx5e_ipsec_pol_entry *pol_entry)
@@ -2238,7 +2686,8 @@ void mlx5e_accel_ipsec_fs_del_pol(struct mlx5e_ipsec_pol_entry *pol_entry)
mlx5e_ipsec_unblock_tc_offload(pol_entry->ipsec->mdev);
if (pol_entry->attrs.dir == XFRM_DEV_OFFLOAD_IN) {
- rx_ft_put_policy(pol_entry->ipsec, pol_entry->attrs.family,
+ rx_ft_put_policy(pol_entry->ipsec,
+ pol_entry->attrs.addrs.family,
pol_entry->attrs.prio, pol_entry->attrs.type);
return;
}
@@ -2378,7 +2827,7 @@ bool mlx5e_ipsec_fs_tunnel_enabled(struct mlx5e_ipsec_sa_entry *sa_entry)
struct mlx5e_ipsec_rx *rx;
struct mlx5e_ipsec_tx *tx;
- rx = ipsec_rx(sa_entry->ipsec, attrs->family, attrs->type);
+ rx = ipsec_rx(sa_entry->ipsec, attrs->addrs.family, attrs->type);
tx = ipsec_tx(sa_entry->ipsec, attrs->type);
if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT)
return tx->allow_tunnel_mode;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
index 53cfa39188cb..820debf3fbbf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
@@ -91,8 +91,9 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
EXPORT_SYMBOL_GPL(mlx5_ipsec_device_caps);
static void mlx5e_ipsec_packet_setup(void *obj, u32 pdn,
- struct mlx5_accel_esp_xfrm_attrs *attrs)
+ struct mlx5e_ipsec_sa_entry *sa_entry)
{
+ struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
void *aso_ctx;
aso_ctx = MLX5_ADDR_OF(ipsec_obj, obj, ipsec_aso);
@@ -120,8 +121,12 @@ static void mlx5e_ipsec_packet_setup(void *obj, u32 pdn,
* active.
*/
MLX5_SET(ipsec_obj, obj, aso_return_reg, MLX5_IPSEC_ASO_REG_C_4_5);
- if (attrs->dir == XFRM_DEV_OFFLOAD_OUT)
+ if (attrs->dir == XFRM_DEV_OFFLOAD_OUT) {
MLX5_SET(ipsec_aso, aso_ctx, mode, MLX5_IPSEC_ASO_INC_SN);
+ if (!attrs->replay_esn.trigger)
+ MLX5_SET(ipsec_aso, aso_ctx, mode_parameter,
+ sa_entry->esn_state.esn);
+ }
if (attrs->lft.hard_packet_limit != XFRM_INF) {
MLX5_SET(ipsec_aso, aso_ctx, remove_flow_pkt_cnt,
@@ -175,7 +180,7 @@ static int mlx5_create_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry)
res = &mdev->mlx5e_res.hw_objs;
if (attrs->type == XFRM_DEV_OFFLOAD_PACKET)
- mlx5e_ipsec_packet_setup(obj, res->pdn, attrs);
+ mlx5e_ipsec_packet_setup(obj, res->pdn, sa_entry);
err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
if (!err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
index 92bf3fa44a3b..93be388068f8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
@@ -42,6 +42,7 @@ static const struct counter_desc mlx5e_ipsec_hw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_rx_bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_rx_drop_pkts) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_rx_drop_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_rx_drop_mismatch_sa_sel) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_tx_pkts) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_tx_bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_tx_drop_pkts) },
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
index cc9bcc420032..6ab02f3fc291 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
@@ -339,9 +339,13 @@ static int mlx5e_macsec_init_sa_fs(struct macsec_context *ctx,
{
struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
struct mlx5_macsec_fs *macsec_fs = priv->mdev->macsec_fs;
+ const struct macsec_tx_sc *tx_sc = &ctx->secy->tx_sc;
struct mlx5_macsec_rule_attrs rule_attrs;
union mlx5_macsec_rule *macsec_rule;
+ if (is_tx && tx_sc->encoding_sa != sa->assoc_num)
+ return 0;
+
rule_attrs.macsec_obj_id = sa->macsec_obj_id;
rule_attrs.sci = sa->sci;
rule_attrs.assoc_num = sa->assoc_num;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index 8705cffc747f..5fe016e477b3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -1147,6 +1147,7 @@ static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state)
bool reset = true;
int err;
+ netdev_lock(priv->netdev);
mutex_lock(&priv->state_lock);
new_params = priv->channels.params;
@@ -1162,6 +1163,7 @@ static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state)
&trust_state, reset);
mutex_unlock(&priv->state_lock);
+ netdev_unlock(priv->netdev);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index cae39198b4db..3cb8d3bf9044 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -42,6 +42,8 @@
#include "lib/clock.h"
#include "en/fs_ethtool.h"
+#define LANES_UNKNOWN 0
+
void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
struct ethtool_drvinfo *drvinfo)
{
@@ -237,14 +239,33 @@ void mlx5e_build_ptys2ethtool_map(void)
ETHTOOL_LINK_MODE_800000baseDR8_2_Full_BIT,
ETHTOOL_LINK_MODE_800000baseSR8_Full_BIT,
ETHTOOL_LINK_MODE_800000baseVR8_Full_BIT);
-}
-
-static void mlx5e_ethtool_get_speed_arr(struct mlx5_core_dev *mdev,
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_200GAUI_1_200GBASE_CR1_KR1, ext,
+ ETHTOOL_LINK_MODE_200000baseCR_Full_BIT,
+ ETHTOOL_LINK_MODE_200000baseKR_Full_BIT,
+ ETHTOOL_LINK_MODE_200000baseDR_Full_BIT,
+ ETHTOOL_LINK_MODE_200000baseDR_2_Full_BIT,
+ ETHTOOL_LINK_MODE_200000baseSR_Full_BIT,
+ ETHTOOL_LINK_MODE_200000baseVR_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_400GAUI_2_400GBASE_CR2_KR2, ext,
+ ETHTOOL_LINK_MODE_400000baseCR2_Full_BIT,
+ ETHTOOL_LINK_MODE_400000baseKR2_Full_BIT,
+ ETHTOOL_LINK_MODE_400000baseDR2_Full_BIT,
+ ETHTOOL_LINK_MODE_400000baseDR2_2_Full_BIT,
+ ETHTOOL_LINK_MODE_400000baseSR2_Full_BIT,
+ ETHTOOL_LINK_MODE_400000baseVR2_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_800GAUI_4_800GBASE_CR4_KR4, ext,
+ ETHTOOL_LINK_MODE_800000baseCR4_Full_BIT,
+ ETHTOOL_LINK_MODE_800000baseKR4_Full_BIT,
+ ETHTOOL_LINK_MODE_800000baseDR4_Full_BIT,
+ ETHTOOL_LINK_MODE_800000baseDR4_2_Full_BIT,
+ ETHTOOL_LINK_MODE_800000baseSR4_Full_BIT,
+ ETHTOOL_LINK_MODE_800000baseVR4_Full_BIT);
+}
+
+static void mlx5e_ethtool_get_speed_arr(bool ext,
struct ptys2ethtool_config **arr,
u32 *size)
{
- bool ext = mlx5_ptys_ext_supported(mdev);
-
*arr = ext ? ptys2ext_ethtool_table : ptys2legacy_ethtool_table;
*size = ext ? ARRAY_SIZE(ptys2ext_ethtool_table) :
ARRAY_SIZE(ptys2legacy_ethtool_table);
@@ -891,37 +912,19 @@ int mlx5e_set_per_queue_coalesce(struct net_device *dev, u32 queue,
return mlx5e_ethtool_set_per_queue_coalesce(priv, queue, coal);
}
-static void ptys2ethtool_supported_link(struct mlx5_core_dev *mdev,
- unsigned long *supported_modes,
- u32 eth_proto_cap)
+static void ptys2ethtool_process_link(u32 eth_eproto, bool ext, bool advertised,
+ unsigned long *modes)
{
- unsigned long proto_cap = eth_proto_cap;
+ unsigned long eproto = eth_eproto;
struct ptys2ethtool_config *table;
u32 max_size;
int proto;
- mlx5e_ethtool_get_speed_arr(mdev, &table, &max_size);
- for_each_set_bit(proto, &proto_cap, max_size)
- bitmap_or(supported_modes, supported_modes,
- table[proto].supported,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
-}
-
-static void ptys2ethtool_adver_link(unsigned long *advertising_modes,
- u32 eth_proto_cap, bool ext)
-{
- unsigned long proto_cap = eth_proto_cap;
- struct ptys2ethtool_config *table;
- u32 max_size;
- int proto;
-
- table = ext ? ptys2ext_ethtool_table : ptys2legacy_ethtool_table;
- max_size = ext ? ARRAY_SIZE(ptys2ext_ethtool_table) :
- ARRAY_SIZE(ptys2legacy_ethtool_table);
-
- for_each_set_bit(proto, &proto_cap, max_size)
- bitmap_or(advertising_modes, advertising_modes,
- table[proto].advertised,
+ mlx5e_ethtool_get_speed_arr(ext, &table, &max_size);
+ for_each_set_bit(proto, &eproto, max_size)
+ bitmap_or(modes, modes,
+ advertised ?
+ table[proto].advertised : table[proto].supported,
__ETHTOOL_LINK_MODE_MASK_NBITS);
}
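
The refactor above collapses the separate "supported" and "advertised" translators into one walk over the PTYS capability bits, OR-ing each set protocol's link-mode bitmap into the output. A reduced userspace sketch of that translation, shrunk from the __ETHTOOL_LINK_MODE_MASK_NBITS bitmap to a single 64-bit word (names are illustrative, not driver code):

#include <stdint.h>

#define MAX_PTYS_PROTO 64

/* proto_caps: bitmask of active PTYS protocol indices.
 * mode_table[i]: ethtool link modes implied by protocol i.
 * Returns the union of link modes for all set protocols.
 */
static uint64_t proto_caps_to_modes(uint64_t proto_caps,
				    const uint64_t mode_table[MAX_PTYS_PROTO])
{
	uint64_t modes = 0;

	for (int proto = 0; proto < MAX_PTYS_PROTO; proto++)
		if (proto_caps & (1ULL << proto))
			modes |= mode_table[proto];
	return modes;
}
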
@@ -931,6 +934,7 @@ static const u32 pplm_fec_2_ethtool[] = {
[MLX5E_FEC_RS_528_514] = ETHTOOL_FEC_RS,
[MLX5E_FEC_RS_544_514] = ETHTOOL_FEC_RS,
[MLX5E_FEC_LLRS_272_257_1] = ETHTOOL_FEC_LLRS,
+ [MLX5E_FEC_RS_544_514_INTERLEAVED_QUAD] = ETHTOOL_FEC_RS,
};
static u32 pplm2ethtool_fec(u_long fec_mode, unsigned long size)
@@ -1074,50 +1078,51 @@ static void ptys2ethtool_supported_advertised_port(struct mlx5_core_dev *mdev,
}
}
-static void get_speed_duplex(struct net_device *netdev,
- u32 eth_proto_oper, bool force_legacy,
- u16 data_rate_oper,
- struct ethtool_link_ksettings *link_ksettings)
+static void get_link_properties(struct net_device *netdev,
+ u32 eth_proto_oper, bool force_legacy,
+ u16 data_rate_oper,
+ struct ethtool_link_ksettings *link_ksettings)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- u32 speed = SPEED_UNKNOWN;
+ const struct mlx5_link_info *info;
u8 duplex = DUPLEX_UNKNOWN;
+ u32 speed = SPEED_UNKNOWN;
+ u32 lanes = LANES_UNKNOWN;
if (!netif_carrier_ok(netdev))
goto out;
- speed = mlx5_port_ptys2speed(priv->mdev, eth_proto_oper, force_legacy);
- if (!speed) {
- if (data_rate_oper)
- speed = 100 * data_rate_oper;
- else
- speed = SPEED_UNKNOWN;
- goto out;
- }
-
- duplex = DUPLEX_FULL;
+ info = mlx5_port_ptys2info(priv->mdev, eth_proto_oper, force_legacy);
+ if (info) {
+ speed = info->speed;
+ lanes = info->lanes;
+ duplex = DUPLEX_FULL;
+ } else if (data_rate_oper)
+ speed = 100 * data_rate_oper;
out:
- link_ksettings->base.speed = speed;
link_ksettings->base.duplex = duplex;
+ link_ksettings->base.speed = speed;
+ link_ksettings->lanes = lanes;
}
static void get_supported(struct mlx5_core_dev *mdev, u32 eth_proto_cap,
struct ethtool_link_ksettings *link_ksettings)
{
unsigned long *supported = link_ksettings->link_modes.supported;
- ptys2ethtool_supported_link(mdev, supported, eth_proto_cap);
+ bool ext = mlx5_ptys_ext_supported(mdev);
+
+ ptys2ethtool_process_link(eth_proto_cap, ext, false, supported);
ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Pause);
}
-static void get_advertising(u32 eth_proto_cap, u8 tx_pause, u8 rx_pause,
+static void get_advertising(u32 eth_proto_admin, u8 tx_pause, u8 rx_pause,
struct ethtool_link_ksettings *link_ksettings,
bool ext)
{
unsigned long *advertising = link_ksettings->link_modes.advertising;
- ptys2ethtool_adver_link(advertising, eth_proto_cap, ext);
-
+ ptys2ethtool_process_link(eth_proto_admin, ext, true, advertising);
if (rx_pause)
ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Pause);
if (tx_pause ^ rx_pause)
@@ -1173,7 +1178,7 @@ static void get_lp_advertising(struct mlx5_core_dev *mdev, u32 eth_proto_lp,
unsigned long *lp_advertising = link_ksettings->link_modes.lp_advertising;
bool ext = mlx5_ptys_ext_supported(mdev);
- ptys2ethtool_adver_link(lp_advertising, eth_proto_lp, ext);
+ ptys2ethtool_process_link(eth_proto_lp, ext, true, lp_advertising);
}
static int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
@@ -1235,8 +1240,8 @@ static int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
get_supported(mdev, eth_proto_cap, link_ksettings);
get_advertising(eth_proto_admin, tx_pause, rx_pause, link_ksettings,
admin_ext);
- get_speed_duplex(priv->netdev, eth_proto_oper, !admin_ext,
- data_rate_oper, link_ksettings);
+ get_link_properties(priv->netdev, eth_proto_oper, !admin_ext,
+ data_rate_oper, link_ksettings);
eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
connector_type = connector_type < MLX5E_CONNECTOR_TYPE_NUMBER ?
@@ -1341,28 +1346,22 @@ static bool ext_link_mode_requested(const unsigned long *adver)
return bitmap_intersects(modes, adver, __ETHTOOL_LINK_MODE_MASK_NBITS);
}
-static bool ext_requested(u8 autoneg, const unsigned long *adver, bool ext_supported)
-{
- bool ext_link_mode = ext_link_mode_requested(adver);
-
- return autoneg == AUTONEG_ENABLE ? ext_link_mode : ext_supported;
-}
-
static int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
const struct ethtool_link_ksettings *link_ksettings)
{
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5_port_eth_proto eproto;
+ struct mlx5_link_info info = {};
const unsigned long *adver;
bool an_changes = false;
u8 an_disable_admin;
bool ext_supported;
+ bool ext_requested;
u8 an_disable_cap;
bool an_disable;
u32 link_modes;
u8 an_status;
u8 autoneg;
- u32 speed;
bool ext;
int err;
@@ -1370,13 +1369,15 @@ static int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
adver = link_ksettings->link_modes.advertising;
autoneg = link_ksettings->base.autoneg;
- speed = link_ksettings->base.speed;
+ info.speed = link_ksettings->base.speed;
+ info.lanes = link_ksettings->lanes;
ext_supported = mlx5_ptys_ext_supported(mdev);
- ext = ext_requested(autoneg, adver, ext_supported);
- if (!ext_supported && ext)
+ ext_requested = ext_link_mode_requested(adver);
+ if (!ext_supported && ext_requested)
return -EOPNOTSUPP;
+ ext = autoneg == AUTONEG_ENABLE ? ext_requested : ext_supported;
ethtool2ptys_adver_func = ext ? mlx5e_ethtool2ptys_ext_adver_link :
mlx5e_ethtool2ptys_adver_link;
err = mlx5_port_query_eth_proto(mdev, 1, ext, &eproto);
@@ -1386,7 +1387,7 @@ static int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
goto out;
}
link_modes = autoneg == AUTONEG_ENABLE ? ethtool2ptys_adver_func(adver) :
- mlx5_port_speed2linkmodes(mdev, speed, !ext);
+ mlx5_port_info2linkmodes(mdev, &info, !ext);
err = mlx5e_speed_validate(priv->netdev, ext, link_modes, autoneg);
if (err)
@@ -1458,18 +1459,27 @@ static int mlx5e_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *
{
struct mlx5e_priv *priv = netdev_priv(netdev);
u32 rss_context = rxfh->rss_context;
+ bool symmetric;
int err;
mutex_lock(&priv->state_lock);
err = mlx5e_rx_res_rss_get_rxfh(priv->rx_res, rss_context,
- rxfh->indir, rxfh->key, &rxfh->hfunc);
+ rxfh->indir, rxfh->key, &rxfh->hfunc, &symmetric);
mutex_unlock(&priv->state_lock);
- return err;
+
+ if (err)
+ return err;
+
+ if (symmetric)
+ rxfh->input_xfrm = RXH_XFRM_SYM_OR_XOR;
+
+ return 0;
}
static int mlx5e_set_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
{
+ bool symmetric = rxfh->input_xfrm == RXH_XFRM_SYM_OR_XOR;
struct mlx5e_priv *priv = netdev_priv(dev);
u32 *rss_context = &rxfh->rss_context;
u8 hfunc = rxfh->hfunc;
@@ -1504,7 +1514,8 @@ static int mlx5e_set_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxf
err = mlx5e_rx_res_rss_set_rxfh(priv->rx_res, *rss_context,
rxfh->indir, rxfh->key,
- hfunc == ETH_RSS_HASH_NO_CHANGE ? NULL : &hfunc);
+ hfunc == ETH_RSS_HASH_NO_CHANGE ? NULL : &hfunc,
+ rxfh->input_xfrm == RXH_XFRM_NO_CHANGE ? NULL : &symmetric);
unlock:
mutex_unlock(&priv->state_lock);
@@ -1675,6 +1686,7 @@ int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
return 0;
info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
@@ -2018,7 +2030,7 @@ static int mlx5e_get_module_eeprom_by_page(struct net_device *netdev,
if (size_read < 0) {
NL_SET_ERR_MSG_FMT_MOD(
extack,
- "Query module eeprom by page failed, read %u bytes, err %d\n",
+ "Query module eeprom by page failed, read %u bytes, err %d",
i, size_read);
return i;
}
@@ -2045,14 +2057,9 @@ int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
if (err)
return err;
- dev_hold(dev);
- rtnl_unlock();
-
err = mlx5_firmware_flash(mdev, fw, NULL);
release_firmware(fw);
- rtnl_lock();
- dev_put(dev);
return err;
}
@@ -2607,12 +2614,14 @@ static void mlx5e_get_ts_stats(struct net_device *netdev,
}
const struct ethtool_ops mlx5e_ethtool_ops = {
+ .cap_link_lanes_supported = true,
.cap_rss_ctx_supported = true,
.rxfh_per_ctx_key = true,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
ETHTOOL_COALESCE_USE_ADAPTIVE |
ETHTOOL_COALESCE_USE_CQE,
+ .supported_input_xfrm = RXH_XFRM_SYM_OR_XOR,
.get_drvinfo = mlx5e_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_link_ext_state = mlx5e_get_link_ext_state,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index 05058710d2c7..04a969128161 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -484,7 +484,9 @@ static int mlx5e_vlan_rx_add_svid(struct mlx5e_flow_steering *fs,
}
/* Need to fix some features.. */
+ netdev_lock(netdev);
netdev_update_features(netdev);
+ netdev_unlock(netdev);
return err;
}
@@ -521,7 +523,9 @@ int mlx5e_fs_vlan_rx_kill_vid(struct mlx5e_flow_steering *fs,
} else if (be16_to_cpu(proto) == ETH_P_8021AD) {
clear_bit(vid, fs->vlan->active_svlans);
mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
+ netdev_lock(netdev);
netdev_update_features(netdev);
+ netdev_unlock(netdev);
}
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index 773624bb2c5d..d68230a7b9f4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -884,8 +884,10 @@ static int flow_type_to_traffic_type(u32 flow_type)
case ESP_V6_FLOW:
return MLX5_TT_IPV6_IPSEC_ESP;
case IPV4_FLOW:
+ case IP_USER_FLOW:
return MLX5_TT_IPV4;
case IPV6_FLOW:
+ case IPV6_USER_FLOW:
return MLX5_TT_IPV6;
default:
return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index d0b80b520397..ea822c69d137 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -39,6 +39,7 @@
#include <linux/debugfs.h>
#include <linux/if_bridge.h>
#include <linux/filter.h>
+#include <net/netdev_lock.h>
#include <net/netdev_queues.h>
#include <net/page_pool/types.h>
#include <net/pkt_sched.h>
@@ -311,8 +312,8 @@ static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
struct mlx5e_icosq *sq,
struct mlx5e_umr_wqe *wqe)
{
- struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
- struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
+ struct mlx5_wqe_ctrl_seg *cseg = &wqe->hdr.ctrl;
+ struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->hdr.uctrl;
u16 octowords;
u8 ds_cnt;
@@ -359,7 +360,7 @@ static int mlx5e_rq_shampo_hd_info_alloc(struct mlx5e_rq *rq, int node)
return 0;
err_nomem:
- kvfree(shampo->bitmap);
+ bitmap_free(shampo->bitmap);
kvfree(shampo->pages);
return -ENOMEM;
@@ -367,7 +368,7 @@ err_nomem:
static void mlx5e_rq_shampo_hd_info_free(struct mlx5e_rq *rq)
{
- kvfree(rq->mpwqe.shampo->bitmap);
+ bitmap_free(rq->mpwqe.shampo->bitmap);
kvfree(rq->mpwqe.shampo->pages);
}
@@ -393,7 +394,9 @@ static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, int node)
bitmap_fill(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
}
- mlx5e_build_umr_wqe(rq, rq->icosq, &rq->mpwqe.umr_wqe);
+ mlx5e_build_umr_wqe(rq, rq->icosq,
+ container_of(&rq->mpwqe.umr_wqe,
+ struct mlx5e_umr_wqe, hdr));
return 0;
}
@@ -737,7 +740,7 @@ static int mlx5e_init_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *param
rq->netdev = c->netdev;
rq->priv = c->priv;
rq->tstamp = c->tstamp;
- rq->clock = &mdev->clock;
+ rq->clock = mdev->clock;
rq->icosq = &c->icosq;
rq->ix = c->ix;
rq->channel = c;
@@ -1614,7 +1617,7 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
int err;
sq->pdev = c->pdev;
- sq->clock = &mdev->clock;
+ sq->clock = mdev->clock;
sq->mkey_be = c->mkey_be;
sq->netdev = c->netdev;
sq->mdev = c->mdev;
@@ -1901,7 +1904,20 @@ void mlx5e_tx_err_cqe_work(struct work_struct *recover_work)
struct mlx5e_txqsq *sq = container_of(recover_work, struct mlx5e_txqsq,
recover_work);
+ /* Recovering queues means re-enabling NAPI, which requires the netdev
+ * instance lock. However, SQ closing flows have to wait for work tasks
+ * to finish while also holding the netdev instance lock. So either get
+ * the lock or find that the SQ is no longer enabled and thus this work
+ * is not relevant anymore.
+ */
+ while (!netdev_trylock(sq->netdev)) {
+ if (!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state))
+ return;
+ msleep(20);
+ }
+
mlx5e_reporter_tx_err_cqe(sq);
+ netdev_unlock(sq->netdev);
}
static struct dim_cq_moder mlx5e_get_def_tx_moderation(u8 cq_period_mode)
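
The comment and the netdev_trylock() loop added above implement a "trylock or detect cancellation" idiom: the recovery work must not block on a lock that the SQ-closing path holds while waiting for this very work. A hedged userspace sketch of the same idiom, with pthread and usleep() standing in for netdev_trylock()/msleep() and an atomic flag standing in for MLX5E_SQ_STATE_ENABLED (illustration only, not driver code):

#include <pthread.h>
#include <stdatomic.h>
#include <unistd.h>

/* Poll the lock instead of blocking on it; bail out once the queue is no
 * longer enabled, because then the teardown path owns the lock and this
 * work is no longer relevant.
 */
static void recovery_work(pthread_mutex_t *lock, atomic_bool *enabled)
{
	while (pthread_mutex_trylock(lock) != 0) {
		if (!atomic_load(enabled))
			return;		/* teardown already in progress */
		usleep(20 * 1000);	/* roughly msleep(20) */
	}

	/* ... recover the queue while holding the lock ... */

	pthread_mutex_unlock(lock);
}
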
@@ -2023,41 +2039,12 @@ int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
csp.min_inline_mode = sq->min_inline_mode;
set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
- if (param->is_xdp_mb)
- set_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state);
-
err = mlx5e_create_sq_rdy(c->mdev, param, &csp, 0, &sq->sqn);
if (err)
goto err_free_xdpsq;
mlx5e_set_xmit_fp(sq, param->is_mpw);
- if (!param->is_mpw && !test_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state)) {
- unsigned int ds_cnt = MLX5E_TX_WQE_EMPTY_DS_COUNT + 1;
- unsigned int inline_hdr_sz = 0;
- int i;
-
- if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
- inline_hdr_sz = MLX5E_XDP_MIN_INLINE;
- ds_cnt++;
- }
-
- /* Pre initialize fixed WQE fields */
- for (i = 0; i < mlx5_wq_cyc_get_size(&sq->wq); i++) {
- struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(&sq->wq, i);
- struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
- struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
-
- sq->db.wqe_info[i] = (struct mlx5e_xdp_wqe_info) {
- .num_wqebbs = 1,
- .num_pkts = 1,
- };
-
- cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
- eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);
- }
- }
-
return 0;
err_free_xdpsq:
@@ -2087,7 +2074,7 @@ static struct mlx5e_xdpsq *mlx5e_open_xdpredirect_sq(struct mlx5e_channel *c,
struct mlx5e_xdpsq *xdpsq;
int err;
- xdpsq = kvzalloc_node(sizeof(*xdpsq), GFP_KERNEL, c->cpu);
+ xdpsq = kvzalloc_node(sizeof(*xdpsq), GFP_KERNEL, cpu_to_node(c->cpu));
if (!xdpsq)
return ERR_PTR(-ENOMEM);
@@ -2680,11 +2667,11 @@ void mlx5e_trigger_napi_sched(struct napi_struct *napi)
static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
struct mlx5e_params *params,
- struct mlx5e_channel_param *cparam,
struct xsk_buff_pool *xsk_pool,
struct mlx5e_channel **cp)
{
struct net_device *netdev = priv->netdev;
+ struct mlx5e_channel_param *cparam;
struct mlx5_core_dev *mdev;
struct mlx5e_xsk_param xsk;
struct mlx5e_channel *c;
@@ -2706,8 +2693,15 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
return err;
c = kvzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
- if (!c)
- return -ENOMEM;
+ cparam = kvzalloc(sizeof(*cparam), GFP_KERNEL);
+ if (!c || !cparam) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+
+ err = mlx5e_build_channel_param(mdev, params, cparam);
+ if (err)
+ goto err_free;
c->priv = priv;
c->mdev = mdev;
@@ -2725,8 +2719,8 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
c->aff_mask = irq_get_effective_affinity_mask(irq);
c->lag_port = mlx5e_enumerate_lag_port(mdev, ix);
- netif_napi_add_config(netdev, &c->napi, mlx5e_napi_poll, ix);
- netif_napi_set_irq(&c->napi, irq);
+ netif_napi_add_config_locked(netdev, &c->napi, mlx5e_napi_poll, ix);
+ netif_napi_set_irq_locked(&c->napi, irq);
err = mlx5e_open_queues(c, params, cparam);
if (unlikely(err))
@@ -2741,14 +2735,17 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
*cp = c;
+ kvfree(cparam);
return 0;
err_close_queues:
mlx5e_close_queues(c);
err_napi_del:
- netif_napi_del(&c->napi);
+ netif_napi_del_locked(&c->napi);
+err_free:
+ kvfree(cparam);
kvfree(c);
return err;
@@ -2758,7 +2755,7 @@ static void mlx5e_activate_channel(struct mlx5e_channel *c)
{
int tc;
- napi_enable(&c->napi);
+ napi_enable_locked(&c->napi);
for (tc = 0; tc < c->num_tc; tc++)
mlx5e_activate_txqsq(&c->sq[tc]);
@@ -2790,7 +2787,7 @@ static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
mlx5e_deactivate_txqsq(&c->sq[tc]);
mlx5e_qos_deactivate_queues(c);
- napi_disable(&c->napi);
+ napi_disable_locked(&c->napi);
}
static void mlx5e_close_channel(struct mlx5e_channel *c)
@@ -2799,7 +2796,7 @@ static void mlx5e_close_channel(struct mlx5e_channel *c)
mlx5e_close_xsk(c);
mlx5e_close_queues(c);
mlx5e_qos_close_queues(c);
- netif_napi_del(&c->napi);
+ netif_napi_del_locked(&c->napi);
kvfree(c);
}
@@ -2807,20 +2804,14 @@ static void mlx5e_close_channel(struct mlx5e_channel *c)
int mlx5e_open_channels(struct mlx5e_priv *priv,
struct mlx5e_channels *chs)
{
- struct mlx5e_channel_param *cparam;
int err = -ENOMEM;
int i;
chs->num = chs->params.num_channels;
chs->c = kcalloc(chs->num, sizeof(struct mlx5e_channel *), GFP_KERNEL);
- cparam = kvzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL);
- if (!chs->c || !cparam)
- goto err_free;
-
- err = mlx5e_build_channel_param(priv->mdev, &chs->params, cparam);
- if (err)
- goto err_free;
+ if (!chs->c)
+ goto err_out;
for (i = 0; i < chs->num; i++) {
struct xsk_buff_pool *xsk_pool = NULL;
@@ -2828,7 +2819,7 @@ int mlx5e_open_channels(struct mlx5e_priv *priv,
if (chs->params.xdp_prog)
xsk_pool = mlx5e_xsk_get_pool(&chs->params, chs->params.xsk, i);
- err = mlx5e_open_channel(priv, i, &chs->params, cparam, xsk_pool, &chs->c[i]);
+ err = mlx5e_open_channel(priv, i, &chs->params, xsk_pool, &chs->c[i]);
if (err)
goto err_close_channels;
}
@@ -2846,7 +2837,6 @@ int mlx5e_open_channels(struct mlx5e_priv *priv,
}
mlx5e_health_channels_update(priv);
- kvfree(cparam);
return 0;
err_close_ptp:
@@ -2857,9 +2847,8 @@ err_close_channels:
for (i--; i >= 0; i--)
mlx5e_close_channel(chs->c[i]);
-err_free:
kfree(chs->c);
- kvfree(cparam);
+err_out:
chs->num = 0;
return err;
}
@@ -3814,8 +3803,11 @@ static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv,
/* MQPRIO is another toplevel qdisc that can't be attached
* simultaneously with the offloaded HTB.
*/
- if (WARN_ON(mlx5e_selq_is_htb_enabled(&priv->selq)))
- return -EINVAL;
+ if (mlx5e_selq_is_htb_enabled(&priv->selq)) {
+ NL_SET_ERR_MSG_MOD(mqprio->extack,
+ "MQPRIO cannot be configured when HTB offload is enabled.");
+ return -EOPNOTSUPP;
+ }
switch (mqprio->mode) {
case TC_MQPRIO_MODE_DCB:
@@ -3944,6 +3936,7 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
}
stats->rx_missed_errors = priv->stats.qcnt.rx_out_of_buffer;
+ stats->rx_dropped = PPORT_2863_GET(pstats, if_in_discards);
stats->rx_length_errors =
PPORT_802_3_GET(pstats, a_in_range_length_errors) +
@@ -4297,7 +4290,7 @@ void mlx5e_set_xdp_feature(struct net_device *netdev)
if (!netdev->netdev_ops->ndo_bpf ||
params->packet_merge.type != MLX5E_PACKET_MERGE_NONE) {
- xdp_clear_features_flag(netdev);
+ xdp_set_features_flag_locked(netdev, 0);
return;
}
@@ -4306,7 +4299,7 @@ void mlx5e_set_xdp_feature(struct net_device *netdev)
NETDEV_XDP_ACT_RX_SG |
NETDEV_XDP_ACT_NDO_XMIT |
NETDEV_XDP_ACT_NDO_XMIT_SG;
- xdp_set_features_flag(netdev, val);
+ xdp_set_features_flag_locked(netdev, val);
}
int mlx5e_set_features(struct net_device *netdev, netdev_features_t features)
@@ -4370,6 +4363,10 @@ static netdev_features_t mlx5e_fix_uplink_rep_features(struct net_device *netdev
if (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
netdev_warn(netdev, "Disabling HW_VLAN CTAG FILTERING, not supported in switchdev mode\n");
+ features &= ~NETIF_F_HW_MACSEC;
+ if (netdev->features & NETIF_F_HW_MACSEC)
+ netdev_warn(netdev, "Disabling HW MACsec offload, not supported in switchdev mode\n");
+
return features;
}
@@ -4444,9 +4441,9 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
if (mlx5e_is_uplink_rep(priv)) {
features = mlx5e_fix_uplink_rep_features(netdev, features);
- netdev->netns_local = true;
+ netdev->netns_immutable = true;
} else {
- netdev->netns_local = false;
+ netdev->netns_immutable = false;
}
mutex_unlock(&priv->state_lock);
@@ -4985,21 +4982,19 @@ static void mlx5e_tx_timeout_work(struct work_struct *work)
struct net_device *netdev = priv->netdev;
int i;
- /* Take rtnl_lock to ensure no change in netdev->real_num_tx_queues
- * through this flow. However, channel closing flows have to wait for
- * this work to finish while holding rtnl lock too. So either get the
- * lock or find that channels are being closed for other reason and
- * this work is not relevant anymore.
+ /* Recovering the TX queues implies re-enabling NAPI, which requires
+ * the netdev instance lock.
+ * However, channel closing flows have to wait for this work to finish
+ * while holding the same lock. So either get the lock or find that
+ * channels are being closed for another reason and this work is not
+ * relevant anymore.
*/
- while (!rtnl_trylock()) {
+ while (!netdev_trylock(netdev)) {
if (!test_bit(MLX5E_STATE_CHANNELS_ACTIVE, &priv->state))
return;
msleep(20);
}
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
- goto unlock;
-
for (i = 0; i < netdev->real_num_tx_queues; i++) {
struct netdev_queue *dev_queue =
netdev_get_tx_queue(netdev, i);
@@ -5013,8 +5008,7 @@ static void mlx5e_tx_timeout_work(struct work_struct *work)
break;
}
-unlock:
- rtnl_unlock();
+ netdev_unlock(netdev);
}
static void mlx5e_tx_timeout(struct net_device *dev, unsigned int txqueue)
@@ -5129,11 +5123,9 @@ static int mlx5e_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
u8 mode, setting;
- int err;
- err = mlx5_eswitch_get_vepa(mdev->priv.eswitch, &setting);
- if (err)
- return err;
+ if (mlx5_eswitch_get_vepa(mdev->priv.eswitch, &setting))
+ return -EOPNOTSUPP;
mode = setting ? BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB;
return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
mode,
@@ -5340,7 +5332,6 @@ static void mlx5e_get_queue_stats_rx(struct net_device *dev, int i,
struct mlx5e_rq_stats *xskrq_stats;
struct mlx5e_rq_stats *rq_stats;
- ASSERT_RTNL();
if (mlx5e_is_uplink_rep(priv) || !priv->stats_nch)
return;
@@ -5360,7 +5351,6 @@ static void mlx5e_get_queue_stats_tx(struct net_device *dev, int i,
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5e_sq_stats *sq_stats;
- ASSERT_RTNL();
if (!priv->stats_nch)
return;
@@ -5381,7 +5371,6 @@ static void mlx5e_get_base_stats(struct net_device *dev,
struct mlx5e_ptp *ptp_channel;
int i, tc;
- ASSERT_RTNL();
if (!mlx5e_is_uplink_rep(priv)) {
rx->packets = 0;
rx->bytes = 0;
@@ -5477,6 +5466,8 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->netdev_ops = &mlx5e_netdev_ops;
netdev->xdp_metadata_ops = &mlx5e_xdp_metadata_ops;
netdev->xsk_tx_metadata_ops = &mlx5e_xsk_tx_metadata_ops;
+ netdev->request_ops_lock = true;
+ netdev_lockdep_set_classes(netdev);
mlx5e_dcbnl_build_netdev(netdev);
@@ -5858,9 +5849,11 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
mlx5e_nic_set_rx_mode(priv);
rtnl_lock();
+ netdev_lock(netdev);
if (netif_running(netdev))
mlx5e_open(netdev);
udp_tunnel_nic_reset_ntf(priv->netdev);
+ netdev_unlock(netdev);
netif_device_attach(netdev);
rtnl_unlock();
}
@@ -5873,9 +5866,16 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
mlx5e_dcbnl_delete_app(priv);
rtnl_lock();
+ netdev_lock(priv->netdev);
if (netif_running(priv->netdev))
mlx5e_close(priv->netdev);
netif_device_detach(priv->netdev);
+ if (priv->en_trap) {
+ mlx5e_deactivate_trap(priv);
+ mlx5e_close_trap(priv->en_trap);
+ priv->en_trap = NULL;
+ }
+ netdev_unlock(priv->netdev);
rtnl_unlock();
mlx5e_nic_set_rx_mode(priv);
@@ -5885,11 +5885,6 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
mlx5e_monitor_counter_cleanup(priv);
mlx5e_disable_blocking_events(priv);
- if (priv->en_trap) {
- mlx5e_deactivate_trap(priv);
- mlx5e_close_trap(priv->en_trap);
- priv->en_trap = NULL;
- }
mlx5e_disable_async_events(priv);
mlx5_lag_remove_netdev(mdev, priv->netdev);
mlx5_vxlan_reset_to_default(mdev->vxlan);
@@ -6144,7 +6139,9 @@ static void mlx5e_update_features(struct net_device *netdev)
return; /* features will be updated on netdev registration */
rtnl_lock();
+ netdev_lock(netdev);
netdev_update_features(netdev);
+ netdev_unlock(netdev);
rtnl_unlock();
}
@@ -6155,7 +6152,7 @@ static void mlx5e_reset_channels(struct net_device *netdev)
int mlx5e_attach_netdev(struct mlx5e_priv *priv)
{
- const bool take_rtnl = priv->netdev->reg_state == NETREG_REGISTERED;
+ const bool need_lock = priv->netdev->reg_state == NETREG_REGISTERED;
const struct mlx5e_profile *profile = priv->profile;
int max_nch;
int err;
@@ -6197,15 +6194,19 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv)
* 2. Set our default XPS cpumask.
* 3. Build the RQT.
*
- * rtnl_lock is required by netif_set_real_num_*_queues in case the
+ * Locking is required by netif_set_real_num_*_queues in case the
* netdev has been registered by this point (if this function was called
* in the reload or resume flow).
*/
- if (take_rtnl)
+ if (need_lock) {
rtnl_lock();
+ netdev_lock(priv->netdev);
+ }
err = mlx5e_num_channels_changed(priv);
- if (take_rtnl)
+ if (need_lock) {
+ netdev_unlock(priv->netdev);
rtnl_unlock();
+ }
if (err)
goto out;
@@ -6540,8 +6541,23 @@ static void _mlx5e_remove(struct auxiliary_device *adev)
mlx5_core_uplink_netdev_set(mdev, NULL);
mlx5e_dcbnl_delete_app(priv);
- unregister_netdev(priv->netdev);
- _mlx5e_suspend(adev, false);
+ /* When the driver is unloaded, the netdev is still in the registered
+ * state if it came from legacy mode. If it came from switchdev mode,
+ * it was already unregistered before the switch to the NIC profile.
+ */
+ if (priv->netdev->reg_state == NETREG_REGISTERED) {
+ unregister_netdev(priv->netdev);
+ _mlx5e_suspend(adev, false);
+ } else {
+ struct mlx5_core_dev *pos;
+ int i;
+
+ if (test_bit(MLX5E_STATE_DESTROYING, &priv->state))
+ mlx5_sd_for_each_dev(i, mdev, pos)
+ mlx5e_destroy_mdev_resources(pos);
+ else
+ _mlx5e_suspend(adev, true);
+ }
/* Avoid cleanup if profile rollback failed. */
if (priv->profile)
priv->profile->cleanup(priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 554f9cb5b53f..63a7a788fb0d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -33,6 +33,7 @@
#include <linux/dim.h>
#include <linux/debugfs.h>
#include <linux/mlx5/fs.h>
+#include <net/netdev_lock.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/act_api.h>
@@ -65,6 +66,7 @@
#define MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE \
max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
#define MLX5E_REP_PARAMS_DEF_NUM_CHANNELS 1
+#define MLX5E_REP_PARAMS_DEF_LOG_RQ_SIZE 0x8
static const char mlx5e_rep_driver_name[] = "mlx5e_rep";
@@ -802,6 +804,7 @@ static const struct net_device_ops mlx5e_netdev_ops_rep = {
.ndo_stop = mlx5e_rep_close,
.ndo_start_xmit = mlx5e_xmit,
.ndo_setup_tc = mlx5e_rep_setup_tc,
+ .ndo_set_mac_address = eth_mac_addr,
.ndo_get_stats64 = mlx5e_rep_get_stats,
.ndo_has_offload_stats = mlx5e_rep_has_offload_stats,
.ndo_get_offload_stats = mlx5e_rep_get_offload_stats,
@@ -855,6 +858,8 @@ static void mlx5e_build_rep_params(struct net_device *netdev)
/* RQ */
mlx5e_build_rq_params(mdev, params);
+ if (!mlx5e_is_uplink_rep(priv) && mlx5_core_is_ecpf(mdev))
+ params->log_rq_mtu_frames = MLX5E_REP_PARAMS_DEF_LOG_RQ_SIZE;
/* If netdev is already registered (e.g. move from nic profile to uplink,
* RTNL lock must be held before triggering netdev notifiers.
@@ -882,10 +887,14 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev,
{
SET_NETDEV_DEV(netdev, mdev->device);
netdev->netdev_ops = &mlx5e_netdev_ops_rep;
+ netdev->request_ops_lock = true;
+ netdev_lockdep_set_classes(netdev);
eth_hw_addr_random(netdev);
netdev->ethtool_ops = &mlx5e_rep_ethtool_ops;
netdev->watchdog_timeo = 15 * HZ;
+ if (mlx5_core_is_ecpf(mdev))
+ netdev->tx_queue_len = 1 << MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE;
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
netdev->hw_features |= NETIF_F_HW_TC;
@@ -900,7 +909,7 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev,
netdev->features |= netdev->hw_features;
- netdev->netns_local = true;
+ netdev->netns_immutable = true;
}
static int mlx5e_init_rep(struct mlx5_core_dev *mdev,
@@ -1339,9 +1348,11 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
netdev->wanted_features |= NETIF_F_HW_TC;
rtnl_lock();
+ netdev_lock(netdev);
if (netif_running(netdev))
mlx5e_open(netdev);
udp_tunnel_nic_reset_ntf(priv->netdev);
+ netdev_unlock(netdev);
netif_device_attach(netdev);
rtnl_unlock();
}
@@ -1351,9 +1362,11 @@ static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv)
struct mlx5_core_dev *mdev = priv->mdev;
rtnl_lock();
+ netdev_lock(priv->netdev);
if (netif_running(priv->netdev))
mlx5e_close(priv->netdev);
netif_device_detach(priv->netdev);
+ netdev_unlock(priv->netdev);
rtnl_unlock();
mlx5e_rep_bridge_cleanup(priv);
@@ -1509,6 +1522,21 @@ mlx5e_vport_uplink_rep_unload(struct mlx5e_rep_priv *rpriv)
priv = netdev_priv(netdev);
+ /* This bit is set when devlink is used to change the eswitch mode from
+ * switchdev to legacy. Since the uplink netdev ifindex must be kept, we
+ * only detach the uplink representor profile and attach the NIC profile.
+ * In that case the netdev is unregistered later, when the NIC auxiliary
+ * driver is unloaded.
+ * We explicitly block devlink eswitch mode changes while any IPsec rules
+ * are offloaded, but can't block other cases, such as driver unload
+ * and devlink reload. For those cases the netdev must be unregistered
+ * before the profile change; otherwise resources leak because the
+ * offloaded rules get no chance to be unoffloaded before the cleanup
+ * triggered by detaching the uplink representor profile.
+ */
+ if (!(priv->mdev->priv.flags & MLX5_PRIV_FLAGS_SWITCH_LEGACY))
+ unregister_netdev(netdev);
+
mlx5e_netdev_attach_nic_profile(priv);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 1963bc5adb18..84b1ab8233b8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -631,16 +631,16 @@ static void build_ksm_umr(struct mlx5e_icosq *sq, struct mlx5e_umr_wqe *umr_wqe,
__be32 key, u16 offset, u16 ksm_len)
{
memset(umr_wqe, 0, offsetof(struct mlx5e_umr_wqe, inline_ksms));
- umr_wqe->ctrl.opmod_idx_opcode =
+ umr_wqe->hdr.ctrl.opmod_idx_opcode =
cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
MLX5_OPCODE_UMR);
- umr_wqe->ctrl.umr_mkey = key;
- umr_wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT)
+ umr_wqe->hdr.ctrl.umr_mkey = key;
+ umr_wqe->hdr.ctrl.qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT)
| MLX5E_KSM_UMR_DS_CNT(ksm_len));
- umr_wqe->uctrl.flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE;
- umr_wqe->uctrl.xlt_offset = cpu_to_be16(offset);
- umr_wqe->uctrl.xlt_octowords = cpu_to_be16(ksm_len);
- umr_wqe->uctrl.mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
+ umr_wqe->hdr.uctrl.flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE;
+ umr_wqe->hdr.uctrl.xlt_offset = cpu_to_be16(offset);
+ umr_wqe->hdr.uctrl.xlt_octowords = cpu_to_be16(ksm_len);
+ umr_wqe->hdr.uctrl.mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
}
static struct mlx5e_frag_page *mlx5e_shampo_hd_to_frag_page(struct mlx5e_rq *rq, int header_index)
@@ -704,7 +704,7 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
shampo->pi = (shampo->pi + ksm_entries) & (shampo->hd_per_wq - 1);
sq->pc += wqe_bbs;
- sq->doorbell_cseg = &umr_wqe->ctrl;
+ sq->doorbell_cseg = &umr_wqe->hdr.ctrl;
return 0;
@@ -814,12 +814,12 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
bitmap_zero(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
wi->consumed_strides = 0;
- umr_wqe->ctrl.opmod_idx_opcode =
+ umr_wqe->hdr.ctrl.opmod_idx_opcode =
cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
MLX5_OPCODE_UMR);
offset = (ix * rq->mpwqe.mtts_per_wqe) * sizeof(struct mlx5_mtt) / MLX5_OCTWORD;
- umr_wqe->uctrl.xlt_offset = cpu_to_be16(offset);
+ umr_wqe->hdr.uctrl.xlt_offset = cpu_to_be16(offset);
sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
.wqe_type = MLX5E_ICOSQ_WQE_UMR_RX,
@@ -829,7 +829,7 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
sq->pc += rq->mpwqe.umr_wqebbs;
- sq->doorbell_cseg = &umr_wqe->ctrl;
+ sq->doorbell_cseg = &umr_wqe->hdr.ctrl;
return 0;
@@ -1684,17 +1684,17 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
prog = rcu_dereference(rq->xdp_prog);
if (prog) {
- struct mlx5e_xdp_buff mxbuf;
+ struct mlx5e_xdp_buff *mxbuf = &rq->mxbuf;
net_prefetchw(va); /* xdp_frame data area */
mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, rq->buff.frame0_sz,
- cqe_bcnt, &mxbuf);
- if (mlx5e_xdp_handle(rq, prog, &mxbuf))
+ cqe_bcnt, mxbuf);
+ if (mlx5e_xdp_handle(rq, prog, mxbuf))
return NULL; /* page/packet was consumed by XDP */
- rx_headroom = mxbuf.xdp.data - mxbuf.xdp.data_hard_start;
- metasize = mxbuf.xdp.data - mxbuf.xdp.data_meta;
- cqe_bcnt = mxbuf.xdp.data_end - mxbuf.xdp.data;
+ rx_headroom = mxbuf->xdp.data - mxbuf->xdp.data_hard_start;
+ metasize = mxbuf->xdp.data - mxbuf->xdp.data_meta;
+ cqe_bcnt = mxbuf->xdp.data_end - mxbuf->xdp.data;
}
frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize);
@@ -1713,11 +1713,11 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
struct mlx5_cqe64 *cqe, u32 cqe_bcnt)
{
struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
+ struct mlx5e_xdp_buff *mxbuf = &rq->mxbuf;
struct mlx5e_wqe_frag_info *head_wi = wi;
u16 rx_headroom = rq->buff.headroom;
struct mlx5e_frag_page *frag_page;
struct skb_shared_info *sinfo;
- struct mlx5e_xdp_buff mxbuf;
u32 frag_consumed_bytes;
struct bpf_prog *prog;
struct sk_buff *skb;
@@ -1737,8 +1737,8 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
net_prefetch(va + rx_headroom);
mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, rq->buff.frame0_sz,
- frag_consumed_bytes, &mxbuf);
- sinfo = xdp_get_shared_info_from_buff(&mxbuf.xdp);
+ frag_consumed_bytes, mxbuf);
+ sinfo = xdp_get_shared_info_from_buff(&mxbuf->xdp);
truesize = 0;
cqe_bcnt -= frag_consumed_bytes;
@@ -1750,8 +1750,9 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
frag_consumed_bytes = min_t(u32, frag_info->frag_size, cqe_bcnt);
- mlx5e_add_skb_shared_info_frag(rq, sinfo, &mxbuf.xdp, frag_page,
- wi->offset, frag_consumed_bytes);
+ mlx5e_add_skb_shared_info_frag(rq, sinfo, &mxbuf->xdp,
+ frag_page, wi->offset,
+ frag_consumed_bytes);
truesize += frag_info->frag_stride;
cqe_bcnt -= frag_consumed_bytes;
@@ -1760,7 +1761,7 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
}
prog = rcu_dereference(rq->xdp_prog);
- if (prog && mlx5e_xdp_handle(rq, prog, &mxbuf)) {
+ if (prog && mlx5e_xdp_handle(rq, prog, mxbuf)) {
if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
struct mlx5e_wqe_frag_info *pwi;
@@ -1770,21 +1771,23 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
return NULL; /* page/packet was consumed by XDP */
}
- skb = mlx5e_build_linear_skb(rq, mxbuf.xdp.data_hard_start, rq->buff.frame0_sz,
- mxbuf.xdp.data - mxbuf.xdp.data_hard_start,
- mxbuf.xdp.data_end - mxbuf.xdp.data,
- mxbuf.xdp.data - mxbuf.xdp.data_meta);
+ skb = mlx5e_build_linear_skb(
+ rq, mxbuf->xdp.data_hard_start, rq->buff.frame0_sz,
+ mxbuf->xdp.data - mxbuf->xdp.data_hard_start,
+ mxbuf->xdp.data_end - mxbuf->xdp.data,
+ mxbuf->xdp.data - mxbuf->xdp.data_meta);
if (unlikely(!skb))
return NULL;
skb_mark_for_recycle(skb);
head_wi->frag_page->frags++;
- if (xdp_buff_has_frags(&mxbuf.xdp)) {
+ if (xdp_buff_has_frags(&mxbuf->xdp)) {
/* sinfo->nr_frags is reset by build_skb, calculate again. */
xdp_update_skb_shared_info(skb, wi - head_wi - 1,
sinfo->xdp_frags_size, truesize,
- xdp_buff_is_frag_pfmemalloc(&mxbuf.xdp));
+ xdp_buff_is_frag_pfmemalloc(
+ &mxbuf->xdp));
for (struct mlx5e_wqe_frag_info *pwi = head_wi + 1; pwi < wi; pwi++)
pwi->frag_page->frags++;
@@ -1984,10 +1987,10 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
struct mlx5e_frag_page *frag_page = &wi->alloc_units.frag_pages[page_idx];
u16 headlen = min_t(u16, MLX5E_RX_MAX_HEAD, cqe_bcnt);
struct mlx5e_frag_page *head_page = frag_page;
+ struct mlx5e_xdp_buff *mxbuf = &rq->mxbuf;
u32 frag_offset = head_offset;
u32 byte_cnt = cqe_bcnt;
struct skb_shared_info *sinfo;
- struct mlx5e_xdp_buff mxbuf;
unsigned int truesize = 0;
struct bpf_prog *prog;
struct sk_buff *skb;
@@ -2033,9 +2036,10 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
}
}
- mlx5e_fill_mxbuf(rq, cqe, va, linear_hr, linear_frame_sz, linear_data_len, &mxbuf);
+ mlx5e_fill_mxbuf(rq, cqe, va, linear_hr, linear_frame_sz,
+ linear_data_len, mxbuf);
- sinfo = xdp_get_shared_info_from_buff(&mxbuf.xdp);
+ sinfo = xdp_get_shared_info_from_buff(&mxbuf->xdp);
while (byte_cnt) {
/* Non-linear mode, hence non-XSK, which always uses PAGE_SIZE. */
@@ -2046,7 +2050,8 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
else
truesize += ALIGN(pg_consumed_bytes, BIT(rq->mpwqe.log_stride_sz));
- mlx5e_add_skb_shared_info_frag(rq, sinfo, &mxbuf.xdp, frag_page, frag_offset,
+ mlx5e_add_skb_shared_info_frag(rq, sinfo, &mxbuf->xdp,
+ frag_page, frag_offset,
pg_consumed_bytes);
byte_cnt -= pg_consumed_bytes;
frag_offset = 0;
@@ -2054,7 +2059,7 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
}
if (prog) {
- if (mlx5e_xdp_handle(rq, prog, &mxbuf)) {
+ if (mlx5e_xdp_handle(rq, prog, mxbuf)) {
if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
struct mlx5e_frag_page *pfp;
@@ -2067,10 +2072,10 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
return NULL; /* page/packet was consumed by XDP */
}
- skb = mlx5e_build_linear_skb(rq, mxbuf.xdp.data_hard_start,
- linear_frame_sz,
- mxbuf.xdp.data - mxbuf.xdp.data_hard_start, 0,
- mxbuf.xdp.data - mxbuf.xdp.data_meta);
+ skb = mlx5e_build_linear_skb(
+ rq, mxbuf->xdp.data_hard_start, linear_frame_sz,
+ mxbuf->xdp.data - mxbuf->xdp.data_hard_start, 0,
+ mxbuf->xdp.data - mxbuf->xdp.data_meta);
if (unlikely(!skb)) {
mlx5e_page_release_fragmented(rq, &wi->linear_page);
return NULL;
@@ -2080,13 +2085,14 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
wi->linear_page.frags++;
mlx5e_page_release_fragmented(rq, &wi->linear_page);
- if (xdp_buff_has_frags(&mxbuf.xdp)) {
+ if (xdp_buff_has_frags(&mxbuf->xdp)) {
struct mlx5e_frag_page *pagep;
/* sinfo->nr_frags is reset by build_skb, calculate again. */
xdp_update_skb_shared_info(skb, frag_page - head_page,
sinfo->xdp_frags_size, truesize,
- xdp_buff_is_frag_pfmemalloc(&mxbuf.xdp));
+ xdp_buff_is_frag_pfmemalloc(
+ &mxbuf->xdp));
pagep = head_page;
do
@@ -2097,12 +2103,13 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
} else {
dma_addr_t addr;
- if (xdp_buff_has_frags(&mxbuf.xdp)) {
+ if (xdp_buff_has_frags(&mxbuf->xdp)) {
struct mlx5e_frag_page *pagep;
xdp_update_skb_shared_info(skb, sinfo->nr_frags,
sinfo->xdp_frags_size, truesize,
- xdp_buff_is_frag_pfmemalloc(&mxbuf.xdp));
+ xdp_buff_is_frag_pfmemalloc(
+ &mxbuf->xdp));
pagep = frag_page - sinfo->nr_frags;
do
@@ -2152,20 +2159,20 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
prog = rcu_dereference(rq->xdp_prog);
if (prog) {
- struct mlx5e_xdp_buff mxbuf;
+ struct mlx5e_xdp_buff *mxbuf = &rq->mxbuf;
net_prefetchw(va); /* xdp_frame data area */
mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, rq->buff.frame0_sz,
- cqe_bcnt, &mxbuf);
- if (mlx5e_xdp_handle(rq, prog, &mxbuf)) {
+ cqe_bcnt, mxbuf);
+ if (mlx5e_xdp_handle(rq, prog, mxbuf)) {
if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
frag_page->frags++;
return NULL; /* page/packet was consumed by XDP */
}
- rx_headroom = mxbuf.xdp.data - mxbuf.xdp.data_hard_start;
- metasize = mxbuf.xdp.data - mxbuf.xdp.data_meta;
- cqe_bcnt = mxbuf.xdp.data_end - mxbuf.xdp.data;
+ rx_headroom = mxbuf->xdp.data - mxbuf->xdp.data_hard_start;
+ metasize = mxbuf->xdp.data - mxbuf->xdp.data_meta;
+ cqe_bcnt = mxbuf->xdp.data_end - mxbuf->xdp.data;
}
frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize);
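Note on the en_rx.c hunks above: the XDP buffer descriptor moves from a per-call stack variable to a field of the receive queue (rq->mxbuf), so every user switches from member to pointer syntax. The length arithmetic itself is unchanged; a minimal sketch of it, using only the standard struct xdp_buff fields from <net/xdp.h> (the helper name is illustrative, not part of the patch):

#include <net/xdp.h>

/* After a BPF program may have moved data, data_meta or data_end,
 * re-derive headroom, metadata size and payload length from the buffer.
 */
static inline void rx_lengths_from_xdp(const struct xdp_buff *xdp,
				       u32 *headroom, u32 *metasize, u32 *len)
{
	*headroom = xdp->data - xdp->data_hard_start;
	*metasize = xdp->data - xdp->data_meta;
	*len      = xdp->data_end - xdp->data;
}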
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
index 1d60465cc2ca..2f7a543feca6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
@@ -166,6 +166,9 @@ mlx5e_test_loopback_validate(struct sk_buff *skb,
struct udphdr *udph;
struct iphdr *iph;
+ if (skb_linearize(skb))
+ goto out;
+
/* We are only going to peek, no need to clone the SKB */
if (MLX5E_TEST_PKT_SIZE - ETH_HLEN > skb_headlen(skb))
goto out;
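The en_selftest.c change linearizes the skb before the loopback validator peeks at the Ethernet/IP/UDP headers with direct pointer arithmetic. A minimal sketch of the same guard, assuming read-only access to the first MLX5E_TEST_PKT_SIZE bytes (the helper name is illustrative):

#include <linux/skbuff.h>
#include <linux/if_ether.h>

/* Returns true when the test packet headers can safely be read from
 * the linear area. skb_linearize() may fail under memory pressure,
 * in which case the caller simply skips validation.
 */
static bool lb_pkt_headers_linear(struct sk_buff *skb)
{
	if (skb_linearize(skb))
		return false;
	return skb_headlen(skb) >= MLX5E_TEST_PKT_SIZE - ETH_HLEN;
}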
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 611ec4b6f370..19664fa7f217 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -37,9 +37,7 @@
#include "en/ptp.h"
#include "en/port.h"
-#ifdef CONFIG_PAGE_POOL_STATS
#include <net/page_pool/helpers.h>
-#endif
void mlx5e_ethtool_put_stat(u64 **data, u64 val)
{
@@ -196,7 +194,6 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_err) },
#endif
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_recover) },
-#ifdef CONFIG_PAGE_POOL_STATS
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_fast) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_slow) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_slow_high_order) },
@@ -208,7 +205,6 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_ring) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_ring_full) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_released_ref) },
-#endif
#ifdef CONFIG_MLX5_EN_TLS
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_bytes) },
@@ -377,7 +373,6 @@ static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s,
s->rx_arfs_err += rq_stats->arfs_err;
#endif
s->rx_recover += rq_stats->recover;
-#ifdef CONFIG_PAGE_POOL_STATS
s->rx_pp_alloc_fast += rq_stats->pp_alloc_fast;
s->rx_pp_alloc_slow += rq_stats->pp_alloc_slow;
s->rx_pp_alloc_empty += rq_stats->pp_alloc_empty;
@@ -389,7 +384,6 @@ static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s,
s->rx_pp_recycle_ring += rq_stats->pp_recycle_ring;
s->rx_pp_recycle_ring_full += rq_stats->pp_recycle_ring_full;
s->rx_pp_recycle_released_ref += rq_stats->pp_recycle_released_ref;
-#endif
#ifdef CONFIG_MLX5_EN_TLS
s->rx_tls_decrypted_packets += rq_stats->tls_decrypted_packets;
s->rx_tls_decrypted_bytes += rq_stats->tls_decrypted_bytes;
@@ -496,7 +490,6 @@ static void mlx5e_stats_grp_sw_update_stats_qos(struct mlx5e_priv *priv,
}
}
-#ifdef CONFIG_PAGE_POOL_STATS
static void mlx5e_stats_update_stats_rq_page_pool(struct mlx5e_channel *c)
{
struct mlx5e_rq_stats *rq_stats = c->rq.stats;
@@ -519,11 +512,6 @@ static void mlx5e_stats_update_stats_rq_page_pool(struct mlx5e_channel *c)
rq_stats->pp_recycle_ring_full = stats.recycle_stats.ring_full;
rq_stats->pp_recycle_released_ref = stats.recycle_stats.released_refcnt;
}
-#else
-static void mlx5e_stats_update_stats_rq_page_pool(struct mlx5e_channel *c)
-{
-}
-#endif
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
{
@@ -1227,6 +1215,13 @@ out:
mutex_unlock(&priv->state_lock);
}
+#define PPORT_PHY_LAYER_OFF(c) \
+ MLX5_BYTE_OFF(ppcnt_reg, \
+ counter_set.phys_layer_cntrs.c)
+static const struct counter_desc pport_phy_layer_cntrs_stats_desc[] = {
+ { "link_down_events_phy", PPORT_PHY_LAYER_OFF(link_down_events) }
+};
+
#define PPORT_PHY_STATISTICAL_OFF(c) \
MLX5_BYTE_OFF(ppcnt_reg, \
counter_set.phys_layer_statistical_cntrs.c##_high)
@@ -1243,25 +1238,45 @@ pport_phy_statistical_err_lanes_stats_desc[] = {
{ "rx_err_lane_3_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane3) },
};
+#define PPORT_PHY_RECOVERY_OFF(c) \
+ MLX5_BYTE_OFF(ppcnt_reg, counter_set.phys_layer_recovery_cntrs.c)
+static const struct counter_desc
+pport_phy_recovery_cntrs_stats_desc[] = {
+ { "total_success_recovery_phy",
+ PPORT_PHY_RECOVERY_OFF(total_successful_recovery_events) }
+};
+
+#define NUM_PPORT_PHY_LAYER_COUNTERS \
+ ARRAY_SIZE(pport_phy_layer_cntrs_stats_desc)
#define NUM_PPORT_PHY_STATISTICAL_COUNTERS \
ARRAY_SIZE(pport_phy_statistical_stats_desc)
#define NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS \
ARRAY_SIZE(pport_phy_statistical_err_lanes_stats_desc)
+#define NUM_PPORT_PHY_RECOVERY_COUNTERS \
+ ARRAY_SIZE(pport_phy_recovery_cntrs_stats_desc)
+
+#define NUM_PPORT_PHY_STATISTICAL_LOOPBACK_COUNTERS(dev) \
+ (MLX5_CAP_PCAM_FEATURE(dev, ppcnt_statistical_group) ? \
+ NUM_PPORT_PHY_STATISTICAL_COUNTERS : 0)
+#define NUM_PPORT_PHY_STATISTICAL_PER_LANE_LOOPBACK_COUNTERS(dev) \
+ (MLX5_CAP_PCAM_FEATURE(dev, per_lane_error_counters) ? \
+ NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS : 0)
+#define NUM_PPORT_PHY_RECOVERY_LOOPBACK_COUNTERS(dev) \
+ (MLX5_CAP_PCAM_FEATURE(dev, ppcnt_recovery_counters) ? \
+ NUM_PPORT_PHY_RECOVERY_COUNTERS : 0)
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(phy)
{
struct mlx5_core_dev *mdev = priv->mdev;
int num_stats;
- /* "1" for link_down_events special counter */
- num_stats = 1;
+ num_stats = NUM_PPORT_PHY_LAYER_COUNTERS;
- num_stats += MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group) ?
- NUM_PPORT_PHY_STATISTICAL_COUNTERS : 0;
+ num_stats += NUM_PPORT_PHY_STATISTICAL_LOOPBACK_COUNTERS(mdev);
- num_stats += MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters) ?
- NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS : 0;
+ num_stats += NUM_PPORT_PHY_STATISTICAL_PER_LANE_LOOPBACK_COUNTERS(mdev);
+ num_stats += NUM_PPORT_PHY_RECOVERY_LOOPBACK_COUNTERS(mdev);
return num_stats;
}
@@ -1270,18 +1285,22 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(phy)
struct mlx5_core_dev *mdev = priv->mdev;
int i;
- ethtool_puts(data, "link_down_events_phy");
-
- if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
- return;
+ for (i = 0; i < NUM_PPORT_PHY_LAYER_COUNTERS; i++)
+ ethtool_puts(data, pport_phy_layer_cntrs_stats_desc[i].format);
- for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
+ for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_LOOPBACK_COUNTERS(mdev); i++)
ethtool_puts(data, pport_phy_statistical_stats_desc[i].format);
- if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
- for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
- ethtool_puts(data,
- pport_phy_statistical_err_lanes_stats_desc[i].format);
+ for (i = 0;
+ i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_LOOPBACK_COUNTERS(mdev);
+ i++)
+ ethtool_puts(data,
+ pport_phy_statistical_err_lanes_stats_desc[i]
+ .format);
+
+ for (i = 0; i < NUM_PPORT_PHY_RECOVERY_LOOPBACK_COUNTERS(mdev); i++)
+ ethtool_puts(data,
+ pport_phy_recovery_cntrs_stats_desc[i].format);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(phy)
@@ -1289,30 +1308,35 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(phy)
struct mlx5_core_dev *mdev = priv->mdev;
int i;
- /* link_down_events_phy has special handling since it is not stored in __be64 format */
- mlx5e_ethtool_put_stat(
- data, MLX5_GET(ppcnt_reg, priv->stats.pport.phy_counters,
- counter_set.phys_layer_cntrs.link_down_events));
-
- if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
- return;
+ for (i = 0; i < NUM_PPORT_PHY_LAYER_COUNTERS; i++)
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR32_BE(&priv->stats.pport
+ .phy_counters,
+ pport_phy_layer_cntrs_stats_desc, i));
- for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
+ for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_LOOPBACK_COUNTERS(mdev); i++)
mlx5e_ethtool_put_stat(
data,
MLX5E_READ_CTR64_BE(
&priv->stats.pport.phy_statistical_counters,
pport_phy_statistical_stats_desc, i));
- if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
- for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
- mlx5e_ethtool_put_stat(
- data,
- MLX5E_READ_CTR64_BE(
- &priv->stats.pport
- .phy_statistical_counters,
- pport_phy_statistical_err_lanes_stats_desc,
- i));
+ for (i = 0;
+ i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_LOOPBACK_COUNTERS(mdev);
+ i++)
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR64_BE(
+ &priv->stats.pport.phy_statistical_counters,
+ pport_phy_statistical_err_lanes_stats_desc, i));
+
+ for (i = 0; i < NUM_PPORT_PHY_RECOVERY_LOOPBACK_COUNTERS(mdev); i++)
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR32_BE(
+ &priv->stats.pport.phy_recovery_counters,
+ pport_phy_recovery_cntrs_stats_desc, i));
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(phy)
@@ -1328,12 +1352,21 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(phy)
MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
- if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
- return;
+ if (MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group)) {
+ out = pstats->phy_statistical_counters;
+ MLX5_SET(ppcnt_reg, in, grp,
+ MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0,
+ 0);
+ }
- out = pstats->phy_statistical_counters;
- MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
- mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+ if (MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_recovery_counters)) {
+ out = pstats->phy_recovery_counters;
+ MLX5_SET(ppcnt_reg, in, grp,
+ MLX5_PHYSICAL_LAYER_RECOVERY_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0,
+ 0);
+ }
}
void mlx5e_get_link_ext_stats(struct net_device *dev,
@@ -2086,7 +2119,6 @@ static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_err) },
#endif
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, recover) },
-#ifdef CONFIG_PAGE_POOL_STATS
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_fast) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_slow) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_slow_high_order) },
@@ -2098,7 +2130,6 @@ static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_ring) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_ring_full) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_released_ref) },
-#endif
#ifdef CONFIG_MLX5_EN_TLS
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_packets) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_bytes) },
@@ -2393,8 +2424,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ptp)
}
if (priv->rx_ptp_opened) {
for (i = 0; i < NUM_PTP_RQ_STATS; i++)
- ethtool_sprintf(data, ptp_rq_stats_desc[i].format,
- MLX5E_PTP_CHANNEL_IX);
+ ethtool_puts(data, ptp_rq_stats_desc[i].format);
}
}
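The en_stats.c rework replaces open-coded capability checks with count macros (e.g. NUM_PPORT_PHY_RECOVERY_LOOPBACK_COUNTERS(dev)) that evaluate to the descriptor array size when the PCAM feature is present and to 0 otherwise, so the num_stats, string-fill and value-fill callbacks iterate the same bound and cannot drift apart. A sketch of the pattern with an illustrative demo_ prefix; only MLX5_CAP_PCAM_FEATURE() and the descriptor array come from the patch:

#define DEMO_NUM_PHY_RECOVERY_COUNTERS(mdev) \
	(MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_recovery_counters) ? \
	 ARRAY_SIZE(pport_phy_recovery_cntrs_stats_desc) : 0)

/* All three ethtool group callbacks use the same bound. */
static int demo_num_phy_stats(struct mlx5_core_dev *mdev)
{
	return NUM_PPORT_PHY_LAYER_COUNTERS +
	       DEMO_NUM_PHY_RECOVERY_COUNTERS(mdev);
}

The layer and recovery counters are read with MLX5E_READ_CTR32_BE while the statistical group keeps MLX5E_READ_CTR64_BE, matching the 32-bit vs 64-bit layout of the respective PPCNT counter sets.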
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 5961c569cfe0..def5dea1463d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -54,7 +54,7 @@
#define MLX5E_DECLARE_PTP_TX_STAT(type, fld) "ptp_tx%d_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_PTP_CH_STAT(type, fld) "ptp_ch_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_PTP_CQ_STAT(type, fld) "ptp_cq%d_"#fld, offsetof(type, fld)
-#define MLX5E_DECLARE_PTP_RQ_STAT(type, fld) "ptp_rq%d_"#fld, offsetof(type, fld)
+#define MLX5E_DECLARE_PTP_RQ_STAT(type, fld) "ptp_rq0_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_QOS_TX_STAT(type, fld) "qos_tx%d_"#fld, offsetof(type, fld)
@@ -215,7 +215,6 @@ struct mlx5e_sw_stats {
u64 ch_aff_change;
u64 ch_force_irq;
u64 ch_eq_rearm;
-#ifdef CONFIG_PAGE_POOL_STATS
u64 rx_pp_alloc_fast;
u64 rx_pp_alloc_slow;
u64 rx_pp_alloc_slow_high_order;
@@ -227,7 +226,6 @@ struct mlx5e_sw_stats {
u64 rx_pp_recycle_ring;
u64 rx_pp_recycle_ring_full;
u64 rx_pp_recycle_released_ref;
-#endif
#ifdef CONFIG_MLX5_EN_TLS
u64 tx_tls_encrypted_packets;
u64 tx_tls_encrypted_bytes;
@@ -309,6 +307,9 @@ struct mlx5e_vport_stats {
#define PPORT_PHY_STATISTICAL_GET(pstats, c) \
MLX5_GET64(ppcnt_reg, (pstats)->phy_statistical_counters, \
counter_set.phys_layer_statistical_cntrs.c##_high)
+#define PPORT_PHY_RECOVERY_GET(pstats, c) \
+ MLX5_GET64(ppcnt_reg, (pstats)->phy_recovery_counters, \
+ counter_set.phys_layer_recovery_cntrs.c)
#define PPORT_PER_PRIO_GET(pstats, prio, c) \
MLX5_GET64(ppcnt_reg, pstats->per_prio_counters[prio], \
counter_set.eth_per_prio_grp_data_layout.c##_high)
@@ -324,6 +325,7 @@ struct mlx5e_pport_stats {
__be64 per_prio_counters[NUM_PPORT_PRIO][MLX5_ST_SZ_QW(ppcnt_reg)];
__be64 phy_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
__be64 phy_statistical_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
+ __be64 phy_recovery_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
__be64 eth_ext_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
__be64 per_tc_prio_counters[NUM_PPORT_PRIO][MLX5_ST_SZ_QW(ppcnt_reg)];
__be64 per_tc_congest_prio_counters[NUM_PPORT_PRIO][MLX5_ST_SZ_QW(ppcnt_reg)];
@@ -381,7 +383,6 @@ struct mlx5e_rq_stats {
u64 arfs_err;
#endif
u64 recover;
-#ifdef CONFIG_PAGE_POOL_STATS
u64 pp_alloc_fast;
u64 pp_alloc_slow;
u64 pp_alloc_slow_high_order;
@@ -393,7 +394,6 @@ struct mlx5e_rq_stats {
u64 pp_recycle_ring;
u64 pp_recycle_ring_full;
u64 pp_recycle_released_ref;
-#endif
#ifdef CONFIG_MLX5_EN_TLS
u64 tls_decrypted_packets;
u64 tls_decrypted_bytes;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 6b3b1afe8312..fef418e1ed1a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1282,7 +1282,7 @@ mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest[dest_ix].counter_id = mlx5_fc_id(attr->counter);
+ dest[dest_ix].counter = attr->counter;
dest_ix++;
}
@@ -1750,9 +1750,6 @@ extra_split_attr_dests_needed(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr
!list_is_first(&attr->list, &flow->attrs))
return 0;
- if (flow_flag_test(flow, SLOW))
- return 0;
-
esw_attr = attr->esw_attr;
if (!esw_attr->split_count ||
esw_attr->split_count == esw_attr->out_count - 1)
@@ -1766,7 +1763,7 @@ extra_split_attr_dests_needed(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr
for (i = esw_attr->split_count; i < esw_attr->out_count; i++) {
/* external dest with encap is considered as internal by firmware */
if (esw_attr->dests[i].vport == MLX5_VPORT_UPLINK &&
- !(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP_VALID))
+ !(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP))
ext_dest = true;
else
int_dest = true;
@@ -2031,9 +2028,8 @@ err_out:
return err;
}
-static bool mlx5_flow_has_geneve_opt(struct mlx5e_tc_flow *flow)
+static bool mlx5_flow_has_geneve_opt(struct mlx5_flow_spec *spec)
{
- struct mlx5_flow_spec *spec = &flow->attr->parse_attr->spec;
void *headers_v = MLX5_ADDR_OF(fte_match_param,
spec->match_value,
misc_parameters_3);
@@ -2072,7 +2068,7 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
}
complete_all(&flow->del_hw_done);
- if (mlx5_flow_has_geneve_opt(flow))
+ if (mlx5_flow_has_geneve_opt(&attr->parse_attr->spec))
mlx5_geneve_tlv_option_del(priv->mdev->geneve);
if (flow->decap_route)
@@ -2577,12 +2573,13 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
err = mlx5e_tc_tun_parse(filter_dev, priv, tmp_spec, f, match_level);
if (err) {
- kvfree(tmp_spec);
NL_SET_ERR_MSG_MOD(extack, "Failed to parse tunnel attributes");
netdev_warn(priv->netdev, "Failed to parse tunnel attributes");
- return err;
+ } else {
+ err = mlx5e_tc_set_attr_rx_tun(flow, tmp_spec);
}
- err = mlx5e_tc_set_attr_rx_tun(flow, tmp_spec);
+ if (mlx5_flow_has_geneve_opt(tmp_spec))
+ mlx5_geneve_tlv_option_del(priv->mdev->geneve);
kvfree(tmp_spec);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index f8c7912abe0e..55a8629f0792 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -337,10 +337,11 @@ static void mlx5e_sq_calc_wqe_attr(struct sk_buff *skb, const struct mlx5e_tx_at
};
}
-static void mlx5e_tx_skb_update_hwts_flags(struct sk_buff *skb)
+static void mlx5e_tx_skb_update_ts_flags(struct sk_buff *skb)
{
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ skb_tx_timestamp(skb);
}
static void mlx5e_tx_check_stop(struct mlx5e_txqsq *sq)
@@ -392,7 +393,7 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | attr->opcode);
cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | wqe_attr->ds_cnt);
- mlx5e_tx_skb_update_hwts_flags(skb);
+ mlx5e_tx_skb_update_ts_flags(skb);
sq->pc += wi->num_wqebbs;
@@ -525,9 +526,9 @@ static void mlx5e_tx_mpwqe_session_start(struct mlx5e_txqsq *sq,
{
struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
struct mlx5e_tx_wqe *wqe;
- u16 pi;
+ u16 pi, num_wqebbs;
- pi = mlx5e_txqsq_get_next_pi(sq, sq->max_sq_mpw_wqebbs);
+ pi = mlx5e_txqsq_get_next_pi_anysize(sq, &num_wqebbs);
wqe = MLX5E_TX_FETCH_WQE(sq, pi);
net_prefetchw(wqe->data);
@@ -535,6 +536,7 @@ static void mlx5e_tx_mpwqe_session_start(struct mlx5e_txqsq *sq,
.wqe = wqe,
.bytes_count = 0,
.ds_count = MLX5E_TX_WQE_EMPTY_DS_COUNT,
+ .ds_count_max = num_wqebbs * MLX5_SEND_WQEBB_NUM_DS,
.pkt_count = 0,
.inline_on = 0,
};
@@ -624,9 +626,9 @@ mlx5e_sq_xmit_mpwqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
mlx5e_dma_push(sq, txd.dma_addr, txd.len, MLX5E_DMA_MAP_SINGLE);
mlx5e_skb_fifo_push(&sq->db.skb_fifo, skb);
mlx5e_tx_mpwqe_add_dseg(sq, &txd);
- mlx5e_tx_skb_update_hwts_flags(skb);
+ mlx5e_tx_skb_update_ts_flags(skb);
- if (unlikely(mlx5e_tx_mpwqe_is_full(&sq->mpwqe, sq->max_sq_mpw_wqebbs))) {
+ if (unlikely(mlx5e_tx_mpwqe_is_full(&sq->mpwqe))) {
/* Might stop the queue and affect the retval of __netdev_tx_sent_queue. */
cseg = mlx5e_tx_mpwqe_session_complete(sq);
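In en_tx.c the helper is renamed because it now handles both timestamp flavours: it still sets SKBTX_IN_PROGRESS only when a hardware timestamp was requested, and additionally calls skb_tx_timestamp() so a software TX timestamp is taken for every transmitted skb. The combined pattern in isolation (free-standing sketch, not the driver function itself):

#include <linux/skbuff.h>

static void demo_tx_update_ts_flags(struct sk_buff *skb)
{
	/* Hardware timestamping was requested for this skb: tell the
	 * stack that a completion carrying the HW timestamp will follow.
	 */
	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
	/* Software timestamp, taken just before the doorbell. */
	skb_tx_timestamp(skb);
}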
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 2b229b6226c6..dfb079e59d85 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -871,8 +871,8 @@ static void comp_irq_release_sf(struct mlx5_core_dev *dev, u16 vecidx)
static int comp_irq_request_sf(struct mlx5_core_dev *dev, u16 vecidx)
{
+ struct mlx5_irq_pool *pool = mlx5_irq_table_get_comp_irq_pool(dev);
struct mlx5_eq_table *table = dev->priv.eq_table;
- struct mlx5_irq_pool *pool = mlx5_irq_pool_get(dev);
struct irq_affinity_desc af_desc = {};
struct mlx5_irq *irq;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
index 6b4c9ffad95b..7dd1dc3f77c7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
@@ -135,7 +135,7 @@ int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw,
if (drop_counter) {
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- drop_ctr_dst.counter_id = mlx5_fc_id(drop_counter);
+ drop_ctr_dst.counter = drop_counter;
dst = &drop_ctr_dst;
dest_num++;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c
index d599e50af346..3ce455c2535c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c
@@ -27,7 +27,7 @@ esw_acl_table_create(struct mlx5_eswitch *esw, struct mlx5_vport *vport, int ns,
esw_debug(dev, "Create vport[%d] %s ACL table\n", vport_num,
ns == MLX5_FLOW_NAMESPACE_ESW_INGRESS ? "ingress" : "egress");
- root_ns = mlx5_get_flow_vport_acl_namespace(dev, ns, vport->index);
+ root_ns = mlx5_get_flow_vport_namespace(dev, ns, vport->index);
if (!root_ns) {
esw_warn(dev, "Failed to get E-Switch root namespace for vport (%d)\n",
vport_num);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c
index 093ed86a0acd..1c37098e09ea 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c
@@ -260,7 +260,7 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw,
if (counter) {
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- drop_ctr_dst.counter_id = mlx5_fc_id(counter);
+ drop_ctr_dst.counter = counter;
dst = &drop_ctr_dst;
dest_num++;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
index c5ea1d1d2b03..76e35c827da0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
@@ -570,7 +570,8 @@ mlx5_esw_bridge_egress_table_cleanup(struct mlx5_esw_bridge *bridge)
static struct mlx5_flow_handle *
mlx5_esw_bridge_ingress_flow_with_esw_create(u16 vport_num, const unsigned char *addr,
- struct mlx5_esw_bridge_vlan *vlan, u32 counter_id,
+ struct mlx5_esw_bridge_vlan *vlan,
+ struct mlx5_fc *counter,
struct mlx5_esw_bridge *bridge,
struct mlx5_eswitch *esw)
{
@@ -628,7 +629,7 @@ mlx5_esw_bridge_ingress_flow_with_esw_create(u16 vport_num, const unsigned char
dests[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dests[0].ft = bridge->egress_ft;
dests[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dests[1].counter_id = counter_id;
+ dests[1].counter = counter;
handle = mlx5_add_flow_rules(br_offloads->ingress_ft, rule_spec, &flow_act, dests,
ARRAY_SIZE(dests));
@@ -639,17 +640,19 @@ mlx5_esw_bridge_ingress_flow_with_esw_create(u16 vport_num, const unsigned char
static struct mlx5_flow_handle *
mlx5_esw_bridge_ingress_flow_create(u16 vport_num, const unsigned char *addr,
- struct mlx5_esw_bridge_vlan *vlan, u32 counter_id,
+ struct mlx5_esw_bridge_vlan *vlan,
+ struct mlx5_fc *counter,
struct mlx5_esw_bridge *bridge)
{
- return mlx5_esw_bridge_ingress_flow_with_esw_create(vport_num, addr, vlan, counter_id,
+ return mlx5_esw_bridge_ingress_flow_with_esw_create(vport_num, addr, vlan, counter,
bridge, bridge->br_offloads->esw);
}
static struct mlx5_flow_handle *
mlx5_esw_bridge_ingress_flow_peer_create(u16 vport_num, u16 esw_owner_vhca_id,
const unsigned char *addr,
- struct mlx5_esw_bridge_vlan *vlan, u32 counter_id,
+ struct mlx5_esw_bridge_vlan *vlan,
+ struct mlx5_fc *counter,
struct mlx5_esw_bridge *bridge)
{
struct mlx5_devcom_comp_dev *devcom = bridge->br_offloads->esw->devcom, *pos;
@@ -671,7 +674,7 @@ mlx5_esw_bridge_ingress_flow_peer_create(u16 vport_num, u16 esw_owner_vhca_id,
goto out;
}
- handle = mlx5_esw_bridge_ingress_flow_with_esw_create(vport_num, addr, vlan, counter_id,
+ handle = mlx5_esw_bridge_ingress_flow_with_esw_create(vport_num, addr, vlan, counter,
bridge, peer_esw);
out:
@@ -1385,10 +1388,9 @@ mlx5_esw_bridge_fdb_entry_init(struct net_device *dev, u16 vport_num, u16 esw_ow
handle = peer ?
mlx5_esw_bridge_ingress_flow_peer_create(vport_num, esw_owner_vhca_id,
- addr, vlan, mlx5_fc_id(counter),
- bridge) :
+ addr, vlan, counter, bridge) :
mlx5_esw_bridge_ingress_flow_create(vport_num, addr, vlan,
- mlx5_fc_id(counter), bridge);
+ counter, bridge);
if (IS_ERR(handle)) {
err = PTR_ERR(handle);
esw_warn(esw->dev, "Failed to create ingress flow(vport=%u,err=%d,peer=%d)\n",
@@ -1861,7 +1863,7 @@ int mlx5_esw_bridge_port_mdb_add(struct net_device *dev, u16 vport_num, u16 esw_
"Failed to lookup bridge port to add MDB (MAC=%pM,vport=%u)\n",
addr, vport_num);
NL_SET_ERR_MSG_FMT_MOD(extack,
- "Failed to lookup bridge port to add MDB (MAC=%pM,vport=%u)\n",
+ "Failed to lookup bridge port to add MDB (MAC=%pM,vport=%u)",
addr, vport_num);
return -EINVAL;
}
@@ -1874,7 +1876,7 @@ int mlx5_esw_bridge_port_mdb_add(struct net_device *dev, u16 vport_num, u16 esw_
"Failed to lookup bridge port vlan metadata to create MDB (MAC=%pM,vid=%u,vport=%u)\n",
addr, vid, vport_num);
NL_SET_ERR_MSG_FMT_MOD(extack,
- "Failed to lookup vlan metadata for MDB (MAC=%pM,vid=%u,vport=%u)\n",
+ "Failed to lookup vlan metadata for MDB (MAC=%pM,vid=%u,vport=%u)",
addr, vid, vport_num);
return -EINVAL;
}
@@ -1882,7 +1884,7 @@ int mlx5_esw_bridge_port_mdb_add(struct net_device *dev, u16 vport_num, u16 esw_
err = mlx5_esw_bridge_port_mdb_attach(dev, port, addr, vid);
if (err) {
- NL_SET_ERR_MSG_FMT_MOD(extack, "Failed to add MDB (MAC=%pM,vid=%u,vport=%u)\n",
+ NL_SET_ERR_MSG_FMT_MOD(extack, "Failed to add MDB (MAC=%pM,vid=%u,vport=%u)",
addr, vid, vport_num);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
index 982fe3714683..b7102e14d23d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
@@ -32,7 +32,7 @@ static void mlx5_esw_offloads_pf_vf_devlink_port_attrs_set(struct mlx5_eswitch *
u16 pfnum;
mlx5_esw_get_port_parent_id(dev, &ppid);
- pfnum = mlx5_get_dev_index(dev);
+ pfnum = PCI_FUNC(dev->pdev->devfn);
external = mlx5_core_is_ecpf_esw_manager(dev);
if (external)
controller_num = dev->priv.eswitch->offloads.host_number + 1;
@@ -110,7 +110,7 @@ static void mlx5_esw_offloads_sf_devlink_port_attrs_set(struct mlx5_eswitch *esw
struct netdev_phys_item_id ppid = {};
u16 pfnum;
- pfnum = mlx5_get_dev_index(dev);
+ pfnum = PCI_FUNC(dev->pdev->devfn);
mlx5_esw_get_port_parent_id(dev, &ppid);
memcpy(dl_port->attrs.switch_id.id, &ppid.id[0], ppid.id_len);
dl_port->attrs.switch_id.id_len = ppid.id_len;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
index 5a0047bdcb51..3cfe743610d3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
@@ -10,9 +10,9 @@
#endif
enum {
- MLX5_ESW_IPSEC_RX_POL_FT_LEVEL,
MLX5_ESW_IPSEC_RX_ESP_FT_LEVEL,
MLX5_ESW_IPSEC_RX_ESP_FT_CHK_LEVEL,
+ MLX5_ESW_IPSEC_RX_POL_FT_LEVEL,
};
enum {
@@ -85,6 +85,19 @@ err_header_alloc:
return err;
}
+void mlx5_esw_ipsec_rx_rule_add_match_obj(struct mlx5e_ipsec_sa_entry *sa_entry,
+ struct mlx5_flow_spec *spec)
+{
+ MLX5_SET(fte_match_param, spec->match_criteria,
+ misc_parameters_2.metadata_reg_c_1,
+ ESW_IPSEC_RX_MAPPED_ID_MATCH_MASK);
+ MLX5_SET(fte_match_param, spec->match_value,
+ misc_parameters_2.metadata_reg_c_1,
+ sa_entry->rx_mapped_id << ESW_ZONE_ID_BITS);
+
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
+}
+
void mlx5_esw_ipsec_rx_id_mapping_remove(struct mlx5e_ipsec_sa_entry *sa_entry)
{
struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
@@ -150,11 +163,11 @@ void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev)
unsigned long i;
int err;
- xa_for_each(&esw->offloads.vport_reps, i, rep) {
- rpriv = rep->rep_data[REP_ETH].priv;
- if (!rpriv || !rpriv->netdev)
+ mlx5_esw_for_each_rep(esw, i, rep) {
+ if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
continue;
+ rpriv = rep->rep_data[REP_ETH].priv;
rhashtable_walk_enter(&rpriv->tc_ht, &iter);
rhashtable_walk_start(&iter);
while ((flow = rhashtable_walk_next(&iter)) != NULL) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h
index ac9c65b89166..514c15258b1d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h
@@ -20,6 +20,8 @@ int mlx5_esw_ipsec_rx_ipsec_obj_id_search(struct mlx5e_priv *priv, u32 id,
void mlx5_esw_ipsec_tx_create_attr_set(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_tx_create_attr *attr);
void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev);
+void mlx5_esw_ipsec_rx_rule_add_match_obj(struct mlx5e_ipsec_sa_entry *sa_entry,
+ struct mlx5_flow_spec *spec);
#else
static inline void mlx5_esw_ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx_create_attr *attr) {}
@@ -48,5 +50,8 @@ static inline void mlx5_esw_ipsec_tx_create_attr_set(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_tx_create_attr *attr) {}
static inline void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev) {}
+static inline void
+mlx5_esw_ipsec_rx_rule_add_match_obj(struct mlx5e_ipsec_sa_entry *sa_entry,
+ struct mlx5_flow_spec *spec) {}
#endif /* CONFIG_MLX5_ESWITCH */
#endif /* __MLX5_ESW_IPSEC_FS_H__ */
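The ipsec_fs.h hunks follow the usual kernel header idiom for optional features: a real prototype when CONFIG_MLX5_ESWITCH is set, and an empty static inline otherwise, so callers never need their own #ifdef. A generic sketch of the idiom; every demo_ name is illustrative:

struct demo_entry;
struct demo_spec;

#ifdef CONFIG_DEMO_FEATURE
void demo_add_match(struct demo_entry *e, struct demo_spec *spec);
#else
/* Compiles away entirely when the feature is disabled. */
static inline void demo_add_match(struct demo_entry *e,
				  struct demo_spec *spec) {}
#endif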
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
index 45183de424f3..76382626ad41 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
@@ -96,7 +96,7 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw)
if (!flow_group_in)
return -ENOMEM;
- ft_attr.max_fte = POOL_NEXT_SIZE;
+ ft_attr.max_fte = MLX5_FS_MAX_POOL_SIZE;
ft_attr.prio = LEGACY_FDB_PRIO;
fdb = mlx5_create_flow_table(root_ns, &ft_attr);
if (IS_ERR(fdb)) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
index 8b7c843446e1..b6ae384396b3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
@@ -90,15 +90,30 @@ struct mlx5_esw_sched_node {
struct list_head children;
/* Valid only if this node is associated with a vport. */
struct mlx5_vport *vport;
+ /* Level in the hierarchy. The root node level is 1. */
+ u8 level;
};
+static void esw_qos_node_attach_to_parent(struct mlx5_esw_sched_node *node)
+{
+ if (!node->parent) {
+ /* Root children are assigned a depth level of 2. */
+ node->level = 2;
+ list_add_tail(&node->entry, &node->esw->qos.domain->nodes);
+ } else {
+ node->level = node->parent->level + 1;
+ list_add_tail(&node->entry, &node->parent->children);
+ }
+}
+
static void
esw_qos_node_set_parent(struct mlx5_esw_sched_node *node, struct mlx5_esw_sched_node *parent)
{
list_del_init(&node->entry);
node->parent = parent;
- list_add_tail(&node->entry, &parent->children);
- node->esw = parent->esw;
+ if (parent)
+ node->esw = parent->esw;
+ esw_qos_node_attach_to_parent(node);
}
void mlx5_esw_qos_vport_qos_free(struct mlx5_vport *vport)
@@ -305,8 +320,9 @@ static int esw_qos_set_node_min_rate(struct mlx5_esw_sched_node *node,
return 0;
}
-static int esw_qos_create_node_sched_elem(struct mlx5_core_dev *dev, u32 parent_element_id,
- u32 *tsar_ix)
+static int
+esw_qos_create_node_sched_elem(struct mlx5_core_dev *dev, u32 parent_element_id,
+ u32 max_rate, u32 bw_share, u32 *tsar_ix)
{
u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
void *attr;
@@ -323,6 +339,8 @@ static int esw_qos_create_node_sched_elem(struct mlx5_core_dev *dev, u32 parent_
SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR);
MLX5_SET(scheduling_context, tsar_ctx, parent_element_id,
parent_element_id);
+ MLX5_SET(scheduling_context, tsar_ctx, max_average_bw, max_rate);
+ MLX5_SET(scheduling_context, tsar_ctx, bw_share, bw_share);
attr = MLX5_ADDR_OF(scheduling_context, tsar_ctx, element_attributes);
MLX5_SET(tsar_element, attr, tsar_type, TSAR_ELEMENT_TSAR_TYPE_DWRR);
@@ -358,7 +376,6 @@ static struct mlx5_esw_sched_node *
__esw_qos_alloc_node(struct mlx5_eswitch *esw, u32 tsar_ix, enum sched_node_type type,
struct mlx5_esw_sched_node *parent)
{
- struct list_head *parent_children;
struct mlx5_esw_sched_node *node;
node = kzalloc(sizeof(*node), GFP_KERNEL);
@@ -370,8 +387,7 @@ __esw_qos_alloc_node(struct mlx5_eswitch *esw, u32 tsar_ix, enum sched_node_type
node->type = type;
node->parent = parent;
INIT_LIST_HEAD(&node->children);
- parent_children = parent ? &parent->children : &esw->qos.domain->nodes;
- list_add_tail(&node->entry, parent_children);
+ esw_qos_node_attach_to_parent(node);
return node;
}
@@ -396,7 +412,8 @@ __esw_qos_create_vports_sched_node(struct mlx5_eswitch *esw, struct mlx5_esw_sch
u32 tsar_ix;
int err;
- err = esw_qos_create_node_sched_elem(esw->dev, esw->qos.root_tsar_ix, &tsar_ix);
+ err = esw_qos_create_node_sched_elem(esw->dev, esw->qos.root_tsar_ix, 0,
+ 0, &tsar_ix);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "E-Switch create TSAR for node failed");
return ERR_PTR(err);
@@ -463,7 +480,8 @@ static int esw_qos_create(struct mlx5_eswitch *esw, struct netlink_ext_ack *exta
if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
return -EOPNOTSUPP;
- err = esw_qos_create_node_sched_elem(esw->dev, 0, &esw->qos.root_tsar_ix);
+ err = esw_qos_create_node_sched_elem(esw->dev, 0, 0, 0,
+ &esw->qos.root_tsar_ix);
if (err) {
esw_warn(dev, "E-Switch create root TSAR failed (%d)\n", err);
return err;
@@ -564,6 +582,9 @@ static int esw_qos_vport_enable(struct mlx5_vport *vport, struct mlx5_esw_sched_
return err;
esw_qos_normalize_min_rate(parent->esw, parent, extack);
+ trace_mlx5_esw_vport_qos_create(vport->dev, vport,
+ vport->qos.sched_node->max_rate,
+ vport->qos.sched_node->bw_share);
return 0;
}
@@ -591,8 +612,11 @@ static int mlx5_esw_qos_vport_enable(struct mlx5_vport *vport, enum sched_node_t
sched_node->vport = vport;
vport->qos.sched_node = sched_node;
err = esw_qos_vport_enable(vport, parent, extack);
- if (err)
+ if (err) {
+ __esw_qos_free_node(sched_node);
esw_qos_put(esw);
+ vport->qos.sched_node = NULL;
+ }
return err;
}
@@ -980,10 +1004,10 @@ int mlx5_esw_qos_vport_update_parent(struct mlx5_vport *vport, struct mlx5_esw_s
return err;
}
-int mlx5_esw_devlink_rate_parent_set(struct devlink_rate *devlink_rate,
- struct devlink_rate *parent,
- void *priv, void *parent_priv,
- struct netlink_ext_ack *extack)
+int mlx5_esw_devlink_rate_leaf_parent_set(struct devlink_rate *devlink_rate,
+ struct devlink_rate *parent,
+ void *priv, void *parent_priv,
+ struct netlink_ext_ack *extack)
{
struct mlx5_esw_sched_node *node;
struct mlx5_vport *vport = priv;
@@ -994,3 +1018,105 @@ int mlx5_esw_devlink_rate_parent_set(struct devlink_rate *devlink_rate,
node = parent_priv;
return mlx5_esw_qos_vport_update_parent(vport, node, extack);
}
+
+static int
+mlx5_esw_qos_node_validate_set_parent(struct mlx5_esw_sched_node *node,
+ struct mlx5_esw_sched_node *parent,
+ struct netlink_ext_ack *extack)
+{
+ u8 new_level, max_level;
+
+ if (parent && parent->esw != node->esw) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot assign node to another E-Switch");
+ return -EOPNOTSUPP;
+ }
+
+ if (!list_empty(&node->children)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot reassign a node that contains rate objects");
+ return -EOPNOTSUPP;
+ }
+
+ new_level = parent ? parent->level + 1 : 2;
+ max_level = 1 << MLX5_CAP_QOS(node->esw->dev, log_esw_max_sched_depth);
+ if (new_level > max_level) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Node hierarchy depth exceeds the maximum supported level");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int esw_qos_vports_node_update_parent(struct mlx5_esw_sched_node *node,
+ struct mlx5_esw_sched_node *parent,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_sched_node *curr_parent = node->parent;
+ struct mlx5_eswitch *esw = node->esw;
+ u32 parent_ix;
+ int err;
+
+ parent_ix = parent ? parent->ix : node->esw->qos.root_tsar_ix;
+ mlx5_destroy_scheduling_element_cmd(esw->dev,
+ SCHEDULING_HIERARCHY_E_SWITCH,
+ node->ix);
+ err = esw_qos_create_node_sched_elem(esw->dev, parent_ix,
+ node->max_rate, 0, &node->ix);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to create a node under the new hierarchy.");
+ if (esw_qos_create_node_sched_elem(esw->dev, curr_parent->ix,
+ node->max_rate,
+ node->bw_share,
+ &node->ix))
+ esw_warn(esw->dev, "Node restore QoS failed\n");
+
+ return err;
+ }
+ esw_qos_node_set_parent(node, parent);
+
+ return 0;
+}
+
+static int mlx5_esw_qos_node_update_parent(struct mlx5_esw_sched_node *node,
+ struct mlx5_esw_sched_node *parent,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_sched_node *curr_parent;
+ struct mlx5_eswitch *esw = node->esw;
+ int err;
+
+ err = mlx5_esw_qos_node_validate_set_parent(node, parent, extack);
+ if (err)
+ return err;
+
+ esw_qos_lock(esw);
+ curr_parent = node->parent;
+ err = esw_qos_vports_node_update_parent(node, parent, extack);
+ if (err)
+ goto out;
+
+ esw_qos_normalize_min_rate(esw, curr_parent, extack);
+ esw_qos_normalize_min_rate(esw, parent, extack);
+
+out:
+ esw_qos_unlock(esw);
+
+ return err;
+}
+
+int mlx5_esw_devlink_rate_node_parent_set(struct devlink_rate *devlink_rate,
+ struct devlink_rate *parent,
+ void *priv, void *parent_priv,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_sched_node *node = priv, *parent_node;
+
+ if (!parent)
+ return mlx5_esw_qos_node_update_parent(node, NULL, extack);
+
+ parent_node = parent_priv;
+ return mlx5_esw_qos_node_update_parent(node, parent_node, extack);
+}
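The qos.c additions give every scheduling node an explicit level (root children start at level 2, below the root TSAR at level 1) and reject a reparent that would push the hierarchy past 1 << log_esw_max_sched_depth. The check reduced to its essentials; demo_node is an illustrative stand-in for struct mlx5_esw_sched_node:

struct demo_node {
	struct demo_node *parent;
	u8 level;	/* root children are created at level 2 */
};

static bool demo_reparent_depth_ok(const struct demo_node *new_parent,
				   u8 log_max_sched_depth)
{
	u8 new_level = new_parent ? new_parent->level + 1 : 2;

	return new_level <= (1U << log_max_sched_depth);
}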
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h
index 6eb8f6a648c8..ed40ec8f027e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h
@@ -29,10 +29,14 @@ int mlx5_esw_devlink_rate_node_new(struct devlink_rate *rate_node, void **priv,
struct netlink_ext_ack *extack);
int mlx5_esw_devlink_rate_node_del(struct devlink_rate *rate_node, void *priv,
struct netlink_ext_ack *extack);
-int mlx5_esw_devlink_rate_parent_set(struct devlink_rate *devlink_rate,
- struct devlink_rate *parent,
- void *priv, void *parent_priv,
- struct netlink_ext_ack *extack);
+int mlx5_esw_devlink_rate_leaf_parent_set(struct devlink_rate *devlink_rate,
+ struct devlink_rate *parent,
+ void *priv, void *parent_priv,
+ struct netlink_ext_ack *extack);
+int mlx5_esw_devlink_rate_node_parent_set(struct devlink_rate *devlink_rate,
+ struct devlink_rate *parent,
+ void *priv, void *parent_priv,
+ struct netlink_ext_ack *extack);
#endif
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 7fb8a3381f84..4917d185d0c3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1295,12 +1295,15 @@ mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
ret = mlx5_eswitch_load_pf_vf_vport(esw, MLX5_VPORT_ECPF, enabled_events);
if (ret)
goto ecpf_err;
- if (mlx5_core_ec_sriov_enabled(esw->dev)) {
- ret = mlx5_eswitch_load_ec_vf_vports(esw, esw->esw_funcs.num_ec_vfs,
- enabled_events);
- if (ret)
- goto ec_vf_err;
- }
+ }
+
+ /* Enable ECVF vports */
+ if (mlx5_core_ec_sriov_enabled(esw->dev)) {
+ ret = mlx5_eswitch_load_ec_vf_vports(esw,
+ esw->esw_funcs.num_ec_vfs,
+ enabled_events);
+ if (ret)
+ goto ec_vf_err;
}
/* Enable VF vports */
@@ -1331,9 +1334,11 @@ void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw)
{
mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
+ if (mlx5_core_ec_sriov_enabled(esw->dev))
+ mlx5_eswitch_unload_ec_vf_vports(esw,
+ esw->esw_funcs.num_ec_vfs);
+
if (mlx5_ecpf_vport_exists(esw->dev)) {
- if (mlx5_core_ec_sriov_enabled(esw->dev))
- mlx5_eswitch_unload_ec_vf_vports(esw, esw->esw_funcs.num_vfs);
mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_ECPF);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index a83d41121db6..8573d36785f4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -714,6 +714,9 @@ void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);
MLX5_CAP_GEN_2((esw->dev), ec_vf_vport_base) +\
(last) - 1)
+#define mlx5_esw_for_each_rep(esw, i, rep) \
+ xa_for_each(&((esw)->offloads.vport_reps), i, rep)
+
struct mlx5_eswitch *__must_check
mlx5_devlink_eswitch_get(struct devlink *devlink);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index d6ff2dc4c19e..0e3a977d5332 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -53,9 +53,6 @@
#include "lag/lag.h"
#include "en/tc/post_meter.h"
-#define mlx5_esw_for_each_rep(esw, i, rep) \
- xa_for_each(&((esw)->offloads.vport_reps), i, rep)
-
/* There are two match-all miss flows, one for unicast dst mac and
* one for multicast.
*/
@@ -651,6 +648,7 @@ esw_setup_meter(struct mlx5_flow_attr *attr, struct mlx5_flow_act *flow_act)
meter = attr->meter_attr.meter;
flow_act->exe_aso.type = attr->exe_aso_type;
flow_act->exe_aso.object_id = meter->obj_id;
+ flow_act->exe_aso.base_id = mlx5e_flow_meter_get_base_id(meter);
flow_act->exe_aso.flow_meter.meter_idx = meter->idx;
flow_act->exe_aso.flow_meter.init_color = MLX5_FLOW_METER_COLOR_GREEN;
/* use metadata reg 5 for packet color */
@@ -724,7 +722,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest[i].counter_id = mlx5_fc_id(attr->counter);
+ dest[i].counter = attr->counter;
i++;
}
@@ -2335,9 +2333,10 @@ out_free:
static void esw_mode_change(struct mlx5_eswitch *esw, u16 mode)
{
mlx5_devcom_comp_lock(esw->dev->priv.hca_devcom_comp);
-
- if (esw->dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_IB_ADEV) {
+ if (esw->dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_IB_ADEV ||
+ mlx5_core_mp_enabled(esw->dev)) {
esw->mode = mode;
+ mlx5_rescan_drivers_locked(esw->dev);
mlx5_devcom_comp_unlock(esw->dev->priv.hca_devcom_comp);
return;
}
@@ -2830,9 +2829,9 @@ static int esw_set_master_egress_rule(struct mlx5_core_dev *master,
if (IS_ERR(vport))
return PTR_ERR(vport);
- egress_ns = mlx5_get_flow_vport_acl_namespace(master,
- MLX5_FLOW_NAMESPACE_ESW_EGRESS,
- vport->index);
+ egress_ns = mlx5_get_flow_vport_namespace(master,
+ MLX5_FLOW_NAMESPACE_ESW_EGRESS,
+ vport->index);
if (!egress_ns)
return -EINVAL;
@@ -3534,7 +3533,9 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
int err;
mutex_init(&esw->offloads.termtbl_mutex);
- mlx5_rdma_enable_roce(esw->dev);
+ err = mlx5_rdma_enable_roce(esw->dev);
+ if (err)
+ goto err_roce;
err = mlx5_esw_host_number_init(esw);
if (err)
@@ -3595,6 +3596,7 @@ err_vport_metadata:
esw_offloads_metadata_uninit(esw);
err_metadata:
mlx5_rdma_disable_roce(esw->dev);
+err_roce:
mutex_destroy(&esw->offloads.termtbl_mutex);
return err;
}
@@ -3779,6 +3781,8 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
esw->eswitch_operation_in_progress = true;
up_write(&esw->mode_lock);
+ if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
+ esw->dev->priv.flags |= MLX5_PRIV_FLAGS_SWITCH_LEGACY;
mlx5_eswitch_disable_locked(esw);
if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) {
if (mlx5_devlink_trap_get_num_active(esw->dev)) {
@@ -4157,37 +4161,12 @@ u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw,
}
EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match);
-static int mlx5_esw_query_vport_vhca_id(struct mlx5_eswitch *esw, u16 vport_num, u16 *vhca_id)
-{
- int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
- void *query_ctx;
- void *hca_caps;
- int err;
-
- *vhca_id = 0;
-
- query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
- if (!query_ctx)
- return -ENOMEM;
-
- err = mlx5_vport_get_other_func_general_cap(esw->dev, vport_num, query_ctx);
- if (err)
- goto out_free;
-
- hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
- *vhca_id = MLX5_GET(cmd_hca_cap, hca_caps, vhca_id);
-
-out_free:
- kfree(query_ctx);
- return err;
-}
-
int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num)
{
u16 *old_entry, *vhca_map_entry, vhca_id;
int err;
- err = mlx5_esw_query_vport_vhca_id(esw, vport_num, &vhca_id);
+ err = mlx5_vport_get_vhca_id(esw->dev, vport_num, &vhca_id);
if (err) {
esw_warn(esw->dev, "Getting vhca_id for vport failed (vport=%u,err=%d)\n",
vport_num, err);
@@ -4213,7 +4192,7 @@ void mlx5_esw_vport_vhca_id_clear(struct mlx5_eswitch *esw, u16 vport_num)
u16 *vhca_map_entry, vhca_id;
int err;
- err = mlx5_esw_query_vport_vhca_id(esw, vport_num, &vhca_id);
+ err = mlx5_vport_get_vhca_id(esw->dev, vport_num, &vhca_id);
if (err)
esw_warn(esw->dev, "Getting vhca_id for vport failed (vport=%hu,err=%d)\n",
vport_num, err);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/events.c b/drivers/net/ethernet/mellanox/mlx5/core/events.c
index d91ea53eb394..01c5f5990f9a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/events.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/events.c
@@ -6,6 +6,7 @@
#include "mlx5_core.h"
#include "lib/eq.h"
#include "lib/events.h"
+#include "hwmon.h"
struct mlx5_event_nb {
struct mlx5_nb nb;
@@ -153,21 +154,50 @@ static int any_notifier(struct notifier_block *nb,
return NOTIFY_OK;
}
+#if IS_ENABLED(CONFIG_HWMON)
+static void print_sensor_names_in_bit_set(struct mlx5_core_dev *dev, struct mlx5_hwmon *hwmon,
+ u64 bit_set, int bit_set_offset)
+{
+ unsigned long *bit_set_ptr = (unsigned long *)&bit_set;
+ int num_bits = sizeof(bit_set) * BITS_PER_BYTE;
+ int i;
+
+ for_each_set_bit(i, bit_set_ptr, num_bits) {
+ const char *sensor_name = hwmon_get_sensor_name(hwmon, i + bit_set_offset);
+
+ mlx5_core_warn(dev, "Sensor name[%d]: %s\n", i + bit_set_offset, sensor_name);
+ }
+}
+#endif /* CONFIG_HWMON */
+
/* type == MLX5_EVENT_TYPE_TEMP_WARN_EVENT */
static int temp_warn(struct notifier_block *nb, unsigned long type, void *data)
{
struct mlx5_event_nb *event_nb = mlx5_nb_cof(nb, struct mlx5_event_nb, nb);
struct mlx5_events *events = event_nb->ctx;
+ struct mlx5_core_dev *dev = events->dev;
struct mlx5_eqe *eqe = data;
u64 value_lsb;
u64 value_msb;
value_lsb = be64_to_cpu(eqe->data.temp_warning.sensor_warning_lsb);
+ /* bit 1-63 are not supported for NICs,
+ * hence read only bit 0 (asic) from lsb.
+ */
+ value_lsb &= 0x1;
value_msb = be64_to_cpu(eqe->data.temp_warning.sensor_warning_msb);
- mlx5_core_warn(events->dev,
- "High temperature on sensors with bit set %llx %llx",
- value_msb, value_lsb);
+ if (net_ratelimit()) {
+ mlx5_core_warn(dev, "High temperature on sensors with bit set %#llx %#llx.\n",
+ value_msb, value_lsb);
+#if IS_ENABLED(CONFIG_HWMON)
+ if (dev->hwmon) {
+ print_sensor_names_in_bit_set(dev, dev->hwmon, value_lsb, 0);
+ print_sensor_names_in_bit_set(dev, dev->hwmon, value_msb,
+ sizeof(value_lsb) * BITS_PER_BYTE);
+ }
+#endif
+ }
return NOTIFY_OK;
}
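The temp_warn() rework masks the unsupported LSB bits, rate-limits the warning, and (when hwmon is built in) walks the set bits of each 64-bit sensor mask to print the sensor names. A self-contained sketch of that bit walk; the demo_ helper and the message text are illustrative, only bitmap_from_u64() and for_each_set_bit() are standard kernel APIs:

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/printk.h>

static void demo_report_set_bits(u64 mask, int bit_offset)
{
	DECLARE_BITMAP(bits, 64);
	int i;

	/* Copy the u64 into a proper bitmap so the walk is also
	 * correct on 32-bit targets.
	 */
	bitmap_from_u64(bits, mask);
	for_each_set_bit(i, bits, 64)
		pr_warn("sensor bit %d set\n", i + bit_offset);
}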
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 676005854dad..1af76da8b132 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -217,7 +217,8 @@ static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns,
int err;
if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
- underlay_qpn == 0)
+ underlay_qpn == 0 &&
+ (ft->type != FS_FT_RDMA_RX && ft->type != FS_FT_RDMA_TX))
return 0;
if (ft->type == FS_FT_FDB &&
@@ -526,7 +527,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
struct mlx5_flow_rule *dst;
void *in_flow_context, *vlan;
void *in_match_value;
- int reformat_id = 0;
+ u32 reformat_id = 0;
unsigned int inlen;
int dst_cnt_size;
u32 *in, action;
@@ -579,23 +580,21 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
MLX5_SET(flow_context, in_flow_context, action, action);
if (!extended_dest && fte->act_dests.action.pkt_reformat) {
- struct mlx5_pkt_reformat *pkt_reformat = fte->act_dests.action.pkt_reformat;
-
- if (pkt_reformat->owner == MLX5_FLOW_RESOURCE_OWNER_SW) {
- reformat_id = mlx5_fs_dr_action_get_pkt_reformat_id(pkt_reformat);
- if (reformat_id < 0) {
- mlx5_core_err(dev,
- "Unsupported SW-owned pkt_reformat type (%d) in FW-owned table\n",
- pkt_reformat->reformat_type);
- err = reformat_id;
- goto err_out;
- }
- } else {
- reformat_id = fte->act_dests.action.pkt_reformat->id;
+ struct mlx5_pkt_reformat *pkt_reformat =
+ fte->act_dests.action.pkt_reformat;
+
+ err = mlx5_fs_get_packet_reformat_id(pkt_reformat,
+ &reformat_id);
+ if (err) {
+ mlx5_core_err(dev,
+ "Unsupported pkt_reformat type (%d)\n",
+ pkt_reformat->reformat_type);
+ goto err_out;
}
}
- MLX5_SET(flow_context, in_flow_context, packet_reformat_id, (u32)reformat_id);
+ MLX5_SET(flow_context, in_flow_context, packet_reformat_id,
+ reformat_id);
if (fte->act_dests.action.modify_hdr) {
if (fte->act_dests.action.modify_hdr->owner == MLX5_FLOW_RESOURCE_OWNER_SW) {
@@ -718,7 +717,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
continue;
MLX5_SET(flow_counter_list, in_dests, flow_counter_id,
- dst->dest_attr.counter_id);
+ mlx5_fc_id(dst->dest_attr.counter));
in_dests += dst_cnt_size;
list_size++;
}
@@ -1141,6 +1140,8 @@ const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type typ
case FS_FT_RDMA_RX:
case FS_FT_RDMA_TX:
case FS_FT_PORT_SEL:
+ case FS_FT_RDMA_TRANSPORT_RX:
+ case FS_FT_RDMA_TRANSPORT_TX:
return mlx5_fs_cmd_get_fw_cmds();
default:
return mlx5_fs_cmd_get_stub_cmds();
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 2eabfcc247c6..a8046200d376 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -658,6 +658,7 @@ static void del_sw_hw_rule(struct fs_node *node)
BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) |
BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
fte->act_dests.action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ mlx5_fc_local_destroy(rule->dest_attr.counter);
goto out;
}
@@ -820,11 +821,17 @@ static int insert_fte(struct mlx5_flow_group *fg, struct fs_fte *fte)
return index;
fte->index = index + fg->start_index;
+retry_insert:
ret = rhashtable_insert_fast(&fg->ftes_hash,
&fte->hash,
rhash_fte);
- if (ret)
+ if (ret) {
+ if (ret == -EBUSY) {
+ cond_resched();
+ goto retry_insert;
+ }
goto err_ida_remove;
+ }
tree_add_node(&fte->node, &fg->node);
list_add_tail(&fte->node.list, &fg->node.children);
@@ -1449,7 +1456,7 @@ mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
struct mlx5_flow_table *ft;
int autogroups_max_fte;
- ft = mlx5_create_flow_table(ns, ft_attr);
+ ft = mlx5_create_vport_flow_table(ns, ft_attr, ft_attr->vport);
if (IS_ERR(ft))
return ft;
@@ -1823,14 +1830,35 @@ static int create_auto_flow_group(struct mlx5_flow_table *ft,
return err;
}
+int mlx5_fs_get_packet_reformat_id(struct mlx5_pkt_reformat *pkt_reformat,
+ u32 *id)
+{
+ switch (pkt_reformat->owner) {
+ case MLX5_FLOW_RESOURCE_OWNER_FW:
+ *id = pkt_reformat->id;
+ return 0;
+ case MLX5_FLOW_RESOURCE_OWNER_SW:
+ return mlx5_fs_dr_action_get_pkt_reformat_id(pkt_reformat, id);
+ case MLX5_FLOW_RESOURCE_OWNER_HWS:
+ return mlx5_fs_hws_action_get_pkt_reformat_id(pkt_reformat, id);
+ default:
+ return -EINVAL;
+ }
+}
+
static bool mlx5_pkt_reformat_cmp(struct mlx5_pkt_reformat *p1,
struct mlx5_pkt_reformat *p2)
{
- return p1->owner == p2->owner &&
- (p1->owner == MLX5_FLOW_RESOURCE_OWNER_FW ?
- p1->id == p2->id :
- mlx5_fs_dr_action_get_pkt_reformat_id(p1) ==
- mlx5_fs_dr_action_get_pkt_reformat_id(p2));
+ int err1, err2;
+ u32 id1, id2;
+
+ if (p1->owner != p2->owner)
+ return false;
+
+ err1 = mlx5_fs_get_packet_reformat_id(p1, &id1);
+ err2 = mlx5_fs_get_packet_reformat_id(p2, &id2);
+
+ return !err1 && !err2 && id1 == id2;
}
static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
@@ -2200,6 +2228,7 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft,
struct mlx5_flow_handle *rule;
struct match_list *iter;
bool take_write = false;
+ bool try_again = false;
struct fs_fte *fte;
u64 version = 0;
int err;
@@ -2264,6 +2293,7 @@ skip_search:
nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
if (!g->node.active) {
+ try_again = true;
up_write_ref_node(&g->node, false);
continue;
}
@@ -2285,7 +2315,8 @@ skip_search:
tree_put_node(&fte->node, false);
return rule;
}
- rule = ERR_PTR(-ENOENT);
+ err = try_again ? -EAGAIN : -ENOENT;
+ rule = ERR_PTR(err);
out:
kmem_cache_free(steering->ftes_cache, fte);
return rule;
@@ -2709,6 +2740,7 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
break;
case MLX5_FLOW_NAMESPACE_RDMA_TX:
root_ns = steering->rdma_tx_root_ns;
+ prio = RDMA_TX_BYPASS_PRIO;
break;
case MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS:
root_ns = steering->rdma_rx_root_ns;
@@ -2756,9 +2788,9 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
}
EXPORT_SYMBOL(mlx5_get_flow_namespace);
-struct mlx5_flow_namespace *mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev,
- enum mlx5_flow_namespace_type type,
- int vport)
+struct mlx5_flow_namespace *
+mlx5_get_flow_vport_namespace(struct mlx5_core_dev *dev,
+ enum mlx5_flow_namespace_type type, int vport_idx)
{
struct mlx5_flow_steering *steering = dev->priv.steering;
@@ -2767,25 +2799,43 @@ struct mlx5_flow_namespace *mlx5_get_flow_vport_acl_namespace(struct mlx5_core_d
switch (type) {
case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
- if (vport >= steering->esw_egress_acl_vports)
+ if (vport_idx >= steering->esw_egress_acl_vports)
return NULL;
if (steering->esw_egress_root_ns &&
- steering->esw_egress_root_ns[vport])
- return &steering->esw_egress_root_ns[vport]->ns;
+ steering->esw_egress_root_ns[vport_idx])
+ return &steering->esw_egress_root_ns[vport_idx]->ns;
else
return NULL;
case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
- if (vport >= steering->esw_ingress_acl_vports)
+ if (vport_idx >= steering->esw_ingress_acl_vports)
return NULL;
if (steering->esw_ingress_root_ns &&
- steering->esw_ingress_root_ns[vport])
- return &steering->esw_ingress_root_ns[vport]->ns;
+ steering->esw_ingress_root_ns[vport_idx])
+ return &steering->esw_ingress_root_ns[vport_idx]->ns;
+ else
+ return NULL;
+ case MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_RX:
+ if (vport_idx >= steering->rdma_transport_rx_vports)
+ return NULL;
+ if (steering->rdma_transport_rx_root_ns &&
+ steering->rdma_transport_rx_root_ns[vport_idx])
+ return &steering->rdma_transport_rx_root_ns[vport_idx]->ns;
+ else
+ return NULL;
+ case MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_TX:
+ if (vport_idx >= steering->rdma_transport_tx_vports)
+ return NULL;
+
+ if (steering->rdma_transport_tx_root_ns &&
+ steering->rdma_transport_tx_root_ns[vport_idx])
+ return &steering->rdma_transport_tx_root_ns[vport_idx]->ns;
else
return NULL;
default:
return NULL;
}
}
+EXPORT_SYMBOL(mlx5_get_flow_vport_namespace);
static struct fs_prio *_fs_create_prio(struct mlx5_flow_namespace *ns,
unsigned int prio,
@@ -3191,6 +3241,127 @@ out_err:
return err;
}
+static int
+init_rdma_transport_rx_root_ns_one(struct mlx5_flow_steering *steering,
+ int vport_idx)
+{
+ struct fs_prio *prio;
+
+ steering->rdma_transport_rx_root_ns[vport_idx] =
+ create_root_ns(steering, FS_FT_RDMA_TRANSPORT_RX);
+ if (!steering->rdma_transport_rx_root_ns[vport_idx])
+ return -ENOMEM;
+
+ /* create 1 prio */
+ prio = fs_create_prio(&steering->rdma_transport_rx_root_ns[vport_idx]->ns,
+ MLX5_RDMA_TRANSPORT_BYPASS_PRIO, 1);
+ return PTR_ERR_OR_ZERO(prio);
+}
+
+static int
+init_rdma_transport_tx_root_ns_one(struct mlx5_flow_steering *steering,
+ int vport_idx)
+{
+ struct fs_prio *prio;
+
+ steering->rdma_transport_tx_root_ns[vport_idx] =
+ create_root_ns(steering, FS_FT_RDMA_TRANSPORT_TX);
+ if (!steering->rdma_transport_tx_root_ns[vport_idx])
+ return -ENOMEM;
+
+ /* create 1 prio */
+ prio = fs_create_prio(&steering->rdma_transport_tx_root_ns[vport_idx]->ns,
+ MLX5_RDMA_TRANSPORT_BYPASS_PRIO, 1);
+ return PTR_ERR_OR_ZERO(prio);
+}
+
+static int init_rdma_transport_rx_root_ns(struct mlx5_flow_steering *steering)
+{
+ struct mlx5_core_dev *dev = steering->dev;
+ int total_vports;
+ int err;
+ int i;
+
+ /* In case the eswitch is not supported and the device is working in legacy mode */
+ total_vports = mlx5_eswitch_get_total_vports(dev) ?: 1;
+
+ steering->rdma_transport_rx_root_ns =
+ kcalloc(total_vports,
+ sizeof(*steering->rdma_transport_rx_root_ns),
+ GFP_KERNEL);
+ if (!steering->rdma_transport_rx_root_ns)
+ return -ENOMEM;
+
+ for (i = 0; i < total_vports; i++) {
+ err = init_rdma_transport_rx_root_ns_one(steering, i);
+ if (err)
+ goto cleanup_root_ns;
+ }
+ steering->rdma_transport_rx_vports = total_vports;
+ return 0;
+
+cleanup_root_ns:
+ while (i--)
+ cleanup_root_ns(steering->rdma_transport_rx_root_ns[i]);
+ kfree(steering->rdma_transport_rx_root_ns);
+ steering->rdma_transport_rx_root_ns = NULL;
+ return err;
+}
+
+static int init_rdma_transport_tx_root_ns(struct mlx5_flow_steering *steering)
+{
+ struct mlx5_core_dev *dev = steering->dev;
+ int total_vports;
+ int err;
+ int i;
+
+ /* In case the eswitch is not supported and the device is working in legacy mode */
+ total_vports = mlx5_eswitch_get_total_vports(dev) ?: 1;
+
+ steering->rdma_transport_tx_root_ns =
+ kcalloc(total_vports,
+ sizeof(*steering->rdma_transport_tx_root_ns),
+ GFP_KERNEL);
+ if (!steering->rdma_transport_tx_root_ns)
+ return -ENOMEM;
+
+ for (i = 0; i < total_vports; i++) {
+ err = init_rdma_transport_tx_root_ns_one(steering, i);
+ if (err)
+ goto cleanup_root_ns;
+ }
+ steering->rdma_transport_tx_vports = total_vports;
+ return 0;
+
+cleanup_root_ns:
+ while (i--)
+ cleanup_root_ns(steering->rdma_transport_tx_root_ns[i]);
+ kfree(steering->rdma_transport_tx_root_ns);
+ steering->rdma_transport_tx_root_ns = NULL;
+ return err;
+}
+
+static void cleanup_rdma_transport_roots_ns(struct mlx5_flow_steering *steering)
+{
+ int i;
+
+ if (steering->rdma_transport_rx_root_ns) {
+ for (i = 0; i < steering->rdma_transport_rx_vports; i++)
+ cleanup_root_ns(steering->rdma_transport_rx_root_ns[i]);
+
+ kfree(steering->rdma_transport_rx_root_ns);
+ steering->rdma_transport_rx_root_ns = NULL;
+ }
+
+ if (steering->rdma_transport_tx_root_ns) {
+ for (i = 0; i < steering->rdma_transport_tx_vports; i++)
+ cleanup_root_ns(steering->rdma_transport_tx_root_ns[i]);
+
+ kfree(steering->rdma_transport_tx_root_ns);
+ steering->rdma_transport_tx_root_ns = NULL;
+ }
+}
+
/* FT and tc chains are stored in the same array so we can re-use the
* mlx5_get_fdb_sub_ns() and tc api for FT chains.
* When creating a new ns for each chain store it in the first available slot.
@@ -3528,35 +3699,42 @@ static int mlx5_fs_mode_validate(struct devlink *devlink, u32 id,
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
char *value = val.vstr;
- int err = 0;
+ u8 eswitch_mode;
- if (!strcmp(value, "dmfs")) {
+ if (!strcmp(value, "dmfs"))
return 0;
- } else if (!strcmp(value, "smfs")) {
- u8 eswitch_mode;
- bool smfs_cap;
- eswitch_mode = mlx5_eswitch_mode(dev);
- smfs_cap = mlx5_fs_dr_is_supported(dev);
+ if (!strcmp(value, "smfs")) {
+ bool smfs_cap = mlx5_fs_dr_is_supported(dev);
if (!smfs_cap) {
- err = -EOPNOTSUPP;
NL_SET_ERR_MSG_MOD(extack,
"Software managed steering is not supported by current device");
+ return -EOPNOTSUPP;
}
+ } else if (!strcmp(value, "hmfs")) {
+ bool hmfs_cap = mlx5_fs_hws_is_supported(dev);
- else if (eswitch_mode == MLX5_ESWITCH_OFFLOADS) {
+ if (!hmfs_cap) {
NL_SET_ERR_MSG_MOD(extack,
- "Software managed steering is not supported when eswitch offloads enabled.");
- err = -EOPNOTSUPP;
+ "Hardware steering is not supported by current device");
+ return -EOPNOTSUPP;
}
} else {
NL_SET_ERR_MSG_MOD(extack,
- "Bad parameter: supported values are [\"dmfs\", \"smfs\"]");
- err = -EINVAL;
+ "Bad parameter: supported values are [\"dmfs\", \"smfs\", \"hmfs\"]");
+ return -EINVAL;
}
- return err;
+ eswitch_mode = mlx5_eswitch_mode(dev);
+ if (eswitch_mode == MLX5_ESWITCH_OFFLOADS) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Moving to %s is not supported when eswitch offloads enabled.",
+ value);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
}
static int mlx5_fs_mode_set(struct devlink *devlink, u32 id,
@@ -3568,6 +3746,8 @@ static int mlx5_fs_mode_set(struct devlink *devlink, u32 id,
if (!strcmp(ctx->val.vstr, "smfs"))
mode = MLX5_FLOW_STEERING_MODE_SMFS;
+ else if (!strcmp(ctx->val.vstr, "hmfs"))
+ mode = MLX5_FLOW_STEERING_MODE_HMFS;
else
mode = MLX5_FLOW_STEERING_MODE_DMFS;
dev->priv.steering->mode = mode;
@@ -3580,10 +3760,17 @@ static int mlx5_fs_mode_get(struct devlink *devlink, u32 id,
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
- if (dev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_SMFS)
+ switch (dev->priv.steering->mode) {
+ case MLX5_FLOW_STEERING_MODE_SMFS:
strscpy(ctx->val.vstr, "smfs", sizeof(ctx->val.vstr));
- else
+ break;
+ case MLX5_FLOW_STEERING_MODE_HMFS:
+ strscpy(ctx->val.vstr, "hmfs", sizeof(ctx->val.vstr));
+ break;
+ default:
strscpy(ctx->val.vstr, "dmfs", sizeof(ctx->val.vstr));
+ }
+
return 0;
}
@@ -3607,6 +3794,7 @@ void mlx5_fs_core_cleanup(struct mlx5_core_dev *dev)
cleanup_root_ns(steering->rdma_rx_root_ns);
cleanup_root_ns(steering->rdma_tx_root_ns);
cleanup_root_ns(steering->egress_root_ns);
+ cleanup_rdma_transport_roots_ns(steering);
devl_params_unregister(priv_to_devlink(dev), mlx5_fs_params,
ARRAY_SIZE(mlx5_fs_params));
@@ -3658,8 +3846,7 @@ int mlx5_fs_core_init(struct mlx5_core_dev *dev)
goto err;
}
- if (MLX5_CAP_FLOWTABLE_RDMA_RX(dev, ft_support) &&
- MLX5_CAP_FLOWTABLE_RDMA_RX(dev, table_miss_action_domain)) {
+ if (MLX5_CAP_FLOWTABLE_RDMA_RX(dev, ft_support)) {
err = init_rdma_rx_root_ns(steering);
if (err)
goto err;
@@ -3677,6 +3864,18 @@ int mlx5_fs_core_init(struct mlx5_core_dev *dev)
goto err;
}
+ if (MLX5_CAP_FLOWTABLE_RDMA_TRANSPORT_RX(dev, ft_support)) {
+ err = init_rdma_transport_rx_root_ns(steering);
+ if (err)
+ goto err;
+ }
+
+ if (MLX5_CAP_FLOWTABLE_RDMA_TRANSPORT_TX(dev, ft_support)) {
+ err = init_rdma_transport_tx_root_ns(steering);
+ if (err)
+ goto err;
+ }
+
return 0;
err:
@@ -3827,8 +4026,10 @@ mlx5_get_root_namespace(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type
struct mlx5_flow_namespace *ns;
if (ns_type == MLX5_FLOW_NAMESPACE_ESW_EGRESS ||
- ns_type == MLX5_FLOW_NAMESPACE_ESW_INGRESS)
- ns = mlx5_get_flow_vport_acl_namespace(dev, ns_type, 0);
+ ns_type == MLX5_FLOW_NAMESPACE_ESW_INGRESS ||
+ ns_type == MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_TX ||
+ ns_type == MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_RX)
+ ns = mlx5_get_flow_vport_namespace(dev, ns_type, 0);
else
ns = mlx5_get_flow_namespace(dev, ns_type);
if (!ns)
@@ -4003,6 +4204,8 @@ int mlx5_flow_namespace_set_mode(struct mlx5_flow_namespace *ns,
if (mode == MLX5_FLOW_STEERING_MODE_SMFS)
cmds = mlx5_fs_cmd_get_dr_cmds();
+ else if (mode == MLX5_FLOW_STEERING_MODE_HMFS)
+ cmds = mlx5_fs_cmd_get_hws_cmds();
else
cmds = mlx5_fs_cmd_get_fw_cmds();
if (!cmds)
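
A minimal userspace model of the owner-dispatch pattern that mlx5_fs_get_packet_reformat_id() introduces above: the reformat id is returned through a u32 out-parameter so ids and negative error codes no longer share one signed integer, and the comparison helper simply bails out on any lookup error. This is a sketch under assumed types, not the driver API.

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum owner { OWNER_FW, OWNER_SW, OWNER_HWS };

struct reformat {
	enum owner owner;
	uint32_t id;
};

static int get_reformat_id(const struct reformat *r, uint32_t *id)
{
	switch (r->owner) {
	case OWNER_FW:
	case OWNER_SW:
	case OWNER_HWS:
		*id = r->id;	/* the real code queries the SW/HWS backend here */
		return 0;
	default:
		return -EINVAL;
	}
}

static bool reformat_equal(const struct reformat *a, const struct reformat *b)
{
	uint32_t id_a = 0, id_b = 0;

	if (a->owner != b->owner)
		return false;
	return !get_reformat_id(a, &id_a) && !get_reformat_id(b, &id_b) &&
	       id_a == id_b;
}

int main(void)
{
	struct reformat a = { OWNER_FW, 7 }, b = { OWNER_FW, 7 };

	printf("equal: %d\n", reformat_equal(&a, &b));
	return 0;
}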
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index bad2df0715ec..500826229b0b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -38,6 +38,7 @@
#include <linux/rhashtable.h>
#include <linux/llist.h>
#include <steering/sws/fs_dr.h>
+#include <steering/hws/fs_hws.h>
#define FDB_TC_MAX_CHAIN 3
#define FDB_FT_CHAIN (FDB_TC_MAX_CHAIN + 1)
@@ -57,6 +58,7 @@ struct mlx5_flow_definer {
enum mlx5_flow_resource_owner {
MLX5_FLOW_RESOURCE_OWNER_FW,
MLX5_FLOW_RESOURCE_OWNER_SW,
+ MLX5_FLOW_RESOURCE_OWNER_HWS,
};
struct mlx5_modify_hdr {
@@ -64,6 +66,7 @@ struct mlx5_modify_hdr {
enum mlx5_flow_resource_owner owner;
union {
struct mlx5_fs_dr_action fs_dr_action;
+ struct mlx5_fs_hws_action fs_hws_action;
u32 id;
};
};
@@ -74,6 +77,7 @@ struct mlx5_pkt_reformat {
enum mlx5_flow_resource_owner owner;
union {
struct mlx5_fs_dr_action fs_dr_action;
+ struct mlx5_fs_hws_action fs_hws_action;
u32 id;
};
};
@@ -112,7 +116,9 @@ enum fs_flow_table_type {
FS_FT_PORT_SEL = 0X9,
FS_FT_FDB_RX = 0xa,
FS_FT_FDB_TX = 0xb,
- FS_FT_MAX_TYPE = FS_FT_FDB_TX,
+ FS_FT_RDMA_TRANSPORT_RX = 0xd,
+ FS_FT_RDMA_TRANSPORT_TX = 0xe,
+ FS_FT_MAX_TYPE = FS_FT_RDMA_TRANSPORT_TX,
};
enum fs_flow_table_op_mod {
@@ -126,7 +132,8 @@ enum fs_fte_status {
enum mlx5_flow_steering_mode {
MLX5_FLOW_STEERING_MODE_DMFS,
- MLX5_FLOW_STEERING_MODE_SMFS
+ MLX5_FLOW_STEERING_MODE_SMFS,
+ MLX5_FLOW_STEERING_MODE_HMFS,
};
enum mlx5_flow_steering_capabilty {
@@ -154,6 +161,10 @@ struct mlx5_flow_steering {
struct mlx5_flow_root_namespace *port_sel_root_ns;
int esw_egress_acl_vports;
int esw_ingress_acl_vports;
+ struct mlx5_flow_root_namespace **rdma_transport_rx_root_ns;
+ struct mlx5_flow_root_namespace **rdma_transport_tx_root_ns;
+ int rdma_transport_rx_vports;
+ int rdma_transport_tx_vports;
};
struct fs_node {
@@ -190,7 +201,10 @@ struct mlx5_flow_handle {
/* Type of children is mlx5_flow_group */
struct mlx5_flow_table {
struct fs_node node;
- struct mlx5_fs_dr_table fs_dr_table;
+ union {
+ struct mlx5_fs_dr_table fs_dr_table;
+ struct mlx5_fs_hws_table fs_hws_table;
+ };
u32 id;
u16 vport;
unsigned int max_fte;
@@ -247,7 +261,10 @@ struct fs_fte_dup {
/* Type of children is mlx5_flow_rule */
struct fs_fte {
struct fs_node node;
- struct mlx5_fs_dr_rule fs_dr_rule;
+ union {
+ struct mlx5_fs_dr_rule fs_dr_rule;
+ struct mlx5_fs_hws_rule fs_hws_rule;
+ };
u32 val[MLX5_ST_SZ_DW_MATCH_PARAM];
struct fs_fte_action act_dests;
struct fs_fte_dup *dup;
@@ -280,7 +297,10 @@ struct mlx5_flow_group_mask {
/* Type of children is fs_fte */
struct mlx5_flow_group {
struct fs_node node;
- struct mlx5_fs_dr_matcher fs_dr_matcher;
+ union {
+ struct mlx5_fs_dr_matcher fs_dr_matcher;
+ struct mlx5_fs_hws_matcher fs_hws_matcher;
+ };
struct mlx5_flow_group_mask mask;
u32 start_index;
u32 max_ftes;
@@ -293,7 +313,10 @@ struct mlx5_flow_group {
struct mlx5_flow_root_namespace {
struct mlx5_flow_namespace ns;
enum mlx5_flow_steering_mode mode;
- struct mlx5_fs_dr_domain fs_dr_domain;
+ union {
+ struct mlx5_fs_dr_domain fs_dr_domain;
+ struct mlx5_fs_hws_context fs_hws_context;
+ };
enum fs_flow_table_type table_type;
struct mlx5_core_dev *dev;
struct mlx5_flow_table *root_ft;
@@ -303,6 +326,36 @@ struct mlx5_flow_root_namespace {
const struct mlx5_flow_cmds *cmds;
};
+enum mlx5_fc_type {
+ MLX5_FC_TYPE_ACQUIRED = 0,
+ MLX5_FC_TYPE_LOCAL,
+};
+
+struct mlx5_fc_cache {
+ u64 packets;
+ u64 bytes;
+ u64 lastuse;
+};
+
+struct mlx5_fc {
+ u32 id;
+ bool aging;
+ enum mlx5_fc_type type;
+ struct mlx5_fc_bulk *bulk;
+ struct mlx5_fc_cache cache;
+ /* last{packets,bytes} are used for calculating deltas since last reading. */
+ u64 lastpackets;
+ u64 lastbytes;
+};
+
+struct mlx5_fc_bulk {
+ struct mlx5_fs_bulk fs_bulk;
+ u32 base_id;
+ struct mlx5_fs_hws_data hws_data;
+ struct mlx5_fc fcs[];
+};
+
+u32 mlx5_fc_get_base_id(struct mlx5_fc *counter);
int mlx5_init_fc_stats(struct mlx5_core_dev *dev);
void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev);
void mlx5_fc_queue_stats_work(struct mlx5_core_dev *dev,
@@ -334,6 +387,9 @@ u32 mlx5_fs_get_capabilities(struct mlx5_core_dev *dev, enum mlx5_flow_namespace
struct mlx5_flow_root_namespace *find_root(struct fs_node *node);
+int mlx5_fs_get_packet_reformat_id(struct mlx5_pkt_reformat *pkt_reformat,
+ u32 *id);
+
#define fs_get_obj(v, _node) {v = container_of((_node), typeof(*v), node); }
#define fs_list_for_each_entry(pos, root) \
@@ -382,7 +438,9 @@ struct mlx5_flow_root_namespace *find_root(struct fs_node *node);
(type == FS_FT_PORT_SEL) ? MLX5_CAP_FLOWTABLE_PORT_SELECTION(mdev, cap) : \
(type == FS_FT_FDB_RX) ? MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) : \
(type == FS_FT_FDB_TX) ? MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) : \
- (BUILD_BUG_ON_ZERO(FS_FT_FDB_TX != FS_FT_MAX_TYPE))\
+ (type == FS_FT_RDMA_TRANSPORT_RX) ? MLX5_CAP_FLOWTABLE_RDMA_TRANSPORT_RX(mdev, cap) : \
+ (type == FS_FT_RDMA_TRANSPORT_TX) ? MLX5_CAP_FLOWTABLE_RDMA_TRANSPORT_TX(mdev, cap) : \
+ (BUILD_BUG_ON_ZERO(FS_FT_RDMA_TRANSPORT_TX != FS_FT_MAX_TYPE))\
)
#endif
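
The MLX5_CAP_FLOWTABLE_TYPE-style macro extended above ends its ternary chain with BUILD_BUG_ON_ZERO(FS_FT_RDMA_TRANSPORT_TX != FS_FT_MAX_TYPE), so forgetting to teach the macro about a new table type breaks the build. A compilable sketch of that exhaustiveness trick, using the kernel's BUILD_BUG_ON_ZERO definition and an invented ft_type enum:

#include <stdio.h>

/* Negative bitfield width if e is non-zero => compile error. */
#define BUILD_BUG_ON_ZERO(e) ((int)(sizeof(struct { int:(-!!(e)); })))

enum ft_type { FT_NIC_RX, FT_NIC_TX, FT_RDMA_RX, FT_MAX = FT_RDMA_RX };

#define FT_CAP(type)					\
	((type) == FT_NIC_RX  ? 1 :			\
	 (type) == FT_NIC_TX  ? 2 :			\
	 (type) == FT_RDMA_RX ? 3 :			\
	 BUILD_BUG_ON_ZERO(FT_RDMA_RX != FT_MAX))

int main(void)
{
	printf("%d\n", FT_CAP(FT_RDMA_RX));
	return 0;
}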
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index 62d0c689796b..492775d3d193 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -34,6 +34,7 @@
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "fs_core.h"
+#include "fs_pool.h"
#include "fs_cmd.h"
#define MLX5_FC_STATS_PERIOD msecs_to_jiffies(1000)
@@ -43,33 +44,6 @@
#define MLX5_FC_POOL_MAX_THRESHOLD BIT(18)
#define MLX5_FC_POOL_USED_BUFF_RATIO 10
-struct mlx5_fc_cache {
- u64 packets;
- u64 bytes;
- u64 lastuse;
-};
-
-struct mlx5_fc {
- u32 id;
- bool aging;
- struct mlx5_fc_bulk *bulk;
- struct mlx5_fc_cache cache;
- /* last{packets,bytes} are used for calculating deltas since last reading. */
- u64 lastpackets;
- u64 lastbytes;
-};
-
-struct mlx5_fc_pool {
- struct mlx5_core_dev *dev;
- struct mutex pool_lock; /* protects pool lists */
- struct list_head fully_used;
- struct list_head partially_used;
- struct list_head unused;
- int available_fcs;
- int used_fcs;
- int threshold;
-};
-
struct mlx5_fc_stats {
struct xarray counters;
@@ -80,13 +54,13 @@ struct mlx5_fc_stats {
int bulk_query_len;
bool bulk_query_alloc_failed;
unsigned long next_bulk_query_alloc;
- struct mlx5_fc_pool fc_pool;
+ struct mlx5_fs_pool fc_pool;
};
-static void mlx5_fc_pool_init(struct mlx5_fc_pool *fc_pool, struct mlx5_core_dev *dev);
-static void mlx5_fc_pool_cleanup(struct mlx5_fc_pool *fc_pool);
-static struct mlx5_fc *mlx5_fc_pool_acquire_counter(struct mlx5_fc_pool *fc_pool);
-static void mlx5_fc_pool_release_counter(struct mlx5_fc_pool *fc_pool, struct mlx5_fc *fc);
+static void mlx5_fc_pool_init(struct mlx5_fs_pool *fc_pool, struct mlx5_core_dev *dev);
+static void mlx5_fc_pool_cleanup(struct mlx5_fs_pool *fc_pool);
+static struct mlx5_fc *mlx5_fc_pool_acquire_counter(struct mlx5_fs_pool *fc_pool);
+static void mlx5_fc_pool_release_counter(struct mlx5_fs_pool *fc_pool, struct mlx5_fc *fc);
static int get_init_bulk_query_len(struct mlx5_core_dev *dev)
{
@@ -186,6 +160,9 @@ static void mlx5_fc_release(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
{
struct mlx5_fc_stats *fc_stats = dev->priv.fc_stats;
+ if (WARN_ON(counter->type == MLX5_FC_TYPE_LOCAL))
+ return;
+
if (counter->bulk)
mlx5_fc_pool_release_counter(&fc_stats->fc_pool, counter);
else
@@ -435,15 +412,7 @@ void mlx5_fc_update_sampling_interval(struct mlx5_core_dev *dev,
fc_stats->sampling_interval);
}
-/* Flow counter bluks */
-
-struct mlx5_fc_bulk {
- struct list_head pool_list;
- u32 base_id;
- int bulk_len;
- unsigned long *bitmask;
- struct mlx5_fc fcs[] __counted_by(bulk_len);
-};
+/* Flow counter bulks */
static void mlx5_fc_init(struct mlx5_fc *counter, struct mlx5_fc_bulk *bulk,
u32 id)
@@ -452,16 +421,16 @@ static void mlx5_fc_init(struct mlx5_fc *counter, struct mlx5_fc_bulk *bulk,
counter->id = id;
}
-static int mlx5_fc_bulk_get_free_fcs_amount(struct mlx5_fc_bulk *bulk)
+u32 mlx5_fc_get_base_id(struct mlx5_fc *counter)
{
- return bitmap_weight(bulk->bitmask, bulk->bulk_len);
+ return counter->bulk->base_id;
}
-static struct mlx5_fc_bulk *mlx5_fc_bulk_create(struct mlx5_core_dev *dev)
+static struct mlx5_fs_bulk *mlx5_fc_bulk_create(struct mlx5_core_dev *dev,
+ void *pool_ctx)
{
enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask;
- struct mlx5_fc_bulk *bulk;
- int err = -ENOMEM;
+ struct mlx5_fc_bulk *fc_bulk;
int bulk_len;
u32 base_id;
int i;
@@ -469,207 +438,141 @@ static struct mlx5_fc_bulk *mlx5_fc_bulk_create(struct mlx5_core_dev *dev)
alloc_bitmask = MLX5_CAP_GEN(dev, flow_counter_bulk_alloc);
bulk_len = alloc_bitmask > 0 ? MLX5_FC_BULK_NUM_FCS(alloc_bitmask) : 1;
- bulk = kvzalloc(struct_size(bulk, fcs, bulk_len), GFP_KERNEL);
- if (!bulk)
- goto err_alloc_bulk;
-
- bulk->bitmask = kvcalloc(BITS_TO_LONGS(bulk_len), sizeof(unsigned long),
- GFP_KERNEL);
- if (!bulk->bitmask)
- goto err_alloc_bitmask;
+ fc_bulk = kvzalloc(struct_size(fc_bulk, fcs, bulk_len), GFP_KERNEL);
+ if (!fc_bulk)
+ return NULL;
- err = mlx5_cmd_fc_bulk_alloc(dev, alloc_bitmask, &base_id);
- if (err)
- goto err_mlx5_cmd_bulk_alloc;
+ if (mlx5_fs_bulk_init(dev, &fc_bulk->fs_bulk, bulk_len))
+ goto fc_bulk_free;
- bulk->base_id = base_id;
- bulk->bulk_len = bulk_len;
- for (i = 0; i < bulk_len; i++) {
- mlx5_fc_init(&bulk->fcs[i], bulk, base_id + i);
- set_bit(i, bulk->bitmask);
- }
+ if (mlx5_cmd_fc_bulk_alloc(dev, alloc_bitmask, &base_id))
+ goto fs_bulk_cleanup;
+ fc_bulk->base_id = base_id;
+ for (i = 0; i < bulk_len; i++)
+ mlx5_fc_init(&fc_bulk->fcs[i], fc_bulk, base_id + i);
- return bulk;
+ refcount_set(&fc_bulk->hws_data.hws_action_refcount, 0);
+ mutex_init(&fc_bulk->hws_data.lock);
+ return &fc_bulk->fs_bulk;
-err_mlx5_cmd_bulk_alloc:
- kvfree(bulk->bitmask);
-err_alloc_bitmask:
- kvfree(bulk);
-err_alloc_bulk:
- return ERR_PTR(err);
+fs_bulk_cleanup:
+ mlx5_fs_bulk_cleanup(&fc_bulk->fs_bulk);
+fc_bulk_free:
+ kvfree(fc_bulk);
+ return NULL;
}
static int
-mlx5_fc_bulk_destroy(struct mlx5_core_dev *dev, struct mlx5_fc_bulk *bulk)
+mlx5_fc_bulk_destroy(struct mlx5_core_dev *dev, struct mlx5_fs_bulk *fs_bulk)
{
- if (mlx5_fc_bulk_get_free_fcs_amount(bulk) < bulk->bulk_len) {
+ struct mlx5_fc_bulk *fc_bulk = container_of(fs_bulk,
+ struct mlx5_fc_bulk,
+ fs_bulk);
+
+ if (mlx5_fs_bulk_get_free_amount(fs_bulk) < fs_bulk->bulk_len) {
mlx5_core_err(dev, "Freeing bulk before all counters were released\n");
return -EBUSY;
}
- mlx5_cmd_fc_free(dev, bulk->base_id);
- kvfree(bulk->bitmask);
- kvfree(bulk);
+ mlx5_cmd_fc_free(dev, fc_bulk->base_id);
+ mlx5_fs_bulk_cleanup(fs_bulk);
+ kvfree(fc_bulk);
return 0;
}
-static struct mlx5_fc *mlx5_fc_bulk_acquire_fc(struct mlx5_fc_bulk *bulk)
+static void mlx5_fc_pool_update_threshold(struct mlx5_fs_pool *fc_pool)
{
- int free_fc_index = find_first_bit(bulk->bitmask, bulk->bulk_len);
-
- if (free_fc_index >= bulk->bulk_len)
- return ERR_PTR(-ENOSPC);
-
- clear_bit(free_fc_index, bulk->bitmask);
- return &bulk->fcs[free_fc_index];
-}
-
-static int mlx5_fc_bulk_release_fc(struct mlx5_fc_bulk *bulk, struct mlx5_fc *fc)
-{
- int fc_index = fc->id - bulk->base_id;
-
- if (test_bit(fc_index, bulk->bitmask))
- return -EINVAL;
-
- set_bit(fc_index, bulk->bitmask);
- return 0;
+ fc_pool->threshold = min_t(int, MLX5_FC_POOL_MAX_THRESHOLD,
+ fc_pool->used_units / MLX5_FC_POOL_USED_BUFF_RATIO);
}
/* Flow counters pool API */
-static void mlx5_fc_pool_init(struct mlx5_fc_pool *fc_pool, struct mlx5_core_dev *dev)
-{
- fc_pool->dev = dev;
- mutex_init(&fc_pool->pool_lock);
- INIT_LIST_HEAD(&fc_pool->fully_used);
- INIT_LIST_HEAD(&fc_pool->partially_used);
- INIT_LIST_HEAD(&fc_pool->unused);
- fc_pool->available_fcs = 0;
- fc_pool->used_fcs = 0;
- fc_pool->threshold = 0;
-}
+static const struct mlx5_fs_pool_ops mlx5_fc_pool_ops = {
+ .bulk_destroy = mlx5_fc_bulk_destroy,
+ .bulk_create = mlx5_fc_bulk_create,
+ .update_threshold = mlx5_fc_pool_update_threshold,
+};
-static void mlx5_fc_pool_cleanup(struct mlx5_fc_pool *fc_pool)
+static void
+mlx5_fc_pool_init(struct mlx5_fs_pool *fc_pool, struct mlx5_core_dev *dev)
{
- struct mlx5_core_dev *dev = fc_pool->dev;
- struct mlx5_fc_bulk *bulk;
- struct mlx5_fc_bulk *tmp;
-
- list_for_each_entry_safe(bulk, tmp, &fc_pool->fully_used, pool_list)
- mlx5_fc_bulk_destroy(dev, bulk);
- list_for_each_entry_safe(bulk, tmp, &fc_pool->partially_used, pool_list)
- mlx5_fc_bulk_destroy(dev, bulk);
- list_for_each_entry_safe(bulk, tmp, &fc_pool->unused, pool_list)
- mlx5_fc_bulk_destroy(dev, bulk);
+ mlx5_fs_pool_init(fc_pool, dev, &mlx5_fc_pool_ops, NULL);
}
-static void mlx5_fc_pool_update_threshold(struct mlx5_fc_pool *fc_pool)
+static void mlx5_fc_pool_cleanup(struct mlx5_fs_pool *fc_pool)
{
- fc_pool->threshold = min_t(int, MLX5_FC_POOL_MAX_THRESHOLD,
- fc_pool->used_fcs / MLX5_FC_POOL_USED_BUFF_RATIO);
+ mlx5_fs_pool_cleanup(fc_pool);
}
-static struct mlx5_fc_bulk *
-mlx5_fc_pool_alloc_new_bulk(struct mlx5_fc_pool *fc_pool)
+static struct mlx5_fc *
+mlx5_fc_pool_acquire_counter(struct mlx5_fs_pool *fc_pool)
{
- struct mlx5_core_dev *dev = fc_pool->dev;
- struct mlx5_fc_bulk *new_bulk;
+ struct mlx5_fs_pool_index pool_index = {};
+ struct mlx5_fc_bulk *fc_bulk;
+ int err;
- new_bulk = mlx5_fc_bulk_create(dev);
- if (!IS_ERR(new_bulk))
- fc_pool->available_fcs += new_bulk->bulk_len;
- mlx5_fc_pool_update_threshold(fc_pool);
- return new_bulk;
+ err = mlx5_fs_pool_acquire_index(fc_pool, &pool_index);
+ if (err)
+ return ERR_PTR(err);
+ fc_bulk = container_of(pool_index.fs_bulk, struct mlx5_fc_bulk, fs_bulk);
+ return &fc_bulk->fcs[pool_index.index];
}
static void
-mlx5_fc_pool_free_bulk(struct mlx5_fc_pool *fc_pool, struct mlx5_fc_bulk *bulk)
+mlx5_fc_pool_release_counter(struct mlx5_fs_pool *fc_pool, struct mlx5_fc *fc)
{
+ struct mlx5_fs_bulk *fs_bulk = &fc->bulk->fs_bulk;
+ struct mlx5_fs_pool_index pool_index = {};
struct mlx5_core_dev *dev = fc_pool->dev;
- fc_pool->available_fcs -= bulk->bulk_len;
- mlx5_fc_bulk_destroy(dev, bulk);
- mlx5_fc_pool_update_threshold(fc_pool);
+ pool_index.fs_bulk = fs_bulk;
+ pool_index.index = fc->id - fc->bulk->base_id;
+ if (mlx5_fs_pool_release_index(fc_pool, &pool_index))
+ mlx5_core_warn(dev, "Attempted to release a counter which is not acquired\n");
}
-static struct mlx5_fc *
-mlx5_fc_pool_acquire_from_list(struct list_head *src_list,
- struct list_head *next_list,
- bool move_non_full_bulk)
+/**
+ * mlx5_fc_local_create - Allocate mlx5_fc struct for a counter which
+ * was already acquired using its counter id and bulk data.
+ *
+ * @counter_id: id of the already acquired counter
+ * @offset: counter offset from bulk base
+ * @bulk_size: size of the bulk the counter was allocated from
+ *
+ * Return: Pointer to mlx5_fc on success, ERR_PTR otherwise.
+ */
+struct mlx5_fc *
+mlx5_fc_local_create(u32 counter_id, u32 offset, u32 bulk_size)
{
- struct mlx5_fc_bulk *bulk;
- struct mlx5_fc *fc;
-
- if (list_empty(src_list))
- return ERR_PTR(-ENODATA);
-
- bulk = list_first_entry(src_list, struct mlx5_fc_bulk, pool_list);
- fc = mlx5_fc_bulk_acquire_fc(bulk);
- if (move_non_full_bulk || mlx5_fc_bulk_get_free_fcs_amount(bulk) == 0)
- list_move(&bulk->pool_list, next_list);
- return fc;
-}
+ struct mlx5_fc_bulk *fc_bulk;
+ struct mlx5_fc *counter;
-static struct mlx5_fc *
-mlx5_fc_pool_acquire_counter(struct mlx5_fc_pool *fc_pool)
-{
- struct mlx5_fc_bulk *new_bulk;
- struct mlx5_fc *fc;
-
- mutex_lock(&fc_pool->pool_lock);
-
- fc = mlx5_fc_pool_acquire_from_list(&fc_pool->partially_used,
- &fc_pool->fully_used, false);
- if (IS_ERR(fc))
- fc = mlx5_fc_pool_acquire_from_list(&fc_pool->unused,
- &fc_pool->partially_used,
- true);
- if (IS_ERR(fc)) {
- new_bulk = mlx5_fc_pool_alloc_new_bulk(fc_pool);
- if (IS_ERR(new_bulk)) {
- fc = ERR_CAST(new_bulk);
- goto out;
- }
- fc = mlx5_fc_bulk_acquire_fc(new_bulk);
- list_add(&new_bulk->pool_list, &fc_pool->partially_used);
+ counter = kzalloc(sizeof(*counter), GFP_KERNEL);
+ if (!counter)
+ return ERR_PTR(-ENOMEM);
+ fc_bulk = kzalloc(sizeof(*fc_bulk), GFP_KERNEL);
+ if (!fc_bulk) {
+ kfree(counter);
+ return ERR_PTR(-ENOMEM);
}
- fc_pool->available_fcs--;
- fc_pool->used_fcs++;
-out:
- mutex_unlock(&fc_pool->pool_lock);
- return fc;
+ counter->type = MLX5_FC_TYPE_LOCAL;
+ counter->id = counter_id;
+ fc_bulk->base_id = counter_id - offset;
+ fc_bulk->fs_bulk.bulk_len = bulk_size;
+ counter->bulk = fc_bulk;
+ return counter;
}
+EXPORT_SYMBOL(mlx5_fc_local_create);
-static void
-mlx5_fc_pool_release_counter(struct mlx5_fc_pool *fc_pool, struct mlx5_fc *fc)
+void mlx5_fc_local_destroy(struct mlx5_fc *counter)
{
- struct mlx5_core_dev *dev = fc_pool->dev;
- struct mlx5_fc_bulk *bulk = fc->bulk;
- int bulk_free_fcs_amount;
-
- mutex_lock(&fc_pool->pool_lock);
-
- if (mlx5_fc_bulk_release_fc(bulk, fc)) {
- mlx5_core_warn(dev, "Attempted to release a counter which is not acquired\n");
- goto unlock;
- }
-
- fc_pool->available_fcs++;
- fc_pool->used_fcs--;
-
- bulk_free_fcs_amount = mlx5_fc_bulk_get_free_fcs_amount(bulk);
- if (bulk_free_fcs_amount == 1)
- list_move_tail(&bulk->pool_list, &fc_pool->partially_used);
- if (bulk_free_fcs_amount == bulk->bulk_len) {
- list_del(&bulk->pool_list);
- if (fc_pool->available_fcs > fc_pool->threshold)
- mlx5_fc_pool_free_bulk(fc_pool, bulk);
- else
- list_add(&bulk->pool_list, &fc_pool->unused);
- }
+ if (!counter || counter->type != MLX5_FC_TYPE_LOCAL)
+ return;
-unlock:
- mutex_unlock(&fc_pool->pool_lock);
+ kfree(counter->bulk);
+ kfree(counter);
}
+EXPORT_SYMBOL(mlx5_fc_local_destroy);
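
A self-contained sketch of the bookkeeping mlx5_fc_local_create() sets up above: a "local" counter wraps an id allocated elsewhere, and the bulk base id is recovered as counter_id - offset so later code can still compute the counter's position inside its bulk. Userspace model only; names and sizes here are illustrative.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct bulk {
	uint32_t base_id;
	uint32_t bulk_len;
};

struct counter {
	uint32_t id;
	struct bulk *bulk;
};

static struct counter *local_counter_create(uint32_t counter_id, uint32_t offset,
					     uint32_t bulk_size)
{
	struct counter *c = calloc(1, sizeof(*c));
	struct bulk *b = calloc(1, sizeof(*b));

	if (!c || !b) {
		free(c);
		free(b);
		return NULL;
	}
	c->id = counter_id;
	b->base_id = counter_id - offset;	/* recover the bulk base */
	b->bulk_len = bulk_size;
	c->bulk = b;
	return c;
}

static void local_counter_destroy(struct counter *c)
{
	if (!c)
		return;
	free(c->bulk);
	free(c);
}

int main(void)
{
	struct counter *c = local_counter_create(1030, 6, 128);

	if (c)
		printf("id %u, bulk base %u\n", c->id, c->bulk->base_id);
	local_counter_destroy(c);
	return 0;
}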
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.c
index c14590acc772..f6abfd00d7e6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.c
@@ -50,10 +50,12 @@ mlx5_ft_pool_get_avail_sz(struct mlx5_core_dev *dev, enum fs_flow_table_type tab
int i, found_i = -1;
for (i = ARRAY_SIZE(FT_POOLS) - 1; i >= 0; i--) {
- if (dev->priv.ft_pool->ft_left[i] && FT_POOLS[i] >= desired_size &&
+ if (dev->priv.ft_pool->ft_left[i] &&
+ (FT_POOLS[i] >= desired_size ||
+ desired_size == MLX5_FS_MAX_POOL_SIZE) &&
FT_POOLS[i] <= max_ft_size) {
found_i = i;
- if (desired_size != POOL_NEXT_SIZE)
+ if (desired_size != MLX5_FS_MAX_POOL_SIZE)
break;
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.h
index 25f4274b372b..173e312db720 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.h
@@ -7,8 +7,6 @@
#include <linux/mlx5/driver.h>
#include "fs_core.h"
-#define POOL_NEXT_SIZE 0
-
int mlx5_ft_pool_init(struct mlx5_core_dev *dev);
void mlx5_ft_pool_destroy(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_pool.c
new file mode 100644
index 000000000000..f6c226664602
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_pool.c
@@ -0,0 +1,195 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include <mlx5_core.h>
+#include "fs_pool.h"
+
+int mlx5_fs_bulk_init(struct mlx5_core_dev *dev, struct mlx5_fs_bulk *fs_bulk,
+ int bulk_len)
+{
+ int i;
+
+ fs_bulk->bitmask = kvcalloc(BITS_TO_LONGS(bulk_len), sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!fs_bulk->bitmask)
+ return -ENOMEM;
+
+ fs_bulk->bulk_len = bulk_len;
+ for (i = 0; i < bulk_len; i++)
+ set_bit(i, fs_bulk->bitmask);
+
+ return 0;
+}
+
+void mlx5_fs_bulk_cleanup(struct mlx5_fs_bulk *fs_bulk)
+{
+ kvfree(fs_bulk->bitmask);
+}
+
+int mlx5_fs_bulk_get_free_amount(struct mlx5_fs_bulk *bulk)
+{
+ return bitmap_weight(bulk->bitmask, bulk->bulk_len);
+}
+
+static int mlx5_fs_bulk_acquire_index(struct mlx5_fs_bulk *fs_bulk,
+ struct mlx5_fs_pool_index *pool_index)
+{
+ int free_index = find_first_bit(fs_bulk->bitmask, fs_bulk->bulk_len);
+
+ WARN_ON_ONCE(!pool_index || !fs_bulk);
+ if (free_index >= fs_bulk->bulk_len)
+ return -ENOSPC;
+
+ clear_bit(free_index, fs_bulk->bitmask);
+ pool_index->fs_bulk = fs_bulk;
+ pool_index->index = free_index;
+ return 0;
+}
+
+static int mlx5_fs_bulk_release_index(struct mlx5_fs_bulk *fs_bulk, int index)
+{
+ if (test_bit(index, fs_bulk->bitmask))
+ return -EINVAL;
+
+ set_bit(index, fs_bulk->bitmask);
+ return 0;
+}
+
+void mlx5_fs_pool_init(struct mlx5_fs_pool *pool, struct mlx5_core_dev *dev,
+ const struct mlx5_fs_pool_ops *ops, void *pool_ctx)
+{
+ WARN_ON_ONCE(!ops || !ops->bulk_destroy || !ops->bulk_create ||
+ !ops->update_threshold);
+ pool->dev = dev;
+ pool->pool_ctx = pool_ctx;
+ mutex_init(&pool->pool_lock);
+ INIT_LIST_HEAD(&pool->fully_used);
+ INIT_LIST_HEAD(&pool->partially_used);
+ INIT_LIST_HEAD(&pool->unused);
+ pool->available_units = 0;
+ pool->used_units = 0;
+ pool->threshold = 0;
+ pool->ops = ops;
+}
+
+void mlx5_fs_pool_cleanup(struct mlx5_fs_pool *pool)
+{
+ struct mlx5_core_dev *dev = pool->dev;
+ struct mlx5_fs_bulk *bulk;
+ struct mlx5_fs_bulk *tmp;
+
+ list_for_each_entry_safe(bulk, tmp, &pool->fully_used, pool_list)
+ pool->ops->bulk_destroy(dev, bulk);
+ list_for_each_entry_safe(bulk, tmp, &pool->partially_used, pool_list)
+ pool->ops->bulk_destroy(dev, bulk);
+ list_for_each_entry_safe(bulk, tmp, &pool->unused, pool_list)
+ pool->ops->bulk_destroy(dev, bulk);
+}
+
+static struct mlx5_fs_bulk *
+mlx5_fs_pool_alloc_new_bulk(struct mlx5_fs_pool *fs_pool)
+{
+ struct mlx5_core_dev *dev = fs_pool->dev;
+ struct mlx5_fs_bulk *new_bulk;
+
+ new_bulk = fs_pool->ops->bulk_create(dev, fs_pool->pool_ctx);
+ if (new_bulk)
+ fs_pool->available_units += new_bulk->bulk_len;
+ fs_pool->ops->update_threshold(fs_pool);
+ return new_bulk;
+}
+
+static void
+mlx5_fs_pool_free_bulk(struct mlx5_fs_pool *fs_pool, struct mlx5_fs_bulk *bulk)
+{
+ struct mlx5_core_dev *dev = fs_pool->dev;
+
+ fs_pool->available_units -= bulk->bulk_len;
+ fs_pool->ops->bulk_destroy(dev, bulk);
+ fs_pool->ops->update_threshold(fs_pool);
+}
+
+static int
+mlx5_fs_pool_acquire_from_list(struct list_head *src_list,
+ struct list_head *next_list,
+ bool move_non_full_bulk,
+ struct mlx5_fs_pool_index *pool_index)
+{
+ struct mlx5_fs_bulk *fs_bulk;
+ int err;
+
+ if (list_empty(src_list))
+ return -ENODATA;
+
+ fs_bulk = list_first_entry(src_list, struct mlx5_fs_bulk, pool_list);
+ err = mlx5_fs_bulk_acquire_index(fs_bulk, pool_index);
+ if (move_non_full_bulk || mlx5_fs_bulk_get_free_amount(fs_bulk) == 0)
+ list_move(&fs_bulk->pool_list, next_list);
+ return err;
+}
+
+int mlx5_fs_pool_acquire_index(struct mlx5_fs_pool *fs_pool,
+ struct mlx5_fs_pool_index *pool_index)
+{
+ struct mlx5_fs_bulk *new_bulk;
+ int err;
+
+ mutex_lock(&fs_pool->pool_lock);
+
+ err = mlx5_fs_pool_acquire_from_list(&fs_pool->partially_used,
+ &fs_pool->fully_used, false,
+ pool_index);
+ if (err)
+ err = mlx5_fs_pool_acquire_from_list(&fs_pool->unused,
+ &fs_pool->partially_used,
+ true, pool_index);
+ if (err) {
+ new_bulk = mlx5_fs_pool_alloc_new_bulk(fs_pool);
+ if (!new_bulk) {
+ err = -ENOENT;
+ goto out;
+ }
+ err = mlx5_fs_bulk_acquire_index(new_bulk, pool_index);
+ WARN_ON_ONCE(err);
+ list_add(&new_bulk->pool_list, &fs_pool->partially_used);
+ }
+ fs_pool->available_units--;
+ fs_pool->used_units++;
+
+out:
+ mutex_unlock(&fs_pool->pool_lock);
+ return err;
+}
+
+int mlx5_fs_pool_release_index(struct mlx5_fs_pool *fs_pool,
+ struct mlx5_fs_pool_index *pool_index)
+{
+ struct mlx5_fs_bulk *bulk = pool_index->fs_bulk;
+ int bulk_free_amount;
+ int err;
+
+ mutex_lock(&fs_pool->pool_lock);
+
+ /* TBD: would rather return void if the original code had no warning here */
+ err = mlx5_fs_bulk_release_index(bulk, pool_index->index);
+ if (err)
+ goto unlock;
+
+ fs_pool->available_units++;
+ fs_pool->used_units--;
+
+ bulk_free_amount = mlx5_fs_bulk_get_free_amount(bulk);
+ if (bulk_free_amount == 1)
+ list_move_tail(&bulk->pool_list, &fs_pool->partially_used);
+ if (bulk_free_amount == bulk->bulk_len) {
+ list_del(&bulk->pool_list);
+ if (fs_pool->available_units > fs_pool->threshold)
+ mlx5_fs_pool_free_bulk(fs_pool, bulk);
+ else
+ list_add(&bulk->pool_list, &fs_pool->unused);
+ }
+
+unlock:
+ mutex_unlock(&fs_pool->pool_lock);
+ return err;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_pool.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_pool.h
new file mode 100644
index 000000000000..f04ec3107498
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_pool.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef __MLX5_FS_POOL_H__
+#define __MLX5_FS_POOL_H__
+
+#include <linux/mlx5/driver.h>
+
+struct mlx5_fs_bulk {
+ struct list_head pool_list;
+ int bulk_len;
+ unsigned long *bitmask;
+};
+
+struct mlx5_fs_pool_index {
+ struct mlx5_fs_bulk *fs_bulk;
+ int index;
+};
+
+struct mlx5_fs_pool;
+
+struct mlx5_fs_pool_ops {
+ int (*bulk_destroy)(struct mlx5_core_dev *dev, struct mlx5_fs_bulk *bulk);
+ struct mlx5_fs_bulk * (*bulk_create)(struct mlx5_core_dev *dev,
+ void *pool_ctx);
+ void (*update_threshold)(struct mlx5_fs_pool *pool);
+};
+
+struct mlx5_fs_pool {
+ struct mlx5_core_dev *dev;
+ void *pool_ctx;
+ const struct mlx5_fs_pool_ops *ops;
+ struct mutex pool_lock; /* protects pool lists */
+ struct list_head fully_used;
+ struct list_head partially_used;
+ struct list_head unused;
+ int available_units;
+ int used_units;
+ int threshold;
+};
+
+int mlx5_fs_bulk_init(struct mlx5_core_dev *dev, struct mlx5_fs_bulk *fs_bulk,
+ int bulk_len);
+void mlx5_fs_bulk_cleanup(struct mlx5_fs_bulk *fs_bulk);
+int mlx5_fs_bulk_get_free_amount(struct mlx5_fs_bulk *bulk);
+
+void mlx5_fs_pool_init(struct mlx5_fs_pool *pool, struct mlx5_core_dev *dev,
+ const struct mlx5_fs_pool_ops *ops, void *pool_ctx);
+void mlx5_fs_pool_cleanup(struct mlx5_fs_pool *pool);
+int mlx5_fs_pool_acquire_index(struct mlx5_fs_pool *fs_pool,
+ struct mlx5_fs_pool_index *pool_index);
+int mlx5_fs_pool_release_index(struct mlx5_fs_pool *fs_pool,
+ struct mlx5_fs_pool_index *pool_index);
+
+#endif /* __MLX5_FS_POOL_H__ */
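
The new fs_pool layer above keeps one free-slot bitmap per bulk: acquire clears the first set bit and hands back its index, release sets the bit again and rejects a double free. A self-contained userspace sketch of that bookkeeping (not the kernel code, which uses find_first_bit() and the bulk lists):

#include <errno.h>
#include <stdio.h>

#define BULK_LEN 8

struct bulk {
	unsigned long bitmask;	/* bit set => slot is free */
	int bulk_len;
};

static void bulk_init(struct bulk *b)
{
	b->bulk_len = BULK_LEN;
	b->bitmask = (1UL << BULK_LEN) - 1;
}

static int bulk_acquire(struct bulk *b)
{
	for (int i = 0; i < b->bulk_len; i++) {
		if (b->bitmask & (1UL << i)) {
			b->bitmask &= ~(1UL << i);
			return i;
		}
	}
	return -ENOSPC;
}

static int bulk_release(struct bulk *b, int index)
{
	if (b->bitmask & (1UL << index))
		return -EINVAL;	/* slot was never acquired */
	b->bitmask |= 1UL << index;
	return 0;
}

int main(void)
{
	struct bulk b;
	int idx, first, second;

	bulk_init(&b);
	idx = bulk_acquire(&b);
	first = bulk_release(&b, idx);
	second = bulk_release(&b, idx);
	printf("acquired %d, release %d, double free %d\n", idx, first, second);
	return 0;
}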
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 76ad46bf477d..57476487e31f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -281,6 +281,19 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
return err;
}
+ if (MLX5_CAP_GEN(dev, shampo)) {
+ err = mlx5_core_get_caps_mode(dev, MLX5_CAP_SHAMPO, HCA_CAP_OPMOD_GET_CUR);
+ if (err)
+ return err;
+ }
+
+ if (MLX5_CAP_GEN(dev, adv_rdma)) {
+ err = mlx5_core_get_caps_mode(dev, MLX5_CAP_ADV_RDMA,
+ HCA_CAP_OPMOD_GET_CUR);
+ if (err)
+ return err;
+ }
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
index 566710d34a7b..69933addd921 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
@@ -246,7 +246,7 @@ static void mlx5_stop_sync_reset_poll(struct mlx5_core_dev *dev)
{
struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
- del_timer_sync(&fw_reset->timer);
+ timer_delete_sync(&fw_reset->timer);
}
static int mlx5_sync_reset_clear_reset_requested(struct mlx5_core_dev *dev, bool poll_health)
@@ -278,7 +278,8 @@ static void mlx5_sync_reset_reload_work(struct work_struct *work)
#define MLX5_RESET_POLL_INTERVAL (HZ / 10)
static void poll_sync_reset(struct timer_list *t)
{
- struct mlx5_fw_reset *fw_reset = from_timer(fw_reset, t, timer);
+ struct mlx5_fw_reset *fw_reset = timer_container_of(fw_reset, t,
+ timer);
struct mlx5_core_dev *dev = fw_reset->dev;
u32 fatal_error;
@@ -345,15 +346,12 @@ static void mlx5_fw_live_patch_event(struct work_struct *work)
}
#if IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE)
-static int mlx5_check_hotplug_interrupt(struct mlx5_core_dev *dev)
+static int mlx5_check_hotplug_interrupt(struct mlx5_core_dev *dev,
+ struct pci_dev *bridge)
{
- struct pci_dev *bridge = dev->pdev->bus->self;
u16 reg16;
int err;
- if (!bridge)
- return -EOPNOTSUPP;
-
err = pcie_capability_read_word(bridge, PCI_EXP_SLTCTL, &reg16);
if (err)
return err;
@@ -416,9 +414,15 @@ static int mlx5_check_dev_ids(struct mlx5_core_dev *dev, u16 dev_id)
static bool mlx5_is_reset_now_capable(struct mlx5_core_dev *dev,
u8 reset_method)
{
+ struct pci_dev *bridge = dev->pdev->bus->self;
u16 dev_id;
int err;
+ if (!bridge) {
+ mlx5_core_warn(dev, "PCI bus bridge is not accessible\n");
+ return false;
+ }
+
if (!MLX5_CAP_GEN(dev, fast_teardown)) {
mlx5_core_warn(dev, "fast teardown is not supported by firmware\n");
return false;
@@ -426,7 +430,7 @@ static bool mlx5_is_reset_now_capable(struct mlx5_core_dev *dev,
#if IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE)
if (reset_method != MLX5_MFRL_REG_PCI_RESET_METHOD_HOT_RESET) {
- err = mlx5_check_hotplug_interrupt(dev);
+ err = mlx5_check_hotplug_interrupt(dev, bridge);
if (err)
return false;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index a6329ca2d9bf..cf7a1edd0530 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -96,6 +96,11 @@ static int mlx5_health_get_rfr(u8 rfr_severity)
return rfr_severity >> MLX5_RFR_BIT_OFFSET;
}
+static int mlx5_health_get_crr(u8 rfr_severity)
+{
+ return (rfr_severity >> MLX5_CRR_BIT_OFFSET) & 0x01;
+}
+
static bool sensor_fw_synd_rfr(struct mlx5_core_dev *dev)
{
struct mlx5_core_health *health = &dev->priv.health;
@@ -375,6 +380,8 @@ static const char *hsynd_str(u8 synd)
return "High temperature";
case MLX5_INITIAL_SEG_HEALTH_SYNDROME_ICM_PCI_POISONED_ERR:
return "ICM fetch PCI data poisoned error";
+ case MLX5_INITIAL_SEG_HEALTH_SYNDROME_TRUST_LOCKDOWN_ERR:
+ return "Trust lockdown error";
default:
return "unrecognized error";
}
@@ -442,12 +449,15 @@ static void print_health_info(struct mlx5_core_dev *dev)
mlx5_log(dev, severity, "time %u\n", ioread32be(&h->time));
mlx5_log(dev, severity, "hw_id 0x%08x\n", ioread32be(&h->hw_id));
mlx5_log(dev, severity, "rfr %d\n", mlx5_health_get_rfr(rfr_severity));
+ mlx5_log(dev, severity, "crr %d\n", mlx5_health_get_crr(rfr_severity));
mlx5_log(dev, severity, "severity %d (%s)\n", severity, mlx5_loglevel_str(severity));
mlx5_log(dev, severity, "irisc_index %d\n", ioread8(&h->irisc_index));
mlx5_log(dev, severity, "synd 0x%x: %s\n", ioread8(&h->synd),
hsynd_str(ioread8(&h->synd)));
mlx5_log(dev, severity, "ext_synd 0x%04x\n", ioread16be(&h->ext_synd));
mlx5_log(dev, severity, "raw fw_ver 0x%08x\n", ioread32be(&h->fw_ver));
+ if (mlx5_health_get_crr(rfr_severity))
+ mlx5_core_warn(dev, "Cold reset is required\n");
}
static int
@@ -769,7 +779,8 @@ static void mlx5_health_log_ts_update(struct work_struct *work)
static void poll_health(struct timer_list *t)
{
- struct mlx5_core_dev *dev = from_timer(dev, t, priv.health.timer);
+ struct mlx5_core_dev *dev = timer_container_of(dev, t,
+ priv.health.timer);
struct mlx5_core_health *health = &dev->priv.health;
struct health_buffer __iomem *h = health->health;
u32 fatal_error;
@@ -799,14 +810,17 @@ static void poll_health(struct timer_list *t)
health->prev = count;
if (health->miss_counter == MAX_MISSES) {
mlx5_core_err(dev, "device's health compromised - reached miss count\n");
+ health->synd = ioread8(&h->synd);
print_health_info(dev);
queue_work(health->wq, &health->report_work);
}
prev_synd = health->synd;
health->synd = ioread8(&h->synd);
- if (health->synd && health->synd != prev_synd)
+ if (health->synd && health->synd != prev_synd) {
+ print_health_info(dev);
queue_work(health->wq, &health->report_work);
+ }
out:
mod_timer(&health->timer, get_next_poll_jiffies(dev));
@@ -834,7 +848,7 @@ void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health)
if (disable_health)
set_bit(MLX5_DROP_HEALTH_WORK, &health->flags);
- del_timer_sync(&health->timer);
+ timer_delete_sync(&health->timer);
}
void mlx5_start_health_fw_log_up(struct mlx5_core_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/hwmon.c b/drivers/net/ethernet/mellanox/mlx5/core/hwmon.c
index 353f81dccd1c..4ba2636d7fb6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/hwmon.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/hwmon.c
@@ -416,3 +416,8 @@ void mlx5_hwmon_dev_unregister(struct mlx5_core_dev *mdev)
mlx5_hwmon_free(hwmon);
mdev->hwmon = NULL;
}
+
+const char *hwmon_get_sensor_name(struct mlx5_hwmon *hwmon, int channel)
+{
+ return hwmon->temp_channel_desc[channel].sensor_name;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/hwmon.h b/drivers/net/ethernet/mellanox/mlx5/core/hwmon.h
index 999654a9b9da..f38271c22c10 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/hwmon.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/hwmon.h
@@ -10,6 +10,7 @@
int mlx5_hwmon_dev_register(struct mlx5_core_dev *mdev);
void mlx5_hwmon_dev_unregister(struct mlx5_core_dev *mdev);
+const char *hwmon_get_sensor_name(struct mlx5_hwmon *hwmon, int channel);
#else
static inline int mlx5_hwmon_dev_register(struct mlx5_core_dev *mdev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 0979d672d47f..79ae3a51a4b3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -32,6 +32,7 @@
#include <rdma/ib_verbs.h>
#include <linux/mlx5/fs.h>
+#include <net/netdev_lock.h>
#include "en.h"
#include "en/params.h"
#include "ipoib.h"
@@ -102,6 +103,8 @@ int mlx5i_init(struct mlx5_core_dev *mdev, struct net_device *netdev)
netdev->netdev_ops = &mlx5i_netdev_ops;
netdev->ethtool_ops = &mlx5i_ethtool_ops;
+ netdev->request_ops_lock = true;
+ netdev_lockdep_set_classes(netdev);
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
index 1477db7f5307..2691d88cdee1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
@@ -175,7 +175,7 @@ unlock:
void mlx5_irq_affinity_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *irq)
{
- struct mlx5_irq_pool *pool = mlx5_irq_pool_get(dev);
+ struct mlx5_irq_pool *pool = mlx5_irq_get_pool(irq);
int cpu;
cpu = cpumask_first(mlx5_irq_get_affinity_mask(irq));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c
index f4b777d4e108..62b6faa4276a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c
@@ -105,20 +105,20 @@ static int mapping_show(struct seq_file *file, void *priv)
struct mlx5_lag *ldev;
bool hash = false;
bool lag_active;
+ int i, idx = 0;
int num_ports;
- int i;
ldev = mlx5_lag_dev(dev);
mutex_lock(&ldev->lock);
lag_active = __mlx5_lag_is_active(ldev);
if (lag_active) {
if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &ldev->mode_flags)) {
- mlx5_infer_tx_enabled(&ldev->tracker, ldev->ports, ports,
+ mlx5_infer_tx_enabled(&ldev->tracker, ldev, ports,
&num_ports);
hash = true;
} else {
- for (i = 0; i < ldev->ports; i++)
- ports[i] = ldev->v2p_map[i];
+ mlx5_ldev_for_each(i, 0, ldev)
+ ports[idx++] = ldev->v2p_map[i];
num_ports = ldev->ports;
}
}
@@ -144,11 +144,8 @@ static int members_show(struct seq_file *file, void *priv)
ldev = mlx5_lag_dev(dev);
mutex_lock(&ldev->lock);
- for (i = 0; i < ldev->ports; i++) {
- if (!ldev->pf[i].dev)
- continue;
+ mlx5_ldev_for_each(i, 0, ldev)
seq_printf(file, "%s\n", dev_name(ldev->pf[i].dev->device));
- }
mutex_unlock(&ldev->lock);
return 0;
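
The lag.c changes below replace fixed 0..ports-1 loops with iteration over populated PF slots, and mlx5_lag_get_dev_index_by_seq() maps a dense sequence number onto a possibly sparse slot array. A small userspace sketch of that mapping, with an invented populated[] array standing in for ldev->pf[]:

#include <stdio.h>

#define MAX_SLOTS 4

static int dev_index_by_seq(const int *populated, int seq)
{
	int num = 0;

	for (int i = 0; i < MAX_SLOTS; i++) {
		if (!populated[i])
			continue;
		if (num == seq)
			return i;	/* seq-th populated slot */
		num++;
	}
	return -1;	/* stands in for -ENOENT */
}

int main(void)
{
	int populated[MAX_SLOTS] = { 1, 0, 1, 1 };	/* slot 1 is empty */

	printf("seq 0 -> slot %d\n", dev_index_by_seq(populated, 0));
	printf("seq 1 -> slot %d\n", dev_index_by_seq(populated, 1));
	return 0;
}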
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index 7f68468c2e75..d058cbb4a00c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -43,10 +43,6 @@
#include "mp.h"
#include "mpesw.h"
-enum {
- MLX5_LAG_EGRESS_PORT_1 = 1,
- MLX5_LAG_EGRESS_PORT_2,
-};
/* General purpose, use for short periods of time.
* Beware of lock dependencies (preferably, no locks should be acquired
@@ -72,7 +68,7 @@ static u8 lag_active_port_bits(struct mlx5_lag *ldev)
int num_enabled;
int idx;
- mlx5_infer_tx_enabled(&ldev->tracker, ldev->ports, enabled_ports,
+ mlx5_infer_tx_enabled(&ldev->tracker, ldev, enabled_ports,
&num_enabled);
for (idx = 0; idx < num_enabled; idx++)
active_port |= BIT_MASK(enabled_ports[idx]);
@@ -80,23 +76,30 @@ static u8 lag_active_port_bits(struct mlx5_lag *ldev)
return active_port;
}
-static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 *ports, int mode,
- unsigned long flags)
+static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, struct mlx5_lag *ldev,
+ int mode, unsigned long flags)
{
bool fdb_sel_mode = test_bit(MLX5_LAG_MODE_FLAG_FDB_SEL_MODE_NATIVE,
&flags);
int port_sel_mode = get_port_sel_mode(mode, flags);
u32 in[MLX5_ST_SZ_DW(create_lag_in)] = {};
+ u8 *ports = ldev->v2p_map;
+ int idx0, idx1;
void *lag_ctx;
lag_ctx = MLX5_ADDR_OF(create_lag_in, in, ctx);
MLX5_SET(create_lag_in, in, opcode, MLX5_CMD_OP_CREATE_LAG);
MLX5_SET(lagc, lag_ctx, fdb_selection_mode, fdb_sel_mode);
+ idx0 = mlx5_lag_get_dev_index_by_seq(ldev, 0);
+ idx1 = mlx5_lag_get_dev_index_by_seq(ldev, 1);
+
+ if (idx0 < 0 || idx1 < 0)
+ return -EINVAL;
switch (port_sel_mode) {
case MLX5_LAG_PORT_SELECT_MODE_QUEUE_AFFINITY:
- MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, ports[0]);
- MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, ports[1]);
+ MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, ports[idx0]);
+ MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, ports[idx1]);
break;
case MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_FT:
if (!MLX5_CAP_PORT_SELECTION(dev, port_select_flow_table_bypass))
@@ -113,17 +116,23 @@ static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 *ports, int mode,
return mlx5_cmd_exec_in(dev, create_lag, in);
}
-static int mlx5_cmd_modify_lag(struct mlx5_core_dev *dev, u8 num_ports,
+static int mlx5_cmd_modify_lag(struct mlx5_core_dev *dev, struct mlx5_lag *ldev,
u8 *ports)
{
u32 in[MLX5_ST_SZ_DW(modify_lag_in)] = {};
void *lag_ctx = MLX5_ADDR_OF(modify_lag_in, in, ctx);
+ int idx0, idx1;
+
+ idx0 = mlx5_lag_get_dev_index_by_seq(ldev, 0);
+ idx1 = mlx5_lag_get_dev_index_by_seq(ldev, 1);
+ if (idx0 < 0 || idx1 < 0)
+ return -EINVAL;
MLX5_SET(modify_lag_in, in, opcode, MLX5_CMD_OP_MODIFY_LAG);
MLX5_SET(modify_lag_in, in, field_select, 0x1);
- MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, ports[0]);
- MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, ports[1]);
+ MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, ports[idx0]);
+ MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, ports[idx1]);
return mlx5_cmd_exec_in(dev, modify_lag, in);
}
@@ -148,33 +157,31 @@ int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev)
}
EXPORT_SYMBOL(mlx5_cmd_destroy_vport_lag);
-static void mlx5_infer_tx_disabled(struct lag_tracker *tracker, u8 num_ports,
+static void mlx5_infer_tx_disabled(struct lag_tracker *tracker, struct mlx5_lag *ldev,
u8 *ports, int *num_disabled)
{
int i;
*num_disabled = 0;
- for (i = 0; i < num_ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev)
if (!tracker->netdev_state[i].tx_enabled ||
!tracker->netdev_state[i].link_up)
ports[(*num_disabled)++] = i;
- }
}
-void mlx5_infer_tx_enabled(struct lag_tracker *tracker, u8 num_ports,
+void mlx5_infer_tx_enabled(struct lag_tracker *tracker, struct mlx5_lag *ldev,
u8 *ports, int *num_enabled)
{
int i;
*num_enabled = 0;
- for (i = 0; i < num_ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev)
if (tracker->netdev_state[i].tx_enabled &&
tracker->netdev_state[i].link_up)
ports[(*num_enabled)++] = i;
- }
if (*num_enabled == 0)
- mlx5_infer_tx_disabled(tracker, num_ports, ports, num_enabled);
+ mlx5_infer_tx_disabled(tracker, ldev, ports, num_enabled);
}
static void mlx5_lag_print_mapping(struct mlx5_core_dev *dev,
@@ -192,7 +199,7 @@ static void mlx5_lag_print_mapping(struct mlx5_core_dev *dev,
int j;
if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags)) {
- mlx5_infer_tx_enabled(tracker, ldev->ports, enabled_ports,
+ mlx5_infer_tx_enabled(tracker, ldev, enabled_ports,
&num_enabled);
for (i = 0; i < num_enabled; i++) {
err = scnprintf(buf + written, 4, "%d, ", enabled_ports[i] + 1);
@@ -203,7 +210,7 @@ static void mlx5_lag_print_mapping(struct mlx5_core_dev *dev,
buf[written - 2] = 0;
mlx5_core_info(dev, "lag map active ports: %s\n", buf);
} else {
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
for (j = 0; j < ldev->buckets; j++) {
idx = i * ldev->buckets + j;
err = scnprintf(buf + written, 10,
@@ -286,13 +293,55 @@ int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev,
{
int i;
- for (i = 0; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev)
if (ldev->pf[i].netdev == ndev)
return i;
return -ENOENT;
}
+int mlx5_lag_get_dev_index_by_seq(struct mlx5_lag *ldev, int seq)
+{
+ int i, num = 0;
+
+ if (!ldev)
+ return -ENOENT;
+
+ mlx5_ldev_for_each(i, 0, ldev) {
+ if (num == seq)
+ return i;
+ num++;
+ }
+ return -ENOENT;
+}
+
+int mlx5_lag_num_devs(struct mlx5_lag *ldev)
+{
+ int i, num = 0;
+
+ if (!ldev)
+ return 0;
+
+ mlx5_ldev_for_each(i, 0, ldev) {
+ (void)i;
+ num++;
+ }
+ return num;
+}
+
+int mlx5_lag_num_netdevs(struct mlx5_lag *ldev)
+{
+ int i, num = 0;
+
+ if (!ldev)
+ return 0;
+
+ mlx5_ldev_for_each(i, 0, ldev)
+ if (ldev->pf[i].netdev)
+ num++;
+ return num;
+}
+
static bool __mlx5_lag_is_roce(struct mlx5_lag *ldev)
{
return ldev->mode == MLX5_LAG_MODE_ROCE;
@@ -310,7 +359,7 @@ static bool __mlx5_lag_is_sriov(struct mlx5_lag *ldev)
* with mapping that points to active ports.
*/
static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
- u8 num_ports,
+ struct mlx5_lag *ldev,
u8 buckets,
u8 *ports)
{
@@ -323,7 +372,7 @@ static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
int i;
int j;
- for (i = 0; i < num_ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
if (tracker->netdev_state[i].tx_enabled &&
tracker->netdev_state[i].link_up)
enabled[enabled_ports_num++] = i;
@@ -334,15 +383,16 @@ static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
/* Use native mapping by default where each port's buckets
* point the native port: 1 1 1 .. 1 2 2 2 ... 2 3 3 3 ... 3 etc
*/
- for (i = 0; i < num_ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev) {
for (j = 0; j < buckets; j++) {
idx = i * buckets + j;
- ports[idx] = MLX5_LAG_EGRESS_PORT_1 + i;
+ ports[idx] = i + 1;
}
+ }
/* If all ports are disabled/enabled keep native mapping */
- if (enabled_ports_num == num_ports ||
- disabled_ports_num == num_ports)
+ if (enabled_ports_num == ldev->ports ||
+ disabled_ports_num == ldev->ports)
return;
/* Go over the disabled ports and for each assign a random active port */
@@ -358,7 +408,7 @@ static bool mlx5_lag_has_drop_rule(struct mlx5_lag *ldev)
{
int i;
- for (i = 0; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev)
if (ldev->pf[i].has_drop)
return true;
return false;
@@ -368,7 +418,7 @@ static void mlx5_lag_drop_rule_cleanup(struct mlx5_lag *ldev)
{
int i;
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
if (!ldev->pf[i].has_drop)
continue;
@@ -396,7 +446,7 @@ static void mlx5_lag_drop_rule_setup(struct mlx5_lag *ldev,
if (!ldev->tracker.has_inactive)
return;
- mlx5_infer_tx_disabled(tracker, ldev->ports, disabled_ports, &num_disabled);
+ mlx5_infer_tx_disabled(tracker, ldev, disabled_ports, &num_disabled);
for (i = 0; i < num_disabled; i++) {
disabled_index = disabled_ports[i];
@@ -428,10 +478,15 @@ static int mlx5_cmd_modify_active_port(struct mlx5_core_dev *dev, u8 ports)
static int _mlx5_modify_lag(struct mlx5_lag *ldev, u8 *ports)
{
- struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ int idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
+ struct mlx5_core_dev *dev0;
u8 active_ports;
int ret;
+ if (idx < 0)
+ return -EINVAL;
+
+ dev0 = ldev->pf[idx].dev;
if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &ldev->mode_flags)) {
ret = mlx5_lag_port_sel_modify(ldev, ports);
if (ret ||
@@ -442,7 +497,7 @@ static int _mlx5_modify_lag(struct mlx5_lag *ldev, u8 *ports)
return mlx5_cmd_modify_active_port(dev0, active_ports);
}
- return mlx5_cmd_modify_lag(dev0, ldev->ports, ports);
+ return mlx5_cmd_modify_lag(dev0, ldev, ports);
}
static struct net_device *mlx5_lag_active_backup_get_netdev(struct mlx5_core_dev *dev)
@@ -450,7 +505,7 @@ static struct net_device *mlx5_lag_active_backup_get_netdev(struct mlx5_core_dev
struct net_device *ndev = NULL;
struct mlx5_lag *ldev;
unsigned long flags;
- int i;
+ int i, last_idx;
spin_lock_irqsave(&lag_lock, flags);
ldev = mlx5_lag_dev(dev);
@@ -458,14 +513,17 @@ static struct net_device *mlx5_lag_active_backup_get_netdev(struct mlx5_core_dev
if (!ldev)
goto unlock;
- for (i = 0; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev)
if (ldev->tracker.netdev_state[i].tx_enabled)
ndev = ldev->pf[i].netdev;
- if (!ndev)
- ndev = ldev->pf[ldev->ports - 1].netdev;
+ if (!ndev) {
+ last_idx = mlx5_lag_get_dev_index_by_seq(ldev, ldev->ports - 1);
+ if (last_idx < 0)
+ goto unlock;
+ ndev = ldev->pf[last_idx].netdev;
+ }
- if (ndev)
- dev_hold(ndev);
+ dev_hold(ndev);
unlock:
spin_unlock_irqrestore(&lag_lock, flags);
@@ -476,16 +534,21 @@ unlock:
void mlx5_modify_lag(struct mlx5_lag *ldev,
struct lag_tracker *tracker)
{
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
u8 ports[MLX5_MAX_PORTS * MLX5_LAG_MAX_HASH_BUCKETS] = {};
- struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ struct mlx5_core_dev *dev0;
int idx;
int err;
int i;
int j;
- mlx5_infer_tx_affinity_mapping(tracker, ldev->ports, ldev->buckets, ports);
+ if (first_idx < 0)
+ return;
- for (i = 0; i < ldev->ports; i++) {
+ dev0 = ldev->pf[first_idx].dev;
+ mlx5_infer_tx_affinity_mapping(tracker, ldev, ldev->buckets, ports);
+
+ mlx5_ldev_for_each(i, 0, ldev) {
for (j = 0; j < ldev->buckets; j++) {
idx = i * ldev->buckets + j;
if (ports[idx] == ldev->v2p_map[idx])
@@ -520,10 +583,21 @@ void mlx5_modify_lag(struct mlx5_lag *ldev,
}
}
-static int mlx5_lag_set_port_sel_mode_roce(struct mlx5_lag *ldev,
- unsigned long *flags)
+static int mlx5_lag_set_port_sel_mode(struct mlx5_lag *ldev,
+ enum mlx5_lag_mode mode,
+ unsigned long *flags)
{
- struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
+ struct mlx5_core_dev *dev0;
+
+ if (first_idx < 0)
+ return -EINVAL;
+
+ if (mode == MLX5_LAG_MODE_MPESW ||
+ mode == MLX5_LAG_MODE_MULTIPATH)
+ return 0;
+
+ dev0 = ldev->pf[first_idx].dev;
if (!MLX5_CAP_PORT_SELECTION(dev0, port_select_flow_table)) {
if (ldev->ports > 2)
@@ -539,30 +613,10 @@ static int mlx5_lag_set_port_sel_mode_roce(struct mlx5_lag *ldev,
return 0;
}
-static void mlx5_lag_set_port_sel_mode_offloads(struct mlx5_lag *ldev,
- struct lag_tracker *tracker,
- enum mlx5_lag_mode mode,
- unsigned long *flags)
-{
- struct lag_func *dev0 = &ldev->pf[MLX5_LAG_P1];
-
- if (mode == MLX5_LAG_MODE_MPESW)
- return;
-
- if (MLX5_CAP_PORT_SELECTION(dev0->dev, port_select_flow_table) &&
- tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH) {
- if (ldev->ports > 2)
- ldev->buckets = MLX5_LAG_MAX_HASH_BUCKETS;
- set_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, flags);
- }
-}
-
static int mlx5_lag_set_flags(struct mlx5_lag *ldev, enum mlx5_lag_mode mode,
struct lag_tracker *tracker, bool shared_fdb,
unsigned long *flags)
{
- bool roce_lag = mode == MLX5_LAG_MODE_ROCE;
-
*flags = 0;
if (shared_fdb) {
set_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, flags);
@@ -572,11 +626,7 @@ static int mlx5_lag_set_flags(struct mlx5_lag *ldev, enum mlx5_lag_mode mode,
if (mode == MLX5_LAG_MODE_MPESW)
set_bit(MLX5_LAG_MODE_FLAG_FDB_SEL_MODE_NATIVE, flags);
- if (roce_lag)
- return mlx5_lag_set_port_sel_mode_roce(ldev, flags);
-
- mlx5_lag_set_port_sel_mode_offloads(ldev, tracker, mode, flags);
- return 0;
+ return mlx5_lag_set_port_sel_mode(ldev, mode, flags);
}
char *mlx5_get_str_port_sel_mode(enum mlx5_lag_mode mode, unsigned long flags)
@@ -593,12 +643,18 @@ char *mlx5_get_str_port_sel_mode(enum mlx5_lag_mode mode, unsigned long flags)
static int mlx5_lag_create_single_fdb(struct mlx5_lag *ldev)
{
- struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
- struct mlx5_eswitch *master_esw = dev0->priv.eswitch;
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
+ struct mlx5_eswitch *master_esw;
+ struct mlx5_core_dev *dev0;
+ int i, j;
int err;
- int i;
- for (i = MLX5_LAG_P1 + 1; i < ldev->ports; i++) {
+ if (first_idx < 0)
+ return -EINVAL;
+
+ dev0 = ldev->pf[first_idx].dev;
+ master_esw = dev0->priv.eswitch;
+ mlx5_ldev_for_each(i, first_idx + 1, ldev) {
struct mlx5_eswitch *slave_esw = ldev->pf[i].dev->priv.eswitch;
err = mlx5_eswitch_offloads_single_fdb_add_one(master_esw,
@@ -608,9 +664,9 @@ static int mlx5_lag_create_single_fdb(struct mlx5_lag *ldev)
}
return 0;
err:
- for (; i > MLX5_LAG_P1; i--)
+ mlx5_ldev_for_each_reverse(j, i, first_idx + 1, ldev)
mlx5_eswitch_offloads_single_fdb_del_one(master_esw,
- ldev->pf[i].dev->priv.eswitch);
+ ldev->pf[j].dev->priv.eswitch);
return err;
}
@@ -620,16 +676,21 @@ static int mlx5_create_lag(struct mlx5_lag *ldev,
unsigned long flags)
{
bool shared_fdb = test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &flags);
- struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {};
+ struct mlx5_core_dev *dev0;
int err;
+ if (first_idx < 0)
+ return -EINVAL;
+
+ dev0 = ldev->pf[first_idx].dev;
if (tracker)
mlx5_lag_print_mapping(dev0, ldev, tracker, flags);
mlx5_core_info(dev0, "shared_fdb:%d mode:%s\n",
shared_fdb, mlx5_get_str_port_sel_mode(mode, flags));
- err = mlx5_cmd_create_lag(dev0, ldev->v2p_map, mode, flags);
+ err = mlx5_cmd_create_lag(dev0, ldev, mode, flags);
if (err) {
mlx5_core_err(dev0,
"Failed to create LAG (%d)\n",
@@ -661,17 +722,22 @@ int mlx5_activate_lag(struct mlx5_lag *ldev,
enum mlx5_lag_mode mode,
bool shared_fdb)
{
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
bool roce_lag = mode == MLX5_LAG_MODE_ROCE;
- struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ struct mlx5_core_dev *dev0;
unsigned long flags = 0;
int err;
+ if (first_idx < 0)
+ return -EINVAL;
+
+ dev0 = ldev->pf[first_idx].dev;
err = mlx5_lag_set_flags(ldev, mode, tracker, shared_fdb, &flags);
if (err)
return err;
if (mode != MLX5_LAG_MODE_MPESW) {
- mlx5_infer_tx_affinity_mapping(tracker, ldev->ports, ldev->buckets, ldev->v2p_map);
+ mlx5_infer_tx_affinity_mapping(tracker, ldev, ldev->buckets, ldev->v2p_map);
if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags)) {
err = mlx5_lag_port_sel_create(ldev, tracker->hash_type,
ldev->v2p_map);
@@ -709,20 +775,26 @@ int mlx5_activate_lag(struct mlx5_lag *ldev,
int mlx5_deactivate_lag(struct mlx5_lag *ldev)
{
- struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
- struct mlx5_eswitch *master_esw = dev0->priv.eswitch;
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {};
bool roce_lag = __mlx5_lag_is_roce(ldev);
unsigned long flags = ldev->mode_flags;
+ struct mlx5_eswitch *master_esw;
+ struct mlx5_core_dev *dev0;
int err;
int i;
+ if (first_idx < 0)
+ return -EINVAL;
+
+ dev0 = ldev->pf[first_idx].dev;
+ master_esw = dev0->priv.eswitch;
ldev->mode = MLX5_LAG_MODE_NONE;
ldev->mode_flags = 0;
mlx5_lag_mp_reset(ldev);
if (test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &flags)) {
- for (i = MLX5_LAG_P1 + 1; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, first_idx + 1, ldev)
mlx5_eswitch_offloads_single_fdb_del_one(master_esw,
ldev->pf[i].dev->priv.eswitch);
clear_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &flags);
@@ -754,6 +826,7 @@ int mlx5_deactivate_lag(struct mlx5_lag *ldev)
bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
{
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
#ifdef CONFIG_MLX5_ESWITCH
struct mlx5_core_dev *dev;
u8 mode;
@@ -761,30 +834,29 @@ bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
bool roce_support;
int i;
- for (i = 0; i < ldev->ports; i++)
- if (!ldev->pf[i].dev)
- return false;
+ if (first_idx < 0 || mlx5_lag_num_devs(ldev) != ldev->ports)
+ return false;
#ifdef CONFIG_MLX5_ESWITCH
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
dev = ldev->pf[i].dev;
if (mlx5_eswitch_num_vfs(dev->priv.eswitch) && !is_mdev_switchdev_mode(dev))
return false;
}
- dev = ldev->pf[MLX5_LAG_P1].dev;
+ dev = ldev->pf[first_idx].dev;
mode = mlx5_eswitch_mode(dev);
- for (i = 0; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev)
if (mlx5_eswitch_mode(ldev->pf[i].dev) != mode)
return false;
#else
- for (i = 0; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev)
if (mlx5_sriov_is_enabled(ldev->pf[i].dev))
return false;
#endif
- roce_support = mlx5_get_roce_state(ldev->pf[MLX5_LAG_P1].dev);
- for (i = 1; i < ldev->ports; i++)
+ roce_support = mlx5_get_roce_state(ldev->pf[first_idx].dev);
+ mlx5_ldev_for_each(i, first_idx + 1, ldev)
if (mlx5_get_roce_state(ldev->pf[i].dev) != roce_support)
return false;
@@ -795,10 +867,7 @@ void mlx5_lag_add_devices(struct mlx5_lag *ldev)
{
int i;
- for (i = 0; i < ldev->ports; i++) {
- if (!ldev->pf[i].dev)
- continue;
-
+ mlx5_ldev_for_each(i, 0, ldev) {
if (ldev->pf[i].dev->priv.flags &
MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)
continue;
@@ -812,10 +881,7 @@ void mlx5_lag_remove_devices(struct mlx5_lag *ldev)
{
int i;
- for (i = 0; i < ldev->ports; i++) {
- if (!ldev->pf[i].dev)
- continue;
-
+ mlx5_ldev_for_each(i, 0, ldev) {
if (ldev->pf[i].dev->priv.flags &
MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)
continue;
@@ -828,11 +894,16 @@ void mlx5_lag_remove_devices(struct mlx5_lag *ldev)
void mlx5_disable_lag(struct mlx5_lag *ldev)
{
bool shared_fdb = test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &ldev->mode_flags);
- struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ int idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
+ struct mlx5_core_dev *dev0;
bool roce_lag;
int err;
int i;
+ if (idx < 0)
+ return;
+
+ dev0 = ldev->pf[idx].dev;
roce_lag = __mlx5_lag_is_roce(ldev);
if (shared_fdb) {
@@ -842,7 +913,7 @@ void mlx5_disable_lag(struct mlx5_lag *ldev)
dev0->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
mlx5_rescan_drivers_locked(dev0);
}
- for (i = 1; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, idx + 1, ldev)
mlx5_nic_vport_disable_roce(ldev->pf[i].dev);
}
@@ -854,17 +925,21 @@ void mlx5_disable_lag(struct mlx5_lag *ldev)
mlx5_lag_add_devices(ldev);
if (shared_fdb)
- for (i = 0; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev)
if (!(ldev->pf[i].dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV))
mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
}
-static bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev)
+bool mlx5_lag_shared_fdb_supported(struct mlx5_lag *ldev)
{
+ int idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
struct mlx5_core_dev *dev;
int i;
- for (i = MLX5_LAG_P1 + 1; i < ldev->ports; i++) {
+ if (idx < 0)
+ return false;
+
+ mlx5_ldev_for_each(i, idx + 1, ldev) {
dev = ldev->pf[i].dev;
if (is_mdev_switchdev_mode(dev) &&
mlx5_eswitch_vport_match_metadata_enabled(dev->priv.eswitch) &&
@@ -876,7 +951,7 @@ static bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev)
return false;
}
- dev = ldev->pf[MLX5_LAG_P1].dev;
+ dev = ldev->pf[idx].dev;
if (is_mdev_switchdev_mode(dev) &&
mlx5_eswitch_vport_match_metadata_enabled(dev->priv.eswitch) &&
mlx5_esw_offloads_devcom_is_ready(dev->priv.eswitch) &&
@@ -892,11 +967,11 @@ static bool mlx5_lag_is_roce_lag(struct mlx5_lag *ldev)
bool roce_lag = true;
int i;
- for (i = 0; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev)
roce_lag = roce_lag && !mlx5_sriov_is_enabled(ldev->pf[i].dev);
#ifdef CONFIG_MLX5_ESWITCH
- for (i = 0; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev)
roce_lag = roce_lag && is_mdev_legacy_mode(ldev->pf[i].dev);
#endif
@@ -917,13 +992,18 @@ static bool mlx5_lag_should_disable_lag(struct mlx5_lag *ldev, bool do_bond)
static void mlx5_do_bond(struct mlx5_lag *ldev)
{
- struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ int idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
struct lag_tracker tracker = { };
+ struct mlx5_core_dev *dev0;
struct net_device *ndev;
bool do_bond, roce_lag;
int err;
int i;
+ if (idx < 0)
+ return;
+
+ dev0 = ldev->pf[idx].dev;
if (!mlx5_lag_is_ready(ldev)) {
do_bond = false;
} else {
@@ -937,7 +1017,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
}
if (do_bond && !__mlx5_lag_is_active(ldev)) {
- bool shared_fdb = mlx5_shared_fdb_supported(ldev);
+ bool shared_fdb = mlx5_lag_shared_fdb_supported(ldev);
roce_lag = mlx5_lag_is_roce_lag(ldev);
@@ -951,12 +1031,16 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
if (err) {
if (shared_fdb || roce_lag)
mlx5_lag_add_devices(ldev);
+ if (shared_fdb) {
+ mlx5_ldev_for_each(i, 0, ldev)
+ mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
+ }
return;
} else if (roce_lag) {
dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
mlx5_rescan_drivers_locked(dev0);
- for (i = 1; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, idx + 1, ldev) {
if (mlx5_get_roce_state(ldev->pf[i].dev))
mlx5_nic_vport_enable_roce(ldev->pf[i].dev);
}
@@ -966,7 +1050,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
mlx5_rescan_drivers_locked(dev0);
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
err = mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
if (err)
break;
@@ -977,7 +1061,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
mlx5_rescan_drivers_locked(dev0);
mlx5_deactivate_lag(ldev);
mlx5_lag_add_devices(ldev);
- for (i = 0; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev)
mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
mlx5_core_err(dev0, "Failed to enable lag\n");
return;
@@ -1010,12 +1094,9 @@ struct mlx5_devcom_comp_dev *mlx5_lag_get_devcom_comp(struct mlx5_lag *ldev)
int i;
mutex_lock(&ldev->lock);
- for (i = 0; i < ldev->ports; i++) {
- if (ldev->pf[i].dev) {
- devcom = ldev->pf[i].dev->priv.hca_devcom_comp;
- break;
- }
- }
+ i = mlx5_get_next_ldev_func(ldev, 0);
+ if (i < MLX5_MAX_PORTS)
+ devcom = ldev->pf[i].dev->priv.hca_devcom_comp;
mutex_unlock(&ldev->lock);
return devcom;
}
@@ -1068,7 +1149,7 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
u8 bond_status = 0;
int num_slaves = 0;
int changed = 0;
- int idx;
+ int i, idx = -1;
if (!netif_is_lag_master(upper))
return 0;
@@ -1083,8 +1164,13 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
*/
rcu_read_lock();
for_each_netdev_in_bond_rcu(upper, ndev_tmp) {
- idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev_tmp);
- if (idx >= 0) {
+ mlx5_ldev_for_each(i, 0, ldev) {
+ if (ldev->pf[i].netdev == ndev_tmp) {
+ idx++;
+ break;
+ }
+ }
+ if (i < MLX5_MAX_PORTS) {
slave = bond_slave_get_rcu(ndev_tmp);
if (slave)
has_inactive |= bond_is_slave_inactive(slave);
@@ -1234,15 +1320,12 @@ static int mlx5_lag_netdev_event(struct notifier_block *this,
}
static void mlx5_ldev_add_netdev(struct mlx5_lag *ldev,
- struct mlx5_core_dev *dev,
- struct net_device *netdev)
+ struct mlx5_core_dev *dev,
+ struct net_device *netdev)
{
unsigned int fn = mlx5_get_dev_index(dev);
unsigned long flags;
- if (fn >= ldev->ports)
- return;
-
spin_lock_irqsave(&lag_lock, flags);
ldev->pf[fn].netdev = netdev;
ldev->tracker.netdev_state[fn].link_up = 0;
@@ -1257,7 +1340,7 @@ static void mlx5_ldev_remove_netdev(struct mlx5_lag *ldev,
int i;
spin_lock_irqsave(&lag_lock, flags);
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
if (ldev->pf[i].netdev == netdev) {
ldev->pf[i].netdev = NULL;
break;
@@ -1267,13 +1350,10 @@ static void mlx5_ldev_remove_netdev(struct mlx5_lag *ldev,
}
static void mlx5_ldev_add_mdev(struct mlx5_lag *ldev,
- struct mlx5_core_dev *dev)
+ struct mlx5_core_dev *dev)
{
unsigned int fn = mlx5_get_dev_index(dev);
- if (fn >= ldev->ports)
- return;
-
ldev->pf[fn].dev = dev;
dev->priv.lag = ldev;
}
@@ -1281,16 +1361,13 @@ static void mlx5_ldev_add_mdev(struct mlx5_lag *ldev,
static void mlx5_ldev_remove_mdev(struct mlx5_lag *ldev,
struct mlx5_core_dev *dev)
{
- int i;
+ int fn;
- for (i = 0; i < ldev->ports; i++)
- if (ldev->pf[i].dev == dev)
- break;
-
- if (i == ldev->ports)
+ fn = mlx5_get_dev_index(dev);
+ if (ldev->pf[fn].dev != dev)
return;
- ldev->pf[i].dev = NULL;
+ ldev->pf[fn].dev = NULL;
dev->priv.lag = NULL;
}
@@ -1398,7 +1475,7 @@ void mlx5_lag_add_netdev(struct mlx5_core_dev *dev,
struct net_device *netdev)
{
struct mlx5_lag *ldev;
- int i;
+ int num = 0;
ldev = mlx5_lag_dev(dev);
if (!ldev)
@@ -1406,17 +1483,33 @@ void mlx5_lag_add_netdev(struct mlx5_core_dev *dev,
mutex_lock(&ldev->lock);
mlx5_ldev_add_netdev(ldev, dev, netdev);
-
- for (i = 0; i < ldev->ports; i++)
- if (!ldev->pf[i].netdev)
- break;
-
- if (i >= ldev->ports)
+ num = mlx5_lag_num_netdevs(ldev);
+ if (num >= ldev->ports)
set_bit(MLX5_LAG_FLAG_NDEVS_READY, &ldev->state_flags);
mutex_unlock(&ldev->lock);
mlx5_queue_bond_work(ldev, 0);
}
+int mlx5_get_pre_ldev_func(struct mlx5_lag *ldev, int start_idx, int end_idx)
+{
+ int i;
+
+ for (i = start_idx; i >= end_idx; i--)
+ if (ldev->pf[i].dev)
+ return i;
+ return -1;
+}
+
+int mlx5_get_next_ldev_func(struct mlx5_lag *ldev, int start_idx)
+{
+ int i;
+
+ for (i = start_idx; i < MLX5_MAX_PORTS; i++)
+ if (ldev->pf[i].dev)
+ return i;
+ return MLX5_MAX_PORTS;
+}
+
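/* Illustrative sketch, not part of the patch: lag_walk_devs_sketch() is a
 * hypothetical open-coded equivalent of the mlx5_ldev_for_each() macro,
 * relying on the sentinels used above (MLX5_MAX_PORTS from
 * mlx5_get_next_ldev_func() when no further slot is populated, and -1 from
 * mlx5_get_pre_ldev_func() when walking backwards).
 */
static void lag_walk_devs_sketch(struct mlx5_lag *ldev)
{
	int i;

	for (i = mlx5_get_next_ldev_func(ldev, 0);
	     i < MLX5_MAX_PORTS;
	     i = mlx5_get_next_ldev_func(ldev, i + 1))
		mlx5_core_dbg(ldev->pf[i].dev, "lag slot %d is populated\n", i);
}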
bool mlx5_lag_is_roce(struct mlx5_core_dev *dev)
{
struct mlx5_lag *ldev;
@@ -1467,12 +1560,13 @@ bool mlx5_lag_is_master(struct mlx5_core_dev *dev)
{
struct mlx5_lag *ldev;
unsigned long flags;
- bool res;
+ bool res = false;
+ int idx;
spin_lock_irqsave(&lag_lock, flags);
ldev = mlx5_lag_dev(dev);
- res = ldev && __mlx5_lag_is_active(ldev) &&
- dev == ldev->pf[MLX5_LAG_P1].dev;
+ idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
+ res = ldev && __mlx5_lag_is_active(ldev) && idx >= 0 && dev == ldev->pf[idx].dev;
spin_unlock_irqrestore(&lag_lock, flags);
return res;
@@ -1555,7 +1649,7 @@ u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
if (!(ldev && __mlx5_lag_is_roce(ldev)))
goto unlock;
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
if (ldev->pf[i].netdev == slave) {
port = i;
break;
@@ -1594,13 +1688,13 @@ struct mlx5_core_dev *mlx5_lag_get_next_peer_mdev(struct mlx5_core_dev *dev, int
if (!ldev)
goto unlock;
- if (*i == ldev->ports)
+ if (*i == MLX5_MAX_PORTS)
goto unlock;
- for (idx = *i; idx < ldev->ports; idx++)
+ mlx5_ldev_for_each(idx, *i, ldev)
if (ldev->pf[idx].dev != dev)
break;
- if (idx == ldev->ports) {
+ if (idx == MLX5_MAX_PORTS) {
*i = idx;
goto unlock;
}
@@ -1621,10 +1715,10 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
{
int outlen = MLX5_ST_SZ_BYTES(query_cong_statistics_out);
struct mlx5_core_dev **mdev;
+ int ret = 0, i, j, idx = 0;
struct mlx5_lag *ldev;
unsigned long flags;
int num_ports;
- int ret, i, j;
void *out;
out = kvzalloc(outlen, GFP_KERNEL);
@@ -1643,8 +1737,8 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
ldev = mlx5_lag_dev(dev);
if (ldev && __mlx5_lag_is_active(ldev)) {
num_ports = ldev->ports;
- for (i = 0; i < ldev->ports; i++)
- mdev[i] = ldev->pf[i].dev;
+ mlx5_ldev_for_each(i, 0, ldev)
+ mdev[idx++] = ldev->pf[i].dev;
} else {
num_ports = 1;
mdev[MLX5_LAG_P1] = dev;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
index 50fcb1eee574..c2f256bb2bc2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
@@ -92,6 +92,7 @@ mlx5_lag_is_ready(struct mlx5_lag *ldev)
return test_bit(MLX5_LAG_FLAG_NDEVS_READY, &ldev->state_flags);
}
+bool mlx5_lag_shared_fdb_supported(struct mlx5_lag *ldev);
bool mlx5_lag_check_prereq(struct mlx5_lag *ldev);
void mlx5_modify_lag(struct mlx5_lag *ldev,
struct lag_tracker *tracker);
@@ -103,7 +104,7 @@ int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev,
struct net_device *ndev);
char *mlx5_get_str_port_sel_mode(enum mlx5_lag_mode mode, unsigned long flags);
-void mlx5_infer_tx_enabled(struct lag_tracker *tracker, u8 num_ports,
+void mlx5_infer_tx_enabled(struct lag_tracker *tracker, struct mlx5_lag *ldev,
u8 *ports, int *num_enabled);
void mlx5_ldev_add_debugfs(struct mlx5_core_dev *dev);
@@ -119,9 +120,24 @@ static inline bool mlx5_lag_is_supported(struct mlx5_core_dev *dev)
if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
!MLX5_CAP_GEN(dev, lag_master) ||
MLX5_CAP_GEN(dev, num_lag_ports) < 2 ||
+ mlx5_get_dev_index(dev) >= MLX5_MAX_PORTS ||
MLX5_CAP_GEN(dev, num_lag_ports) > MLX5_MAX_PORTS)
return false;
return true;
}
+#define mlx5_ldev_for_each(i, start_index, ldev) \
+ for (int tmp = start_index; tmp = mlx5_get_next_ldev_func(ldev, tmp), \
+ i = tmp, tmp < MLX5_MAX_PORTS; tmp++)
+
+#define mlx5_ldev_for_each_reverse(i, start_index, end_index, ldev) \
+ for (int tmp = start_index, tmp1 = end_index; \
+ tmp = mlx5_get_pre_ldev_func(ldev, tmp, tmp1), \
+ i = tmp, tmp >= tmp1; tmp--)
+
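/* Illustrative usage, not part of the patch: both iterators assign only
 * populated slot indices to 'i', so callers can index ldev->pf[i] directly.
 * do_something() below is a stand-in for any per-device operation:
 *
 *	int i;
 *
 *	mlx5_ldev_for_each(i, 0, ldev)
 *		do_something(ldev->pf[i].dev);
 *
 * The reverse variant walks from start_index down to end_index and is used by
 * the patch for error unwinding (see mlx5_lag_create_single_fdb()).
 */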
+int mlx5_get_pre_ldev_func(struct mlx5_lag *ldev, int start_idx, int end_idx);
+int mlx5_get_next_ldev_func(struct mlx5_lag *ldev, int start_idx);
+int mlx5_lag_get_dev_index_by_seq(struct mlx5_lag *ldev, int seq);
+int mlx5_lag_num_devs(struct mlx5_lag *ldev);
+int mlx5_lag_num_netdevs(struct mlx5_lag *ldev);
#endif /* __MLX5_LAG_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
index b1aa494c76ba..aee17fcf3b36 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
@@ -17,7 +17,10 @@ static bool __mlx5_lag_is_multipath(struct mlx5_lag *ldev)
#define MLX5_LAG_MULTIPATH_OFFLOADS_SUPPORTED_PORTS 2
static bool mlx5_lag_multipath_check_prereq(struct mlx5_lag *ldev)
{
- if (!mlx5_lag_is_ready(ldev))
+ int idx0 = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
+ int idx1 = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P2);
+
+ if (idx0 < 0 || idx1 < 0 || !mlx5_lag_is_ready(ldev))
return false;
if (__mlx5_lag_is_active(ldev) && !__mlx5_lag_is_multipath(ldev))
@@ -26,8 +29,8 @@ static bool mlx5_lag_multipath_check_prereq(struct mlx5_lag *ldev)
if (ldev->ports > MLX5_LAG_MULTIPATH_OFFLOADS_SUPPORTED_PORTS)
return false;
- return mlx5_esw_multipath_prereq(ldev->pf[MLX5_LAG_P1].dev,
- ldev->pf[MLX5_LAG_P2].dev);
+ return mlx5_esw_multipath_prereq(ldev->pf[idx0].dev,
+ ldev->pf[idx1].dev);
}
bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev)
@@ -50,43 +53,45 @@ bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev)
static void mlx5_lag_set_port_affinity(struct mlx5_lag *ldev,
enum mlx5_lag_port_affinity port)
{
+ int idx0 = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
+ int idx1 = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P2);
struct lag_tracker tracker = {};
- if (!__mlx5_lag_is_multipath(ldev))
+ if (idx0 < 0 || idx1 < 0 || !__mlx5_lag_is_multipath(ldev))
return;
switch (port) {
case MLX5_LAG_NORMAL_AFFINITY:
- tracker.netdev_state[MLX5_LAG_P1].tx_enabled = true;
- tracker.netdev_state[MLX5_LAG_P2].tx_enabled = true;
- tracker.netdev_state[MLX5_LAG_P1].link_up = true;
- tracker.netdev_state[MLX5_LAG_P2].link_up = true;
+ tracker.netdev_state[idx0].tx_enabled = true;
+ tracker.netdev_state[idx1].tx_enabled = true;
+ tracker.netdev_state[idx0].link_up = true;
+ tracker.netdev_state[idx1].link_up = true;
break;
case MLX5_LAG_P1_AFFINITY:
- tracker.netdev_state[MLX5_LAG_P1].tx_enabled = true;
- tracker.netdev_state[MLX5_LAG_P1].link_up = true;
- tracker.netdev_state[MLX5_LAG_P2].tx_enabled = false;
- tracker.netdev_state[MLX5_LAG_P2].link_up = false;
+ tracker.netdev_state[idx0].tx_enabled = true;
+ tracker.netdev_state[idx0].link_up = true;
+ tracker.netdev_state[idx1].tx_enabled = false;
+ tracker.netdev_state[idx1].link_up = false;
break;
case MLX5_LAG_P2_AFFINITY:
- tracker.netdev_state[MLX5_LAG_P1].tx_enabled = false;
- tracker.netdev_state[MLX5_LAG_P1].link_up = false;
- tracker.netdev_state[MLX5_LAG_P2].tx_enabled = true;
- tracker.netdev_state[MLX5_LAG_P2].link_up = true;
+ tracker.netdev_state[idx0].tx_enabled = false;
+ tracker.netdev_state[idx0].link_up = false;
+ tracker.netdev_state[idx1].tx_enabled = true;
+ tracker.netdev_state[idx1].link_up = true;
break;
default:
- mlx5_core_warn(ldev->pf[MLX5_LAG_P1].dev,
+ mlx5_core_warn(ldev->pf[idx0].dev,
"Invalid affinity port %d", port);
return;
}
- if (tracker.netdev_state[MLX5_LAG_P1].tx_enabled)
- mlx5_notifier_call_chain(ldev->pf[MLX5_LAG_P1].dev->priv.events,
+ if (tracker.netdev_state[idx0].tx_enabled)
+ mlx5_notifier_call_chain(ldev->pf[idx0].dev->priv.events,
MLX5_DEV_EVENT_PORT_AFFINITY,
(void *)0);
- if (tracker.netdev_state[MLX5_LAG_P2].tx_enabled)
- mlx5_notifier_call_chain(ldev->pf[MLX5_LAG_P2].dev->priv.events,
+ if (tracker.netdev_state[idx1].tx_enabled)
+ mlx5_notifier_call_chain(ldev->pf[idx1].dev->priv.events,
MLX5_DEV_EVENT_PORT_AFFINITY,
(void *)0);
@@ -150,9 +155,14 @@ mlx5_lag_get_next_fib_dev(struct mlx5_lag *ldev,
static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev, unsigned long event,
struct fib_entry_notifier_info *fen_info)
{
+ int idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
struct net_device *nh_dev0, *nh_dev1;
struct fib_info *fi = fen_info->fi;
struct lag_mp *mp = &ldev->lag_mp;
+ int i, dev_idx = 0;
+
+ if (idx < 0)
+ return;
/* Handle delete event */
if (event == FIB_EVENT_ENTRY_DEL) {
@@ -179,17 +189,19 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev, unsigned long event,
}
if (nh_dev0 == nh_dev1) {
- mlx5_core_warn(ldev->pf[MLX5_LAG_P1].dev,
+ mlx5_core_warn(ldev->pf[idx].dev,
"Multipath offload doesn't support routes with multiple nexthops of the same device");
return;
}
if (!nh_dev1) {
if (__mlx5_lag_is_active(ldev)) {
- int i = mlx5_lag_dev_get_netdev_idx(ldev, nh_dev0);
-
- i++;
- mlx5_lag_set_port_affinity(ldev, i);
+ mlx5_ldev_for_each(i, 0, ldev) {
+ dev_idx++;
+ if (ldev->pf[i].netdev == nh_dev0)
+ break;
+ }
+ mlx5_lag_set_port_affinity(ldev, dev_idx);
mlx5_lag_fib_set(mp, fi, fen_info->dst, fen_info->dst_len);
}
@@ -214,6 +226,7 @@ static void mlx5_lag_fib_nexthop_event(struct mlx5_lag *ldev,
struct fib_info *fi)
{
struct lag_mp *mp = &ldev->lag_mp;
+ int i, dev_idx = 0;
/* Check the nh event is related to the route */
if (!mp->fib.mfi || mp->fib.mfi != fi)
@@ -221,11 +234,15 @@ static void mlx5_lag_fib_nexthop_event(struct mlx5_lag *ldev,
/* nh added/removed */
if (event == FIB_EVENT_NH_DEL) {
- int i = mlx5_lag_dev_get_netdev_idx(ldev, fib_nh->fib_nh_dev);
+ mlx5_ldev_for_each(i, 0, ldev) {
+ if (ldev->pf[i].netdev == fib_nh->fib_nh_dev)
+ break;
+ dev_idx++;
+ }
- if (i >= 0) {
- i = (i + 1) % 2 + 1; /* peer port */
- mlx5_lag_set_port_affinity(ldev, i);
+ if (dev_idx >= 0) {
+ dev_idx = (dev_idx + 1) % 2 + 1; /* peer port */
+ mlx5_lag_set_port_affinity(ldev, dev_idx);
}
} else if (event == FIB_EVENT_NH_ADD &&
fib_info_num_path(fi) == 2) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
index 571ea26edd0c..aad52d3a90e6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
@@ -15,7 +15,7 @@ static void mlx5_mpesw_metadata_cleanup(struct mlx5_lag *ldev)
u32 pf_metadata;
int i;
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
dev = ldev->pf[i].dev;
esw = dev->priv.eswitch;
pf_metadata = ldev->lag_mpesw.pf_metadata[i];
@@ -36,7 +36,7 @@ static int mlx5_mpesw_metadata_set(struct mlx5_lag *ldev)
u32 pf_metadata;
int i, err;
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
dev = ldev->pf[i].dev;
esw = dev->priv.eswitch;
pf_metadata = mlx5_esw_match_metadata_alloc(esw);
@@ -52,7 +52,7 @@ static int mlx5_mpesw_metadata_set(struct mlx5_lag *ldev)
goto err_metadata;
}
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
dev = ldev->pf[i].dev;
mlx5_notifier_call_chain(dev->priv.events, MLX5_DEV_EVENT_MULTIPORT_ESW,
(void *)0);
@@ -65,23 +65,22 @@ err_metadata:
return err;
}
-#define MLX5_LAG_MPESW_OFFLOADS_SUPPORTED_PORTS 4
static int enable_mpesw(struct mlx5_lag *ldev)
{
- struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ int idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
+ struct mlx5_core_dev *dev0;
int err;
int i;
- if (ldev->mode != MLX5_LAG_MODE_NONE)
+ if (idx < 0 || ldev->mode != MLX5_LAG_MODE_NONE)
return -EINVAL;
- if (ldev->ports > MLX5_LAG_MPESW_OFFLOADS_SUPPORTED_PORTS)
- return -EOPNOTSUPP;
-
+ dev0 = ldev->pf[idx].dev;
if (mlx5_eswitch_mode(dev0) != MLX5_ESWITCH_OFFLOADS ||
!MLX5_CAP_PORT_SELECTION(dev0, port_select_flow_table) ||
!MLX5_CAP_GEN(dev0, create_lag_when_not_master_up) ||
- !mlx5_lag_check_prereq(ldev))
+ !mlx5_lag_check_prereq(ldev) ||
+ !mlx5_lag_shared_fdb_supported(ldev))
return -EOPNOTSUPP;
err = mlx5_mpesw_metadata_set(ldev);
@@ -98,7 +97,7 @@ static int enable_mpesw(struct mlx5_lag *ldev)
dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
mlx5_rescan_drivers_locked(dev0);
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
err = mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
if (err)
goto err_rescan_drivers;
@@ -112,7 +111,7 @@ err_rescan_drivers:
mlx5_deactivate_lag(ldev);
err_add_devices:
mlx5_lag_add_devices(ldev);
- for (i = 0; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev)
mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
mlx5_mpesw_metadata_cleanup(ldev);
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
index ab2717012b79..d832a12ffec0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
@@ -39,15 +39,18 @@ static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev,
struct mlx5_lag_definer *lag_definer,
u8 *ports)
{
- struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_destination dest = {};
MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_namespace *ns;
- int err, i;
- int idx;
- int j;
+ struct mlx5_core_dev *dev;
+ int err, i, j, k, idx;
+ if (first_idx < 0)
+ return -EINVAL;
+
+ dev = ldev->pf[first_idx].dev;
ft_attr.max_fte = ldev->ports * ldev->buckets;
ft_attr.level = MLX5_LAG_FT_LEVEL_DEFINER;
@@ -74,7 +77,7 @@ static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev,
dest.type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;
dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
flow_act.flags |= FLOW_ACT_NO_APPEND;
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
for (j = 0; j < ldev->buckets; j++) {
u8 affinity;
@@ -88,13 +91,13 @@ static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev,
&dest, 1);
if (IS_ERR(lag_definer->rules[idx])) {
err = PTR_ERR(lag_definer->rules[idx]);
- do {
+ mlx5_ldev_for_each_reverse(k, i, 0, ldev) {
while (j--) {
- idx = i * ldev->buckets + j;
+ idx = k * ldev->buckets + j;
mlx5_del_flow_rules(lag_definer->rules[idx]);
}
j = ldev->buckets;
- } while (i--);
+ }
goto destroy_fg;
}
}
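/* Note (illustrative, not part of the patch): the unwind above walks every
 * populated slot k from the failing slot i back to slot 0.  For the failing
 * slot, j still holds the count of rules that were actually created, so only
 * those are deleted; j is then reset to ldev->buckets so each earlier slot
 * deletes all of its bucket rules.
 */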
@@ -295,11 +298,16 @@ static struct mlx5_lag_definer *
mlx5_lag_create_definer(struct mlx5_lag *ldev, enum netdev_lag_hash hash,
enum mlx5_traffic_types tt, bool tunnel, u8 *ports)
{
- struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
struct mlx5_lag_definer *lag_definer;
+ struct mlx5_core_dev *dev;
u32 *match_definer_mask;
int format_id, err;
+ if (first_idx < 0)
+ return ERR_PTR(-EINVAL);
+
+ dev = ldev->pf[first_idx].dev;
lag_definer = kzalloc(sizeof(*lag_definer), GFP_KERNEL);
if (!lag_definer)
return ERR_PTR(-ENOMEM);
@@ -341,12 +349,15 @@ free_lag_definer:
static void mlx5_lag_destroy_definer(struct mlx5_lag *ldev,
struct mlx5_lag_definer *lag_definer)
{
- struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
- int idx;
- int i;
- int j;
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
+ struct mlx5_core_dev *dev;
+ int idx, i, j;
- for (i = 0; i < ldev->ports; i++) {
+ if (first_idx < 0)
+ return;
+
+ dev = ldev->pf[first_idx].dev;
+ mlx5_ldev_for_each(i, first_idx, ldev) {
for (j = 0; j < ldev->buckets; j++) {
idx = i * ldev->buckets + j;
mlx5_del_flow_rules(lag_definer->rules[idx]);
@@ -501,10 +512,15 @@ static void mlx5_lag_set_outer_ttc_params(struct mlx5_lag *ldev,
static int mlx5_lag_create_ttc_table(struct mlx5_lag *ldev)
{
- struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
struct ttc_params ttc_params = {};
+ struct mlx5_core_dev *dev;
+
+ if (first_idx < 0)
+ return -EINVAL;
+ dev = ldev->pf[first_idx].dev;
mlx5_lag_set_outer_ttc_params(ldev, &ttc_params);
port_sel->outer.ttc = mlx5_create_ttc_table(dev, &ttc_params);
return PTR_ERR_OR_ZERO(port_sel->outer.ttc);
@@ -512,10 +528,15 @@ static int mlx5_lag_create_ttc_table(struct mlx5_lag *ldev)
static int mlx5_lag_create_inner_ttc_table(struct mlx5_lag *ldev)
{
- struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
struct ttc_params ttc_params = {};
+ struct mlx5_core_dev *dev;
+ if (first_idx < 0)
+ return -EINVAL;
+
+ dev = ldev->pf[first_idx].dev;
mlx5_lag_set_inner_ttc_params(ldev, &ttc_params);
port_sel->inner.ttc = mlx5_create_inner_ttc_table(dev, &ttc_params);
return PTR_ERR_OR_ZERO(port_sel->inner.ttc);
@@ -530,7 +551,7 @@ int mlx5_lag_port_sel_create(struct mlx5_lag *ldev,
set_tt_map(port_sel, hash_type);
err = mlx5_lag_create_definers(ldev, hash_type, ports);
if (err)
- return err;
+ goto clear_port_sel;
if (port_sel->tunnel) {
err = mlx5_lag_create_inner_ttc_table(ldev);
@@ -549,6 +570,8 @@ destroy_inner:
mlx5_destroy_ttc_table(port_sel->inner.ttc);
destroy_definers:
mlx5_lag_destroy_definers(ldev);
+clear_port_sel:
+ memset(port_sel, 0, sizeof(*port_sel));
return err;
}
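/* Note (illustrative, not part of the patch): zeroing port_sel on the failure
 * path presumably keeps the structure consistent, so that a later destroy or
 * a retried create does not walk definer/TTC pointers left over from this
 * partially built attempt.
 */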
@@ -565,7 +588,7 @@ static int __mlx5_lag_modify_definers_destinations(struct mlx5_lag *ldev,
dest.type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;
dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
for (j = 0; j < ldev->buckets; j++) {
idx = i * ldev->buckets + j;
if (ldev->v2p_map[idx] == ports[idx])
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index 4822d01123b4..cec18efadc73 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -43,6 +43,8 @@
#include <linux/cpufeature.h>
#endif /* CONFIG_X86 */
+#define MLX5_RT_CLOCK_IDENTITY_SIZE MLX5_FLD_SZ_BYTES(mrtcq_reg, rt_clock_identity)
+
enum {
MLX5_PIN_MODE_IN = 0x0,
MLX5_PIN_MODE_OUT = 0x1,
@@ -77,6 +79,56 @@ enum {
MLX5_MTUTC_OPERATION_ADJUST_TIME_EXTENDED_MAX = 200000,
};
+struct mlx5_clock_dev_state {
+ struct mlx5_core_dev *mdev;
+ struct mlx5_devcom_comp_dev *compdev;
+ struct mlx5_nb pps_nb;
+ struct work_struct out_work;
+};
+
+struct mlx5_clock_priv {
+ struct mlx5_clock clock;
+ struct mlx5_core_dev *mdev;
+ struct mutex lock; /* protect mdev and used in PTP callbacks */
+ struct mlx5_core_dev *event_mdev;
+};
+
+static struct mlx5_clock_priv *clock_priv(struct mlx5_clock *clock)
+{
+ return container_of(clock, struct mlx5_clock_priv, clock);
+}
+
+static void mlx5_clock_lockdep_assert(struct mlx5_clock *clock)
+{
+ if (!clock->shared)
+ return;
+
+ lockdep_assert(lockdep_is_held(&clock_priv(clock)->lock));
+}
+
+static struct mlx5_core_dev *mlx5_clock_mdev_get(struct mlx5_clock *clock)
+{
+ mlx5_clock_lockdep_assert(clock);
+
+ return clock_priv(clock)->mdev;
+}
+
+static void mlx5_clock_lock(struct mlx5_clock *clock)
+{
+ if (!clock->shared)
+ return;
+
+ mutex_lock(&clock_priv(clock)->lock);
+}
+
+static void mlx5_clock_unlock(struct mlx5_clock *clock)
+{
+ if (!clock->shared)
+ return;
+
+ mutex_unlock(&clock_priv(clock)->lock);
+}
+
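/* Illustrative sketch, not part of the patch: mlx5_clock_cb_sketch() is a
 * hypothetical callback showing the pattern the PTP ops below follow.  With a
 * shared clock the owning mdev is looked up under clock_priv()->lock, on the
 * assumption that the owner can change while the PTP device stays registered.
 */
static int mlx5_clock_cb_sketch(struct ptp_clock_info *ptp)
{
	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_core_dev *mdev;
	int err = 0;

	mlx5_clock_lock(clock);
	mdev = mlx5_clock_mdev_get(clock);
	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
		err = -EIO;	/* hardware access through mdev would go here */
	mlx5_clock_unlock(clock);

	return err;
}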
static bool mlx5_real_time_mode(struct mlx5_core_dev *mdev)
{
return (mlx5_is_real_time_rq(mdev) || mlx5_is_real_time_sq(mdev));
@@ -94,6 +146,22 @@ static bool mlx5_modify_mtutc_allowed(struct mlx5_core_dev *mdev)
return MLX5_CAP_MCAM_FEATURE(mdev, ptpcyc2realtime_modify);
}
+static int mlx5_clock_identity_get(struct mlx5_core_dev *mdev,
+ u8 identify[MLX5_RT_CLOCK_IDENTITY_SIZE])
+{
+ u32 out[MLX5_ST_SZ_DW(mrtcq_reg)] = {};
+ u32 in[MLX5_ST_SZ_DW(mrtcq_reg)] = {};
+ int err;
+
+ err = mlx5_core_access_reg(mdev, in, sizeof(in),
+ out, sizeof(out), MLX5_REG_MRTCQ, 0, 0);
+ if (!err)
+ memcpy(identify, MLX5_ADDR_OF(mrtcq_reg, out, rt_clock_identity),
+ MLX5_RT_CLOCK_IDENTITY_SIZE);
+
+ return err;
+}
+
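/* Illustrative sketch, not part of the patch: mlx5_same_rt_clock_sketch() is
 * a hypothetical helper showing how the queried identity can serve as an
 * opaque key, e.g. to check whether two functions report the same real-time
 * clock.
 */
static bool mlx5_same_rt_clock_sketch(struct mlx5_core_dev *a,
				      struct mlx5_core_dev *b)
{
	u8 id_a[MLX5_RT_CLOCK_IDENTITY_SIZE];
	u8 id_b[MLX5_RT_CLOCK_IDENTITY_SIZE];

	if (mlx5_clock_identity_get(a, id_a) || mlx5_clock_identity_get(b, id_b))
		return false;

	return !memcmp(id_a, id_b, sizeof(id_a));
}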
static u32 mlx5_ptp_shift_constant(u32 dev_freq_khz)
{
/* Optimal shift constant leads to corrections above just 1 scaled ppm.
@@ -119,21 +187,30 @@ static u32 mlx5_ptp_shift_constant(u32 dev_freq_khz)
ilog2((U32_MAX / NSEC_PER_MSEC) * dev_freq_khz));
}
+static s32 mlx5_clock_getmaxphase(struct mlx5_core_dev *mdev)
+{
+ return MLX5_CAP_MCAM_FEATURE(mdev, mtutc_time_adjustment_extended_range) ?
+ MLX5_MTUTC_OPERATION_ADJUST_TIME_EXTENDED_MAX :
+ MLX5_MTUTC_OPERATION_ADJUST_TIME_MAX;
+}
+
static s32 mlx5_ptp_getmaxphase(struct ptp_clock_info *ptp)
{
struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
struct mlx5_core_dev *mdev;
+ s32 ret;
- mdev = container_of(clock, struct mlx5_core_dev, clock);
+ mlx5_clock_lock(clock);
+ mdev = mlx5_clock_mdev_get(clock);
+ ret = mlx5_clock_getmaxphase(mdev);
+ mlx5_clock_unlock(clock);
- return MLX5_CAP_MCAM_FEATURE(mdev, mtutc_time_adjustment_extended_range) ?
- MLX5_MTUTC_OPERATION_ADJUST_TIME_EXTENDED_MAX :
- MLX5_MTUTC_OPERATION_ADJUST_TIME_MAX;
+ return ret;
}
static bool mlx5_is_mtutc_time_adj_cap(struct mlx5_core_dev *mdev, s64 delta)
{
- s64 max = mlx5_ptp_getmaxphase(&mdev->clock.ptp_info);
+ s64 max = mlx5_clock_getmaxphase(mdev);
if (delta < -max || delta > max)
return false;
@@ -209,7 +286,7 @@ static int mlx5_mtctr_syncdevicetime(ktime_t *device_time,
if (real_time_mode)
*device_time = ns_to_ktime(REAL_TIME_TO_NS(device >> 32, device & U32_MAX));
else
- *device_time = mlx5_timecounter_cyc2time(&mdev->clock, device);
+ *device_time = mlx5_timecounter_cyc2time(mdev->clock, device);
return 0;
}
@@ -220,16 +297,23 @@ static int mlx5_ptp_getcrosststamp(struct ptp_clock_info *ptp,
struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
struct system_time_snapshot history_begin = {0};
struct mlx5_core_dev *mdev;
+ int err;
- mdev = container_of(clock, struct mlx5_core_dev, clock);
+ mlx5_clock_lock(clock);
+ mdev = mlx5_clock_mdev_get(clock);
- if (!mlx5_is_ptm_source_time_available(mdev))
- return -EBUSY;
+ if (!mlx5_is_ptm_source_time_available(mdev)) {
+ err = -EBUSY;
+ goto unlock;
+ }
ktime_get_snapshot(&history_begin);
- return get_device_system_crosststamp(mlx5_mtctr_syncdevicetime, mdev,
- &history_begin, cts);
+ err = get_device_system_crosststamp(mlx5_mtctr_syncdevicetime, mdev,
+ &history_begin, cts);
+unlock:
+ mlx5_clock_unlock(clock);
+ return err;
}
#endif /* CONFIG_X86 */
@@ -263,8 +347,7 @@ static u64 read_internal_timer(const struct cyclecounter *cc)
{
struct mlx5_timer *timer = container_of(cc, struct mlx5_timer, cycles);
struct mlx5_clock *clock = container_of(timer, struct mlx5_clock, timer);
- struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
- clock);
+ struct mlx5_core_dev *mdev = mlx5_clock_mdev_get(clock);
return mlx5_read_time(mdev, NULL, false) & cc->mask;
}
@@ -272,7 +355,7 @@ static u64 read_internal_timer(const struct cyclecounter *cc)
static void mlx5_update_clock_info_page(struct mlx5_core_dev *mdev)
{
struct mlx5_ib_clock_info *clock_info = mdev->clock_info;
- struct mlx5_clock *clock = &mdev->clock;
+ struct mlx5_clock *clock = mdev->clock;
struct mlx5_timer *timer;
u32 sign;
@@ -295,12 +378,10 @@ static void mlx5_update_clock_info_page(struct mlx5_core_dev *mdev)
static void mlx5_pps_out(struct work_struct *work)
{
- struct mlx5_pps *pps_info = container_of(work, struct mlx5_pps,
- out_work);
- struct mlx5_clock *clock = container_of(pps_info, struct mlx5_clock,
- pps_info);
- struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
- clock);
+ struct mlx5_clock_dev_state *clock_state = container_of(work, struct mlx5_clock_dev_state,
+ out_work);
+ struct mlx5_core_dev *mdev = clock_state->mdev;
+ struct mlx5_clock *clock = mdev->clock;
u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
unsigned long flags;
int i;
@@ -322,17 +403,17 @@ static void mlx5_pps_out(struct work_struct *work)
}
}
-static void mlx5_timestamp_overflow(struct work_struct *work)
+static long mlx5_timestamp_overflow(struct ptp_clock_info *ptp_info)
{
- struct delayed_work *dwork = to_delayed_work(work);
struct mlx5_core_dev *mdev;
struct mlx5_timer *timer;
struct mlx5_clock *clock;
unsigned long flags;
- timer = container_of(dwork, struct mlx5_timer, overflow_work);
- clock = container_of(timer, struct mlx5_clock, timer);
- mdev = container_of(clock, struct mlx5_core_dev, clock);
+ clock = container_of(ptp_info, struct mlx5_clock, ptp_info);
+ mlx5_clock_lock(clock);
+ mdev = mlx5_clock_mdev_get(clock);
+ timer = &clock->timer;
if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
goto out;
@@ -343,7 +424,8 @@ static void mlx5_timestamp_overflow(struct work_struct *work)
write_sequnlock_irqrestore(&clock->lock, flags);
out:
- schedule_delayed_work(&timer->overflow_work, timer->overflow_period);
+ mlx5_clock_unlock(clock);
+ return timer->overflow_period;
}
static int mlx5_ptp_settime_real_time(struct mlx5_core_dev *mdev,
@@ -362,15 +444,12 @@ static int mlx5_ptp_settime_real_time(struct mlx5_core_dev *mdev,
return mlx5_set_mtutc(mdev, in, sizeof(in));
}
-static int mlx5_ptp_settime(struct ptp_clock_info *ptp, const struct timespec64 *ts)
+static int mlx5_clock_settime(struct mlx5_core_dev *mdev, struct mlx5_clock *clock,
+ const struct timespec64 *ts)
{
- struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
struct mlx5_timer *timer = &clock->timer;
- struct mlx5_core_dev *mdev;
unsigned long flags;
- mdev = container_of(clock, struct mlx5_core_dev, clock);
-
if (mlx5_modify_mtutc_allowed(mdev)) {
int err = mlx5_ptp_settime_real_time(mdev, ts);
@@ -386,6 +465,20 @@ static int mlx5_ptp_settime(struct ptp_clock_info *ptp, const struct timespec64
return 0;
}
+static int mlx5_ptp_settime(struct ptp_clock_info *ptp, const struct timespec64 *ts)
+{
+ struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
+ struct mlx5_core_dev *mdev;
+ int err;
+
+ mlx5_clock_lock(clock);
+ mdev = mlx5_clock_mdev_get(clock);
+ err = mlx5_clock_settime(mdev, clock, ts);
+ mlx5_clock_unlock(clock);
+
+ return err;
+}
+
static
struct timespec64 mlx5_ptp_gettimex_real_time(struct mlx5_core_dev *mdev,
struct ptp_system_timestamp *sts)
@@ -405,7 +498,8 @@ static int mlx5_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
struct mlx5_core_dev *mdev;
u64 cycles, ns;
- mdev = container_of(clock, struct mlx5_core_dev, clock);
+ mlx5_clock_lock(clock);
+ mdev = mlx5_clock_mdev_get(clock);
if (mlx5_real_time_mode(mdev)) {
*ts = mlx5_ptp_gettimex_real_time(mdev, sts);
goto out;
@@ -415,6 +509,7 @@ static int mlx5_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
ns = mlx5_timecounter_cyc2time(clock, cycles);
*ts = ns_to_timespec64(ns);
out:
+ mlx5_clock_unlock(clock);
return 0;
}
@@ -445,14 +540,16 @@ static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
struct mlx5_timer *timer = &clock->timer;
struct mlx5_core_dev *mdev;
unsigned long flags;
+ int err = 0;
- mdev = container_of(clock, struct mlx5_core_dev, clock);
+ mlx5_clock_lock(clock);
+ mdev = mlx5_clock_mdev_get(clock);
if (mlx5_modify_mtutc_allowed(mdev)) {
- int err = mlx5_ptp_adjtime_real_time(mdev, delta);
+ err = mlx5_ptp_adjtime_real_time(mdev, delta);
if (err)
- return err;
+ goto unlock;
}
write_seqlock_irqsave(&clock->lock, flags);
@@ -460,17 +557,23 @@ static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
mlx5_update_clock_info_page(mdev);
write_sequnlock_irqrestore(&clock->lock, flags);
- return 0;
+unlock:
+ mlx5_clock_unlock(clock);
+ return err;
}
static int mlx5_ptp_adjphase(struct ptp_clock_info *ptp, s32 delta)
{
struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
struct mlx5_core_dev *mdev;
+ int err;
- mdev = container_of(clock, struct mlx5_core_dev, clock);
+ mlx5_clock_lock(clock);
+ mdev = mlx5_clock_mdev_get(clock);
+ err = mlx5_ptp_adjtime_real_time(mdev, delta);
+ mlx5_clock_unlock(clock);
- return mlx5_ptp_adjtime_real_time(mdev, delta);
+ return err;
}
static int mlx5_ptp_freq_adj_real_time(struct mlx5_core_dev *mdev, long scaled_ppm)
@@ -499,15 +602,17 @@ static int mlx5_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
struct mlx5_timer *timer = &clock->timer;
struct mlx5_core_dev *mdev;
unsigned long flags;
+ int err = 0;
u32 mult;
- mdev = container_of(clock, struct mlx5_core_dev, clock);
+ mlx5_clock_lock(clock);
+ mdev = mlx5_clock_mdev_get(clock);
if (mlx5_modify_mtutc_allowed(mdev)) {
- int err = mlx5_ptp_freq_adj_real_time(mdev, scaled_ppm);
+ err = mlx5_ptp_freq_adj_real_time(mdev, scaled_ppm);
if (err)
- return err;
+ goto unlock;
}
mult = (u32)adjust_by_scaled_ppm(timer->nominal_c_mult, scaled_ppm);
@@ -517,8 +622,11 @@ static int mlx5_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
timer->cycles.mult = mult;
mlx5_update_clock_info_page(mdev);
write_sequnlock_irqrestore(&clock->lock, flags);
+ ptp_schedule_worker(clock->ptp, timer->overflow_period);
- return 0;
+unlock:
+ mlx5_clock_unlock(clock);
+ return err;
}
static int mlx5_extts_configure(struct ptp_clock_info *ptp,
@@ -527,25 +635,14 @@ static int mlx5_extts_configure(struct ptp_clock_info *ptp,
{
struct mlx5_clock *clock =
container_of(ptp, struct mlx5_clock, ptp_info);
- struct mlx5_core_dev *mdev =
- container_of(clock, struct mlx5_core_dev, clock);
u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
+ struct mlx5_core_dev *mdev;
u32 field_select = 0;
u8 pin_mode = 0;
u8 pattern = 0;
int pin = -1;
int err = 0;
- if (!MLX5_PPS_CAP(mdev))
- return -EOPNOTSUPP;
-
- /* Reject requests with unsupported flags */
- if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
- PTP_RISING_EDGE |
- PTP_FALLING_EDGE |
- PTP_STRICT_FLAGS))
- return -EOPNOTSUPP;
-
/* Reject requests to enable time stamping on both edges. */
if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
(rq->extts.flags & PTP_ENABLE_FEATURE) &&
@@ -569,6 +666,14 @@ static int mlx5_extts_configure(struct ptp_clock_info *ptp,
field_select = MLX5_MTPPS_FS_ENABLE;
}
+ mlx5_clock_lock(clock);
+ mdev = mlx5_clock_mdev_get(clock);
+
+ if (!MLX5_PPS_CAP(mdev)) {
+ err = -EOPNOTSUPP;
+ goto unlock;
+ }
+
MLX5_SET(mtpps_reg, in, pin, pin);
MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
MLX5_SET(mtpps_reg, in, pattern, pattern);
@@ -577,15 +682,23 @@ static int mlx5_extts_configure(struct ptp_clock_info *ptp,
err = mlx5_set_mtpps(mdev, in, sizeof(in));
if (err)
- return err;
+ goto unlock;
+
+ err = mlx5_set_mtppse(mdev, pin, 0, MLX5_EVENT_MODE_REPETETIVE & on);
+ if (err)
+ goto unlock;
+
+ clock->pps_info.pin_armed[pin] = on;
+ clock_priv(clock)->event_mdev = mdev;
- return mlx5_set_mtppse(mdev, pin, 0,
- MLX5_EVENT_MODE_REPETETIVE & on);
+unlock:
+ mlx5_clock_unlock(clock);
+ return err;
}
static u64 find_target_cycles(struct mlx5_core_dev *mdev, s64 target_ns)
{
- struct mlx5_clock *clock = &mdev->clock;
+ struct mlx5_clock *clock = mdev->clock;
u64 cycles_now, cycles_delta;
u64 nsec_now, nsec_delta;
struct mlx5_timer *timer;
@@ -644,7 +757,7 @@ static int mlx5_perout_conf_out_pulse_duration(struct mlx5_core_dev *mdev,
struct ptp_clock_request *rq,
u32 *out_pulse_duration_ns)
{
- struct mlx5_pps *pps_info = &mdev->clock.pps_info;
+ struct mlx5_pps *pps_info = &mdev->clock->pps_info;
u32 out_pulse_duration;
struct timespec64 ts;
@@ -677,7 +790,7 @@ static int perout_conf_npps_real_time(struct mlx5_core_dev *mdev, struct ptp_clo
u32 *field_select, u32 *out_pulse_duration_ns,
u64 *period, u64 *time_stamp)
{
- struct mlx5_pps *pps_info = &mdev->clock.pps_info;
+ struct mlx5_pps *pps_info = &mdev->clock->pps_info;
struct ptp_clock_time *time = &rq->perout.start;
struct timespec64 ts;
@@ -700,38 +813,24 @@ static int perout_conf_npps_real_time(struct mlx5_core_dev *mdev, struct ptp_clo
return 0;
}
-static bool mlx5_perout_verify_flags(struct mlx5_core_dev *mdev, unsigned int flags)
-{
- return ((!mlx5_npps_real_time_supported(mdev) && flags) ||
- (mlx5_npps_real_time_supported(mdev) && flags & ~PTP_PEROUT_DUTY_CYCLE));
-}
-
static int mlx5_perout_configure(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq,
int on)
{
struct mlx5_clock *clock =
container_of(ptp, struct mlx5_clock, ptp_info);
- struct mlx5_core_dev *mdev =
- container_of(clock, struct mlx5_core_dev, clock);
- bool rt_mode = mlx5_real_time_mode(mdev);
u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
u32 out_pulse_duration_ns = 0;
+ struct mlx5_core_dev *mdev;
u32 field_select = 0;
u64 npps_period = 0;
u64 time_stamp = 0;
u8 pin_mode = 0;
u8 pattern = 0;
+ bool rt_mode;
int pin = -1;
int err = 0;
- if (!MLX5_PPS_CAP(mdev))
- return -EOPNOTSUPP;
-
- /* Reject requests with unsupported flags */
- if (mlx5_perout_verify_flags(mdev, rq->perout.flags))
- return -EOPNOTSUPP;
-
if (rq->perout.index >= clock->ptp_info.n_pins)
return -EINVAL;
@@ -740,14 +839,23 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
if (pin < 0)
return -EBUSY;
- if (on) {
- bool rt_mode = mlx5_real_time_mode(mdev);
+ mlx5_clock_lock(clock);
+ mdev = mlx5_clock_mdev_get(clock);
+ rt_mode = mlx5_real_time_mode(mdev);
+ if (!MLX5_PPS_CAP(mdev)) {
+ err = -EOPNOTSUPP;
+ goto unlock;
+ }
+
+ if (on) {
pin_mode = MLX5_PIN_MODE_OUT;
pattern = MLX5_OUT_PATTERN_PERIODIC;
- if (rt_mode && rq->perout.start.sec > U32_MAX)
- return -EINVAL;
+ if (rt_mode && rq->perout.start.sec > U32_MAX) {
+ err = -EINVAL;
+ goto unlock;
+ }
field_select |= MLX5_MTPPS_FS_PIN_MODE |
MLX5_MTPPS_FS_PATTERN |
@@ -760,7 +868,7 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
else
err = perout_conf_1pps(mdev, rq, &time_stamp, rt_mode);
if (err)
- return err;
+ goto unlock;
}
MLX5_SET(mtpps_reg, in, pin, pin);
@@ -773,13 +881,16 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
MLX5_SET(mtpps_reg, in, out_pulse_duration_ns, out_pulse_duration_ns);
err = mlx5_set_mtpps(mdev, in, sizeof(in));
if (err)
- return err;
+ goto unlock;
if (rt_mode)
- return 0;
+ goto unlock;
- return mlx5_set_mtppse(mdev, pin, 0,
- MLX5_EVENT_MODE_REPETETIVE & on);
+ err = mlx5_set_mtppse(mdev, pin, 0, MLX5_EVENT_MODE_REPETETIVE & on);
+
+unlock:
+ mlx5_clock_unlock(clock);
+ return err;
}
static int mlx5_pps_configure(struct ptp_clock_info *ptp,
@@ -852,6 +963,7 @@ static const struct ptp_clock_info mlx5_ptp_clock_info = {
.settime64 = mlx5_ptp_settime,
.enable = NULL,
.verify = NULL,
+ .do_aux_work = mlx5_timestamp_overflow,
};
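/* Note (illustrative, not part of the patch): with .do_aux_work set, the PTP
 * core's kthread drives mlx5_timestamp_overflow(); its return value is the
 * delay in jiffies until the next run, so returning timer->overflow_period
 * keeps it periodic, and the ptp_schedule_worker() calls added elsewhere in
 * this patch start it (delay 0) or re-arm it after a frequency change.
 */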
static int mlx5_query_mtpps_pin_mode(struct mlx5_core_dev *mdev, u8 pin,
@@ -865,10 +977,8 @@ static int mlx5_query_mtpps_pin_mode(struct mlx5_core_dev *mdev, u8 pin,
mtpps_size, MLX5_REG_MTPPS, 0, 0);
}
-static int mlx5_get_pps_pin_mode(struct mlx5_clock *clock, u8 pin)
+static int mlx5_get_pps_pin_mode(struct mlx5_core_dev *mdev, u8 pin)
{
- struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev, clock);
-
u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {};
u8 mode;
int err;
@@ -887,8 +997,9 @@ static int mlx5_get_pps_pin_mode(struct mlx5_clock *clock, u8 pin)
return PTP_PF_NONE;
}
-static void mlx5_init_pin_config(struct mlx5_clock *clock)
+static void mlx5_init_pin_config(struct mlx5_core_dev *mdev)
{
+ struct mlx5_clock *clock = mdev->clock;
int i;
if (!clock->ptp_info.n_pins)
@@ -904,20 +1015,27 @@ static void mlx5_init_pin_config(struct mlx5_clock *clock)
clock->ptp_info.verify = mlx5_ptp_verify;
clock->ptp_info.pps = 1;
+ clock->ptp_info.supported_extts_flags = PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS;
+
+ if (mlx5_npps_real_time_supported(mdev))
+ clock->ptp_info.supported_perout_flags = PTP_PEROUT_DUTY_CYCLE;
+
for (i = 0; i < clock->ptp_info.n_pins; i++) {
snprintf(clock->ptp_info.pin_config[i].name,
sizeof(clock->ptp_info.pin_config[i].name),
"mlx5_pps%d", i);
clock->ptp_info.pin_config[i].index = i;
- clock->ptp_info.pin_config[i].func = mlx5_get_pps_pin_mode(clock, i);
+ clock->ptp_info.pin_config[i].func = mlx5_get_pps_pin_mode(mdev, i);
clock->ptp_info.pin_config[i].chan = 0;
}
}
static void mlx5_get_pps_caps(struct mlx5_core_dev *mdev)
{
- struct mlx5_clock *clock = &mdev->clock;
u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
+ struct mlx5_clock *clock = mdev->clock;
mlx5_query_mtpps(mdev, out, sizeof(out));
@@ -967,16 +1085,16 @@ static u64 perout_conf_next_event_timer(struct mlx5_core_dev *mdev,
static int mlx5_pps_event(struct notifier_block *nb,
unsigned long type, void *data)
{
- struct mlx5_clock *clock = mlx5_nb_cof(nb, struct mlx5_clock, pps_nb);
+ struct mlx5_clock_dev_state *clock_state = mlx5_nb_cof(nb, struct mlx5_clock_dev_state,
+ pps_nb);
+ struct mlx5_core_dev *mdev = clock_state->mdev;
+ struct mlx5_clock *clock = mdev->clock;
struct ptp_clock_event ptp_event;
struct mlx5_eqe *eqe = data;
int pin = eqe->data.pps.pin;
- struct mlx5_core_dev *mdev;
unsigned long flags;
u64 ns;
- mdev = container_of(clock, struct mlx5_core_dev, clock);
-
switch (clock->ptp_info.pin_config[pin].func) {
case PTP_PF_EXTTS:
ptp_event.index = pin;
@@ -996,11 +1114,15 @@ static int mlx5_pps_event(struct notifier_block *nb,
ptp_clock_event(clock->ptp, &ptp_event);
break;
case PTP_PF_PEROUT:
+ if (clock->shared) {
+ mlx5_core_warn(mdev, " Received unexpected PPS out event\n");
+ break;
+ }
ns = perout_conf_next_event_timer(mdev, clock);
write_seqlock_irqsave(&clock->lock, flags);
clock->pps_info.start[pin] = ns;
write_sequnlock_irqrestore(&clock->lock, flags);
- schedule_work(&clock->pps_info.out_work);
+ schedule_work(&clock_state->out_work);
break;
default:
mlx5_core_err(mdev, " Unhandled clock PPS event, func %d\n",
@@ -1012,7 +1134,7 @@ static int mlx5_pps_event(struct notifier_block *nb,
static void mlx5_timecounter_init(struct mlx5_core_dev *mdev)
{
- struct mlx5_clock *clock = &mdev->clock;
+ struct mlx5_clock *clock = mdev->clock;
struct mlx5_timer *timer = &clock->timer;
u32 dev_freq;
@@ -1028,10 +1150,10 @@ static void mlx5_timecounter_init(struct mlx5_core_dev *mdev)
ktime_to_ns(ktime_get_real()));
}
-static void mlx5_init_overflow_period(struct mlx5_clock *clock)
+static void mlx5_init_overflow_period(struct mlx5_core_dev *mdev)
{
- struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev, clock);
struct mlx5_ib_clock_info *clock_info = mdev->clock_info;
+ struct mlx5_clock *clock = mdev->clock;
struct mlx5_timer *timer = &clock->timer;
u64 overflow_cycles;
u64 frac = 0;
@@ -1052,12 +1174,11 @@ static void mlx5_init_overflow_period(struct mlx5_clock *clock)
do_div(ns, NSEC_PER_SEC / HZ);
timer->overflow_period = ns;
- INIT_DELAYED_WORK(&timer->overflow_work, mlx5_timestamp_overflow);
- if (timer->overflow_period)
- schedule_delayed_work(&timer->overflow_work, 0);
- else
+ if (!timer->overflow_period) {
+ timer->overflow_period = HZ;
mlx5_core_warn(mdev,
- "invalid overflow period, overflow_work is not scheduled\n");
+ "invalid overflow period, overflow_work is scheduled once per second\n");
+ }
if (clock_info)
clock_info->overflow_period = timer->overflow_period;
@@ -1065,7 +1186,7 @@ static void mlx5_init_overflow_period(struct mlx5_clock *clock)
static void mlx5_init_clock_info(struct mlx5_core_dev *mdev)
{
- struct mlx5_clock *clock = &mdev->clock;
+ struct mlx5_clock *clock = mdev->clock;
struct mlx5_ib_clock_info *info;
struct mlx5_timer *timer;
@@ -1088,7 +1209,7 @@ static void mlx5_init_clock_info(struct mlx5_core_dev *mdev)
static void mlx5_init_timer_max_freq_adjustment(struct mlx5_core_dev *mdev)
{
- struct mlx5_clock *clock = &mdev->clock;
+ struct mlx5_clock *clock = mdev->clock;
u32 out[MLX5_ST_SZ_DW(mtutc_reg)] = {};
u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};
u8 log_max_freq_adjustment = 0;
@@ -1107,7 +1228,7 @@ static void mlx5_init_timer_max_freq_adjustment(struct mlx5_core_dev *mdev)
static void mlx5_init_timer_clock(struct mlx5_core_dev *mdev)
{
- struct mlx5_clock *clock = &mdev->clock;
+ struct mlx5_clock *clock = mdev->clock;
/* Configure the PHC */
clock->ptp_info = mlx5_ptp_clock_info;
@@ -1123,38 +1244,30 @@ static void mlx5_init_timer_clock(struct mlx5_core_dev *mdev)
mlx5_timecounter_init(mdev);
mlx5_init_clock_info(mdev);
- mlx5_init_overflow_period(clock);
+ mlx5_init_overflow_period(mdev);
if (mlx5_real_time_mode(mdev)) {
struct timespec64 ts;
ktime_get_real_ts64(&ts);
- mlx5_ptp_settime(&clock->ptp_info, &ts);
+ mlx5_clock_settime(mdev, clock, &ts);
}
}
static void mlx5_init_pps(struct mlx5_core_dev *mdev)
{
- struct mlx5_clock *clock = &mdev->clock;
-
if (!MLX5_PPS_CAP(mdev))
return;
mlx5_get_pps_caps(mdev);
- mlx5_init_pin_config(clock);
+ mlx5_init_pin_config(mdev);
}
-void mlx5_init_clock(struct mlx5_core_dev *mdev)
+static void mlx5_init_clock_dev(struct mlx5_core_dev *mdev)
{
- struct mlx5_clock *clock = &mdev->clock;
-
- if (!MLX5_CAP_GEN(mdev, device_frequency_khz)) {
- mlx5_core_warn(mdev, "invalid device_frequency_khz, aborting HW clock init\n");
- return;
- }
+ struct mlx5_clock *clock = mdev->clock;
seqlock_init(&clock->lock);
- INIT_WORK(&clock->pps_info.out_work, mlx5_pps_out);
/* Initialize the device clock */
mlx5_init_timer_clock(mdev);
@@ -1163,33 +1276,27 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev)
mlx5_init_pps(mdev);
clock->ptp = ptp_clock_register(&clock->ptp_info,
- &mdev->pdev->dev);
+ clock->shared ? NULL : &mdev->pdev->dev);
if (IS_ERR(clock->ptp)) {
- mlx5_core_warn(mdev, "ptp_clock_register failed %ld\n",
+ mlx5_core_warn(mdev, "%sptp_clock_register failed %ld\n",
+ clock->shared ? "shared clock " : "",
PTR_ERR(clock->ptp));
clock->ptp = NULL;
}
- MLX5_NB_INIT(&clock->pps_nb, mlx5_pps_event, PPS_EVENT);
- mlx5_eq_notifier_register(mdev, &clock->pps_nb);
+ if (clock->ptp)
+ ptp_schedule_worker(clock->ptp, 0);
}
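
With the dedicated overflow delayed work gone, cyclecounter overflow handling is expected to run from the PTP aux worker armed by ptp_schedule_worker() above. A minimal sketch of such a callback, assuming mlx5_timestamp_overflow is wired up as the .do_aux_work op elsewhere in this series (not taken verbatim from the patch):

static long mlx5_timestamp_overflow(struct ptp_clock_info *ptp_info)
{
	struct mlx5_clock *clock = container_of(ptp_info, struct mlx5_clock, ptp_info);
	struct mlx5_timer *timer = &clock->timer;
	unsigned long flags;

	write_seqlock_irqsave(&clock->lock, flags);
	/* Reading the timecounter folds any cycle-counter wraparound into tc. */
	timecounter_read(&timer->tc);
	write_sequnlock_irqrestore(&clock->lock, flags);

	/* The PTP core re-arms the worker after this many jiffies, so the HZ
	 * fallback in mlx5_init_overflow_period() guarantees at least one run
	 * per second.
	 */
	return timer->overflow_period;
}
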
-void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
+static void mlx5_destroy_clock_dev(struct mlx5_core_dev *mdev)
{
- struct mlx5_clock *clock = &mdev->clock;
-
- if (!MLX5_CAP_GEN(mdev, device_frequency_khz))
- return;
+ struct mlx5_clock *clock = mdev->clock;
- mlx5_eq_notifier_unregister(mdev, &clock->pps_nb);
if (clock->ptp) {
ptp_clock_unregister(clock->ptp);
clock->ptp = NULL;
}
- cancel_work_sync(&clock->pps_info.out_work);
- cancel_delayed_work_sync(&clock->timer.overflow_work);
-
if (mdev->clock_info) {
free_page((unsigned long)mdev->clock_info);
mdev->clock_info = NULL;
@@ -1197,3 +1304,248 @@ void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
kfree(clock->ptp_info.pin_config);
}
+
+static void mlx5_clock_free(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_clock_priv *cpriv = clock_priv(mdev->clock);
+
+ mlx5_destroy_clock_dev(mdev);
+ mutex_destroy(&cpriv->lock);
+ kfree(cpriv);
+ mdev->clock = NULL;
+}
+
+static int mlx5_clock_alloc(struct mlx5_core_dev *mdev, bool shared)
+{
+ struct mlx5_clock_priv *cpriv;
+ struct mlx5_clock *clock;
+
+ cpriv = kzalloc(sizeof(*cpriv), GFP_KERNEL);
+ if (!cpriv)
+ return -ENOMEM;
+
+ mutex_init(&cpriv->lock);
+ cpriv->mdev = mdev;
+ clock = &cpriv->clock;
+ clock->shared = shared;
+ mdev->clock = clock;
+ mlx5_clock_lock(clock);
+ mlx5_init_clock_dev(mdev);
+ mlx5_clock_unlock(clock);
+
+ if (!clock->shared)
+ return 0;
+
+ if (!clock->ptp) {
+ mlx5_core_warn(mdev, "failed to create ptp dev shared by multiple functions");
+ mlx5_clock_free(mdev);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
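mlx5_clock_alloc() relies on clock_priv() and the mlx5_clock_lock()/mlx5_clock_unlock() helpers defined elsewhere in this series; the fields used in this file suggest roughly the following wrapper layout (a sketch of an assumed definition, not the actual one):

struct mlx5_clock_priv {
	struct mlx5_clock clock;		/* embedded, shared by all functions */
	struct mlx5_core_dev *mdev;		/* function currently serving PHC ops */
	struct mutex lock;			/* serializes owner/event hand-over */
	struct mlx5_core_dev *event_mdev;	/* function armed for PPS-in events */
};

static struct mlx5_clock_priv *clock_priv(struct mlx5_clock *clock)
{
	return container_of(clock, struct mlx5_clock_priv, clock);
}
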
+static void mlx5_shared_clock_register(struct mlx5_core_dev *mdev, u64 key)
+{
+ struct mlx5_core_dev *peer_dev, *next = NULL;
+ struct mlx5_devcom_comp_dev *pos;
+
+ mdev->clock_state->compdev = mlx5_devcom_register_component(mdev->priv.devc,
+ MLX5_DEVCOM_SHARED_CLOCK,
+ key, NULL, mdev);
+ if (IS_ERR(mdev->clock_state->compdev))
+ return;
+
+ mlx5_devcom_comp_lock(mdev->clock_state->compdev);
+ mlx5_devcom_for_each_peer_entry(mdev->clock_state->compdev, peer_dev, pos) {
+ if (peer_dev->clock) {
+ next = peer_dev;
+ break;
+ }
+ }
+
+ if (next) {
+ mdev->clock = next->clock;
+ /* clock info is shared among all the functions using the same clock */
+ mdev->clock_info = next->clock_info;
+ } else {
+ mlx5_clock_alloc(mdev, true);
+ }
+ mlx5_devcom_comp_unlock(mdev->clock_state->compdev);
+
+ if (!mdev->clock) {
+ mlx5_devcom_unregister_component(mdev->clock_state->compdev);
+ mdev->clock_state->compdev = NULL;
+ }
+}
+
+static void mlx5_shared_clock_unregister(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_core_dev *peer_dev, *next = NULL;
+ struct mlx5_clock *clock = mdev->clock;
+ struct mlx5_devcom_comp_dev *pos;
+
+ mlx5_devcom_comp_lock(mdev->clock_state->compdev);
+ mlx5_devcom_for_each_peer_entry(mdev->clock_state->compdev, peer_dev, pos) {
+ if (peer_dev->clock && peer_dev != mdev) {
+ next = peer_dev;
+ break;
+ }
+ }
+
+ if (next) {
+ struct mlx5_clock_priv *cpriv = clock_priv(clock);
+
+ mlx5_clock_lock(clock);
+ if (mdev == cpriv->mdev)
+ cpriv->mdev = next;
+ mlx5_clock_unlock(clock);
+ } else {
+ mlx5_clock_free(mdev);
+ }
+
+ mdev->clock = NULL;
+ mdev->clock_info = NULL;
+ mlx5_devcom_comp_unlock(mdev->clock_state->compdev);
+
+ mlx5_devcom_unregister_component(mdev->clock_state->compdev);
+}
+
+static void mlx5_clock_arm_pps_in_event(struct mlx5_clock *clock,
+ struct mlx5_core_dev *new_mdev,
+ struct mlx5_core_dev *old_mdev)
+{
+ struct ptp_clock_info *ptp_info = &clock->ptp_info;
+ struct mlx5_clock_priv *cpriv = clock_priv(clock);
+ int i;
+
+ for (i = 0; i < ptp_info->n_pins; i++) {
+ if (ptp_info->pin_config[i].func != PTP_PF_EXTTS ||
+ !clock->pps_info.pin_armed[i])
+ continue;
+
+ if (new_mdev) {
+ mlx5_set_mtppse(new_mdev, i, 0, MLX5_EVENT_MODE_REPETETIVE);
+ cpriv->event_mdev = new_mdev;
+ } else {
+ cpriv->event_mdev = NULL;
+ }
+
+ if (old_mdev)
+ mlx5_set_mtppse(old_mdev, i, 0, MLX5_EVENT_MODE_DISABLE);
+ }
+}
+
+void mlx5_clock_load(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_clock *clock = mdev->clock;
+ struct mlx5_clock_priv *cpriv;
+
+ if (!MLX5_CAP_GEN(mdev, device_frequency_khz))
+ return;
+
+ INIT_WORK(&mdev->clock_state->out_work, mlx5_pps_out);
+ MLX5_NB_INIT(&mdev->clock_state->pps_nb, mlx5_pps_event, PPS_EVENT);
+ mlx5_eq_notifier_register(mdev, &mdev->clock_state->pps_nb);
+
+ if (!clock->shared) {
+ mlx5_clock_arm_pps_in_event(clock, mdev, NULL);
+ return;
+ }
+
+ cpriv = clock_priv(clock);
+ mlx5_devcom_comp_lock(mdev->clock_state->compdev);
+ mlx5_clock_lock(clock);
+ if (mdev == cpriv->mdev && mdev != cpriv->event_mdev)
+ mlx5_clock_arm_pps_in_event(clock, mdev, cpriv->event_mdev);
+ mlx5_clock_unlock(clock);
+ mlx5_devcom_comp_unlock(mdev->clock_state->compdev);
+}
+
+void mlx5_clock_unload(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_core_dev *peer_dev, *next = NULL;
+ struct mlx5_clock *clock = mdev->clock;
+ struct mlx5_devcom_comp_dev *pos;
+
+ if (!MLX5_CAP_GEN(mdev, device_frequency_khz))
+ return;
+
+ if (!clock->shared) {
+ mlx5_clock_arm_pps_in_event(clock, NULL, mdev);
+ goto out;
+ }
+
+ mlx5_devcom_comp_lock(mdev->clock_state->compdev);
+ mlx5_devcom_for_each_peer_entry(mdev->clock_state->compdev, peer_dev, pos) {
+ if (peer_dev->clock && peer_dev != mdev) {
+ next = peer_dev;
+ break;
+ }
+ }
+
+ mlx5_clock_lock(clock);
+ if (mdev == clock_priv(clock)->event_mdev)
+ mlx5_clock_arm_pps_in_event(clock, next, mdev);
+ mlx5_clock_unlock(clock);
+ mlx5_devcom_comp_unlock(mdev->clock_state->compdev);
+
+out:
+ mlx5_eq_notifier_unregister(mdev, &mdev->clock_state->pps_nb);
+ cancel_work_sync(&mdev->clock_state->out_work);
+}
+
+static struct mlx5_clock null_clock;
+
+int mlx5_init_clock(struct mlx5_core_dev *mdev)
+{
+ u8 identity[MLX5_RT_CLOCK_IDENTITY_SIZE];
+ struct mlx5_clock_dev_state *clock_state;
+ u64 key;
+ int err;
+
+ if (!MLX5_CAP_GEN(mdev, device_frequency_khz)) {
+ mdev->clock = &null_clock;
+ mlx5_core_warn(mdev, "invalid device_frequency_khz, aborting HW clock init\n");
+ return 0;
+ }
+
+ clock_state = kzalloc(sizeof(*clock_state), GFP_KERNEL);
+ if (!clock_state)
+ return -ENOMEM;
+ clock_state->mdev = mdev;
+ mdev->clock_state = clock_state;
+
+ if (MLX5_CAP_MCAM_REG3(mdev, mrtcq) && mlx5_real_time_mode(mdev)) {
+ if (mlx5_clock_identity_get(mdev, identity)) {
+ mlx5_core_warn(mdev, "failed to get rt clock identity, create ptp dev per function\n");
+ } else {
+ memcpy(&key, &identity, sizeof(key));
+ mlx5_shared_clock_register(mdev, key);
+ }
+ }
+
+ if (!mdev->clock) {
+ err = mlx5_clock_alloc(mdev, false);
+ if (err) {
+ kfree(clock_state);
+ mdev->clock_state = NULL;
+ return err;
+ }
+ }
+
+ return 0;
+}
+
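The per-function mdev->clock_state referenced throughout this file is likewise introduced elsewhere in the series; the members used here imply roughly this shape (a sketch, not the actual definition):

struct mlx5_clock_dev_state {
	struct mlx5_core_dev *mdev;		/* back-pointer for the PPS notifier */
	struct mlx5_devcom_comp_dev *compdev;	/* shared-clock devcom component */
	struct mlx5_nb pps_nb;			/* per-function PPS_EVENT notifier */
	struct work_struct out_work;		/* per-function PPS-out rearm work */
};
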
+void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
+{
+ if (!MLX5_CAP_GEN(mdev, device_frequency_khz))
+ return;
+
+ if (mdev->clock->shared)
+ mlx5_shared_clock_unregister(mdev);
+ else
+ mlx5_clock_free(mdev);
+ kfree(mdev->clock_state);
+ mdev->clock_state = NULL;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h
index bd95b9f8d143..c18a652c0faa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h
@@ -33,6 +33,35 @@
#ifndef __LIB_CLOCK_H__
#define __LIB_CLOCK_H__
+#include <linux/ptp_clock_kernel.h>
+
+#define MAX_PIN_NUM 8
+struct mlx5_pps {
+ u8 pin_caps[MAX_PIN_NUM];
+ u64 start[MAX_PIN_NUM];
+ u8 enabled;
+ u64 min_npps_period;
+ u64 min_out_pulse_duration_ns;
+ bool pin_armed[MAX_PIN_NUM];
+};
+
+struct mlx5_timer {
+ struct cyclecounter cycles;
+ struct timecounter tc;
+ u32 nominal_c_mult;
+ unsigned long overflow_period;
+};
+
+struct mlx5_clock {
+ seqlock_t lock;
+ struct hwtstamp_config hwtstamp_config;
+ struct ptp_clock *ptp;
+ struct ptp_clock_info ptp_info;
+ struct mlx5_pps pps_info;
+ struct mlx5_timer timer;
+ bool shared;
+};
+
static inline bool mlx5_is_real_time_rq(struct mlx5_core_dev *mdev)
{
u8 rq_ts_format_cap = MLX5_CAP_GEN(mdev, rq_ts_format);
@@ -54,12 +83,14 @@ static inline bool mlx5_is_real_time_sq(struct mlx5_core_dev *mdev)
typedef ktime_t (*cqe_ts_to_ns)(struct mlx5_clock *, u64);
#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
-void mlx5_init_clock(struct mlx5_core_dev *mdev);
+int mlx5_init_clock(struct mlx5_core_dev *mdev);
void mlx5_cleanup_clock(struct mlx5_core_dev *mdev);
+void mlx5_clock_load(struct mlx5_core_dev *mdev);
+void mlx5_clock_unload(struct mlx5_core_dev *mdev);
static inline int mlx5_clock_get_ptp_index(struct mlx5_core_dev *mdev)
{
- return mdev->clock.ptp ? ptp_clock_index(mdev->clock.ptp) : -1;
+ return mdev->clock->ptp ? ptp_clock_index(mdev->clock->ptp) : -1;
}
static inline ktime_t mlx5_timecounter_cyc2time(struct mlx5_clock *clock,
@@ -87,8 +118,10 @@ static inline ktime_t mlx5_real_time_cyc2time(struct mlx5_clock *clock,
return ns_to_ktime(time);
}
#else
-static inline void mlx5_init_clock(struct mlx5_core_dev *mdev) {}
+static inline int mlx5_init_clock(struct mlx5_core_dev *mdev) { return 0; }
static inline void mlx5_cleanup_clock(struct mlx5_core_dev *mdev) {}
+static inline void mlx5_clock_load(struct mlx5_core_dev *mdev) {}
+static inline void mlx5_clock_unload(struct mlx5_core_dev *mdev) {}
static inline int mlx5_clock_get_ptp_index(struct mlx5_core_dev *mdev)
{
return -1;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
index d58032dd0df7..c79699b94a02 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
@@ -11,6 +11,7 @@ enum mlx5_devcom_component {
MLX5_DEVCOM_MPV,
MLX5_DEVCOM_HCA_PORTS,
MLX5_DEVCOM_SD_GROUP,
+ MLX5_DEVCOM_SHARED_CLOCK,
MLX5_DEVCOM_NUM_COMPONENTS,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
index a80ecb672f33..0a3c260af377 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
@@ -161,7 +161,8 @@ mlx5_chains_create_table(struct mlx5_fs_chains *chains,
ft_attr.flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
- sz = (chain == mlx5_chains_get_nf_ft_chain(chains)) ? FT_TBL_SZ : POOL_NEXT_SIZE;
+ sz = (chain == mlx5_chains_get_nf_ft_chain(chains)) ?
+ FT_TBL_SZ : MLX5_FS_MAX_POOL_SIZE;
ft_attr.max_fte = sz;
/* We use chains_default_ft(chains) as the table's next_ft till
@@ -196,6 +197,11 @@ mlx5_chains_create_table(struct mlx5_fs_chains *chains,
ns = mlx5_get_flow_namespace(chains->dev, chains->ns);
}
+ if (!ns) {
+ mlx5_core_warn(chains->dev, "Failed to get flow namespace\n");
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
ft_attr.autogroup.num_reserved_entries = 2;
ft_attr.autogroup.max_num_groups = chains->group_num;
ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
@@ -699,7 +705,7 @@ mlx5_chains_create_global_table(struct mlx5_fs_chains *chains)
goto err_ignore;
}
- chain = mlx5_chains_get_chain_range(chains),
+ chain = mlx5_chains_get_chain_range(chains);
prio = mlx5_chains_get_prio_range(chains);
level = mlx5_chains_get_level_range(chains);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c
index 9f13cea16446..ca9ecec358b2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c
@@ -61,6 +61,25 @@ static void mlx5_cleanup_ttc_rules(struct mlx5_ttc_table *ttc)
}
}
+static const char *mlx5_traffic_types_names[MLX5_NUM_TT] = {
+ [MLX5_TT_IPV4_TCP] = "TT_IPV4_TCP",
+ [MLX5_TT_IPV6_TCP] = "TT_IPV6_TCP",
+ [MLX5_TT_IPV4_UDP] = "TT_IPV4_UDP",
+ [MLX5_TT_IPV6_UDP] = "TT_IPV6_UDP",
+ [MLX5_TT_IPV4_IPSEC_AH] = "TT_IPV4_IPSEC_AH",
+ [MLX5_TT_IPV6_IPSEC_AH] = "TT_IPV6_IPSEC_AH",
+ [MLX5_TT_IPV4_IPSEC_ESP] = "TT_IPV4_IPSEC_ESP",
+ [MLX5_TT_IPV6_IPSEC_ESP] = "TT_IPV6_IPSEC_ESP",
+ [MLX5_TT_IPV4] = "TT_IPV4",
+ [MLX5_TT_IPV6] = "TT_IPV6",
+ [MLX5_TT_ANY] = "TT_ANY"
+};
+
+const char *mlx5_ttc_get_name(enum mlx5_traffic_types tt)
+{
+ return mlx5_traffic_types_names[tt];
+}
+
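A short usage sketch for the newly exported helper (hypothetical call site, not part of this patch):

static void mlx5_ttc_warn_missing_rule(struct mlx5_core_dev *dev,
				       enum mlx5_traffic_types tt)
{
	mlx5_core_warn(dev, "TTC rule for %s is missing\n",
		       mlx5_ttc_get_name(tt));
}
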
struct mlx5_etype_proto {
u16 etype;
u8 proto;
@@ -618,10 +637,6 @@ struct mlx5_ttc_table *mlx5_create_inner_ttc_table(struct mlx5_core_dev *dev,
bool use_l4_type;
int err;
- ttc = kvzalloc(sizeof(*ttc), GFP_KERNEL);
- if (!ttc)
- return ERR_PTR(-ENOMEM);
-
switch (params->ns_type) {
case MLX5_FLOW_NAMESPACE_PORT_SEL:
use_l4_type = MLX5_CAP_GEN_2(dev, pcc_ifa2) &&
@@ -635,7 +650,16 @@ struct mlx5_ttc_table *mlx5_create_inner_ttc_table(struct mlx5_core_dev *dev,
return ERR_PTR(-EINVAL);
}
+ ttc = kvzalloc(sizeof(*ttc), GFP_KERNEL);
+ if (!ttc)
+ return ERR_PTR(-ENOMEM);
+
ns = mlx5_get_flow_namespace(dev, params->ns_type);
+ if (!ns) {
+ kvfree(ttc);
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
groups = use_l4_type ? &inner_ttc_groups[TTC_GROUPS_USE_L4_TYPE] :
&inner_ttc_groups[TTC_GROUPS_DEFAULT];
@@ -691,10 +715,6 @@ struct mlx5_ttc_table *mlx5_create_ttc_table(struct mlx5_core_dev *dev,
bool use_l4_type;
int err;
- ttc = kvzalloc(sizeof(*ttc), GFP_KERNEL);
- if (!ttc)
- return ERR_PTR(-ENOMEM);
-
switch (params->ns_type) {
case MLX5_FLOW_NAMESPACE_PORT_SEL:
use_l4_type = MLX5_CAP_GEN_2(dev, pcc_ifa2) &&
@@ -708,7 +728,16 @@ struct mlx5_ttc_table *mlx5_create_ttc_table(struct mlx5_core_dev *dev,
return ERR_PTR(-EINVAL);
}
+ ttc = kvzalloc(sizeof(*ttc), GFP_KERNEL);
+ if (!ttc)
+ return ERR_PTR(-ENOMEM);
+
ns = mlx5_get_flow_namespace(dev, params->ns_type);
+ if (!ns) {
+ kvfree(ttc);
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
groups = use_l4_type ? &ttc_groups[TTC_GROUPS_USE_L4_TYPE] :
&ttc_groups[TTC_GROUPS_DEFAULT];
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h
index 92eea6bea310..ab9434fe3ae6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h
@@ -49,6 +49,7 @@ struct ttc_params {
struct mlx5_flow_destination tunnel_dests[MLX5_NUM_TUNNEL_TT];
};
+const char *mlx5_ttc_get_name(enum mlx5_traffic_types tt);
struct mlx5_flow_table *mlx5_get_ttc_flow_table(struct mlx5_ttc_table *ttc);
struct mlx5_ttc_table *mlx5_create_ttc_table(struct mlx5_core_dev *dev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c
index 4a078113e292..762d55ba9e51 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c
@@ -497,7 +497,7 @@ static int macsec_fs_tx_create(struct mlx5_macsec_fs *macsec_fs)
memset(&dest, 0, sizeof(struct mlx5_flow_destination));
memset(&flow_act, 0, sizeof(flow_act));
dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest.counter_id = mlx5_fc_id(tx_tables->check_miss_rule_counter);
+ dest.counter = tx_tables->check_miss_rule_counter;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
rule = mlx5_add_flow_rules(tx_tables->ft_check, NULL, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
@@ -519,7 +519,7 @@ static int macsec_fs_tx_create(struct mlx5_macsec_fs *macsec_fs)
flow_act.flags = FLOW_ACT_NO_APPEND;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW | MLX5_FLOW_CONTEXT_ACTION_COUNT;
dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest.counter_id = mlx5_fc_id(tx_tables->check_rule_counter);
+ dest.counter = tx_tables->check_rule_counter;
rule = mlx5_add_flow_rules(tx_tables->ft_check, spec, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -1200,7 +1200,7 @@ static int macsec_fs_rx_create_check_decap_rule(struct mlx5_macsec_fs *macsec_fs
flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
roce_dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- roce_dest[dstn].counter_id = mlx5_fc_id(rx_tables->check_rule_counter);
+ roce_dest[dstn].counter = rx_tables->check_rule_counter;
rule = mlx5_add_flow_rules(rx_tables->ft_check, spec, flow_act, roce_dest, dstn + 1);
if (IS_ERR(rule)) {
@@ -1592,7 +1592,7 @@ static int macsec_fs_rx_create(struct mlx5_macsec_fs *macsec_fs)
memset(&flow_act, 0, sizeof(flow_act));
dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest.counter_id = mlx5_fc_id(rx_tables->check_miss_rule_counter);
+ dest.counter = rx_tables->check_miss_rule_counter;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
rule = mlx5_add_flow_rules(rx_tables->ft_check, NULL, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
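
Flow-steering counter destinations now carry the counter object itself rather than its id; a minimal sketch of the pattern applied throughout this file (macsec_fs_set_counter_dest is a hypothetical helper, fc an already allocated struct mlx5_fc):

static void macsec_fs_set_counter_dest(struct mlx5_flow_destination *dest,
				       struct mlx5_fc *fc)
{
	dest->type    = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest->counter = fc;	/* previously: dest->counter_id = mlx5_fc_id(fc) */
}
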
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 220a9ac75c8b..41e8660c819c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -368,6 +368,10 @@ int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_ty
u16 opmod = (cap_type << 1) | (cap_mode & 0x01);
int err;
+ if (WARN_ON(!dev->caps.hca[cap_type]))
+ /* this cap_type must be added to mlx5_hca_caps_alloc() */
+ return -EINVAL;
+
memset(in, 0, sizeof(in));
out = kzalloc(out_sz, GFP_KERNEL);
if (!out)
@@ -664,6 +668,10 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_current_uc_list,
ilog2(max_uc_list));
+ /* enable absolute native port num */
+ if (MLX5_CAP_GEN_MAX(dev, abs_native_port_num))
+ MLX5_SET(cmd_hca_cap, set_hca_cap, abs_native_port_num, 1);
+
return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE);
}
@@ -941,9 +949,7 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct pci_dev *pdev,
mlx5_pci_vsc_init(dev);
- err = pci_enable_ptm(pdev, NULL);
- if (err)
- mlx5_core_info(dev, "PTM is not supported by PCIe\n");
+ pci_enable_ptm(pdev, NULL);
return 0;
@@ -1032,7 +1038,11 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
mlx5_init_reserved_gids(dev);
- mlx5_init_clock(dev);
+ err = mlx5_init_clock(dev);
+ if (err) {
+ mlx5_core_err(dev, "failed to initialize hardware clock\n");
+ goto err_tables_cleanup;
+ }
dev->vxlan = mlx5_vxlan_create(dev);
dev->geneve = mlx5_geneve_create(dev);
@@ -1040,7 +1050,7 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
err = mlx5_init_rl_table(dev);
if (err) {
mlx5_core_err(dev, "Failed to init rate limiting\n");
- goto err_tables_cleanup;
+ goto err_clock_cleanup;
}
err = mlx5_mpfs_init(dev);
@@ -1117,10 +1127,11 @@ err_mpfs_cleanup:
mlx5_mpfs_cleanup(dev);
err_rl_cleanup:
mlx5_cleanup_rl_table(dev);
-err_tables_cleanup:
+err_clock_cleanup:
mlx5_geneve_destroy(dev->geneve);
mlx5_vxlan_destroy(dev->vxlan);
mlx5_cleanup_clock(dev);
+err_tables_cleanup:
mlx5_cleanup_reserved_gids(dev);
mlx5_cq_debugfs_cleanup(dev);
mlx5_fw_reset_cleanup(dev);
@@ -1199,24 +1210,24 @@ static int mlx5_function_enable(struct mlx5_core_dev *dev, bool boot, u64 timeou
dev->caps.embedded_cpu = mlx5_read_embedded_cpu(dev);
mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_UP);
- mlx5_start_health_poll(dev);
-
err = mlx5_core_enable_hca(dev, 0);
if (err) {
mlx5_core_err(dev, "enable hca failed\n");
- goto stop_health_poll;
+ goto err_cmd_cleanup;
}
+ mlx5_start_health_poll(dev);
+
err = mlx5_core_set_issi(dev);
if (err) {
mlx5_core_err(dev, "failed to set issi\n");
- goto err_disable_hca;
+ goto stop_health_poll;
}
err = mlx5_satisfy_startup_pages(dev, 1);
if (err) {
mlx5_core_err(dev, "failed to allocate boot pages\n");
- goto err_disable_hca;
+ goto stop_health_poll;
}
err = mlx5_tout_query_dtor(dev);
@@ -1229,10 +1240,9 @@ static int mlx5_function_enable(struct mlx5_core_dev *dev, bool boot, u64 timeou
reclaim_boot_pages:
mlx5_reclaim_startup_pages(dev);
-err_disable_hca:
- mlx5_core_disable_hca(dev, 0);
stop_health_poll:
mlx5_stop_health_poll(dev, boot);
+ mlx5_core_disable_hca(dev, 0);
err_cmd_cleanup:
mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
mlx5_cmd_disable(dev);
@@ -1243,8 +1253,8 @@ err_cmd_cleanup:
static void mlx5_function_disable(struct mlx5_core_dev *dev, bool boot)
{
mlx5_reclaim_startup_pages(dev);
- mlx5_core_disable_hca(dev, 0);
mlx5_stop_health_poll(dev, boot);
+ mlx5_core_disable_hca(dev, 0);
mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
mlx5_cmd_disable(dev);
}
@@ -1353,6 +1363,8 @@ static int mlx5_load(struct mlx5_core_dev *dev)
goto err_eq_table;
}
+ mlx5_clock_load(dev);
+
err = mlx5_fw_tracer_init(dev->tracer);
if (err) {
mlx5_core_err(dev, "Failed to init FW tracer %d\n", err);
@@ -1436,6 +1448,7 @@ err_fpga_start:
mlx5_hv_vhca_cleanup(dev->hv_vhca);
mlx5_fw_reset_events_stop(dev);
mlx5_fw_tracer_cleanup(dev->tracer);
+ mlx5_clock_unload(dev);
mlx5_eq_table_destroy(dev);
err_eq_table:
mlx5_irq_table_destroy(dev);
@@ -1462,6 +1475,7 @@ static void mlx5_unload(struct mlx5_core_dev *dev)
mlx5_hv_vhca_cleanup(dev->hv_vhca);
mlx5_fw_reset_events_stop(dev);
mlx5_fw_tracer_cleanup(dev->tracer);
+ mlx5_clock_unload(dev);
mlx5_eq_table_destroy(dev);
mlx5_irq_table_destroy(dev);
mlx5_pagealloc_stop(dev);
@@ -1788,6 +1802,8 @@ static const int types[] = {
MLX5_CAP_MACSEC,
MLX5_CAP_ADV_VIRTUALIZATION,
MLX5_CAP_CRYPTO,
+ MLX5_CAP_SHAMPO,
+ MLX5_CAP_ADV_RDMA,
};
static void mlx5_hca_caps_free(struct mlx5_core_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 99de67c3aa74..2e02bdea8361 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -114,6 +114,26 @@ struct mlx5_cmd_alias_obj_create_attr {
u8 access_key[ACCESS_KEY_LEN];
};
+struct mlx5_port_eth_proto {
+ u32 cap;
+ u32 admin;
+ u32 oper;
+};
+
+struct mlx5_module_eeprom_query_params {
+ u16 size;
+ u16 offset;
+ u16 i2c_address;
+ u32 page;
+ u32 bank;
+ u32 module_number;
+};
+
+struct mlx5_link_info {
+ u32 speed;
+ u32 lanes;
+};
+
static inline void mlx5_printk(struct mlx5_core_dev *dev, int level, const char *format, ...)
{
struct device *device = dev->device;
@@ -280,6 +300,78 @@ int mlx5_set_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 arm, u8 mode);
struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev);
void mlx5_dm_cleanup(struct mlx5_core_dev *dev);
+void mlx5_toggle_port_link(struct mlx5_core_dev *dev);
+int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
+ enum mlx5_port_status status);
+int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
+ enum mlx5_port_status *status);
+int mlx5_set_port_beacon(struct mlx5_core_dev *dev, u16 beacon_duration);
+
+int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port);
+int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause);
+int mlx5_query_port_pause(struct mlx5_core_dev *dev,
+ u32 *rx_pause, u32 *tx_pause);
+
+int mlx5_set_port_pfc(struct mlx5_core_dev *dev, u8 pfc_en_tx, u8 pfc_en_rx);
+int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx,
+ u8 *pfc_en_rx);
+
+int mlx5_set_port_stall_watermark(struct mlx5_core_dev *dev,
+ u16 stall_critical_watermark,
+ u16 stall_minor_watermark);
+int mlx5_query_port_stall_watermark(struct mlx5_core_dev *dev,
+ u16 *stall_critical_watermark,
+ u16 *stall_minor_watermark);
+
+int mlx5_max_tc(struct mlx5_core_dev *mdev);
+int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc);
+int mlx5_query_port_prio_tc(struct mlx5_core_dev *mdev,
+ u8 prio, u8 *tc);
+int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group);
+int mlx5_query_port_tc_group(struct mlx5_core_dev *mdev,
+ u8 tc, u8 *tc_group);
+int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw);
+int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev,
+ u8 tc, u8 *bw_pct);
+int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev,
+ u8 *max_bw_value,
+ u8 *max_bw_unit);
+int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev,
+ u8 *max_bw_value,
+ u8 *max_bw_unit);
+int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode);
+int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode);
+
+int mlx5_query_ports_check(struct mlx5_core_dev *mdev, u32 *out, int outlen);
+int mlx5_set_ports_check(struct mlx5_core_dev *mdev, u32 *in, int inlen);
+int mlx5_set_port_fcs(struct mlx5_core_dev *mdev, u8 enable);
+void mlx5_query_port_fcs(struct mlx5_core_dev *mdev, bool *supported,
+ bool *enabled);
+int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
+ u16 offset, u16 size, u8 *data);
+int
+mlx5_query_module_eeprom_by_page(struct mlx5_core_dev *dev,
+ struct mlx5_module_eeprom_query_params *params,
+ u8 *data);
+
+int mlx5_query_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *out);
+int mlx5_set_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *in);
+int mlx5_set_trust_state(struct mlx5_core_dev *mdev, u8 trust_state);
+int mlx5_query_trust_state(struct mlx5_core_dev *mdev, u8 *trust_state);
+int mlx5_set_dscp2prio(struct mlx5_core_dev *mdev, u8 dscp, u8 prio);
+int mlx5_query_dscp2prio(struct mlx5_core_dev *mdev, u8 *dscp2prio);
+
+int mlx5_port_query_eth_proto(struct mlx5_core_dev *dev, u8 port, bool ext,
+ struct mlx5_port_eth_proto *eproto);
+bool mlx5_ptys_ext_supported(struct mlx5_core_dev *mdev);
+const struct mlx5_link_info *mlx5_port_ptys2info(struct mlx5_core_dev *mdev,
+ u32 eth_proto_oper,
+ bool force_legacy);
+u32 mlx5_port_info2linkmodes(struct mlx5_core_dev *mdev,
+ struct mlx5_link_info *info,
+ bool force_legacy);
+int mlx5_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
+
#define MLX5_PPS_CAP(mdev) (MLX5_CAP_GEN((mdev), pps) && \
MLX5_CAP_GEN((mdev), pps_modify) && \
MLX5_CAP_MCAM_FEATURE((mdev), mtpps_fs) && \
@@ -346,6 +438,8 @@ int mlx5_vport_set_other_func_cap(struct mlx5_core_dev *dev, const void *hca_cap
#define mlx5_vport_get_other_func_general_cap(dev, vport, out) \
mlx5_vport_get_other_func_cap(dev, vport, out, MLX5_CAP_GENERAL)
+int mlx5_vport_get_vhca_id(struct mlx5_core_dev *dev, u16 vport, u16 *vhca_id);
+
static inline u32 mlx5_sriov_get_vf_total_msix(struct pci_dev *pdev)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
index 0881e961d8b1..586688da9940 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
@@ -10,12 +10,15 @@
struct mlx5_irq;
struct cpu_rmap;
+struct mlx5_irq_pool;
int mlx5_irq_table_init(struct mlx5_core_dev *dev);
void mlx5_irq_table_cleanup(struct mlx5_core_dev *dev);
int mlx5_irq_table_create(struct mlx5_core_dev *dev);
void mlx5_irq_table_destroy(struct mlx5_core_dev *dev);
void mlx5_irq_table_free_irqs(struct mlx5_core_dev *dev);
+struct mlx5_irq_pool *
+mlx5_irq_table_get_comp_irq_pool(struct mlx5_core_dev *dev);
int mlx5_irq_table_get_num_comp(struct mlx5_irq_table *table);
int mlx5_irq_table_get_sfs_vec(struct mlx5_irq_table *table);
struct mlx5_irq_table *mlx5_irq_table_get(struct mlx5_core_dev *dev);
@@ -38,7 +41,6 @@ struct cpumask *mlx5_irq_get_affinity_mask(struct mlx5_irq *irq);
int mlx5_irq_get_index(struct mlx5_irq *irq);
int mlx5_irq_get_irq(const struct mlx5_irq *irq);
-struct mlx5_irq_pool;
#ifdef CONFIG_MLX5_SF
struct mlx5_irq *mlx5_irq_affinity_irq_request_auto(struct mlx5_core_dev *dev,
struct cpumask *used_cpus, u16 vecidx);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index 972e8e9df585..9bc9bd83c232 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -291,7 +291,7 @@ static void free_4k(struct mlx5_core_dev *dev, u64 addr, u32 function)
static int alloc_system_page(struct mlx5_core_dev *dev, u32 function)
{
struct device *device = mlx5_core_dma_dev(dev);
- int nid = dev_to_node(device);
+ int nid = dev->priv.numa_node;
struct page *page;
u64 zero_addr = 1;
u64 addr;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index 7db9cab9bedf..40024cfa3099 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -148,7 +148,7 @@ out:
* Free the IRQ and other resources such as rmap from the system.
* BUT doesn't free or remove reference from mlx5.
* This function is very important for the shutdown flow, where we need to
- * cleanup system resoruces but keep mlx5 objects alive,
+ * cleanup system resources but keep mlx5 objects alive,
* see mlx5_irq_table_free_irqs().
*/
static void mlx5_system_free_irq(struct mlx5_irq *irq)
@@ -378,6 +378,11 @@ int mlx5_irq_get_index(struct mlx5_irq *irq)
return irq->map.index;
}
+struct mlx5_irq_pool *mlx5_irq_get_pool(struct mlx5_irq *irq)
+{
+ return irq->pool;
+}
+
/* irq_pool API */
/* requesting an irq from a given pool according to given index */
@@ -405,18 +410,20 @@ static struct mlx5_irq_pool *sf_ctrl_irq_pool_get(struct mlx5_irq_table *irq_tab
return irq_table->sf_ctrl_pool;
}
-static struct mlx5_irq_pool *sf_irq_pool_get(struct mlx5_irq_table *irq_table)
+static struct mlx5_irq_pool *
+sf_comp_irq_pool_get(struct mlx5_irq_table *irq_table)
{
return irq_table->sf_comp_pool;
}
-struct mlx5_irq_pool *mlx5_irq_pool_get(struct mlx5_core_dev *dev)
+struct mlx5_irq_pool *
+mlx5_irq_table_get_comp_irq_pool(struct mlx5_core_dev *dev)
{
struct mlx5_irq_table *irq_table = mlx5_irq_table_get(dev);
struct mlx5_irq_pool *pool = NULL;
if (mlx5_core_is_sf(dev))
- pool = sf_irq_pool_get(irq_table);
+ pool = sf_comp_irq_pool_get(irq_table);
/* In some configs, there won't be a pool of SFs IRQs. Hence, returning
* the PF IRQs pool in case the SF pool doesn't exist.
@@ -572,7 +579,7 @@ irq_pool_alloc(struct mlx5_core_dev *dev, int start, int size, char *name,
pool->min_threshold = min_threshold * MLX5_EQ_REFS_PER_IRQ;
pool->max_threshold = max_threshold * MLX5_EQ_REFS_PER_IRQ;
mlx5_core_dbg(dev, "pool->name = %s, pool->size = %d, pool->start = %d",
- name, size, start);
+ name ? name : "mlx5_pcif_pool", size, start);
return pool;
}
@@ -581,7 +588,7 @@ static void irq_pool_free(struct mlx5_irq_pool *pool)
struct mlx5_irq *irq;
unsigned long index;
- /* There are cases in which we are destrying the irq_table before
+ /* There are cases in which we are destroying the irq_table before
* freeing all the IRQs, fast teardown for example. Hence, free the irqs
* which might not have been freed.
*/
@@ -610,7 +617,7 @@ static int irq_pools_init(struct mlx5_core_dev *dev, int sf_vec, int pcif_vec,
if (!mlx5_sf_max_functions(dev))
return 0;
if (sf_vec < MLX5_IRQ_VEC_COMP_BASE_SF) {
- mlx5_core_dbg(dev, "Not enught IRQs for SFs. SF may run at lower performance\n");
+ mlx5_core_dbg(dev, "Not enough IRQs for SFs. SF may run at lower performance\n");
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h
index c4d377f8df30..cc064425fe16 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h
@@ -28,7 +28,6 @@ struct mlx5_irq_pool {
struct mlx5_core_dev *dev;
};
-struct mlx5_irq_pool *mlx5_irq_pool_get(struct mlx5_core_dev *dev);
static inline bool mlx5_irq_pool_is_sf_pool(struct mlx5_irq_pool *pool)
{
return !strncmp("mlx5_sf", pool->name, strlen("mlx5_sf"));
@@ -40,5 +39,6 @@ struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i,
int mlx5_irq_get_locked(struct mlx5_irq *irq);
int mlx5_irq_read_locked(struct mlx5_irq *irq);
int mlx5_irq_put(struct mlx5_irq *irq);
+struct mlx5_irq_pool *mlx5_irq_get_pool(struct mlx5_irq *irq);
#endif /* __PCI_IRQ_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 50931584132b..549f1066d2a5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -196,7 +196,6 @@ void mlx5_toggle_port_link(struct mlx5_core_dev *dev)
if (ps == MLX5_PORT_UP)
mlx5_set_port_admin_status(dev, MLX5_PORT_UP);
}
-EXPORT_SYMBOL_GPL(mlx5_toggle_port_link);
int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
enum mlx5_port_status status)
@@ -210,7 +209,6 @@ int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
return mlx5_core_access_reg(dev, in, sizeof(in), out,
sizeof(out), MLX5_REG_PAOS, 0, 1);
}
-EXPORT_SYMBOL_GPL(mlx5_set_port_admin_status);
int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
enum mlx5_port_status *status)
@@ -227,7 +225,6 @@ int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
*status = MLX5_GET(paos_reg, out, admin_status);
return 0;
}
-EXPORT_SYMBOL_GPL(mlx5_query_port_admin_status);
static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, u16 *admin_mtu,
u16 *max_mtu, u16 *oper_mtu, u8 port)
@@ -257,7 +254,6 @@ int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port)
return mlx5_core_access_reg(dev, in, sizeof(in), out,
sizeof(out), MLX5_REG_PMTU, 0, 1);
}
-EXPORT_SYMBOL_GPL(mlx5_set_port_mtu);
void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu,
u8 port)
@@ -447,7 +443,6 @@ int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
return mlx5_query_mcia(dev, &query, data);
}
-EXPORT_SYMBOL_GPL(mlx5_query_module_eeprom);
int mlx5_query_module_eeprom_by_page(struct mlx5_core_dev *dev,
struct mlx5_module_eeprom_query_params *params,
@@ -467,7 +462,6 @@ int mlx5_query_module_eeprom_by_page(struct mlx5_core_dev *dev,
return mlx5_query_mcia(dev, params, data);
}
-EXPORT_SYMBOL_GPL(mlx5_query_module_eeprom_by_page);
static int mlx5_query_port_pvlc(struct mlx5_core_dev *dev, u32 *pvlc,
int pvlc_size, u8 local_port)
@@ -518,7 +512,6 @@ int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause)
return mlx5_core_access_reg(dev, in, sizeof(in), out,
sizeof(out), MLX5_REG_PFCC, 0, 1);
}
-EXPORT_SYMBOL_GPL(mlx5_set_port_pause);
int mlx5_query_port_pause(struct mlx5_core_dev *dev,
u32 *rx_pause, u32 *tx_pause)
@@ -538,7 +531,6 @@ int mlx5_query_port_pause(struct mlx5_core_dev *dev,
return 0;
}
-EXPORT_SYMBOL_GPL(mlx5_query_port_pause);
int mlx5_set_port_stall_watermark(struct mlx5_core_dev *dev,
u16 stall_critical_watermark,
@@ -597,7 +589,6 @@ int mlx5_set_port_pfc(struct mlx5_core_dev *dev, u8 pfc_en_tx, u8 pfc_en_rx)
return mlx5_core_access_reg(dev, in, sizeof(in), out,
sizeof(out), MLX5_REG_PFCC, 0, 1);
}
-EXPORT_SYMBOL_GPL(mlx5_set_port_pfc);
int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx, u8 *pfc_en_rx)
{
@@ -616,7 +607,6 @@ int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx, u8 *pfc_en_rx)
return 0;
}
-EXPORT_SYMBOL_GPL(mlx5_query_port_pfc);
int mlx5_max_tc(struct mlx5_core_dev *mdev)
{
@@ -667,7 +657,6 @@ int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc)
return 0;
}
-EXPORT_SYMBOL_GPL(mlx5_set_port_prio_tc);
int mlx5_query_port_prio_tc(struct mlx5_core_dev *mdev,
u8 prio, u8 *tc)
@@ -689,7 +678,6 @@ int mlx5_query_port_prio_tc(struct mlx5_core_dev *mdev,
return err;
}
-EXPORT_SYMBOL_GPL(mlx5_query_port_prio_tc);
static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in,
int inlen)
@@ -728,7 +716,6 @@ int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group)
return mlx5_set_port_qetcr_reg(mdev, in, sizeof(in));
}
-EXPORT_SYMBOL_GPL(mlx5_set_port_tc_group);
int mlx5_query_port_tc_group(struct mlx5_core_dev *mdev,
u8 tc, u8 *tc_group)
@@ -749,7 +736,6 @@ int mlx5_query_port_tc_group(struct mlx5_core_dev *mdev,
return 0;
}
-EXPORT_SYMBOL_GPL(mlx5_query_port_tc_group);
int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw)
{
@@ -763,7 +749,6 @@ int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw)
return mlx5_set_port_qetcr_reg(mdev, in, sizeof(in));
}
-EXPORT_SYMBOL_GPL(mlx5_set_port_tc_bw_alloc);
int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev,
u8 tc, u8 *bw_pct)
@@ -784,7 +769,6 @@ int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev,
return 0;
}
-EXPORT_SYMBOL_GPL(mlx5_query_port_tc_bw_alloc);
int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev,
u8 *max_bw_value,
@@ -808,7 +792,6 @@ int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev,
return mlx5_set_port_qetcr_reg(mdev, in, sizeof(in));
}
-EXPORT_SYMBOL_GPL(mlx5_modify_port_ets_rate_limit);
int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev,
u8 *max_bw_value,
@@ -834,7 +817,6 @@ int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev,
return 0;
}
-EXPORT_SYMBOL_GPL(mlx5_query_port_ets_rate_limit);
int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode)
{
@@ -845,7 +827,6 @@ int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode)
MLX5_SET(set_wol_rol_in, in, wol_mode, wol_mode);
return mlx5_cmd_exec_in(mdev, set_wol_rol, in);
}
-EXPORT_SYMBOL_GPL(mlx5_set_port_wol);
int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode)
{
@@ -860,7 +841,6 @@ int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode)
return err;
}
-EXPORT_SYMBOL_GPL(mlx5_query_port_wol);
int mlx5_query_ports_check(struct mlx5_core_dev *mdev, u32 *out, int outlen)
{
@@ -1058,53 +1038,57 @@ out:
}
/* speed in units of 1Mb */
-static const u32 mlx5e_link_speed[MLX5E_LINK_MODES_NUMBER] = {
- [MLX5E_1000BASE_CX_SGMII] = 1000,
- [MLX5E_1000BASE_KX] = 1000,
- [MLX5E_10GBASE_CX4] = 10000,
- [MLX5E_10GBASE_KX4] = 10000,
- [MLX5E_10GBASE_KR] = 10000,
- [MLX5E_20GBASE_KR2] = 20000,
- [MLX5E_40GBASE_CR4] = 40000,
- [MLX5E_40GBASE_KR4] = 40000,
- [MLX5E_56GBASE_R4] = 56000,
- [MLX5E_10GBASE_CR] = 10000,
- [MLX5E_10GBASE_SR] = 10000,
- [MLX5E_10GBASE_ER] = 10000,
- [MLX5E_40GBASE_SR4] = 40000,
- [MLX5E_40GBASE_LR4] = 40000,
- [MLX5E_50GBASE_SR2] = 50000,
- [MLX5E_100GBASE_CR4] = 100000,
- [MLX5E_100GBASE_SR4] = 100000,
- [MLX5E_100GBASE_KR4] = 100000,
- [MLX5E_100GBASE_LR4] = 100000,
- [MLX5E_100BASE_TX] = 100,
- [MLX5E_1000BASE_T] = 1000,
- [MLX5E_10GBASE_T] = 10000,
- [MLX5E_25GBASE_CR] = 25000,
- [MLX5E_25GBASE_KR] = 25000,
- [MLX5E_25GBASE_SR] = 25000,
- [MLX5E_50GBASE_CR2] = 50000,
- [MLX5E_50GBASE_KR2] = 50000,
+static const struct mlx5_link_info mlx5e_link_info[MLX5E_LINK_MODES_NUMBER] = {
+ [MLX5E_1000BASE_CX_SGMII] = {.speed = 1000, .lanes = 1},
+ [MLX5E_1000BASE_KX] = {.speed = 1000, .lanes = 1},
+ [MLX5E_10GBASE_CX4] = {.speed = 10000, .lanes = 4},
+ [MLX5E_10GBASE_KX4] = {.speed = 10000, .lanes = 4},
+ [MLX5E_10GBASE_KR] = {.speed = 10000, .lanes = 1},
+ [MLX5E_20GBASE_KR2] = {.speed = 20000, .lanes = 2},
+ [MLX5E_40GBASE_CR4] = {.speed = 40000, .lanes = 4},
+ [MLX5E_40GBASE_KR4] = {.speed = 40000, .lanes = 4},
+ [MLX5E_56GBASE_R4] = {.speed = 56000, .lanes = 4},
+ [MLX5E_10GBASE_CR] = {.speed = 10000, .lanes = 1},
+ [MLX5E_10GBASE_SR] = {.speed = 10000, .lanes = 1},
+ [MLX5E_10GBASE_ER] = {.speed = 10000, .lanes = 1},
+ [MLX5E_40GBASE_SR4] = {.speed = 40000, .lanes = 4},
+ [MLX5E_40GBASE_LR4] = {.speed = 40000, .lanes = 4},
+ [MLX5E_50GBASE_SR2] = {.speed = 50000, .lanes = 2},
+ [MLX5E_100GBASE_CR4] = {.speed = 100000, .lanes = 4},
+ [MLX5E_100GBASE_SR4] = {.speed = 100000, .lanes = 4},
+ [MLX5E_100GBASE_KR4] = {.speed = 100000, .lanes = 4},
+ [MLX5E_100GBASE_LR4] = {.speed = 100000, .lanes = 4},
+ [MLX5E_100BASE_TX] = {.speed = 100, .lanes = 1},
+ [MLX5E_1000BASE_T] = {.speed = 1000, .lanes = 1},
+ [MLX5E_10GBASE_T] = {.speed = 10000, .lanes = 1},
+ [MLX5E_25GBASE_CR] = {.speed = 25000, .lanes = 1},
+ [MLX5E_25GBASE_KR] = {.speed = 25000, .lanes = 1},
+ [MLX5E_25GBASE_SR] = {.speed = 25000, .lanes = 1},
+ [MLX5E_50GBASE_CR2] = {.speed = 50000, .lanes = 2},
+ [MLX5E_50GBASE_KR2] = {.speed = 50000, .lanes = 2},
};
-static const u32 mlx5e_ext_link_speed[MLX5E_EXT_LINK_MODES_NUMBER] = {
- [MLX5E_SGMII_100M] = 100,
- [MLX5E_1000BASE_X_SGMII] = 1000,
- [MLX5E_5GBASE_R] = 5000,
- [MLX5E_10GBASE_XFI_XAUI_1] = 10000,
- [MLX5E_40GBASE_XLAUI_4_XLPPI_4] = 40000,
- [MLX5E_25GAUI_1_25GBASE_CR_KR] = 25000,
- [MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2] = 50000,
- [MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR] = 50000,
- [MLX5E_CAUI_4_100GBASE_CR4_KR4] = 100000,
- [MLX5E_100GAUI_2_100GBASE_CR2_KR2] = 100000,
- [MLX5E_200GAUI_4_200GBASE_CR4_KR4] = 200000,
- [MLX5E_400GAUI_8_400GBASE_CR8] = 400000,
- [MLX5E_100GAUI_1_100GBASE_CR_KR] = 100000,
- [MLX5E_200GAUI_2_200GBASE_CR2_KR2] = 200000,
- [MLX5E_400GAUI_4_400GBASE_CR4_KR4] = 400000,
- [MLX5E_800GAUI_8_800GBASE_CR8_KR8] = 800000,
+static const struct mlx5_link_info
+mlx5e_ext_link_info[MLX5E_EXT_LINK_MODES_NUMBER] = {
+ [MLX5E_SGMII_100M] = {.speed = 100, .lanes = 1},
+ [MLX5E_1000BASE_X_SGMII] = {.speed = 1000, .lanes = 1},
+ [MLX5E_5GBASE_R] = {.speed = 5000, .lanes = 1},
+ [MLX5E_10GBASE_XFI_XAUI_1] = {.speed = 10000, .lanes = 1},
+ [MLX5E_40GBASE_XLAUI_4_XLPPI_4] = {.speed = 40000, .lanes = 4},
+ [MLX5E_25GAUI_1_25GBASE_CR_KR] = {.speed = 25000, .lanes = 1},
+ [MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2] = {.speed = 50000, .lanes = 2},
+ [MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR] = {.speed = 50000, .lanes = 1},
+ [MLX5E_CAUI_4_100GBASE_CR4_KR4] = {.speed = 100000, .lanes = 4},
+ [MLX5E_100GAUI_2_100GBASE_CR2_KR2] = {.speed = 100000, .lanes = 2},
+ [MLX5E_200GAUI_4_200GBASE_CR4_KR4] = {.speed = 200000, .lanes = 4},
+ [MLX5E_400GAUI_8_400GBASE_CR8] = {.speed = 400000, .lanes = 8},
+ [MLX5E_100GAUI_1_100GBASE_CR_KR] = {.speed = 100000, .lanes = 1},
+ [MLX5E_200GAUI_2_200GBASE_CR2_KR2] = {.speed = 200000, .lanes = 2},
+ [MLX5E_400GAUI_4_400GBASE_CR4_KR4] = {.speed = 400000, .lanes = 4},
+ [MLX5E_800GAUI_8_800GBASE_CR8_KR8] = {.speed = 800000, .lanes = 8},
+ [MLX5E_200GAUI_1_200GBASE_CR1_KR1] = {.speed = 200000, .lanes = 1},
+ [MLX5E_400GAUI_2_400GBASE_CR2_KR2] = {.speed = 400000, .lanes = 2},
+ [MLX5E_800GAUI_4_800GBASE_CR4_KR4] = {.speed = 800000, .lanes = 4},
};
int mlx5_port_query_eth_proto(struct mlx5_core_dev *dev, u8 port, bool ext,
@@ -1142,54 +1126,61 @@ bool mlx5_ptys_ext_supported(struct mlx5_core_dev *mdev)
return !!eproto.cap;
}
-static void mlx5e_port_get_speed_arr(struct mlx5_core_dev *mdev,
- const u32 **arr, u32 *size,
- bool force_legacy)
+static void mlx5e_port_get_link_mode_info_arr(struct mlx5_core_dev *mdev,
+ const struct mlx5_link_info **arr,
+ u32 *size,
+ bool force_legacy)
{
bool ext = force_legacy ? false : mlx5_ptys_ext_supported(mdev);
- *size = ext ? ARRAY_SIZE(mlx5e_ext_link_speed) :
- ARRAY_SIZE(mlx5e_link_speed);
- *arr = ext ? mlx5e_ext_link_speed : mlx5e_link_speed;
+ *size = ext ? ARRAY_SIZE(mlx5e_ext_link_info) :
+ ARRAY_SIZE(mlx5e_link_info);
+ *arr = ext ? mlx5e_ext_link_info : mlx5e_link_info;
}
-u32 mlx5_port_ptys2speed(struct mlx5_core_dev *mdev, u32 eth_proto_oper,
- bool force_legacy)
+const struct mlx5_link_info *mlx5_port_ptys2info(struct mlx5_core_dev *mdev,
+ u32 eth_proto_oper,
+ bool force_legacy)
{
unsigned long temp = eth_proto_oper;
- const u32 *table;
- u32 speed = 0;
+ const struct mlx5_link_info *table;
u32 max_size;
int i;
- mlx5e_port_get_speed_arr(mdev, &table, &max_size, force_legacy);
+ mlx5e_port_get_link_mode_info_arr(mdev, &table, &max_size,
+ force_legacy);
i = find_first_bit(&temp, max_size);
if (i < max_size)
- speed = table[i];
- return speed;
+ return &table[i];
+
+ return NULL;
}
-u32 mlx5_port_speed2linkmodes(struct mlx5_core_dev *mdev, u32 speed,
- bool force_legacy)
+u32 mlx5_port_info2linkmodes(struct mlx5_core_dev *mdev,
+ struct mlx5_link_info *info,
+ bool force_legacy)
{
+ const struct mlx5_link_info *table;
u32 link_modes = 0;
- const u32 *table;
u32 max_size;
int i;
- mlx5e_port_get_speed_arr(mdev, &table, &max_size, force_legacy);
+ mlx5e_port_get_link_mode_info_arr(mdev, &table, &max_size,
+ force_legacy);
for (i = 0; i < max_size; ++i) {
- if (table[i] == speed)
- link_modes |= MLX5E_PROT_MASK(i);
+ if (table[i].speed == info->speed) {
+ if (!info->lanes || table[i].lanes == info->lanes)
+ link_modes |= MLX5E_PROT_MASK(i);
+ }
}
return link_modes;
}
int mlx5_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
{
+ const struct mlx5_link_info *table;
struct mlx5_port_eth_proto eproto;
u32 max_speed = 0;
- const u32 *table;
u32 max_size;
bool ext;
int err;
@@ -1200,10 +1191,10 @@ int mlx5_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
if (err)
return err;
- mlx5e_port_get_speed_arr(mdev, &table, &max_size, false);
+ mlx5e_port_get_link_mode_info_arr(mdev, &table, &max_size, false);
for (i = 0; i < max_size; ++i)
if (eproto.cap & MLX5E_PROT_MASK(i))
- max_speed = max(max_speed, table[i]);
+ max_speed = max(max_speed, table[i].speed);
*speed = max_speed;
return 0;
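
Callers of the reworked PTYS helpers must handle a NULL return (unknown protocol bit) and may leave lanes at 0 to match on speed alone; a usage sketch under those assumptions (mlx5_report_oper_link is a hypothetical helper):

static void mlx5_report_oper_link(struct mlx5_core_dev *mdev, u32 eth_proto_oper)
{
	const struct mlx5_link_info *info;

	info = mlx5_port_ptys2info(mdev, eth_proto_oper, false);
	if (!info) {
		mlx5_core_dbg(mdev, "unknown eth_proto_oper 0x%x\n", eth_proto_oper);
		return;
	}
	mlx5_core_dbg(mdev, "link up: %u Mbps over %u lane(s)\n",
		      info->speed, info->lanes);
}
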
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
index a42f6cd99b74..5c552b71e371 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
@@ -118,8 +118,8 @@ static void mlx5_rdma_make_default_gid(struct mlx5_core_dev *dev, union ib_gid *
static int mlx5_rdma_add_roce_addr(struct mlx5_core_dev *dev)
{
+ u8 mac[ETH_ALEN] = {};
union ib_gid gid;
- u8 mac[ETH_ALEN];
mlx5_rdma_make_default_gid(dev, &gid);
return mlx5_core_roce_gid_set(dev, 0,
@@ -140,17 +140,17 @@ void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev)
mlx5_nic_vport_disable_roce(dev);
}
-void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev)
+int mlx5_rdma_enable_roce(struct mlx5_core_dev *dev)
{
int err;
if (!MLX5_CAP_GEN(dev, roce))
- return;
+ return 0;
err = mlx5_nic_vport_enable_roce(dev);
if (err) {
mlx5_core_err(dev, "Failed to enable RoCE: %d\n", err);
- return;
+ return err;
}
err = mlx5_rdma_add_roce_addr(dev);
@@ -165,10 +165,11 @@ void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev)
goto del_roce_addr;
}
- return;
+ return err;
del_roce_addr:
mlx5_rdma_del_roce_addr(dev);
disable_roce:
mlx5_nic_vport_disable_roce(dev);
+ return err;
}
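
With mlx5_rdma_enable_roce() now returning an error code, its caller is expected to fail the load path on error; a sketch of the new contract (mlx5_load_rdma is an illustrative call site, not from this patch):

static int mlx5_load_rdma(struct mlx5_core_dev *dev)
{
	int err;

	err = mlx5_rdma_enable_roce(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to enable RoCE %d\n", err);
		return err;
	}

	return 0;
}
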
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rdma.h b/drivers/net/ethernet/mellanox/mlx5/core/rdma.h
index 750cff2a71a4..3d9e76c3d42f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/rdma.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/rdma.h
@@ -8,12 +8,12 @@
#ifdef CONFIG_MLX5_ESWITCH
-void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev);
+int mlx5_rdma_enable_roce(struct mlx5_core_dev *dev);
void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev);
#else /* CONFIG_MLX5_ESWITCH */
-static inline void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev) {}
+static inline int mlx5_rdma_enable_roce(struct mlx5_core_dev *dev) { return 0; }
static inline void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev) {}
#endif /* CONFIG_MLX5_ESWITCH */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rl.c b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
index e393391966e0..39a209b9b684 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/rl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
@@ -56,6 +56,8 @@ bool mlx5_qos_tsar_type_supported(struct mlx5_core_dev *dev, int type, u8 hierar
return cap & TSAR_TYPE_CAP_MASK_ROUND_ROBIN;
case TSAR_ELEMENT_TSAR_TYPE_ETS:
return cap & TSAR_TYPE_CAP_MASK_ETS;
+ case TSAR_ELEMENT_TSAR_TYPE_TC_ARB:
+ return cap & TSAR_TYPE_CAP_MASK_TC_ARB;
}
return false;
@@ -87,6 +89,8 @@ bool mlx5_qos_element_type_supported(struct mlx5_core_dev *dev, int type, u8 hie
return cap & ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC;
case SCHEDULING_CONTEXT_ELEMENT_TYPE_QUEUE_GROUP:
return cap & ELEMENT_TYPE_CAP_MASK_QUEUE_GROUP;
+ case SCHEDULING_CONTEXT_ELEMENT_TYPE_RATE_LIMIT:
+ return cap & ELEMENT_TYPE_CAP_MASK_RATE_LIMIT;
}
return false;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
index a96be98be032..0864ba625c07 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
@@ -257,6 +257,7 @@ static int mlx5_sf_add(struct mlx5_core_dev *dev, struct mlx5_sf_table *table,
return 0;
esw_err:
+ mlx5_sf_function_id_erase(table, sf);
mlx5_sf_free(table, sf);
return err;
}
@@ -284,7 +285,7 @@ mlx5_sf_new_check_attr(struct mlx5_core_dev *dev, const struct devlink_port_new_
NL_SET_ERR_MSG_MOD(extack, "External controller is unsupported");
return -EOPNOTSUPP;
}
- if (new_attr->pfnum != mlx5_get_dev_index(dev)) {
+ if (new_attr->pfnum != PCI_FUNC(dev->pdev->devfn)) {
NL_SET_ERR_MSG_MOD(extack, "Invalid pfnum supplied");
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c
index a897cdc60fdb..447ea3f8722c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c
@@ -11,31 +11,29 @@
/* This is the longest supported action sequence for FDB table:
* DECAP, POP_VLAN, MODIFY, CTR, ASO, PUSH_VLAN, MODIFY, ENCAP, Term.
*/
-static const u32 action_order_arr[MLX5HWS_TABLE_TYPE_MAX][MLX5HWS_ACTION_TYP_MAX] = {
- [MLX5HWS_TABLE_TYPE_FDB] = {
- BIT(MLX5HWS_ACTION_TYP_REMOVE_HEADER) |
- BIT(MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2) |
- BIT(MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2),
- BIT(MLX5HWS_ACTION_TYP_POP_VLAN),
- BIT(MLX5HWS_ACTION_TYP_POP_VLAN),
- BIT(MLX5HWS_ACTION_TYP_MODIFY_HDR),
- BIT(MLX5HWS_ACTION_TYP_PUSH_VLAN),
- BIT(MLX5HWS_ACTION_TYP_PUSH_VLAN),
- BIT(MLX5HWS_ACTION_TYP_INSERT_HEADER) |
- BIT(MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2) |
- BIT(MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3),
- BIT(MLX5HWS_ACTION_TYP_CTR),
- BIT(MLX5HWS_ACTION_TYP_TAG),
- BIT(MLX5HWS_ACTION_TYP_ASO_METER),
- BIT(MLX5HWS_ACTION_TYP_MODIFY_HDR),
- BIT(MLX5HWS_ACTION_TYP_TBL) |
- BIT(MLX5HWS_ACTION_TYP_VPORT) |
- BIT(MLX5HWS_ACTION_TYP_DROP) |
- BIT(MLX5HWS_ACTION_TYP_SAMPLER) |
- BIT(MLX5HWS_ACTION_TYP_RANGE) |
- BIT(MLX5HWS_ACTION_TYP_DEST_ARRAY),
- BIT(MLX5HWS_ACTION_TYP_LAST),
- },
+static const u32 action_order_arr[MLX5HWS_ACTION_TYP_MAX] = {
+ BIT(MLX5HWS_ACTION_TYP_REMOVE_HEADER) |
+ BIT(MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2) |
+ BIT(MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2),
+ BIT(MLX5HWS_ACTION_TYP_POP_VLAN),
+ BIT(MLX5HWS_ACTION_TYP_POP_VLAN),
+ BIT(MLX5HWS_ACTION_TYP_MODIFY_HDR),
+ BIT(MLX5HWS_ACTION_TYP_PUSH_VLAN),
+ BIT(MLX5HWS_ACTION_TYP_PUSH_VLAN),
+ BIT(MLX5HWS_ACTION_TYP_INSERT_HEADER) |
+ BIT(MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2) |
+ BIT(MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3),
+ BIT(MLX5HWS_ACTION_TYP_CTR),
+ BIT(MLX5HWS_ACTION_TYP_TAG),
+ BIT(MLX5HWS_ACTION_TYP_ASO_METER),
+ BIT(MLX5HWS_ACTION_TYP_MODIFY_HDR),
+ BIT(MLX5HWS_ACTION_TYP_TBL) |
+ BIT(MLX5HWS_ACTION_TYP_VPORT) |
+ BIT(MLX5HWS_ACTION_TYP_DROP) |
+ BIT(MLX5HWS_ACTION_TYP_SAMPLER) |
+ BIT(MLX5HWS_ACTION_TYP_RANGE) |
+ BIT(MLX5HWS_ACTION_TYP_DEST_ARRAY),
+ BIT(MLX5HWS_ACTION_TYP_LAST),
};
static const char * const mlx5hws_action_type_str[] = {
@@ -74,6 +72,11 @@ enum mlx5hws_action_type mlx5hws_action_get_type(struct mlx5hws_action *action)
return action->type;
}
+struct mlx5_core_dev *mlx5hws_action_get_dev(struct mlx5hws_action *action)
+{
+ return action->ctx->mdev;
+}
+
static int hws_action_get_shared_stc_nic(struct mlx5hws_context *ctx,
enum mlx5hws_context_shared_stc_type stc_type,
u8 tbl_type)
@@ -83,8 +86,8 @@ static int hws_action_get_shared_stc_nic(struct mlx5hws_context *ctx,
int ret;
mutex_lock(&ctx->ctrl_lock);
- if (ctx->common_res[tbl_type].shared_stc[stc_type]) {
- ctx->common_res[tbl_type].shared_stc[stc_type]->refcount++;
+ if (ctx->common_res.shared_stc[stc_type]) {
+ ctx->common_res.shared_stc[stc_type]->refcount++;
mutex_unlock(&ctx->ctrl_lock);
return 0;
}
@@ -124,8 +127,8 @@ static int hws_action_get_shared_stc_nic(struct mlx5hws_context *ctx,
goto free_shared_stc;
}
- ctx->common_res[tbl_type].shared_stc[stc_type] = shared_stc;
- ctx->common_res[tbl_type].shared_stc[stc_type]->refcount = 1;
+ ctx->common_res.shared_stc[stc_type] = shared_stc;
+ ctx->common_res.shared_stc[stc_type]->refcount = 1;
mutex_unlock(&ctx->ctrl_lock);
@@ -178,16 +181,16 @@ static void hws_action_put_shared_stc(struct mlx5hws_action *action,
}
mutex_lock(&ctx->ctrl_lock);
- if (--ctx->common_res[tbl_type].shared_stc[stc_type]->refcount) {
+ if (--ctx->common_res.shared_stc[stc_type]->refcount) {
mutex_unlock(&ctx->ctrl_lock);
return;
}
- shared_stc = ctx->common_res[tbl_type].shared_stc[stc_type];
+ shared_stc = ctx->common_res.shared_stc[stc_type];
mlx5hws_action_free_single_stc(ctx, tbl_type, &shared_stc->stc_chunk);
kfree(shared_stc);
- ctx->common_res[tbl_type].shared_stc[stc_type] = NULL;
+ ctx->common_res.shared_stc[stc_type] = NULL;
mutex_unlock(&ctx->ctrl_lock);
}
@@ -206,10 +209,10 @@ bool mlx5hws_action_check_combo(struct mlx5hws_context *ctx,
enum mlx5hws_action_type *user_actions,
enum mlx5hws_table_type table_type)
{
- const u32 *order_arr = action_order_arr[table_type];
+ const u32 *order_arr = action_order_arr;
+ bool valid_combo;
u8 order_idx = 0;
u8 user_idx = 0;
- bool valid_combo;
if (table_type >= MLX5HWS_TABLE_TYPE_MAX) {
mlx5hws_err(ctx, "Invalid table_type %d", table_type);
@@ -240,6 +243,7 @@ hws_action_fixup_stc_attr(struct mlx5hws_context *ctx,
enum mlx5hws_table_type table_type,
bool is_mirror)
{
+ struct mlx5hws_pool *pool;
bool use_fixup = false;
u32 fw_tbl_type;
u32 base_id;
@@ -255,13 +259,11 @@ hws_action_fixup_stc_attr(struct mlx5hws_context *ctx,
use_fixup = true;
break;
}
+ pool = stc_attr->ste_table.ste_pool;
if (!is_mirror)
- base_id = mlx5hws_pool_chunk_get_base_id(stc_attr->ste_table.ste_pool,
- &stc_attr->ste_table.ste);
+ base_id = mlx5hws_pool_get_base_id(pool);
else
- base_id =
- mlx5hws_pool_chunk_get_base_mirror_id(stc_attr->ste_table.ste_pool,
- &stc_attr->ste_table.ste);
+ base_id = mlx5hws_pool_get_base_mirror_id(pool);
*fixup_stc_attr = *stc_attr;
fixup_stc_attr->ste_table.ste_obj_id = base_id;
@@ -321,8 +323,8 @@ int mlx5hws_action_alloc_single_stc(struct mlx5hws_context *ctx,
__must_hold(&ctx->ctrl_lock)
{
struct mlx5hws_cmd_stc_modify_attr cleanup_stc_attr = {0};
- struct mlx5hws_pool *stc_pool = ctx->stc_pool[table_type];
struct mlx5hws_cmd_stc_modify_attr fixup_stc_attr = {0};
+ struct mlx5hws_pool *stc_pool = ctx->stc_pool;
bool use_fixup;
u32 obj_0_id;
int ret;
@@ -339,7 +341,7 @@ __must_hold(&ctx->ctrl_lock)
if (!mlx5hws_context_cap_dynamic_reparse(ctx))
stc_attr->reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
- obj_0_id = mlx5hws_pool_chunk_get_base_id(stc_pool, stc);
+ obj_0_id = mlx5hws_pool_get_base_id(stc_pool);
/* According to table/action limitation change the stc_attr */
use_fixup = hws_action_fixup_stc_attr(ctx, stc_attr, &fixup_stc_attr, table_type, false);
@@ -355,7 +357,7 @@ __must_hold(&ctx->ctrl_lock)
if (table_type == MLX5HWS_TABLE_TYPE_FDB) {
u32 obj_1_id;
- obj_1_id = mlx5hws_pool_chunk_get_base_mirror_id(stc_pool, stc);
+ obj_1_id = mlx5hws_pool_get_base_mirror_id(stc_pool);
use_fixup = hws_action_fixup_stc_attr(ctx, stc_attr,
&fixup_stc_attr,
@@ -387,19 +389,19 @@ void mlx5hws_action_free_single_stc(struct mlx5hws_context *ctx,
struct mlx5hws_pool_chunk *stc)
__must_hold(&ctx->ctrl_lock)
{
- struct mlx5hws_pool *stc_pool = ctx->stc_pool[table_type];
struct mlx5hws_cmd_stc_modify_attr stc_attr = {0};
+ struct mlx5hws_pool *stc_pool = ctx->stc_pool;
u32 obj_id;
/* Modify the STC not to point to an object */
stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_DROP;
stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_HIT;
stc_attr.stc_offset = stc->offset;
- obj_id = mlx5hws_pool_chunk_get_base_id(stc_pool, stc);
+ obj_id = mlx5hws_pool_get_base_id(stc_pool);
mlx5hws_cmd_stc_modify(ctx->mdev, obj_id, &stc_attr);
if (table_type == MLX5HWS_TABLE_TYPE_FDB) {
- obj_id = mlx5hws_pool_chunk_get_base_mirror_id(stc_pool, stc);
+ obj_id = mlx5hws_pool_get_base_mirror_id(stc_pool);
mlx5hws_cmd_stc_modify(ctx->mdev, obj_id, &stc_attr);
}
@@ -473,6 +475,7 @@ static void hws_action_fill_stc_attr(struct mlx5hws_action *action,
break;
case MLX5HWS_ACTION_TYP_TBL:
case MLX5HWS_ACTION_TYP_DEST_ARRAY:
+ case MLX5HWS_ACTION_TYP_SAMPLER:
attr->action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_FT;
attr->action_offset = MLX5HWS_ACTION_OFFSET_HIT;
attr->dest_table_id = obj_id;
@@ -561,7 +564,7 @@ hws_action_create_stcs(struct mlx5hws_action *action, u32 obj_id)
if (action->flags & MLX5HWS_ACTION_FLAG_HWS_FDB) {
ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr,
MLX5HWS_TABLE_TYPE_FDB,
- &action->stc[MLX5HWS_TABLE_TYPE_FDB]);
+ &action->stc);
if (ret)
goto out_err;
}
@@ -585,7 +588,7 @@ hws_action_destroy_stcs(struct mlx5hws_action *action)
if (action->flags & MLX5HWS_ACTION_FLAG_HWS_FDB)
mlx5hws_action_free_single_stc(ctx, MLX5HWS_TABLE_TYPE_FDB,
- &action->stc[MLX5HWS_TABLE_TYPE_FDB]);
+ &action->stc);
mutex_unlock(&ctx->ctrl_lock);
}
@@ -1187,14 +1190,15 @@ hws_action_create_modify_header_hws(struct mlx5hws_action *action,
struct mlx5hws_action_mh_pattern *pattern,
u32 log_bulk_size)
{
+ u16 num_actions, max_mh_actions = 0, hw_max_actions;
struct mlx5hws_context *ctx = action->ctx;
- u16 num_actions, max_mh_actions = 0;
int i, ret, size_in_bytes;
u32 pat_id, arg_id = 0;
__be64 *new_pattern;
size_t pat_max_sz;
pat_max_sz = MLX5HWS_ARG_CHUNK_SIZE_MAX * MLX5HWS_ARG_DATA_SIZE;
+ hw_max_actions = pat_max_sz / MLX5HWS_MODIFY_ACTION_SIZE;
size_in_bytes = pat_max_sz * sizeof(__be64);
new_pattern = kcalloc(num_of_patterns, size_in_bytes, GFP_KERNEL);
if (!new_pattern)
@@ -1204,16 +1208,20 @@ hws_action_create_modify_header_hws(struct mlx5hws_action *action,
for (i = 0; i < num_of_patterns; i++) {
size_t new_num_actions;
size_t cur_num_actions;
- u32 nope_location;
+ u32 nop_locations;
cur_num_actions = pattern[i].sz / MLX5HWS_MODIFY_ACTION_SIZE;
- mlx5hws_pat_calc_nope(pattern[i].data, cur_num_actions,
- pat_max_sz / MLX5HWS_MODIFY_ACTION_SIZE,
- &new_num_actions, &nope_location,
- &new_pattern[i * pat_max_sz]);
+ ret = mlx5hws_pat_calc_nop(pattern[i].data, cur_num_actions,
+ hw_max_actions, &new_num_actions,
+ &nop_locations,
+ &new_pattern[i * pat_max_sz]);
+ if (ret) {
+ mlx5hws_err(ctx, "Too many actions after nop insertion\n");
+ goto free_new_pat;
+ }
- action[i].modify_header.nope_locations = nope_location;
+ action[i].modify_header.nop_locations = nop_locations;
action[i].modify_header.num_of_actions = new_num_actions;
max_mh_actions = max(max_mh_actions, new_num_actions);
@@ -1260,7 +1268,7 @@ hws_action_create_modify_header_hws(struct mlx5hws_action *action,
MLX5_GET(set_action_in, pattern[i].data, action_type);
} else {
/* Multiple modify actions require a pattern */
- if (unlikely(action[i].modify_header.nope_locations)) {
+ if (unlikely(action[i].modify_header.nop_locations)) {
size_t pattern_sz;
pattern_sz = action[i].modify_header.num_of_actions *
@@ -1362,8 +1370,8 @@ mlx5hws_action_create_dest_array(struct mlx5hws_context *ctx,
struct mlx5hws_cmd_set_fte_attr fte_attr = {0};
struct mlx5hws_cmd_forward_tbl *fw_island;
struct mlx5hws_action *action;
- u32 i /*, packet_reformat_id*/;
- int ret;
+ int ret, last_dest_idx = -1;
+ u32 i;
if (num_dest <= 1) {
mlx5hws_err(ctx, "Action must have multiple dests\n");
@@ -1393,11 +1401,8 @@ mlx5hws_action_create_dest_array(struct mlx5hws_context *ctx,
dest_list[i].destination_id = dests[i].dest->dest_obj.obj_id;
fte_attr.action_flags |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
fte_attr.ignore_flow_level = ignore_flow_level;
- /* ToDo: In SW steering we have a handling of 'go to WIRE'
- * destination here by upper layer setting 'is_wire_ft' flag
- * if the destination is wire.
- * This is because uplink should be last dest in the list.
- */
+ if (dests[i].is_wire_ft)
+ last_dest_idx = i;
break;
case MLX5HWS_ACTION_TYP_VPORT:
dest_list[i].destination_type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
@@ -1421,6 +1426,9 @@ mlx5hws_action_create_dest_array(struct mlx5hws_context *ctx,
}
}
+ if (last_dest_idx != -1)
+ swap(dest_list[last_dest_idx], dest_list[num_dest - 1]);
+
fte_attr.dests_num = num_dest;
fte_attr.dests = dest_list;
@@ -1576,17 +1584,15 @@ hws_action_create_dest_match_range_definer(struct mlx5hws_context *ctx)
return definer;
}
-static struct mlx5hws_matcher_action_ste *
+static struct mlx5hws_range_action_table *
hws_action_create_dest_match_range_table(struct mlx5hws_context *ctx,
struct mlx5hws_definer *definer,
u32 miss_ft_id)
{
struct mlx5hws_cmd_rtc_create_attr rtc_attr = {0};
- struct mlx5hws_action_default_stc *default_stc;
- struct mlx5hws_matcher_action_ste *table_ste;
+ struct mlx5hws_range_action_table *table_ste;
struct mlx5hws_pool_attr pool_attr = {0};
struct mlx5hws_pool *ste_pool, *stc_pool;
- struct mlx5hws_pool_chunk *ste;
u32 *rtc_0_id, *rtc_1_id;
u32 obj_id;
int ret;
@@ -1605,7 +1611,6 @@ hws_action_create_dest_match_range_table(struct mlx5hws_context *ctx,
pool_attr.table_type = MLX5HWS_TABLE_TYPE_FDB;
pool_attr.pool_type = MLX5HWS_POOL_TYPE_STE;
- pool_attr.flags = MLX5HWS_POOL_FLAGS_FOR_STE_ACTION_POOL;
pool_attr.alloc_log_sz = 1;
table_ste->pool = mlx5hws_pool_create(ctx, &pool_attr);
if (!table_ste->pool) {
@@ -1617,8 +1622,6 @@ hws_action_create_dest_match_range_table(struct mlx5hws_context *ctx,
rtc_0_id = &table_ste->rtc_0_id;
rtc_1_id = &table_ste->rtc_1_id;
ste_pool = table_ste->pool;
- ste = &table_ste->ste;
- ste->order = 1;
rtc_attr.log_size = 0;
rtc_attr.log_depth = 0;
@@ -1630,18 +1633,16 @@ hws_action_create_dest_match_range_table(struct mlx5hws_context *ctx,
rtc_attr.fw_gen_wqe = true;
rtc_attr.is_scnd_range = true;
- obj_id = mlx5hws_pool_chunk_get_base_id(ste_pool, ste);
+ obj_id = mlx5hws_pool_get_base_id(ste_pool);
rtc_attr.pd = ctx->pd_num;
rtc_attr.ste_base = obj_id;
- rtc_attr.ste_offset = ste->offset;
rtc_attr.reparse_mode = mlx5hws_context_get_reparse_mode(ctx);
rtc_attr.table_type = mlx5hws_table_get_res_fw_ft_type(MLX5HWS_TABLE_TYPE_FDB, false);
/* STC is a single resource (obj_id), use any STC for the ID */
- stc_pool = ctx->stc_pool[MLX5HWS_TABLE_TYPE_FDB];
- default_stc = ctx->common_res[MLX5HWS_TABLE_TYPE_FDB].default_stc;
- obj_id = mlx5hws_pool_chunk_get_base_id(stc_pool, &default_stc->default_hit);
+ stc_pool = ctx->stc_pool;
+ obj_id = mlx5hws_pool_get_base_id(stc_pool);
rtc_attr.stc_base = obj_id;
ret = mlx5hws_cmd_rtc_create(ctx->mdev, &rtc_attr, rtc_0_id);
@@ -1651,11 +1652,11 @@ hws_action_create_dest_match_range_table(struct mlx5hws_context *ctx,
}
/* Create mirror RTC */
- obj_id = mlx5hws_pool_chunk_get_base_mirror_id(ste_pool, ste);
+ obj_id = mlx5hws_pool_get_base_mirror_id(ste_pool);
rtc_attr.ste_base = obj_id;
rtc_attr.table_type = mlx5hws_table_get_res_fw_ft_type(MLX5HWS_TABLE_TYPE_FDB, true);
- obj_id = mlx5hws_pool_chunk_get_base_mirror_id(stc_pool, &default_stc->default_hit);
+ obj_id = mlx5hws_pool_get_base_mirror_id(stc_pool);
rtc_attr.stc_base = obj_id;
ret = mlx5hws_cmd_rtc_create(ctx->mdev, &rtc_attr, rtc_1_id);
@@ -1678,9 +1679,9 @@ free_ste:
return NULL;
}
-static void
-hws_action_destroy_dest_match_range_table(struct mlx5hws_context *ctx,
- struct mlx5hws_matcher_action_ste *table_ste)
+static void hws_action_destroy_dest_match_range_table(
+ struct mlx5hws_context *ctx,
+ struct mlx5hws_range_action_table *table_ste)
{
mutex_lock(&ctx->ctrl_lock);
@@ -1692,12 +1693,11 @@ hws_action_destroy_dest_match_range_table(struct mlx5hws_context *ctx,
mutex_unlock(&ctx->ctrl_lock);
}
-static int
-hws_action_create_dest_match_range_fill_table(struct mlx5hws_context *ctx,
- struct mlx5hws_matcher_action_ste *table_ste,
- struct mlx5hws_action *hit_ft_action,
- struct mlx5hws_definer *range_definer,
- u32 min, u32 max)
+static int hws_action_create_dest_match_range_fill_table(
+ struct mlx5hws_context *ctx,
+ struct mlx5hws_range_action_table *table_ste,
+ struct mlx5hws_action *hit_ft_action,
+ struct mlx5hws_definer *range_definer, u32 min, u32 max)
{
struct mlx5hws_wqe_gta_data_seg_ste match_wqe_data = {0};
struct mlx5hws_wqe_gta_data_seg_ste range_wqe_data = {0};
@@ -1731,7 +1731,7 @@ hws_action_create_dest_match_range_fill_table(struct mlx5hws_context *ctx,
ste_attr.used_id_rtc_0 = &used_rtc_0_id;
ste_attr.used_id_rtc_1 = &used_rtc_1_id;
- common_res = &ctx->common_res[MLX5HWS_TABLE_TYPE_FDB];
+ common_res = &ctx->common_res;
/* init an empty match STE which will always hit */
ste_attr.wqe_ctrl = &wqe_ctrl;
@@ -1750,7 +1750,7 @@ hws_action_create_dest_match_range_fill_table(struct mlx5hws_context *ctx,
wqe_ctrl.stc_ix[MLX5HWS_ACTION_STC_IDX_CTRL] |=
htonl(MLX5HWS_ACTION_STC_IDX_LAST_COMBO2 << 29);
wqe_ctrl.stc_ix[MLX5HWS_ACTION_STC_IDX_HIT] =
- htonl(hit_ft_action->stc[MLX5HWS_TABLE_TYPE_FDB].offset);
+ htonl(hit_ft_action->stc.offset);
wqe_data_arr = (__force __be32 *)&range_wqe_data;
@@ -1793,7 +1793,7 @@ mlx5hws_action_create_dest_match_range(struct mlx5hws_context *ctx,
u32 min, u32 max, u32 flags)
{
struct mlx5hws_cmd_stc_modify_attr stc_attr = {0};
- struct mlx5hws_matcher_action_ste *table_ste;
+ struct mlx5hws_range_action_table *table_ste;
struct mlx5hws_action *hit_ft_action;
struct mlx5hws_definer *definer;
struct mlx5hws_action *action;
@@ -1838,12 +1838,11 @@ mlx5hws_action_create_dest_match_range(struct mlx5hws_context *ctx,
stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_HIT;
stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE;
stc_attr.reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
- stc_attr.ste_table.ste = table_ste->ste;
stc_attr.ste_table.ste_pool = table_ste->pool;
stc_attr.ste_table.match_definer_id = ctx->caps->trivial_match_definer;
ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, MLX5HWS_TABLE_TYPE_FDB,
- &action->stc[MLX5HWS_TABLE_TYPE_FDB]);
+ &action->stc);
if (ret)
goto error_unlock;
@@ -1875,7 +1874,50 @@ struct mlx5hws_action *
mlx5hws_action_create_flow_sampler(struct mlx5hws_context *ctx,
u32 sampler_id, u32 flags)
{
- mlx5hws_err(ctx, "Flow sampler action - unsupported\n");
+ struct mlx5hws_cmd_ft_create_attr ft_attr = {0};
+ struct mlx5hws_cmd_set_fte_attr fte_attr = {0};
+ struct mlx5hws_cmd_forward_tbl *fw_island;
+ struct mlx5hws_cmd_set_fte_dest dest;
+ struct mlx5hws_action *action;
+ int ret;
+
+ if (flags != (MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED)) {
+ mlx5hws_err(ctx, "Unsupported flags for flow sampler\n");
+ return NULL;
+ }
+
+ ft_attr.type = FS_FT_FDB;
+ ft_attr.level = ctx->caps->fdb_ft.max_level - 1;
+
+ dest.destination_type = MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER;
+ dest.destination_id = sampler_id;
+
+ fte_attr.dests_num = 1;
+ fte_attr.dests = &dest;
+ fte_attr.action_flags = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ fte_attr.ignore_flow_level = 1;
+
+ fw_island = mlx5hws_cmd_forward_tbl_create(ctx->mdev, &ft_attr, &fte_attr);
+ if (!fw_island)
+ return NULL;
+
+ action = hws_action_create_generic(ctx, flags,
+ MLX5HWS_ACTION_TYP_SAMPLER);
+ if (!action)
+ goto destroy_fw_island;
+
+ ret = hws_action_create_stcs(action, fw_island->ft_id);
+ if (ret)
+ goto free_action;
+
+ action->flow_sampler.fw_island = fw_island;
+
+ return action;
+
+free_action:
+ kfree(action);
+destroy_fw_island:
+ mlx5hws_cmd_forward_tbl_destroy(ctx->mdev, fw_island);
return NULL;
}
@@ -1914,6 +1956,11 @@ static void hws_action_destroy_hws(struct mlx5hws_action *action)
}
kfree(action->dest_array.dest_list);
break;
+ case MLX5HWS_ACTION_TYP_SAMPLER:
+ hws_action_destroy_stcs(action);
+ mlx5hws_cmd_forward_tbl_destroy(action->ctx->mdev,
+ action->flow_sampler.fw_island);
+ break;
case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
case MLX5HWS_ACTION_TYP_MODIFY_HDR:
shared_arg = false;
@@ -1970,8 +2017,8 @@ __must_hold(&ctx->ctrl_lock)
struct mlx5hws_action_default_stc *default_stc;
int ret;
- if (ctx->common_res[tbl_type].default_stc) {
- ctx->common_res[tbl_type].default_stc->refcount++;
+ if (ctx->common_res.default_stc) {
+ ctx->common_res.default_stc->refcount++;
return 0;
}
@@ -2023,8 +2070,8 @@ __must_hold(&ctx->ctrl_lock)
goto free_nop_dw7;
}
- ctx->common_res[tbl_type].default_stc = default_stc;
- ctx->common_res[tbl_type].default_stc->refcount++;
+ ctx->common_res.default_stc = default_stc;
+ ctx->common_res.default_stc->refcount++;
return 0;
@@ -2046,9 +2093,7 @@ __must_hold(&ctx->ctrl_lock)
{
struct mlx5hws_action_default_stc *default_stc;
- default_stc = ctx->common_res[tbl_type].default_stc;
-
- default_stc = ctx->common_res[tbl_type].default_stc;
+ default_stc = ctx->common_res.default_stc;
if (--default_stc->refcount)
return;
@@ -2058,28 +2103,30 @@ __must_hold(&ctx->ctrl_lock)
mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw5);
mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_ctr);
kfree(default_stc);
- ctx->common_res[tbl_type].default_stc = NULL;
+ ctx->common_res.default_stc = NULL;
}
static void hws_action_modify_write(struct mlx5hws_send_engine *queue,
u32 arg_idx,
u8 *arg_data,
u16 num_of_actions,
- u32 nope_locations)
+ u32 nop_locations)
{
u8 *new_arg_data = NULL;
int i, j;
- if (unlikely(nope_locations)) {
+ if (unlikely(nop_locations)) {
new_arg_data = kcalloc(num_of_actions,
MLX5HWS_MODIFY_ACTION_SIZE, GFP_KERNEL);
if (unlikely(!new_arg_data))
return;
- for (i = 0, j = 0; i < num_of_actions; i++, j++) {
- memcpy(&new_arg_data[j], arg_data, MLX5HWS_MODIFY_ACTION_SIZE);
- if (BIT(i) & nope_locations)
+ for (i = 0, j = 0; j < num_of_actions; i++, j++) {
+ if (BIT(i) & nop_locations)
j++;
+ memcpy(&new_arg_data[j * MLX5HWS_MODIFY_ACTION_SIZE],
+ &arg_data[i * MLX5HWS_MODIFY_ACTION_SIZE],
+ MLX5HWS_MODIFY_ACTION_SIZE);
}
}
@@ -2150,8 +2197,7 @@ hws_action_apply_stc(struct mlx5hws_actions_apply_data *apply,
{
struct mlx5hws_action *action = apply->rule_action[action_idx].action;
- apply->wqe_ctrl->stc_ix[stc_idx] =
- htonl(action->stc[apply->tbl_type].offset);
+ apply->wqe_ctrl->stc_ix[stc_idx] = htonl(action->stc.offset);
}
static void
@@ -2176,12 +2222,13 @@ hws_action_setter_modify_header(struct mlx5hws_actions_apply_data *apply,
struct mlx5hws_action *action;
u32 arg_sz, arg_idx;
u8 *single_action;
+ u8 max_actions;
__be32 stc_idx;
rule_action = &apply->rule_action[setter->idx_double];
action = rule_action->action;
- stc_idx = htonl(action->stc[apply->tbl_type].offset);
+ stc_idx = htonl(action->stc.offset);
apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW6] = stc_idx;
apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = 0;
@@ -2203,21 +2250,23 @@ hws_action_setter_modify_header(struct mlx5hws_actions_apply_data *apply,
apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] =
*(__be32 *)MLX5_ADDR_OF(set_action_in, single_action, data);
- } else {
- /* Argument offset multiple with number of args per these actions */
- arg_sz = mlx5hws_arg_get_arg_size(action->modify_header.max_num_of_actions);
- arg_idx = rule_action->modify_header.offset * arg_sz;
-
- apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = htonl(arg_idx);
-
- if (!(action->flags & MLX5HWS_ACTION_FLAG_SHARED)) {
- apply->require_dep = 1;
- hws_action_modify_write(apply->queue,
- action->modify_header.arg_id + arg_idx,
- rule_action->modify_header.data,
- action->modify_header.num_of_actions,
- action->modify_header.nope_locations);
- }
+ return;
+ }
+
+ /* Argument index is the rule offset multiplied by the arg size of these actions */
+ max_actions = action->modify_header.max_num_of_actions;
+ arg_sz = mlx5hws_arg_get_arg_size(max_actions);
+ arg_idx = rule_action->modify_header.offset * arg_sz;
+
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = htonl(arg_idx);
+
+ if (!(action->flags & MLX5HWS_ACTION_FLAG_SHARED)) {
+ apply->require_dep = 1;
+ hws_action_modify_write(apply->queue,
+ action->modify_header.arg_id + arg_idx,
+ rule_action->modify_header.data,
+ action->modify_header.num_of_actions,
+ action->modify_header.nop_locations);
}
}
@@ -2240,7 +2289,7 @@ hws_action_setter_insert_ptr(struct mlx5hws_actions_apply_data *apply,
apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = 0;
apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = htonl(arg_idx);
- stc_idx = htonl(action->stc[apply->tbl_type].offset);
+ stc_idx = htonl(action->stc.offset);
apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW6] = stc_idx;
apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = 0;
@@ -2272,7 +2321,7 @@ hws_action_setter_tnl_l3_to_l2(struct mlx5hws_actions_apply_data *apply,
apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = 0;
apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = htonl(arg_idx);
- stc_idx = htonl(action->stc[apply->tbl_type].offset);
+ stc_idx = htonl(action->stc.offset);
apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW6] = stc_idx;
apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = 0;
@@ -2434,6 +2483,7 @@ int mlx5hws_action_template_process(struct mlx5hws_action_template *at)
case MLX5HWS_ACTION_TYP_DROP:
case MLX5HWS_ACTION_TYP_TBL:
case MLX5HWS_ACTION_TYP_DEST_ARRAY:
+ case MLX5HWS_ACTION_TYP_SAMPLER:
case MLX5HWS_ACTION_TYP_VPORT:
case MLX5HWS_ACTION_TYP_MISS:
/* Hit action */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.h
index e8f562c31826..55a079fdd08f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.h
@@ -70,12 +70,12 @@ struct mlx5hws_action_default_stc {
struct mlx5hws_pool_chunk nop_dw6;
struct mlx5hws_pool_chunk nop_dw7;
struct mlx5hws_pool_chunk default_hit;
- u32 refcount;
+ u32 refcount; /* protected by context ctrl lock */
};
struct mlx5hws_action_shared_stc {
struct mlx5hws_pool_chunk stc_chunk;
- u32 refcount;
+ u32 refcount; /* protected by context ctrl lock */
};
struct mlx5hws_actions_apply_data {
@@ -118,19 +118,25 @@ struct mlx5hws_action_template {
u8 only_term;
};
+struct mlx5hws_range_action_table {
+ struct mlx5hws_pool *pool;
+ u32 rtc_0_id;
+ u32 rtc_1_id;
+};
+
struct mlx5hws_action {
u8 type;
u8 flags;
struct mlx5hws_context *ctx;
union {
struct {
- struct mlx5hws_pool_chunk stc[MLX5HWS_TABLE_TYPE_MAX];
+ struct mlx5hws_pool_chunk stc;
union {
struct {
u32 pat_id;
u32 arg_id;
__be64 single_action;
- u32 nope_locations;
+ u32 nop_locations;
u8 num_of_patterns;
u8 single_action_type;
u8 num_of_actions;
@@ -166,6 +172,9 @@ struct mlx5hws_action {
struct mlx5hws_cmd_set_fte_dest *dest_list;
} dest_array;
struct {
+ struct mlx5hws_cmd_forward_tbl *fw_island;
+ } flow_sampler;
+ struct {
u8 type;
u8 start_anchor;
u8 end_anchor;
@@ -183,7 +192,7 @@ struct mlx5hws_action {
size_t size;
} remove_header;
struct {
- struct mlx5hws_matcher_action_ste *table_ste;
+ struct mlx5hws_range_action_table *table_ste;
struct mlx5hws_action *hit_ft_action;
struct mlx5hws_definer *definer;
} range;
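
The new flow sampler action is implemented as a FW-created forwarding table ("island") whose single FTE forwards to the sampler object, so at rule insertion time it is programmed like the other JUMP_TO_FT hit actions. A minimal caller sketch, assuming the existing mlx5hws_action_destroy() entry point for teardown and a hypothetical sampler_obj_id; note that only the FDB + SHARED flag combination is accepted:

	struct mlx5hws_action *sampler;

	sampler = mlx5hws_action_create_flow_sampler(ctx, sampler_obj_id,
						     MLX5HWS_ACTION_FLAG_HWS_FDB |
						     MLX5HWS_ACTION_FLAG_SHARED);
	if (!sampler)
		return -EINVAL;

	/* ... reference it from rule_actions[] like any other hit action ... */

	mlx5hws_action_destroy(sampler);	/* assumed public teardown entry point */
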
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action_ste_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action_ste_pool.c
new file mode 100644
index 000000000000..5766a9c82f96
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action_ste_pool.c
@@ -0,0 +1,467 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2025 NVIDIA Corporation & Affiliates */
+
+#include "internal.h"
+
+static const char *
+hws_pool_opt_to_str(enum mlx5hws_pool_optimize opt)
+{
+ switch (opt) {
+ case MLX5HWS_POOL_OPTIMIZE_NONE:
+ return "rx-and-tx";
+ case MLX5HWS_POOL_OPTIMIZE_ORIG:
+ return "rx-only";
+ case MLX5HWS_POOL_OPTIMIZE_MIRROR:
+ return "tx-only";
+ default:
+ return "unknown";
+ }
+}
+
+static int
+hws_action_ste_table_create_pool(struct mlx5hws_context *ctx,
+ struct mlx5hws_action_ste_table *action_tbl,
+ enum mlx5hws_pool_optimize opt, size_t log_sz)
+{
+ struct mlx5hws_pool_attr pool_attr = { 0 };
+
+ pool_attr.pool_type = MLX5HWS_POOL_TYPE_STE;
+ pool_attr.table_type = MLX5HWS_TABLE_TYPE_FDB;
+ pool_attr.flags = MLX5HWS_POOL_FLAG_BUDDY;
+ pool_attr.opt_type = opt;
+ pool_attr.alloc_log_sz = log_sz;
+
+ action_tbl->pool = mlx5hws_pool_create(ctx, &pool_attr);
+ if (!action_tbl->pool) {
+ mlx5hws_err(ctx, "Failed to allocate STE pool\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hws_action_ste_table_create_single_rtc(
+ struct mlx5hws_context *ctx,
+ struct mlx5hws_action_ste_table *action_tbl,
+ enum mlx5hws_pool_optimize opt, size_t log_sz, bool tx)
+{
+ struct mlx5hws_cmd_rtc_create_attr rtc_attr = { 0 };
+ u32 *rtc_id;
+
+ rtc_attr.log_depth = 0;
+ rtc_attr.update_index_mode = MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET;
+ /* Action STEs use the default always hit definer. */
+ rtc_attr.match_definer_0 = ctx->caps->trivial_match_definer;
+ rtc_attr.is_frst_jumbo = false;
+ rtc_attr.miss_ft_id = 0;
+ rtc_attr.pd = ctx->pd_num;
+ rtc_attr.reparse_mode = mlx5hws_context_get_reparse_mode(ctx);
+
+ if (tx) {
+ rtc_attr.table_type = FS_FT_FDB_TX;
+ rtc_attr.ste_base =
+ mlx5hws_pool_get_base_mirror_id(action_tbl->pool);
+ rtc_attr.stc_base =
+ mlx5hws_pool_get_base_mirror_id(ctx->stc_pool);
+ rtc_attr.log_size =
+ opt == MLX5HWS_POOL_OPTIMIZE_ORIG ? 0 : log_sz;
+ rtc_id = &action_tbl->rtc_1_id;
+ } else {
+ rtc_attr.table_type = FS_FT_FDB_RX;
+ rtc_attr.ste_base = mlx5hws_pool_get_base_id(action_tbl->pool);
+ rtc_attr.stc_base = mlx5hws_pool_get_base_id(ctx->stc_pool);
+ rtc_attr.log_size =
+ opt == MLX5HWS_POOL_OPTIMIZE_MIRROR ? 0 : log_sz;
+ rtc_id = &action_tbl->rtc_0_id;
+ }
+
+ return mlx5hws_cmd_rtc_create(ctx->mdev, &rtc_attr, rtc_id);
+}
+
+static int
+hws_action_ste_table_create_rtcs(struct mlx5hws_context *ctx,
+ struct mlx5hws_action_ste_table *action_tbl,
+ enum mlx5hws_pool_optimize opt, size_t log_sz)
+{
+ int err;
+
+ err = hws_action_ste_table_create_single_rtc(ctx, action_tbl, opt,
+ log_sz, false);
+ if (err)
+ return err;
+
+ err = hws_action_ste_table_create_single_rtc(ctx, action_tbl, opt,
+ log_sz, true);
+ if (err) {
+ mlx5hws_cmd_rtc_destroy(ctx->mdev, action_tbl->rtc_0_id);
+ return err;
+ }
+
+ return 0;
+}
+
+static void
+hws_action_ste_table_destroy_rtcs(struct mlx5hws_action_ste_table *action_tbl)
+{
+ mlx5hws_cmd_rtc_destroy(action_tbl->pool->ctx->mdev,
+ action_tbl->rtc_1_id);
+ mlx5hws_cmd_rtc_destroy(action_tbl->pool->ctx->mdev,
+ action_tbl->rtc_0_id);
+}
+
+static int
+hws_action_ste_table_create_stc(struct mlx5hws_context *ctx,
+ struct mlx5hws_action_ste_table *action_tbl)
+{
+ struct mlx5hws_cmd_stc_modify_attr stc_attr = { 0 };
+
+ stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_HIT;
+ stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE;
+ stc_attr.reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
+ stc_attr.ste_table.ste_pool = action_tbl->pool;
+ stc_attr.ste_table.match_definer_id = ctx->caps->trivial_match_definer;
+
+ return mlx5hws_action_alloc_single_stc(ctx, &stc_attr,
+ MLX5HWS_TABLE_TYPE_FDB,
+ &action_tbl->stc);
+}
+
+static struct mlx5hws_action_ste_table *
+hws_action_ste_table_alloc(struct mlx5hws_action_ste_pool_element *parent_elem)
+{
+ enum mlx5hws_pool_optimize opt = parent_elem->opt;
+ struct mlx5hws_context *ctx = parent_elem->ctx;
+ struct mlx5hws_action_ste_table *action_tbl;
+ size_t log_sz;
+ int err;
+
+ log_sz = min(parent_elem->log_sz ?
+ parent_elem->log_sz +
+ MLX5HWS_ACTION_STE_TABLE_STEP_LOG_SZ :
+ MLX5HWS_ACTION_STE_TABLE_INIT_LOG_SZ,
+ MLX5HWS_ACTION_STE_TABLE_MAX_LOG_SZ);
+
+ action_tbl = kzalloc(sizeof(*action_tbl), GFP_KERNEL);
+ if (!action_tbl)
+ return ERR_PTR(-ENOMEM);
+
+ err = hws_action_ste_table_create_pool(ctx, action_tbl, opt, log_sz);
+ if (err)
+ goto free_tbl;
+
+ err = hws_action_ste_table_create_rtcs(ctx, action_tbl, opt, log_sz);
+ if (err)
+ goto destroy_pool;
+
+ err = hws_action_ste_table_create_stc(ctx, action_tbl);
+ if (err)
+ goto destroy_rtcs;
+
+ action_tbl->parent_elem = parent_elem;
+ INIT_LIST_HEAD(&action_tbl->list_node);
+ action_tbl->last_used = jiffies;
+ list_add(&action_tbl->list_node, &parent_elem->available);
+ parent_elem->log_sz = log_sz;
+
+ mlx5hws_dbg(ctx,
+ "Allocated %s action STE table log_sz %zu; STEs (%d, %d); RTCs (%d, %d); STC %d\n",
+ hws_pool_opt_to_str(opt), log_sz,
+ mlx5hws_pool_get_base_id(action_tbl->pool),
+ mlx5hws_pool_get_base_mirror_id(action_tbl->pool),
+ action_tbl->rtc_0_id, action_tbl->rtc_1_id,
+ action_tbl->stc.offset);
+
+ return action_tbl;
+
+destroy_rtcs:
+ hws_action_ste_table_destroy_rtcs(action_tbl);
+destroy_pool:
+ mlx5hws_pool_destroy(action_tbl->pool);
+free_tbl:
+ kfree(action_tbl);
+
+ return ERR_PTR(err);
+}
+
+static void
+hws_action_ste_table_destroy(struct mlx5hws_action_ste_table *action_tbl)
+{
+ struct mlx5hws_context *ctx = action_tbl->parent_elem->ctx;
+
+ mlx5hws_dbg(ctx,
+ "Destroying %s action STE table: STEs (%d, %d); RTCs (%d, %d); STC %d\n",
+ hws_pool_opt_to_str(action_tbl->parent_elem->opt),
+ mlx5hws_pool_get_base_id(action_tbl->pool),
+ mlx5hws_pool_get_base_mirror_id(action_tbl->pool),
+ action_tbl->rtc_0_id, action_tbl->rtc_1_id,
+ action_tbl->stc.offset);
+
+ mlx5hws_action_free_single_stc(ctx, MLX5HWS_TABLE_TYPE_FDB,
+ &action_tbl->stc);
+ hws_action_ste_table_destroy_rtcs(action_tbl);
+ mlx5hws_pool_destroy(action_tbl->pool);
+
+ list_del(&action_tbl->list_node);
+ kfree(action_tbl);
+}
+
+static int
+hws_action_ste_pool_element_init(struct mlx5hws_context *ctx,
+ struct mlx5hws_action_ste_pool_element *elem,
+ enum mlx5hws_pool_optimize opt)
+{
+ elem->ctx = ctx;
+ elem->opt = opt;
+ INIT_LIST_HEAD(&elem->available);
+ INIT_LIST_HEAD(&elem->full);
+
+ return 0;
+}
+
+static void hws_action_ste_pool_element_destroy(
+ struct mlx5hws_action_ste_pool_element *elem)
+{
+ struct mlx5hws_action_ste_table *action_tbl, *p;
+
+ /* This should be empty, but attempt to free its elements anyway. */
+ list_for_each_entry_safe(action_tbl, p, &elem->full, list_node)
+ hws_action_ste_table_destroy(action_tbl);
+
+ list_for_each_entry_safe(action_tbl, p, &elem->available, list_node)
+ hws_action_ste_table_destroy(action_tbl);
+}
+
+static int hws_action_ste_pool_init(struct mlx5hws_context *ctx,
+ struct mlx5hws_action_ste_pool *pool)
+{
+ enum mlx5hws_pool_optimize opt;
+ int err;
+
+ mutex_init(&pool->lock);
+
+ /* Rules which are added for both RX and TX must use the same action STE
+ * indices for both. If we were to use a single table, then RX-only and
+ * TX-only rules would waste the unused entries. Thus, we use separate
+ * table sets for the three cases.
+ */
+ for (opt = MLX5HWS_POOL_OPTIMIZE_NONE; opt < MLX5HWS_POOL_OPTIMIZE_MAX;
+ opt++) {
+ err = hws_action_ste_pool_element_init(ctx, &pool->elems[opt],
+ opt);
+ if (err)
+ goto destroy_elems;
+ pool->elems[opt].parent_pool = pool;
+ }
+
+ return 0;
+
+destroy_elems:
+ while (opt-- > MLX5HWS_POOL_OPTIMIZE_NONE)
+ hws_action_ste_pool_element_destroy(&pool->elems[opt]);
+
+ return err;
+}
+
+static void hws_action_ste_pool_destroy(struct mlx5hws_action_ste_pool *pool)
+{
+ int opt;
+
+ for (opt = MLX5HWS_POOL_OPTIMIZE_MAX - 1;
+ opt >= MLX5HWS_POOL_OPTIMIZE_NONE; opt--)
+ hws_action_ste_pool_element_destroy(&pool->elems[opt]);
+}
+
+static void hws_action_ste_pool_element_collect_stale(
+ struct mlx5hws_action_ste_pool_element *elem, struct list_head *cleanup)
+{
+ struct mlx5hws_action_ste_table *action_tbl, *p;
+ unsigned long expire_time, now;
+
+ expire_time = secs_to_jiffies(MLX5HWS_ACTION_STE_POOL_EXPIRE_SECONDS);
+ now = jiffies;
+
+ list_for_each_entry_safe(action_tbl, p, &elem->available, list_node) {
+ if (mlx5hws_pool_full(action_tbl->pool) &&
+ time_before(action_tbl->last_used + expire_time, now))
+ list_move(&action_tbl->list_node, cleanup);
+ }
+}
+
+static void hws_action_ste_table_cleanup_list(struct list_head *cleanup)
+{
+ struct mlx5hws_action_ste_table *action_tbl, *p;
+
+ list_for_each_entry_safe(action_tbl, p, cleanup, list_node)
+ hws_action_ste_table_destroy(action_tbl);
+}
+
+static void hws_action_ste_pool_cleanup(struct work_struct *work)
+{
+ enum mlx5hws_pool_optimize opt;
+ struct mlx5hws_context *ctx;
+ LIST_HEAD(cleanup);
+ int i;
+
+ ctx = container_of(work, struct mlx5hws_context,
+ action_ste_cleanup.work);
+
+ for (i = 0; i < ctx->queues; i++) {
+ struct mlx5hws_action_ste_pool *p = &ctx->action_ste_pool[i];
+
+ mutex_lock(&p->lock);
+ for (opt = MLX5HWS_POOL_OPTIMIZE_NONE;
+ opt < MLX5HWS_POOL_OPTIMIZE_MAX; opt++)
+ hws_action_ste_pool_element_collect_stale(
+ &p->elems[opt], &cleanup);
+ mutex_unlock(&p->lock);
+ }
+
+ hws_action_ste_table_cleanup_list(&cleanup);
+
+ schedule_delayed_work(&ctx->action_ste_cleanup,
+ secs_to_jiffies(
+ MLX5HWS_ACTION_STE_POOL_CLEANUP_SECONDS));
+}
+
+int mlx5hws_action_ste_pool_init(struct mlx5hws_context *ctx)
+{
+ struct mlx5hws_action_ste_pool *pool;
+ size_t queues = ctx->queues;
+ int i, err;
+
+ pool = kcalloc(queues, sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ return -ENOMEM;
+
+ for (i = 0; i < queues; i++) {
+ err = hws_action_ste_pool_init(ctx, &pool[i]);
+ if (err)
+ goto free_pool;
+ }
+
+ ctx->action_ste_pool = pool;
+
+ INIT_DELAYED_WORK(&ctx->action_ste_cleanup,
+ hws_action_ste_pool_cleanup);
+ schedule_delayed_work(
+ &ctx->action_ste_cleanup,
+ secs_to_jiffies(MLX5HWS_ACTION_STE_POOL_CLEANUP_SECONDS));
+
+ return 0;
+
+free_pool:
+ while (i--)
+ hws_action_ste_pool_destroy(&pool[i]);
+ kfree(pool);
+
+ return err;
+}
+
+void mlx5hws_action_ste_pool_uninit(struct mlx5hws_context *ctx)
+{
+ size_t queues = ctx->queues;
+ int i;
+
+ cancel_delayed_work_sync(&ctx->action_ste_cleanup);
+
+ for (i = 0; i < queues; i++)
+ hws_action_ste_pool_destroy(&ctx->action_ste_pool[i]);
+
+ kfree(ctx->action_ste_pool);
+}
+
+static struct mlx5hws_action_ste_pool_element *
+hws_action_ste_choose_elem(struct mlx5hws_action_ste_pool *pool,
+ bool skip_rx, bool skip_tx)
+{
+ if (skip_rx)
+ return &pool->elems[MLX5HWS_POOL_OPTIMIZE_MIRROR];
+
+ if (skip_tx)
+ return &pool->elems[MLX5HWS_POOL_OPTIMIZE_ORIG];
+
+ return &pool->elems[MLX5HWS_POOL_OPTIMIZE_NONE];
+}
+
+static int
+hws_action_ste_table_chunk_alloc(struct mlx5hws_action_ste_table *action_tbl,
+ struct mlx5hws_action_ste_chunk *chunk)
+{
+ int err;
+
+ err = mlx5hws_pool_chunk_alloc(action_tbl->pool, &chunk->ste);
+ if (err)
+ return err;
+
+ chunk->action_tbl = action_tbl;
+ action_tbl->last_used = jiffies;
+
+ return 0;
+}
+
+int mlx5hws_action_ste_chunk_alloc(struct mlx5hws_action_ste_pool *pool,
+ bool skip_rx, bool skip_tx,
+ struct mlx5hws_action_ste_chunk *chunk)
+{
+ struct mlx5hws_action_ste_pool_element *elem;
+ struct mlx5hws_action_ste_table *action_tbl;
+ bool found;
+ int err;
+
+ if (skip_rx && skip_tx)
+ return -EINVAL;
+
+ mutex_lock(&pool->lock);
+
+ elem = hws_action_ste_choose_elem(pool, skip_rx, skip_tx);
+
+ mlx5hws_dbg(elem->ctx,
+ "Allocating action STEs skip_rx %d skip_tx %d order %d\n",
+ skip_rx, skip_tx, chunk->ste.order);
+
+ found = false;
+ list_for_each_entry(action_tbl, &elem->available, list_node) {
+ if (!hws_action_ste_table_chunk_alloc(action_tbl, chunk)) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ action_tbl = hws_action_ste_table_alloc(elem);
+ if (IS_ERR(action_tbl)) {
+ err = PTR_ERR(action_tbl);
+ goto out;
+ }
+
+ err = hws_action_ste_table_chunk_alloc(action_tbl, chunk);
+ if (err)
+ goto out;
+ }
+
+ if (mlx5hws_pool_empty(action_tbl->pool))
+ list_move(&action_tbl->list_node, &elem->full);
+
+ err = 0;
+
+out:
+ mutex_unlock(&pool->lock);
+
+ return err;
+}
+
+void mlx5hws_action_ste_chunk_free(struct mlx5hws_action_ste_chunk *chunk)
+{
+ struct mutex *lock = &chunk->action_tbl->parent_elem->parent_pool->lock;
+
+ mlx5hws_dbg(chunk->action_tbl->pool->ctx,
+ "Freeing action STEs offset %d order %d\n",
+ chunk->ste.offset, chunk->ste.order);
+
+ mutex_lock(lock);
+ mlx5hws_pool_chunk_free(chunk->action_tbl->pool, &chunk->ste);
+ chunk->action_tbl->last_used = jiffies;
+ list_move(&chunk->action_tbl->list_node,
+ &chunk->action_tbl->parent_elem->available);
+ mutex_unlock(lock);
+}
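
With the constants defined in action_ste_pool.h below, the growth pattern implemented by hws_action_ste_table_alloc() works out as follows: the first table of an element holds 2^10 = 1024 action STEs, and each later table is one log step larger than the largest one allocated so far (2^11, 2^12, ...), capped at 2^20 entries. On the reclaim side, the delayed work runs every 300 seconds and frees any table on the available list whose pool has no outstanding chunks (mlx5hws_pool_full()) and that has been idle for at least 300 seconds.
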
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action_ste_pool.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action_ste_pool.h
new file mode 100644
index 000000000000..a8ba97359e31
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action_ste_pool.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2025 NVIDIA Corporation & Affiliates */
+
+#ifndef ACTION_STE_POOL_H_
+#define ACTION_STE_POOL_H_
+
+#define MLX5HWS_ACTION_STE_TABLE_INIT_LOG_SZ 10
+#define MLX5HWS_ACTION_STE_TABLE_STEP_LOG_SZ 1
+#define MLX5HWS_ACTION_STE_TABLE_MAX_LOG_SZ 20
+
+#define MLX5HWS_ACTION_STE_POOL_CLEANUP_SECONDS 300
+#define MLX5HWS_ACTION_STE_POOL_EXPIRE_SECONDS 300
+
+struct mlx5hws_action_ste_pool_element;
+
+struct mlx5hws_action_ste_table {
+ struct mlx5hws_action_ste_pool_element *parent_elem;
+ /* Wraps the RTC and STE range for this given action. */
+ struct mlx5hws_pool *pool;
+ /* Match STEs use this STC to jump to this pool's RTC. */
+ struct mlx5hws_pool_chunk stc;
+ u32 rtc_0_id;
+ u32 rtc_1_id;
+ struct list_head list_node;
+ unsigned long last_used;
+};
+
+struct mlx5hws_action_ste_pool_element {
+ struct mlx5hws_context *ctx;
+ struct mlx5hws_action_ste_pool *parent_pool;
+ size_t log_sz; /* Size of the largest table so far. */
+ enum mlx5hws_pool_optimize opt;
+ struct list_head available;
+ struct list_head full;
+};
+
+/* Central repository of action STEs. The context contains one of these pools
+ * per queue.
+ */
+struct mlx5hws_action_ste_pool {
+ /* Protects the entire pool. We have one pool per queue and only one
+ * operation can be active per rule at a given time. Thus this lock
+ * protects solely against concurrent garbage collection and we expect
+ * very little contention.
+ */
+ struct mutex lock;
+ struct mlx5hws_action_ste_pool_element elems[MLX5HWS_POOL_OPTIMIZE_MAX];
+};
+
+/* A chunk of STEs and the table it was allocated from. Used by rules. */
+struct mlx5hws_action_ste_chunk {
+ struct mlx5hws_action_ste_table *action_tbl;
+ struct mlx5hws_pool_chunk ste;
+};
+
+int mlx5hws_action_ste_pool_init(struct mlx5hws_context *ctx);
+
+void mlx5hws_action_ste_pool_uninit(struct mlx5hws_context *ctx);
+
+/* Callers are expected to fill chunk->ste.order. On success, this function
+ * populates chunk->action_tbl and chunk->ste.offset.
+ */
+int mlx5hws_action_ste_chunk_alloc(struct mlx5hws_action_ste_pool *pool,
+ bool skip_rx, bool skip_tx,
+ struct mlx5hws_action_ste_chunk *chunk);
+
+void mlx5hws_action_ste_chunk_free(struct mlx5hws_action_ste_chunk *chunk);
+
+#endif /* ACTION_STE_POOL_H_ */
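
The alloc/free pair declared above is the entire consumer-facing contract of the per-queue action STE pools. A minimal usage sketch, assuming a caller that already knows its queue index and the chunk order it needs (example_order, skip_rx and skip_tx are illustrative placeholders; passing both skip flags as true is rejected with -EINVAL):

	struct mlx5hws_action_ste_chunk chunk = {};
	struct mlx5hws_action_ste_pool *pool;
	int err;

	pool = &ctx->action_ste_pool[queue_id];	/* one pool per send queue */
	chunk.ste.order = example_order;	/* caller fills the order */

	err = mlx5hws_action_ste_chunk_alloc(pool, skip_rx, skip_tx, &chunk);
	if (err)
		return err;

	/* chunk.action_tbl now points at the backing table (pool, RTCs, STC)
	 * and chunk.ste.offset is the base index inside it.
	 */

	mlx5hws_action_ste_chunk_free(&chunk);
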
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
index baacf662c0ab..9e057f808ea5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
@@ -46,10 +46,14 @@ static void hws_bwc_unlock_all_queues(struct mlx5hws_context *ctx)
}
}
-static void hws_bwc_matcher_init_attr(struct mlx5hws_matcher_attr *attr,
+static void hws_bwc_matcher_init_attr(struct mlx5hws_bwc_matcher *bwc_matcher,
u32 priority,
- u8 size_log)
+ u8 size_log,
+ struct mlx5hws_matcher_attr *attr)
{
+ struct mlx5hws_bwc_matcher *first_matcher =
+ bwc_matcher->complex_first_bwc_matcher;
+
memset(attr, 0, sizeof(*attr));
attr->priority = priority;
@@ -61,6 +65,9 @@ static void hws_bwc_matcher_init_attr(struct mlx5hws_matcher_attr *attr,
attr->rule.num_log = size_log;
attr->resizable = true;
attr->max_num_of_at_attach = MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM;
+
+ attr->isolated_matcher_end_ft_id =
+ first_matcher ? first_matcher->matcher->end_ft_id : 0;
}
int mlx5hws_bwc_matcher_create_simple(struct mlx5hws_bwc_matcher *bwc_matcher,
@@ -83,20 +90,27 @@ int mlx5hws_bwc_matcher_create_simple(struct mlx5hws_bwc_matcher *bwc_matcher,
for (i = 0; i < bwc_queues; i++)
INIT_LIST_HEAD(&bwc_matcher->rules[i]);
- hws_bwc_matcher_init_attr(&attr,
+ hws_bwc_matcher_init_attr(bwc_matcher,
priority,
- MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG);
+ MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG,
+ &attr);
bwc_matcher->priority = priority;
bwc_matcher->size_log = MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG;
+ bwc_matcher->size_of_at_array = MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM;
+ bwc_matcher->at = kcalloc(bwc_matcher->size_of_at_array,
+ sizeof(*bwc_matcher->at), GFP_KERNEL);
+ if (!bwc_matcher->at)
+ goto free_bwc_matcher_rules;
+
/* create dummy action template */
bwc_matcher->at[0] =
mlx5hws_action_template_create(action_types ?
action_types : init_action_types);
if (!bwc_matcher->at[0]) {
mlx5hws_err(table->ctx, "BWC matcher: failed creating action template\n");
- goto free_bwc_matcher_rules;
+ goto free_bwc_matcher_at_array;
}
bwc_matcher->num_of_at = 1;
@@ -126,6 +140,8 @@ free_mt:
mlx5hws_match_template_destroy(bwc_matcher->mt);
free_at:
mlx5hws_action_template_destroy(bwc_matcher->at[0]);
+free_bwc_matcher_at_array:
+ kfree(bwc_matcher->at);
free_bwc_matcher_rules:
kfree(bwc_matcher->rules);
err:
@@ -152,6 +168,9 @@ mlx5hws_bwc_matcher_create(struct mlx5hws_table *table,
if (!bwc_matcher)
return NULL;
+ atomic_set(&bwc_matcher->num_of_rules, 0);
+ atomic_set(&bwc_matcher->rehash_required, false);
+
/* Check if the required match params can be all matched
* in single STE, otherwise complex matcher is needed.
*/
@@ -190,6 +209,7 @@ int mlx5hws_bwc_matcher_destroy_simple(struct mlx5hws_bwc_matcher *bwc_matcher)
for (i = 0; i < bwc_matcher->num_of_at; i++)
mlx5hws_action_template_destroy(bwc_matcher->at[i]);
+ kfree(bwc_matcher->at);
mlx5hws_match_template_destroy(bwc_matcher->mt);
kfree(bwc_matcher->rules);
@@ -199,22 +219,29 @@ int mlx5hws_bwc_matcher_destroy_simple(struct mlx5hws_bwc_matcher *bwc_matcher)
int mlx5hws_bwc_matcher_destroy(struct mlx5hws_bwc_matcher *bwc_matcher)
{
- if (bwc_matcher->num_of_rules)
+ u32 num_of_rules = atomic_read(&bwc_matcher->num_of_rules);
+
+ if (num_of_rules)
mlx5hws_err(bwc_matcher->matcher->tbl->ctx,
"BWC matcher destroy: matcher still has %d rules\n",
- bwc_matcher->num_of_rules);
+ num_of_rules);
- mlx5hws_bwc_matcher_destroy_simple(bwc_matcher);
+ if (bwc_matcher->complex)
+ mlx5hws_bwc_matcher_destroy_complex(bwc_matcher);
+ else
+ mlx5hws_bwc_matcher_destroy_simple(bwc_matcher);
kfree(bwc_matcher);
return 0;
}
-static int hws_bwc_queue_poll(struct mlx5hws_context *ctx,
- u16 queue_id,
- u32 *pending_rules,
- bool drain)
+int mlx5hws_bwc_queue_poll(struct mlx5hws_context *ctx,
+ u16 queue_id,
+ u32 *pending_rules,
+ bool drain)
{
+ unsigned long timeout = jiffies +
+ secs_to_jiffies(MLX5HWS_BWC_POLLING_TIMEOUT);
struct mlx5hws_flow_op_result comp[MLX5HWS_BWC_MATCHER_REHASH_BURST_TH];
u16 burst_th = hws_bwc_get_burst_th(ctx, queue_id);
bool got_comp = *pending_rules >= burst_th;
@@ -250,6 +277,11 @@ static int hws_bwc_queue_poll(struct mlx5hws_context *ctx,
}
got_comp = !!ret;
+
+ if (unlikely(!got_comp && time_after(jiffies, timeout))) {
+ mlx5hws_err(ctx, "BWC poll error: polling queue %d - TIMEOUT\n", queue_id);
+ return -ETIMEDOUT;
+ }
}
return err;
@@ -309,16 +341,12 @@ static void hws_bwc_rule_list_add(struct mlx5hws_bwc_rule *bwc_rule, u16 idx)
{
struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
- bwc_matcher->num_of_rules++;
bwc_rule->bwc_queue_idx = idx;
list_add(&bwc_rule->list_node, &bwc_matcher->rules[idx]);
}
static void hws_bwc_rule_list_remove(struct mlx5hws_bwc_rule *bwc_rule)
{
- struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
-
- bwc_matcher->num_of_rules--;
list_del_init(&bwc_rule->list_node);
}
@@ -334,22 +362,22 @@ hws_bwc_rule_destroy_hws_sync(struct mlx5hws_bwc_rule *bwc_rule,
struct mlx5hws_rule_attr *rule_attr)
{
struct mlx5hws_context *ctx = bwc_rule->bwc_matcher->matcher->tbl->ctx;
- struct mlx5hws_flow_op_result completion;
+ u32 expected_completions = 1;
int ret;
ret = hws_bwc_rule_destroy_hws_async(bwc_rule, rule_attr);
if (unlikely(ret))
return ret;
- do {
- ret = mlx5hws_send_queue_poll(ctx, rule_attr->queue_id, &completion, 1);
- } while (ret != 1);
+ ret = mlx5hws_bwc_queue_poll(ctx, rule_attr->queue_id,
+ &expected_completions, true);
+ if (unlikely(ret))
+ return ret;
- if (unlikely(completion.status != MLX5HWS_FLOW_OP_SUCCESS ||
- (bwc_rule->rule->status != MLX5HWS_RULE_STATUS_DELETED &&
- bwc_rule->rule->status != MLX5HWS_RULE_STATUS_DELETING))) {
- mlx5hws_err(ctx, "Failed destroying BWC rule: completion %d, rule status %d\n",
- completion.status, bwc_rule->rule->status);
+ if (unlikely(bwc_rule->rule->status != MLX5HWS_RULE_STATUS_DELETED &&
+ bwc_rule->rule->status != MLX5HWS_RULE_STATUS_DELETING)) {
+ mlx5hws_err(ctx, "Failed destroying BWC rule: rule status %d\n",
+ bwc_rule->rule->status);
return -EINVAL;
}
@@ -372,6 +400,7 @@ int mlx5hws_bwc_rule_destroy_simple(struct mlx5hws_bwc_rule *bwc_rule)
mutex_lock(queue_lock);
ret = hws_bwc_rule_destroy_hws_sync(bwc_rule, &attr);
+ atomic_dec(&bwc_matcher->num_of_rules);
hws_bwc_rule_list_remove(bwc_rule);
mutex_unlock(queue_lock);
@@ -381,9 +410,13 @@ int mlx5hws_bwc_rule_destroy_simple(struct mlx5hws_bwc_rule *bwc_rule)
int mlx5hws_bwc_rule_destroy(struct mlx5hws_bwc_rule *bwc_rule)
{
- int ret;
+ bool is_complex = !!bwc_rule->bwc_matcher->complex;
+ int ret = 0;
- ret = mlx5hws_bwc_rule_destroy_simple(bwc_rule);
+ if (is_complex)
+ ret = mlx5hws_bwc_rule_destroy_complex(bwc_rule);
+ else
+ ret = mlx5hws_bwc_rule_destroy_simple(bwc_rule);
mlx5hws_bwc_rule_free(bwc_rule);
return ret;
@@ -423,9 +456,8 @@ hws_bwc_rule_create_sync(struct mlx5hws_bwc_rule *bwc_rule,
if (unlikely(ret))
return ret;
- ret = hws_bwc_queue_poll(ctx, rule_attr->queue_id, &expected_completions, true);
-
- return ret;
+ return mlx5hws_bwc_queue_poll(ctx, rule_attr->queue_id,
+ &expected_completions, true);
}
static int
@@ -446,7 +478,8 @@ hws_bwc_rule_update_sync(struct mlx5hws_bwc_rule *bwc_rule,
if (unlikely(ret))
return ret;
- ret = hws_bwc_queue_poll(ctx, rule_attr->queue_id, &expected_completions, true);
+ ret = mlx5hws_bwc_queue_poll(ctx, rule_attr->queue_id,
+ &expected_completions, true);
if (unlikely(ret))
mlx5hws_err(ctx, "Failed updating BWC rule (%d)\n", ret);
@@ -458,8 +491,10 @@ hws_bwc_matcher_size_maxed_out(struct mlx5hws_bwc_matcher *bwc_matcher)
{
struct mlx5hws_cmd_query_caps *caps = bwc_matcher->matcher->tbl->ctx->caps;
- return bwc_matcher->size_log + MLX5HWS_MATCHER_ASSURED_MAIN_TBL_DEPTH >=
- caps->ste_alloc_log_max - 1;
+ /* check the match RTC size */
+ return (bwc_matcher->size_log + MLX5HWS_MATCHER_ASSURED_MAIN_TBL_DEPTH +
+ MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP) >
+ (caps->ste_alloc_log_max - 1);
}
static bool
@@ -496,6 +531,23 @@ hws_bwc_matcher_extend_at(struct mlx5hws_bwc_matcher *bwc_matcher,
struct mlx5hws_rule_action rule_actions[])
{
enum mlx5hws_action_type action_types[MLX5HWS_BWC_MAX_ACTS];
+ void *p;
+
+ if (unlikely(bwc_matcher->num_of_at >= bwc_matcher->size_of_at_array)) {
+ if (bwc_matcher->size_of_at_array >= MLX5HWS_MATCHER_MAX_AT)
+ return -ENOMEM;
+ bwc_matcher->size_of_at_array *= 2;
+ p = krealloc(bwc_matcher->at,
+ bwc_matcher->size_of_at_array *
+ sizeof(*bwc_matcher->at),
+ __GFP_ZERO | GFP_KERNEL);
+ if (!p) {
+ bwc_matcher->size_of_at_array /= 2;
+ return -ENOMEM;
+ }
+
+ bwc_matcher->at = p;
+ }
hws_bwc_rule_actions_to_action_types(rule_actions, action_types);
@@ -558,93 +610,79 @@ hws_bwc_matcher_find_at(struct mlx5hws_bwc_matcher *bwc_matcher,
static int hws_bwc_matcher_move_all_simple(struct mlx5hws_bwc_matcher *bwc_matcher)
{
+ bool move_error = false, poll_error = false, drain_error = false;
struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ struct mlx5hws_matcher *matcher = bwc_matcher->matcher;
u16 bwc_queues = mlx5hws_bwc_queues(ctx);
- struct mlx5hws_bwc_rule **bwc_rules;
struct mlx5hws_rule_attr rule_attr;
- u32 *pending_rules;
- int i, j, ret = 0;
- bool all_done;
- u16 burst_th;
+ struct mlx5hws_bwc_rule *bwc_rule;
+ struct mlx5hws_send_engine *queue;
+ struct list_head *rules_list;
+ u32 pending_rules;
+ int i, ret = 0;
mlx5hws_bwc_rule_fill_attr(bwc_matcher, 0, 0, &rule_attr);
- pending_rules = kcalloc(bwc_queues, sizeof(*pending_rules), GFP_KERNEL);
- if (!pending_rules)
- return -ENOMEM;
-
- bwc_rules = kcalloc(bwc_queues, sizeof(*bwc_rules), GFP_KERNEL);
- if (!bwc_rules) {
- ret = -ENOMEM;
- goto free_pending_rules;
- }
-
for (i = 0; i < bwc_queues; i++) {
if (list_empty(&bwc_matcher->rules[i]))
- bwc_rules[i] = NULL;
- else
- bwc_rules[i] = list_first_entry(&bwc_matcher->rules[i],
- struct mlx5hws_bwc_rule,
- list_node);
- }
+ continue;
- do {
- all_done = true;
+ pending_rules = 0;
+ rule_attr.queue_id = mlx5hws_bwc_get_queue_id(ctx, i);
+ rules_list = &bwc_matcher->rules[i];
- for (i = 0; i < bwc_queues; i++) {
- rule_attr.queue_id = mlx5hws_bwc_get_queue_id(ctx, i);
- burst_th = hws_bwc_get_burst_th(ctx, rule_attr.queue_id);
-
- for (j = 0; j < burst_th && bwc_rules[i]; j++) {
- rule_attr.burst = !!((j + 1) % burst_th);
- ret = mlx5hws_matcher_resize_rule_move(bwc_matcher->matcher,
- bwc_rules[i]->rule,
- &rule_attr);
- if (unlikely(ret)) {
- mlx5hws_err(ctx,
- "Moving BWC rule failed during rehash (%d)\n",
- ret);
- goto free_bwc_rules;
- }
-
- all_done = false;
- pending_rules[i]++;
- bwc_rules[i] = list_is_last(&bwc_rules[i]->list_node,
- &bwc_matcher->rules[i]) ?
- NULL : list_next_entry(bwc_rules[i], list_node);
+ list_for_each_entry(bwc_rule, rules_list, list_node) {
+ ret = mlx5hws_matcher_resize_rule_move(matcher,
+ bwc_rule->rule,
+ &rule_attr);
+ if (unlikely(ret && !move_error)) {
+ mlx5hws_err(ctx,
+ "Moving BWC rule: move failed (%d), attempting to move rest of the rules\n",
+ ret);
+ move_error = true;
+ }
- ret = hws_bwc_queue_poll(ctx, rule_attr.queue_id,
- &pending_rules[i], false);
- if (unlikely(ret))
- goto free_bwc_rules;
+ pending_rules++;
+ ret = mlx5hws_bwc_queue_poll(ctx,
+ rule_attr.queue_id,
+ &pending_rules,
+ false);
+ if (unlikely(ret && !poll_error)) {
+ mlx5hws_err(ctx,
+ "Moving BWC rule: poll failed (%d), attempting to move rest of the rules\n",
+ ret);
+ poll_error = true;
}
}
- } while (!all_done);
- /* drain all the bwc queues */
- for (i = 0; i < bwc_queues; i++) {
- if (pending_rules[i]) {
- u16 queue_id = mlx5hws_bwc_get_queue_id(ctx, i);
-
- mlx5hws_send_engine_flush_queue(&ctx->send_queue[queue_id]);
- ret = hws_bwc_queue_poll(ctx, queue_id,
- &pending_rules[i], true);
- if (unlikely(ret))
- goto free_bwc_rules;
+ if (pending_rules) {
+ queue = &ctx->send_queue[rule_attr.queue_id];
+ mlx5hws_send_engine_flush_queue(queue);
+ ret = mlx5hws_bwc_queue_poll(ctx,
+ rule_attr.queue_id,
+ &pending_rules,
+ true);
+ if (unlikely(ret && !drain_error)) {
+ mlx5hws_err(ctx,
+ "Moving BWC rule: drain failed (%d), attempting to move rest of the rules\n",
+ ret);
+ drain_error = true;
+ }
}
}
-free_bwc_rules:
- kfree(bwc_rules);
-free_pending_rules:
- kfree(pending_rules);
+ if (move_error || poll_error || drain_error)
+ ret = -EINVAL;
return ret;
}
static int hws_bwc_matcher_move_all(struct mlx5hws_bwc_matcher *bwc_matcher)
{
- return hws_bwc_matcher_move_all_simple(bwc_matcher);
+ if (!bwc_matcher->complex)
+ return hws_bwc_matcher_move_all_simple(bwc_matcher);
+
+ return mlx5hws_bwc_matcher_move_all_complex(bwc_matcher);
}
static int hws_bwc_matcher_move(struct mlx5hws_bwc_matcher *bwc_matcher)
@@ -655,9 +693,10 @@ static int hws_bwc_matcher_move(struct mlx5hws_bwc_matcher *bwc_matcher)
struct mlx5hws_matcher *new_matcher;
int ret;
- hws_bwc_matcher_init_attr(&matcher_attr,
+ hws_bwc_matcher_init_attr(bwc_matcher,
bwc_matcher->priority,
- bwc_matcher->size_log);
+ bwc_matcher->size_log,
+ &matcher_attr);
old_matcher = bwc_matcher->matcher;
new_matcher = mlx5hws_matcher_create(old_matcher->tbl,
@@ -677,15 +716,18 @@ static int hws_bwc_matcher_move(struct mlx5hws_bwc_matcher *bwc_matcher)
}
ret = hws_bwc_matcher_move_all(bwc_matcher);
- if (ret) {
- mlx5hws_err(ctx, "Rehash error: moving rules failed\n");
- return -ENOMEM;
- }
+ if (ret)
+ mlx5hws_err(ctx, "Rehash error: moving rules failed, attempting to remove the old matcher\n");
+
+ /* Error during rehash can't be rolled back.
+ * The best option here is to allow the rehash to complete and remove
+ * the old matcher - can't leave the matcher in the 'in_resize' state.
+ */
bwc_matcher->matcher = new_matcher;
mlx5hws_matcher_destroy(old_matcher);
- return 0;
+ return ret;
}
static int
@@ -702,9 +744,10 @@ hws_bwc_matcher_rehash_size(struct mlx5hws_bwc_matcher *bwc_matcher)
/* It is possible that other rule has already performed rehash.
* Need to check again if we really need rehash.
- * If the reason for rehash was size, but not any more - skip rehash.
*/
- if (!hws_bwc_matcher_rehash_size_needed(bwc_matcher, bwc_matcher->num_of_rules))
+ if (!atomic_read(&bwc_matcher->rehash_required) &&
+ !hws_bwc_matcher_rehash_size_needed(bwc_matcher,
+ atomic_read(&bwc_matcher->num_of_rules)))
return 0;
/* Now we're done all the checking - do the rehash:
@@ -714,6 +757,8 @@ hws_bwc_matcher_rehash_size(struct mlx5hws_bwc_matcher *bwc_matcher)
* - destroy the old matcher
*/
+ atomic_set(&bwc_matcher->rehash_required, false);
+
ret = hws_bwc_matcher_extend_size(bwc_matcher);
if (ret)
return ret;
@@ -721,17 +766,51 @@ hws_bwc_matcher_rehash_size(struct mlx5hws_bwc_matcher *bwc_matcher)
return hws_bwc_matcher_move(bwc_matcher);
}
-static int
-hws_bwc_matcher_rehash_at(struct mlx5hws_bwc_matcher *bwc_matcher)
+static int hws_bwc_rule_get_at_idx(struct mlx5hws_bwc_rule *bwc_rule,
+ struct mlx5hws_rule_action rule_actions[],
+ u16 bwc_queue_idx)
{
- /* Rehash by action template doesn't require any additional checking.
- * The bwc_matcher already contains the new action template.
- * Just do the usual rehash:
- * - create new matcher
- * - move all the rules to the new matcher
- * - destroy the old matcher
- */
- return hws_bwc_matcher_move(bwc_matcher);
+ struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ struct mutex *queue_lock; /* Protect the queue */
+ int at_idx, ret;
+
+ /* check if rehash needed due to missing action template */
+ at_idx = hws_bwc_matcher_find_at(bwc_matcher, rule_actions);
+ if (likely(at_idx >= 0))
+ return at_idx;
+
+ /* we need to extend BWC matcher action templates array */
+ queue_lock = hws_bwc_get_queue_lock(ctx, bwc_queue_idx);
+ mutex_unlock(queue_lock);
+ hws_bwc_lock_all_queues(ctx);
+
+ /* check again - perhaps other thread already did extend_at */
+ at_idx = hws_bwc_matcher_find_at(bwc_matcher, rule_actions);
+ if (at_idx >= 0)
+ goto out;
+
+ ret = hws_bwc_matcher_extend_at(bwc_matcher, rule_actions);
+ if (unlikely(ret)) {
+ mlx5hws_err(ctx, "BWC rule: failed extending AT (%d)", ret);
+ at_idx = -EINVAL;
+ goto out;
+ }
+
+ /* action templates array was extended, we need the last idx */
+ at_idx = bwc_matcher->num_of_at - 1;
+ ret = mlx5hws_matcher_attach_at(bwc_matcher->matcher,
+ bwc_matcher->at[at_idx]);
+ if (unlikely(ret)) {
+ mlx5hws_err(ctx, "BWC rule: failed attaching new AT (%d)", ret);
+ at_idx = -EINVAL;
+ goto out;
+ }
+
+out:
+ hws_bwc_unlock_all_queues(ctx);
+ mutex_lock(queue_lock);
+ return at_idx;
}
int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule,
@@ -754,50 +833,16 @@ int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule,
mutex_lock(queue_lock);
- /* check if rehash needed due to missing action template */
- at_idx = hws_bwc_matcher_find_at(bwc_matcher, rule_actions);
+ at_idx = hws_bwc_rule_get_at_idx(bwc_rule, rule_actions, bwc_queue_idx);
if (unlikely(at_idx < 0)) {
- /* we need to extend BWC matcher action templates array */
mutex_unlock(queue_lock);
- hws_bwc_lock_all_queues(ctx);
-
- ret = hws_bwc_matcher_extend_at(bwc_matcher, rule_actions);
- if (unlikely(ret)) {
- hws_bwc_unlock_all_queues(ctx);
- return ret;
- }
-
- /* action templates array was extended, we need the last idx */
- at_idx = bwc_matcher->num_of_at - 1;
-
- ret = mlx5hws_matcher_attach_at(bwc_matcher->matcher,
- bwc_matcher->at[at_idx]);
- if (unlikely(ret)) {
- /* Action template attach failed, possibly due to
- * requiring more action STEs.
- * Need to attempt creating new matcher with all
- * the action templates, including the new one.
- */
- ret = hws_bwc_matcher_rehash_at(bwc_matcher);
- if (unlikely(ret)) {
- mlx5hws_action_template_destroy(bwc_matcher->at[at_idx]);
- bwc_matcher->at[at_idx] = NULL;
- bwc_matcher->num_of_at--;
-
- hws_bwc_unlock_all_queues(ctx);
-
- mlx5hws_err(ctx,
- "BWC rule insertion: rehash AT failed (%d)\n", ret);
- return ret;
- }
- }
-
- hws_bwc_unlock_all_queues(ctx);
- mutex_lock(queue_lock);
+ mlx5hws_err(ctx, "BWC rule create: failed getting AT (%d)",
+ ret);
+ return -EINVAL;
}
/* check if number of rules require rehash */
- num_of_rules = bwc_matcher->num_of_rules;
+ num_of_rules = atomic_inc_return(&bwc_matcher->num_of_rules);
if (unlikely(hws_bwc_matcher_rehash_size_needed(bwc_matcher, num_of_rules))) {
mutex_unlock(queue_lock);
@@ -811,6 +856,7 @@ int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule,
bwc_matcher->size_log - MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP,
bwc_matcher->size_log,
ret);
+ atomic_dec(&bwc_matcher->num_of_rules);
return ret;
}
@@ -835,6 +881,7 @@ int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule,
* Try rehash by size and insert rule again - last chance.
*/
+ atomic_set(&bwc_matcher->rehash_required, true);
mutex_unlock(queue_lock);
hws_bwc_lock_all_queues(ctx);
@@ -843,6 +890,7 @@ int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule,
if (ret) {
mlx5hws_err(ctx, "BWC rule insertion: rehash failed (%d)\n", ret);
+ atomic_dec(&bwc_matcher->num_of_rules);
return ret;
}
@@ -858,6 +906,7 @@ int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule,
if (unlikely(ret)) {
mutex_unlock(queue_lock);
mlx5hws_err(ctx, "BWC rule insertion failed (%d)\n", ret);
+ atomic_dec(&bwc_matcher->num_of_rules);
return ret;
}
@@ -889,11 +938,18 @@ mlx5hws_bwc_rule_create(struct mlx5hws_bwc_matcher *bwc_matcher,
bwc_queue_idx = hws_bwc_gen_queue_idx(ctx);
- ret = mlx5hws_bwc_rule_create_simple(bwc_rule,
- params->match_buf,
- rule_actions,
- flow_source,
- bwc_queue_idx);
+ if (bwc_matcher->complex)
+ ret = mlx5hws_bwc_rule_create_complex(bwc_rule,
+ params,
+ flow_source,
+ rule_actions,
+ bwc_queue_idx);
+ else
+ ret = mlx5hws_bwc_rule_create_simple(bwc_rule,
+ params->match_buf,
+ rule_actions,
+ flow_source,
+ bwc_queue_idx);
if (unlikely(ret)) {
mlx5hws_bwc_rule_free(bwc_rule);
return NULL;
@@ -920,52 +976,11 @@ hws_bwc_rule_action_update(struct mlx5hws_bwc_rule *bwc_rule,
mutex_lock(queue_lock);
- /* check if rehash needed due to missing action template */
- at_idx = hws_bwc_matcher_find_at(bwc_matcher, rule_actions);
+ at_idx = hws_bwc_rule_get_at_idx(bwc_rule, rule_actions, idx);
if (unlikely(at_idx < 0)) {
- /* we need to extend BWC matcher action templates array */
mutex_unlock(queue_lock);
- hws_bwc_lock_all_queues(ctx);
-
- /* check again - perhaps other thread already did extend_at */
- at_idx = hws_bwc_matcher_find_at(bwc_matcher, rule_actions);
- if (likely(at_idx < 0)) {
- ret = hws_bwc_matcher_extend_at(bwc_matcher, rule_actions);
- if (unlikely(ret)) {
- hws_bwc_unlock_all_queues(ctx);
- mlx5hws_err(ctx, "BWC rule update: failed extending AT (%d)", ret);
- return -EINVAL;
- }
-
- /* action templates array was extended, we need the last idx */
- at_idx = bwc_matcher->num_of_at - 1;
-
- ret = mlx5hws_matcher_attach_at(bwc_matcher->matcher,
- bwc_matcher->at[at_idx]);
- if (unlikely(ret)) {
- /* Action template attach failed, possibly due to
- * requiring more action STEs.
- * Need to attempt creating new matcher with all
- * the action templates, including the new one.
- */
- ret = hws_bwc_matcher_rehash_at(bwc_matcher);
- if (unlikely(ret)) {
- mlx5hws_action_template_destroy(bwc_matcher->at[at_idx]);
- bwc_matcher->at[at_idx] = NULL;
- bwc_matcher->num_of_at--;
-
- hws_bwc_unlock_all_queues(ctx);
-
- mlx5hws_err(ctx,
- "BWC rule update: rehash AT failed (%d)\n",
- ret);
- return ret;
- }
- }
- }
-
- hws_bwc_unlock_all_queues(ctx);
- mutex_lock(queue_lock);
+ mlx5hws_err(ctx, "BWC rule update: failed getting AT\n");
+ return -EINVAL;
}
ret = hws_bwc_rule_update_sync(bwc_rule,
@@ -991,5 +1006,10 @@ int mlx5hws_bwc_rule_action_update(struct mlx5hws_bwc_rule *bwc_rule,
return -EINVAL;
}
- return hws_bwc_rule_action_update(bwc_rule, rule_actions);
+ /* For complex rule, the update should happen on the second matcher */
+ if (bwc_rule->isolated_bwc_rule)
+ return hws_bwc_rule_action_update(bwc_rule->isolated_bwc_rule,
+ rule_actions);
+ else
+ return hws_bwc_rule_action_update(bwc_rule, rule_actions);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h
index 0b745968e21e..d21fc247a510 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h
@@ -8,24 +8,37 @@
#define MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP 1
#define MLX5HWS_BWC_MATCHER_REHASH_PERCENT_TH 70
#define MLX5HWS_BWC_MATCHER_REHASH_BURST_TH 32
-#define MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM 255
+
+/* Max number of AT attach operations for the same matcher.
+ * When the limit is reached, a larger buffer is allocated for the ATs.
+ */
+#define MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM 8
#define MLX5HWS_BWC_MAX_ACTS 16
+#define MLX5HWS_BWC_POLLING_TIMEOUT 60
+
+struct mlx5hws_bwc_matcher_complex_data;
struct mlx5hws_bwc_matcher {
struct mlx5hws_matcher *matcher;
struct mlx5hws_match_template *mt;
- struct mlx5hws_action_template *at[MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM];
+ struct mlx5hws_action_template **at;
+ struct mlx5hws_bwc_matcher_complex_data *complex;
+ struct mlx5hws_bwc_matcher *complex_first_bwc_matcher;
u8 num_of_at;
- u16 priority;
+ u8 size_of_at_array;
u8 size_log;
- u32 num_of_rules; /* atomically accessed */
+ u32 priority;
+ atomic_t num_of_rules;
+ atomic_t rehash_required;
struct list_head *rules;
};
struct mlx5hws_bwc_rule {
struct mlx5hws_bwc_matcher *bwc_matcher;
struct mlx5hws_rule *rule;
+ struct mlx5hws_bwc_rule *isolated_bwc_rule;
+ struct mlx5hws_bwc_complex_rule_hash_node *complex_hash_node;
u16 bwc_queue_idx;
struct list_head list_node;
};
@@ -57,12 +70,19 @@ void mlx5hws_bwc_rule_fill_attr(struct mlx5hws_bwc_matcher *bwc_matcher,
u32 flow_source,
struct mlx5hws_rule_attr *rule_attr);
+int mlx5hws_bwc_queue_poll(struct mlx5hws_context *ctx,
+ u16 queue_id,
+ u32 *pending_rules,
+ bool drain);
+
static inline u16 mlx5hws_bwc_queues(struct mlx5hws_context *ctx)
{
/* Besides the control queue, half of the queues are
- * reguler HWS queues, and the other half are BWC queues.
+ * regular HWS queues, and the other half are BWC queues.
*/
- return (ctx->queues - 1) / 2;
+ if (mlx5hws_context_bwc_supported(ctx))
+ return (ctx->queues - 1) / 2;
+ return 0;
}
static inline u16 mlx5hws_bwc_get_queue_id(struct mlx5hws_context *ctx, u16 idx)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c
index c00010ca86bd..ca7501c57468 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c
@@ -3,6 +3,22 @@
#include "internal.h"
+#define HWS_CLEAR_MATCH_PARAM(mask, field) \
+ MLX5_SET(fte_match_param, (mask)->match_buf, field, 0)
+
+#define HWS_SZ_MATCH_PARAM (MLX5_ST_SZ_DW_MATCH_PARAM * 4)
+
+static const struct rhashtable_params hws_refcount_hash = {
+ .key_len = sizeof_field(struct mlx5hws_bwc_complex_rule_hash_node,
+ match_buf),
+ .key_offset = offsetof(struct mlx5hws_bwc_complex_rule_hash_node,
+ match_buf),
+ .head_offset = offsetof(struct mlx5hws_bwc_complex_rule_hash_node,
+ hash_node),
+ .automatic_shrinking = true,
+ .min_size = 1,
+};
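Editor's note: a minimal sketch (not part of the patch) of how these rhashtable parameters are used. The key is the masked match_buf embedded in each hash node (see struct mlx5hws_bwc_complex_rule_hash_node in bwc_complex.h), so two rules whose first-matcher fields are identical resolve to the same node; the lookup helper name below is hypothetical.

/* Editor's sketch: look up a node by its masked match buffer.
 * rhashtable_lookup_fast() hashes key_len bytes found at key_offset,
 * i.e. the match_buf array inside the node.
 */
static struct mlx5hws_bwc_complex_rule_hash_node *
example_complex_node_lookup(struct rhashtable *refcount_hash,
			    const u32 *masked_match_buf)
{
	return rhashtable_lookup_fast(refcount_hash, masked_match_buf,
				      hws_refcount_hash);
}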
+
bool mlx5hws_bwc_match_params_is_complex(struct mlx5hws_context *ctx,
u8 match_criteria_enable,
struct mlx5hws_match_parameters *mask)
@@ -39,6 +55,8 @@ bool mlx5hws_bwc_match_params_is_complex(struct mlx5hws_context *ctx,
} else {
mlx5hws_err(ctx, "Failed to calculate matcher definer layout\n");
}
+ } else {
+ kfree(mt->fc);
}
mlx5hws_match_template_destroy(mt);
@@ -46,20 +64,1093 @@ bool mlx5hws_bwc_match_params_is_complex(struct mlx5hws_context *ctx,
return is_complex;
}
+static void
+hws_bwc_matcher_complex_params_clear_fld(struct mlx5hws_context *ctx,
+ enum mlx5hws_definer_fname fname,
+ struct mlx5hws_match_parameters *mask)
+{
+ struct mlx5hws_cmd_query_caps *caps = ctx->caps;
+
+ switch (fname) {
+ case MLX5HWS_DEFINER_FNAME_ETH_TYPE_O:
+ case MLX5HWS_DEFINER_FNAME_ETH_TYPE_I:
+ case MLX5HWS_DEFINER_FNAME_ETH_L3_TYPE_O:
+ case MLX5HWS_DEFINER_FNAME_ETH_L3_TYPE_I:
+ case MLX5HWS_DEFINER_FNAME_IP_VERSION_O:
+ case MLX5HWS_DEFINER_FNAME_IP_VERSION_I:
+ /* IP address matching strictly requires matching on
+ * ethtype/ip_version as well, so don't clear these fields -
+ * keep them in both parts of the complex matcher.
+ */
+ break;
+ case MLX5HWS_DEFINER_FNAME_ETH_SMAC_47_16_O:
+ HWS_CLEAR_MATCH_PARAM(mask, outer_headers.smac_47_16);
+ break;
+ case MLX5HWS_DEFINER_FNAME_ETH_SMAC_47_16_I:
+ HWS_CLEAR_MATCH_PARAM(mask, inner_headers.smac_47_16);
+ break;
+ case MLX5HWS_DEFINER_FNAME_ETH_SMAC_15_0_O:
+ HWS_CLEAR_MATCH_PARAM(mask, outer_headers.smac_15_0);
+ break;
+ case MLX5HWS_DEFINER_FNAME_ETH_SMAC_15_0_I:
+ HWS_CLEAR_MATCH_PARAM(mask, inner_headers.smac_15_0);
+ break;
+ case MLX5HWS_DEFINER_FNAME_ETH_DMAC_47_16_O:
+ HWS_CLEAR_MATCH_PARAM(mask, outer_headers.dmac_47_16);
+ break;
+ case MLX5HWS_DEFINER_FNAME_ETH_DMAC_47_16_I:
+ HWS_CLEAR_MATCH_PARAM(mask, inner_headers.dmac_47_16);
+ break;
+ case MLX5HWS_DEFINER_FNAME_ETH_DMAC_15_0_O:
+ HWS_CLEAR_MATCH_PARAM(mask, outer_headers.dmac_15_0);
+ break;
+ case MLX5HWS_DEFINER_FNAME_ETH_DMAC_15_0_I:
+ HWS_CLEAR_MATCH_PARAM(mask, inner_headers.dmac_15_0);
+ break;
+ case MLX5HWS_DEFINER_FNAME_VLAN_TYPE_O:
+ HWS_CLEAR_MATCH_PARAM(mask, outer_headers.cvlan_tag);
+ HWS_CLEAR_MATCH_PARAM(mask, outer_headers.svlan_tag);
+ break;
+ case MLX5HWS_DEFINER_FNAME_VLAN_TYPE_I:
+ HWS_CLEAR_MATCH_PARAM(mask, inner_headers.cvlan_tag);
+ HWS_CLEAR_MATCH_PARAM(mask, inner_headers.svlan_tag);
+ break;
+ case MLX5HWS_DEFINER_FNAME_VLAN_FIRST_PRIO_O:
+ HWS_CLEAR_MATCH_PARAM(mask, outer_headers.first_prio);
+ break;
+ case MLX5HWS_DEFINER_FNAME_VLAN_FIRST_PRIO_I:
+ HWS_CLEAR_MATCH_PARAM(mask, inner_headers.first_prio);
+ break;
+ case MLX5HWS_DEFINER_FNAME_VLAN_CFI_O:
+ HWS_CLEAR_MATCH_PARAM(mask, outer_headers.first_cfi);
+ break;
+ case MLX5HWS_DEFINER_FNAME_VLAN_CFI_I:
+ HWS_CLEAR_MATCH_PARAM(mask, inner_headers.first_cfi);
+ break;
+ case MLX5HWS_DEFINER_FNAME_VLAN_ID_O:
+ HWS_CLEAR_MATCH_PARAM(mask, outer_headers.first_vid);
+ break;
+ case MLX5HWS_DEFINER_FNAME_VLAN_ID_I:
+ HWS_CLEAR_MATCH_PARAM(mask, inner_headers.first_vid);
+ break;
+ case MLX5HWS_DEFINER_FNAME_VLAN_SECOND_TYPE_O:
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters.outer_second_cvlan_tag);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters.outer_second_svlan_tag);
+ break;
+ case MLX5HWS_DEFINER_FNAME_VLAN_SECOND_TYPE_I:
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters.inner_second_cvlan_tag);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters.inner_second_svlan_tag);
+ break;
+ case MLX5HWS_DEFINER_FNAME_VLAN_SECOND_PRIO_O:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.outer_second_prio);
+ break;
+ case MLX5HWS_DEFINER_FNAME_VLAN_SECOND_PRIO_I:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.inner_second_prio);
+ break;
+ case MLX5HWS_DEFINER_FNAME_VLAN_SECOND_CFI_O:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.outer_second_cfi);
+ break;
+ case MLX5HWS_DEFINER_FNAME_VLAN_SECOND_CFI_I:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.inner_second_cfi);
+ break;
+ case MLX5HWS_DEFINER_FNAME_VLAN_SECOND_ID_O:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.outer_second_vid);
+ break;
+ case MLX5HWS_DEFINER_FNAME_VLAN_SECOND_ID_I:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.inner_second_vid);
+ break;
+ case MLX5HWS_DEFINER_FNAME_IPV4_IHL_O:
+ HWS_CLEAR_MATCH_PARAM(mask, outer_headers.ipv4_ihl);
+ break;
+ case MLX5HWS_DEFINER_FNAME_IPV4_IHL_I:
+ HWS_CLEAR_MATCH_PARAM(mask, inner_headers.ipv4_ihl);
+ break;
+ case MLX5HWS_DEFINER_FNAME_IP_DSCP_O:
+ HWS_CLEAR_MATCH_PARAM(mask, outer_headers.ip_dscp);
+ break;
+ case MLX5HWS_DEFINER_FNAME_IP_DSCP_I:
+ HWS_CLEAR_MATCH_PARAM(mask, inner_headers.ip_dscp);
+ break;
+ case MLX5HWS_DEFINER_FNAME_IP_ECN_O:
+ HWS_CLEAR_MATCH_PARAM(mask, outer_headers.ip_ecn);
+ break;
+ case MLX5HWS_DEFINER_FNAME_IP_ECN_I:
+ HWS_CLEAR_MATCH_PARAM(mask, inner_headers.ip_ecn);
+ break;
+ case MLX5HWS_DEFINER_FNAME_IP_TTL_O:
+ HWS_CLEAR_MATCH_PARAM(mask, outer_headers.ttl_hoplimit);
+ break;
+ case MLX5HWS_DEFINER_FNAME_IP_TTL_I:
+ HWS_CLEAR_MATCH_PARAM(mask, inner_headers.ttl_hoplimit);
+ break;
+ case MLX5HWS_DEFINER_FNAME_IPV4_DST_O:
+ HWS_CLEAR_MATCH_PARAM(mask,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0);
+ break;
+ case MLX5HWS_DEFINER_FNAME_IPV4_SRC_O:
+ HWS_CLEAR_MATCH_PARAM(mask,
+ outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0);
+ break;
+ case MLX5HWS_DEFINER_FNAME_IPV4_DST_I:
+ HWS_CLEAR_MATCH_PARAM(mask,
+ inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0);
+ break;
+ case MLX5HWS_DEFINER_FNAME_IPV4_SRC_I:
+ HWS_CLEAR_MATCH_PARAM(mask,
+ inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0);
+ break;
+ case MLX5HWS_DEFINER_FNAME_IP_FRAG_O:
+ HWS_CLEAR_MATCH_PARAM(mask, outer_headers.frag);
+ break;
+ case MLX5HWS_DEFINER_FNAME_IP_FRAG_I:
+ HWS_CLEAR_MATCH_PARAM(mask, inner_headers.frag);
+ break;
+ case MLX5HWS_DEFINER_FNAME_IPV6_FLOW_LABEL_O:
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters.outer_ipv6_flow_label);
+ break;
+ case MLX5HWS_DEFINER_FNAME_IPV6_FLOW_LABEL_I:
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters.inner_ipv6_flow_label);
+ break;
+ case MLX5HWS_DEFINER_FNAME_IPV6_DST_127_96_O:
+ case MLX5HWS_DEFINER_FNAME_IPV6_DST_95_64_O:
+ case MLX5HWS_DEFINER_FNAME_IPV6_DST_63_32_O:
+ case MLX5HWS_DEFINER_FNAME_IPV6_DST_31_0_O:
+ HWS_CLEAR_MATCH_PARAM(mask,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_127_96);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_95_64);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_63_32);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0);
+ break;
+ case MLX5HWS_DEFINER_FNAME_IPV6_SRC_127_96_O:
+ case MLX5HWS_DEFINER_FNAME_IPV6_SRC_95_64_O:
+ case MLX5HWS_DEFINER_FNAME_IPV6_SRC_63_32_O:
+ case MLX5HWS_DEFINER_FNAME_IPV6_SRC_31_0_O:
+ HWS_CLEAR_MATCH_PARAM(mask,
+ outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_127_96);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_95_64);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_63_32);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0);
+ break;
+ case MLX5HWS_DEFINER_FNAME_IPV6_DST_127_96_I:
+ case MLX5HWS_DEFINER_FNAME_IPV6_DST_95_64_I:
+ case MLX5HWS_DEFINER_FNAME_IPV6_DST_63_32_I:
+ case MLX5HWS_DEFINER_FNAME_IPV6_DST_31_0_I:
+ HWS_CLEAR_MATCH_PARAM(mask,
+ inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_127_96);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_95_64);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_63_32);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0);
+ break;
+ case MLX5HWS_DEFINER_FNAME_IPV6_SRC_127_96_I:
+ case MLX5HWS_DEFINER_FNAME_IPV6_SRC_95_64_I:
+ case MLX5HWS_DEFINER_FNAME_IPV6_SRC_63_32_I:
+ case MLX5HWS_DEFINER_FNAME_IPV6_SRC_31_0_I:
+ HWS_CLEAR_MATCH_PARAM(mask,
+ inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_127_96);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_95_64);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_63_32);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0);
+ break;
+ case MLX5HWS_DEFINER_FNAME_IP_PROTOCOL_O:
+ HWS_CLEAR_MATCH_PARAM(mask, outer_headers.ip_protocol);
+ break;
+ case MLX5HWS_DEFINER_FNAME_IP_PROTOCOL_I:
+ HWS_CLEAR_MATCH_PARAM(mask, inner_headers.ip_protocol);
+ break;
+ case MLX5HWS_DEFINER_FNAME_L4_SPORT_O:
+ HWS_CLEAR_MATCH_PARAM(mask, outer_headers.tcp_sport);
+ HWS_CLEAR_MATCH_PARAM(mask, outer_headers.udp_sport);
+ break;
+ case MLX5HWS_DEFINER_FNAME_L4_SPORT_I:
+ HWS_CLEAR_MATCH_PARAM(mask, inner_headers.tcp_sport);
+ HWS_CLEAR_MATCH_PARAM(mask, inner_headers.udp_sport);
+ break;
+ case MLX5HWS_DEFINER_FNAME_L4_DPORT_O:
+ HWS_CLEAR_MATCH_PARAM(mask, outer_headers.tcp_dport);
+ HWS_CLEAR_MATCH_PARAM(mask, outer_headers.udp_dport);
+ break;
+ case MLX5HWS_DEFINER_FNAME_L4_DPORT_I:
+ HWS_CLEAR_MATCH_PARAM(mask, inner_headers.tcp_dport);
+ HWS_CLEAR_MATCH_PARAM(mask, inner_headers.udp_dport);
+ break;
+ case MLX5HWS_DEFINER_FNAME_TCP_FLAGS_O:
+ HWS_CLEAR_MATCH_PARAM(mask, outer_headers.tcp_flags);
+ break;
+ case MLX5HWS_DEFINER_FNAME_TCP_ACK_NUM:
+ case MLX5HWS_DEFINER_FNAME_TCP_SEQ_NUM:
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters_3.outer_tcp_seq_num);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters_3.outer_tcp_ack_num);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters_3.inner_tcp_seq_num);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters_3.inner_tcp_ack_num);
+ break;
+ case MLX5HWS_DEFINER_FNAME_GTP_TEID:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_3.gtpu_teid);
+ break;
+ case MLX5HWS_DEFINER_FNAME_GTP_MSG_TYPE:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_3.gtpu_msg_type);
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_3.gtpu_msg_flags);
+ break;
+ case MLX5HWS_DEFINER_FNAME_GTPU_FIRST_EXT_DW0:
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters_3.gtpu_first_ext_dw_0);
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_3.gtpu_dw_0);
+ break;
+ case MLX5HWS_DEFINER_FNAME_GTPU_DW2:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_3.gtpu_dw_2);
+ break;
+ case MLX5HWS_DEFINER_FNAME_FLEX_PARSER_0:
+ case MLX5HWS_DEFINER_FNAME_FLEX_PARSER_1:
+ case MLX5HWS_DEFINER_FNAME_FLEX_PARSER_2:
+ case MLX5HWS_DEFINER_FNAME_FLEX_PARSER_3:
+ case MLX5HWS_DEFINER_FNAME_FLEX_PARSER_4:
+ case MLX5HWS_DEFINER_FNAME_FLEX_PARSER_5:
+ case MLX5HWS_DEFINER_FNAME_FLEX_PARSER_6:
+ case MLX5HWS_DEFINER_FNAME_FLEX_PARSER_7:
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters_2.outer_first_mpls_over_gre);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters_2.outer_first_mpls_over_udp);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters_3.geneve_tlv_option_0_data);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters_4.prog_sample_field_id_0);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters_4.prog_sample_field_value_0);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters_4.prog_sample_field_value_1);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters_4.prog_sample_field_id_2);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters_4.prog_sample_field_value_2);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters_4.prog_sample_field_id_3);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters_4.prog_sample_field_value_3);
+ break;
+ case MLX5HWS_DEFINER_FNAME_VXLAN_VNI:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.vxlan_vni);
+ break;
+ case MLX5HWS_DEFINER_FNAME_VXLAN_GPE_FLAGS:
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters_3.outer_vxlan_gpe_flags);
+ break;
+ case MLX5HWS_DEFINER_FNAME_VXLAN_GPE_RSVD0:
+ break;
+ case MLX5HWS_DEFINER_FNAME_VXLAN_GPE_PROTO:
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters_3.outer_vxlan_gpe_next_protocol);
+ break;
+ case MLX5HWS_DEFINER_FNAME_VXLAN_GPE_VNI:
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters_3.outer_vxlan_gpe_vni);
+ break;
+ case MLX5HWS_DEFINER_FNAME_GENEVE_OPT_LEN:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.geneve_opt_len);
+ break;
+ case MLX5HWS_DEFINER_FNAME_GENEVE_OAM:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.geneve_oam);
+ break;
+ case MLX5HWS_DEFINER_FNAME_GENEVE_PROTO:
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters.geneve_protocol_type);
+ break;
+ case MLX5HWS_DEFINER_FNAME_GENEVE_VNI:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.geneve_vni);
+ break;
+ case MLX5HWS_DEFINER_FNAME_SOURCE_QP:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.source_sqn);
+ break;
+ case MLX5HWS_DEFINER_FNAME_SOURCE_GVMI:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.source_port);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters.source_eswitch_owner_vhca_id);
+ break;
+ case MLX5HWS_DEFINER_FNAME_REG_0:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_2.metadata_reg_c_0);
+ break;
+ case MLX5HWS_DEFINER_FNAME_REG_1:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_2.metadata_reg_c_1);
+ break;
+ case MLX5HWS_DEFINER_FNAME_REG_2:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_2.metadata_reg_c_2);
+ break;
+ case MLX5HWS_DEFINER_FNAME_REG_3:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_2.metadata_reg_c_3);
+ break;
+ case MLX5HWS_DEFINER_FNAME_REG_4:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_2.metadata_reg_c_4);
+ break;
+ case MLX5HWS_DEFINER_FNAME_REG_5:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_2.metadata_reg_c_5);
+ break;
+ case MLX5HWS_DEFINER_FNAME_REG_7:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_2.metadata_reg_c_7);
+ break;
+ case MLX5HWS_DEFINER_FNAME_REG_A:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_2.metadata_reg_a);
+ break;
+ case MLX5HWS_DEFINER_FNAME_GRE_C:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.gre_c_present);
+ break;
+ case MLX5HWS_DEFINER_FNAME_GRE_K:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.gre_k_present);
+ break;
+ case MLX5HWS_DEFINER_FNAME_GRE_S:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.gre_s_present);
+ break;
+ case MLX5HWS_DEFINER_FNAME_GRE_PROTOCOL:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.gre_protocol);
+ break;
+ case MLX5HWS_DEFINER_FNAME_GRE_OPT_KEY:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.gre_key.key);
+ break;
+ case MLX5HWS_DEFINER_FNAME_ICMP_DW1:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_3.icmp_header_data);
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_3.icmp_type);
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_3.icmp_code);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters_3.icmpv6_header_data);
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_3.icmpv6_type);
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_3.icmpv6_code);
+ break;
+ case MLX5HWS_DEFINER_FNAME_MPLS0_O:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_2.outer_first_mpls);
+ break;
+ case MLX5HWS_DEFINER_FNAME_MPLS0_I:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_2.inner_first_mpls);
+ break;
+ case MLX5HWS_DEFINER_FNAME_TNL_HDR_0:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_5.tunnel_header_0);
+ break;
+ case MLX5HWS_DEFINER_FNAME_TNL_HDR_1:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_5.tunnel_header_1);
+ break;
+ case MLX5HWS_DEFINER_FNAME_TNL_HDR_2:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_5.tunnel_header_2);
+ break;
+ case MLX5HWS_DEFINER_FNAME_TNL_HDR_3:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_5.tunnel_header_3);
+ break;
+ case MLX5HWS_DEFINER_FNAME_FLEX_PARSER0_OK:
+ case MLX5HWS_DEFINER_FNAME_FLEX_PARSER1_OK:
+ case MLX5HWS_DEFINER_FNAME_FLEX_PARSER2_OK:
+ case MLX5HWS_DEFINER_FNAME_FLEX_PARSER3_OK:
+ case MLX5HWS_DEFINER_FNAME_FLEX_PARSER4_OK:
+ case MLX5HWS_DEFINER_FNAME_FLEX_PARSER5_OK:
+ case MLX5HWS_DEFINER_FNAME_FLEX_PARSER6_OK:
+ case MLX5HWS_DEFINER_FNAME_FLEX_PARSER7_OK:
+ /* Assuming this is a flex parser for the geneve TLV option */
+ if ((fname == MLX5HWS_DEFINER_FNAME_FLEX_PARSER0_OK &&
+ ctx->caps->flex_parser_id_geneve_tlv_option_0 != 0) ||
+ (fname == MLX5HWS_DEFINER_FNAME_FLEX_PARSER1_OK &&
+ ctx->caps->flex_parser_id_geneve_tlv_option_0 != 1) ||
+ (fname == MLX5HWS_DEFINER_FNAME_FLEX_PARSER2_OK &&
+ ctx->caps->flex_parser_id_geneve_tlv_option_0 != 2) ||
+ (fname == MLX5HWS_DEFINER_FNAME_FLEX_PARSER3_OK &&
+ ctx->caps->flex_parser_id_geneve_tlv_option_0 != 3) ||
+ (fname == MLX5HWS_DEFINER_FNAME_FLEX_PARSER4_OK &&
+ ctx->caps->flex_parser_id_geneve_tlv_option_0 != 4) ||
+ (fname == MLX5HWS_DEFINER_FNAME_FLEX_PARSER5_OK &&
+ ctx->caps->flex_parser_id_geneve_tlv_option_0 != 5) ||
+ (fname == MLX5HWS_DEFINER_FNAME_FLEX_PARSER6_OK &&
+ ctx->caps->flex_parser_id_geneve_tlv_option_0 != 6) ||
+ (fname == MLX5HWS_DEFINER_FNAME_FLEX_PARSER7_OK &&
+ ctx->caps->flex_parser_id_geneve_tlv_option_0 != 7)) {
+ mlx5hws_err(ctx,
+ "Complex params: unsupported field %s (%d), flex parser ID for geneve is %d\n",
+ mlx5hws_definer_fname_to_str(fname), fname,
+ caps->flex_parser_id_geneve_tlv_option_0);
+ break;
+ }
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters.geneve_tlv_option_0_exist);
+ break;
+ case MLX5HWS_DEFINER_FNAME_REG_6:
+ default:
+ mlx5hws_err(ctx, "Complex params: unsupported field %s (%d)\n",
+ mlx5hws_definer_fname_to_str(fname), fname);
+ break;
+ }
+}
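Editor's note: a small illustrative sketch (not part of the patch) of the splitting mechanism above. A field assigned to one sub-matcher is simply zeroed in the other sub-matcher's mask copy via HWS_CLEAR_MATCH_PARAM, which expands to MLX5_SET(..., 0); the helper name below is hypothetical.

/* Editor's sketch: assign the outer source MAC to the first matcher by
 * clearing it from the second matcher's mask copy.
 */
static void example_assign_smac_to_first_matcher(struct mlx5hws_match_parameters *m2)
{
	MLX5_SET(fte_match_param, m2->match_buf, outer_headers.smac_47_16, 0);
	MLX5_SET(fte_match_param, m2->match_buf, outer_headers.smac_15_0, 0);
}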
+
+static bool
+hws_bwc_matcher_complex_params_comb_is_valid(struct mlx5hws_definer_fc *fc,
+ int fc_sz,
+ u32 combination_num)
+{
+ bool m1[MLX5HWS_DEFINER_FNAME_MAX] = {0};
+ bool m2[MLX5HWS_DEFINER_FNAME_MAX] = {0};
+ bool is_first_matcher;
+ int i;
+
+ for (i = 0; i < fc_sz; i++) {
+ is_first_matcher = !(combination_num & BIT(i));
+ if (is_first_matcher)
+ m1[fc[i].fname] = true;
+ else
+ m2[fc[i].fname] = true;
+ }
+
+ /* Not all the fields can be split into separate matchers.
+ * Some must stay together on the same matcher.
+ * For example, IPv6 parts - the whole IPv6 address must be on the
+ * same matcher in order for us to deduce whether it's an IPv6 or IPv4 address.
+ */
+ if (m1[MLX5HWS_DEFINER_FNAME_IP_FRAG_O] &&
+ (m2[MLX5HWS_DEFINER_FNAME_ETH_SMAC_15_0_O] ||
+ m2[MLX5HWS_DEFINER_FNAME_ETH_SMAC_47_16_O] ||
+ m2[MLX5HWS_DEFINER_FNAME_ETH_DMAC_15_0_O] ||
+ m2[MLX5HWS_DEFINER_FNAME_ETH_DMAC_47_16_O]))
+ return false;
+
+ if (m2[MLX5HWS_DEFINER_FNAME_IP_FRAG_O] &&
+ (m1[MLX5HWS_DEFINER_FNAME_ETH_SMAC_15_0_O] ||
+ m1[MLX5HWS_DEFINER_FNAME_ETH_SMAC_47_16_O] ||
+ m1[MLX5HWS_DEFINER_FNAME_ETH_DMAC_15_0_O] ||
+ m1[MLX5HWS_DEFINER_FNAME_ETH_DMAC_47_16_O]))
+ return false;
+
+ if (m1[MLX5HWS_DEFINER_FNAME_IP_FRAG_I] &&
+ (m2[MLX5HWS_DEFINER_FNAME_ETH_SMAC_47_16_I] ||
+ m2[MLX5HWS_DEFINER_FNAME_ETH_SMAC_15_0_I] ||
+ m2[MLX5HWS_DEFINER_FNAME_ETH_DMAC_47_16_I] ||
+ m2[MLX5HWS_DEFINER_FNAME_ETH_DMAC_15_0_I]))
+ return false;
+
+ if (m2[MLX5HWS_DEFINER_FNAME_IP_FRAG_I] &&
+ (m1[MLX5HWS_DEFINER_FNAME_ETH_SMAC_47_16_I] ||
+ m1[MLX5HWS_DEFINER_FNAME_ETH_SMAC_15_0_I] ||
+ m1[MLX5HWS_DEFINER_FNAME_ETH_DMAC_47_16_I] ||
+ m1[MLX5HWS_DEFINER_FNAME_ETH_DMAC_15_0_I]))
+ return false;
+
+ /* Don't split outer IPv6 dest address. */
+ if ((m1[MLX5HWS_DEFINER_FNAME_IPV6_DST_127_96_O] ||
+ m1[MLX5HWS_DEFINER_FNAME_IPV6_DST_95_64_O] ||
+ m1[MLX5HWS_DEFINER_FNAME_IPV6_DST_63_32_O] ||
+ m1[MLX5HWS_DEFINER_FNAME_IPV6_DST_31_0_O]) &&
+ (m2[MLX5HWS_DEFINER_FNAME_IPV6_DST_127_96_O] ||
+ m2[MLX5HWS_DEFINER_FNAME_IPV6_DST_95_64_O] ||
+ m2[MLX5HWS_DEFINER_FNAME_IPV6_DST_63_32_O] ||
+ m2[MLX5HWS_DEFINER_FNAME_IPV6_DST_31_0_O]))
+ return false;
+
+ /* Don't split outer IPv6 source address. */
+ if ((m1[MLX5HWS_DEFINER_FNAME_IPV6_SRC_127_96_O] ||
+ m1[MLX5HWS_DEFINER_FNAME_IPV6_SRC_95_64_O] ||
+ m1[MLX5HWS_DEFINER_FNAME_IPV6_SRC_63_32_O] ||
+ m1[MLX5HWS_DEFINER_FNAME_IPV6_SRC_31_0_O]) &&
+ (m2[MLX5HWS_DEFINER_FNAME_IPV6_SRC_127_96_O] ||
+ m2[MLX5HWS_DEFINER_FNAME_IPV6_SRC_95_64_O] ||
+ m2[MLX5HWS_DEFINER_FNAME_IPV6_SRC_63_32_O] ||
+ m2[MLX5HWS_DEFINER_FNAME_IPV6_SRC_31_0_O]))
+ return false;
+
+ /* Don't split inner IPv6 dest address. */
+ if ((m1[MLX5HWS_DEFINER_FNAME_IPV6_DST_127_96_I] ||
+ m1[MLX5HWS_DEFINER_FNAME_IPV6_DST_95_64_I] ||
+ m1[MLX5HWS_DEFINER_FNAME_IPV6_DST_63_32_I] ||
+ m1[MLX5HWS_DEFINER_FNAME_IPV6_DST_31_0_I]) &&
+ (m2[MLX5HWS_DEFINER_FNAME_IPV6_DST_127_96_I] ||
+ m2[MLX5HWS_DEFINER_FNAME_IPV6_DST_95_64_I] ||
+ m2[MLX5HWS_DEFINER_FNAME_IPV6_DST_63_32_I] ||
+ m2[MLX5HWS_DEFINER_FNAME_IPV6_DST_31_0_I]))
+ return false;
+
+ /* Don't split inner IPv6 source address. */
+ if ((m1[MLX5HWS_DEFINER_FNAME_IPV6_SRC_127_96_I] ||
+ m1[MLX5HWS_DEFINER_FNAME_IPV6_SRC_95_64_I] ||
+ m1[MLX5HWS_DEFINER_FNAME_IPV6_SRC_63_32_I] ||
+ m1[MLX5HWS_DEFINER_FNAME_IPV6_SRC_31_0_I]) &&
+ (m2[MLX5HWS_DEFINER_FNAME_IPV6_SRC_127_96_I] ||
+ m2[MLX5HWS_DEFINER_FNAME_IPV6_SRC_95_64_I] ||
+ m2[MLX5HWS_DEFINER_FNAME_IPV6_SRC_63_32_I] ||
+ m2[MLX5HWS_DEFINER_FNAME_IPV6_SRC_31_0_I]))
+ return false;
+
+ /* Don't split GRE parameters. */
+ if ((m1[MLX5HWS_DEFINER_FNAME_GRE_C] ||
+ m1[MLX5HWS_DEFINER_FNAME_GRE_K] ||
+ m1[MLX5HWS_DEFINER_FNAME_GRE_S] ||
+ m1[MLX5HWS_DEFINER_FNAME_GRE_PROTOCOL]) &&
+ (m2[MLX5HWS_DEFINER_FNAME_GRE_C] ||
+ m2[MLX5HWS_DEFINER_FNAME_GRE_K] ||
+ m2[MLX5HWS_DEFINER_FNAME_GRE_S] ||
+ m2[MLX5HWS_DEFINER_FNAME_GRE_PROTOCOL]))
+ return false;
+
+ /* Don't split TCP ack/seq numbers. */
+ if ((m1[MLX5HWS_DEFINER_FNAME_TCP_ACK_NUM] ||
+ m1[MLX5HWS_DEFINER_FNAME_TCP_SEQ_NUM]) &&
+ (m2[MLX5HWS_DEFINER_FNAME_TCP_ACK_NUM] ||
+ m2[MLX5HWS_DEFINER_FNAME_TCP_SEQ_NUM]))
+ return false;
+
+ /* Don't split flex parser. */
+ if ((m1[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_0] ||
+ m1[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_1] ||
+ m1[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_2] ||
+ m1[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_3] ||
+ m1[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_4] ||
+ m1[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_5] ||
+ m1[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_6] ||
+ m1[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_7]) &&
+ (m2[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_0] ||
+ m2[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_1] ||
+ m2[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_2] ||
+ m2[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_3] ||
+ m2[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_4] ||
+ m2[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_5] ||
+ m2[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_6] ||
+ m2[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_7]))
+ return false;
+
+ return true;
+}
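Editor's note: a hedged sketch (not part of the patch) of how combination_num encodes a split. Bit i decides which of the two sub-matchers receives field fc[i], so iterating over all 2^fc_sz values enumerates every possible two-way split; the debug helper below is hypothetical.

/* Editor's sketch: dump which sub-matcher each field lands on for a
 * given combination_num.
 */
static void example_dump_split(const struct mlx5hws_definer_fc *fc,
			       int fc_sz, u32 combination_num)
{
	int i;

	for (i = 0; i < fc_sz; i++)
		pr_debug("field %d -> matcher %c\n", fc[i].fname,
			 (combination_num & BIT(i)) ? '2' : '1');
}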
+
+static void
+hws_bwc_matcher_complex_params_comb_create(struct mlx5hws_context *ctx,
+ struct mlx5hws_match_parameters *m,
+ struct mlx5hws_match_parameters *m1,
+ struct mlx5hws_match_parameters *m2,
+ struct mlx5hws_definer_fc *fc,
+ int fc_sz,
+ u32 combination_num)
+{
+ bool is_first_matcher;
+ int i;
+
+ memcpy(m1->match_buf, m->match_buf, m->match_sz);
+ memcpy(m2->match_buf, m->match_buf, m->match_sz);
+
+ for (i = 0; i < fc_sz; i++) {
+ is_first_matcher = !(combination_num & BIT(i));
+ hws_bwc_matcher_complex_params_clear_fld(ctx,
+ fc[i].fname,
+ is_first_matcher ?
+ m2 : m1);
+ }
+
+ MLX5_SET(fte_match_param, m2->match_buf,
+ misc_parameters_2.metadata_reg_c_6, -1);
+}
+
+static void
+hws_bwc_matcher_complex_params_destroy(struct mlx5hws_match_parameters *mask_1,
+ struct mlx5hws_match_parameters *mask_2)
+{
+ kfree(mask_1->match_buf);
+ kfree(mask_2->match_buf);
+}
+
+static int
+hws_bwc_matcher_complex_params_create(struct mlx5hws_context *ctx,
+ u8 match_criteria,
+ struct mlx5hws_match_parameters *mask,
+ struct mlx5hws_match_parameters *mask_1,
+ struct mlx5hws_match_parameters *mask_2)
+{
+ struct mlx5hws_definer_fc *fc;
+ u32 num_of_combinations;
+ int fc_sz = 0;
+ int res = 0;
+ u32 i;
+
+ if (MLX5_GET(fte_match_param, mask->match_buf,
+ misc_parameters_2.metadata_reg_c_6)) {
+ mlx5hws_err(ctx, "Complex matcher: REG_C_6 matching is reserved\n");
+ res = -EINVAL;
+ goto out;
+ }
+
+ mask_1->match_buf = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param),
+ GFP_KERNEL);
+ mask_2->match_buf = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param),
+ GFP_KERNEL);
+ if (!mask_1->match_buf || !mask_2->match_buf) {
+ mlx5hws_err(ctx, "Complex matcher: failed to allocate match_param\n");
+ res = -ENOMEM;
+ goto free_params;
+ }
+
+ mask_1->match_sz = mask->match_sz;
+ mask_2->match_sz = mask->match_sz;
+
+ fc = mlx5hws_definer_conv_match_params_to_compressed_fc(ctx,
+ match_criteria,
+ mask->match_buf,
+ &fc_sz);
+ if (!fc) {
+ res = -ENOMEM;
+ goto free_params;
+ }
+
+ if (fc_sz >= sizeof(num_of_combinations) * BITS_PER_BYTE) {
+ mlx5hws_err(ctx,
+ "Complex matcher: too many match parameters (%d)\n",
+ fc_sz);
+ res = -EINVAL;
+ goto free_fc;
+ }
+
+ /* We have a list of all the match fields from the match parameter.
+ * Now try all the possibilities of splitting them into two match
+ * buffers and look for a supported combination.
+ */
+ num_of_combinations = 1 << fc_sz;
+
+ /* Start from combination at index 1 - we know that 0 is unsupported */
+ for (i = 1; i < num_of_combinations; i++) {
+ if (!hws_bwc_matcher_complex_params_comb_is_valid(fc, fc_sz, i))
+ continue;
+
+ hws_bwc_matcher_complex_params_comb_create(ctx,
+ mask, mask_1, mask_2,
+ fc, fc_sz, i);
+ /* We now have two separate sets of match params.
+ * Check if each of them can be used in its own matcher.
+ */
+ if (!mlx5hws_bwc_match_params_is_complex(ctx,
+ match_criteria,
+ mask_1) &&
+ !mlx5hws_bwc_match_params_is_complex(ctx,
+ match_criteria,
+ mask_2))
+ break;
+ }
+
+ if (i == num_of_combinations) {
+ /* We've scanned all the combinations, but to no avail */
+ mlx5hws_err(ctx, "Complex matcher: couldn't find match params combination\n");
+ res = -EINVAL;
+ goto free_fc;
+ }
+
+ kfree(fc);
+ return 0;
+
+free_fc:
+ kfree(fc);
+free_params:
+ hws_bwc_matcher_complex_params_destroy(mask_1, mask_2);
+out:
+ return res;
+}
+
+static int
+hws_bwc_isolated_table_create(struct mlx5hws_bwc_matcher *bwc_matcher,
+ struct mlx5hws_table *table)
+{
+ struct mlx5hws_cmd_ft_modify_attr ft_attr = {0};
+ struct mlx5hws_context *ctx = table->ctx;
+ struct mlx5hws_table_attr tbl_attr = {0};
+ struct mlx5hws_table *isolated_tbl;
+ int ret = 0;
+
+ tbl_attr.type = table->type;
+ tbl_attr.level = table->level;
+
+ bwc_matcher->complex->isolated_tbl =
+ mlx5hws_table_create(ctx, &tbl_attr);
+ isolated_tbl = bwc_matcher->complex->isolated_tbl;
+ if (!isolated_tbl)
+ return -EINVAL;
+
+ /* Set the default miss of the isolated table to
+ * point to the end anchor of the original matcher.
+ */
+ mlx5hws_cmd_set_attr_connect_miss_tbl(ctx,
+ isolated_tbl->fw_ft_type,
+ isolated_tbl->type,
+ &ft_attr);
+ ft_attr.table_miss_id = bwc_matcher->matcher->end_ft_id;
+
+ ret = mlx5hws_cmd_flow_table_modify(ctx->mdev,
+ &ft_attr,
+ isolated_tbl->ft_id);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed setting isolated tbl default miss\n");
+ goto destroy_tbl;
+ }
+
+ return 0;
+
+destroy_tbl:
+ mlx5hws_table_destroy(isolated_tbl);
+ return ret;
+}
+
+static void hws_bwc_isolated_table_destroy(struct mlx5hws_table *isolated_tbl)
+{
+ /* This table is isolated - no table points to it, so there is no need
+ * to disconnect it from anywhere, and it won't affect any other table's miss.
+ */
+ mlx5hws_table_destroy(isolated_tbl);
+}
+
+static int
+hws_bwc_isolated_matcher_create(struct mlx5hws_bwc_matcher *bwc_matcher,
+ struct mlx5hws_table *table,
+ u8 match_criteria_enable,
+ struct mlx5hws_match_parameters *mask)
+{
+ struct mlx5hws_table *isolated_tbl = bwc_matcher->complex->isolated_tbl;
+ struct mlx5hws_bwc_matcher *isolated_bwc_matcher;
+ struct mlx5hws_context *ctx = table->ctx;
+ int ret;
+
+ isolated_bwc_matcher = kzalloc(sizeof(*bwc_matcher), GFP_KERNEL);
+ if (!isolated_bwc_matcher)
+ return -ENOMEM;
+
+ bwc_matcher->complex->isolated_bwc_matcher = isolated_bwc_matcher;
+
+ /* Isolated BWC matcher needs access to the first BWC matcher */
+ isolated_bwc_matcher->complex_first_bwc_matcher = bwc_matcher;
+
+ /* Isolated matcher needs to match on REG_C_6,
+ * so make sure its criteria bit is on.
+ */
+ match_criteria_enable |= MLX5HWS_DEFINER_MATCH_CRITERIA_MISC2;
+
+ ret = mlx5hws_bwc_matcher_create_simple(isolated_bwc_matcher,
+ isolated_tbl,
+ 0,
+ match_criteria_enable,
+ mask,
+ NULL);
+ if (ret) {
+ mlx5hws_err(ctx, "Complex matcher: failed creating isolated BWC matcher\n");
+ goto free_matcher;
+ }
+
+ return 0;
+
+free_matcher:
+ kfree(bwc_matcher->complex->isolated_bwc_matcher);
+ return ret;
+}
+
+static void
+hws_bwc_isolated_matcher_destroy(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ mlx5hws_bwc_matcher_destroy_simple(bwc_matcher);
+ kfree(bwc_matcher);
+}
+
+static int
+hws_bwc_isolated_actions_create(struct mlx5hws_bwc_matcher *bwc_matcher,
+ struct mlx5hws_table *table)
+{
+ struct mlx5hws_table *isolated_tbl = bwc_matcher->complex->isolated_tbl;
+ u8 modify_hdr_action[MLX5_ST_SZ_BYTES(set_action_in)] = {0};
+ struct mlx5hws_context *ctx = table->ctx;
+ struct mlx5hws_action_mh_pattern ptrn;
+ int ret = 0;
+
+ /* Create action to jump to isolated table */
+
+ bwc_matcher->complex->action_go_to_tbl =
+ mlx5hws_action_create_dest_table(ctx,
+ isolated_tbl,
+ MLX5HWS_ACTION_FLAG_HWS_FDB);
+ if (!bwc_matcher->complex->action_go_to_tbl) {
+ mlx5hws_err(ctx, "Complex matcher: failed to create go-to-tbl action\n");
+ return -EINVAL;
+ }
+
+ /* Create modify header action to set REG_C_6 */
+
+ MLX5_SET(set_action_in, modify_hdr_action,
+ action_type, MLX5_MODIFICATION_TYPE_SET);
+ MLX5_SET(set_action_in, modify_hdr_action,
+ field, MLX5_MODI_META_REG_C_6);
+ MLX5_SET(set_action_in, modify_hdr_action,
+ length, 0); /* zero means length of 32 */
+ MLX5_SET(set_action_in, modify_hdr_action, offset, 0);
+ MLX5_SET(set_action_in, modify_hdr_action, data, 0);
+
+ ptrn.data = (void *)modify_hdr_action;
+ ptrn.sz = MLX5HWS_ACTION_DOUBLE_SIZE;
+
+ bwc_matcher->complex->action_metadata =
+ mlx5hws_action_create_modify_header(ctx, 1, &ptrn, 0,
+ MLX5HWS_ACTION_FLAG_HWS_FDB);
+ if (!bwc_matcher->complex->action_metadata) {
+ ret = -EINVAL;
+ goto destroy_action_go_to_tbl;
+ }
+
+ /* Create last action */
+
+ bwc_matcher->complex->action_last =
+ mlx5hws_action_create_last(ctx, MLX5HWS_ACTION_FLAG_HWS_FDB);
+ if (!bwc_matcher->complex->action_last) {
+ mlx5hws_err(ctx, "Complex matcher: failed to create last action\n");
+ ret = -EINVAL;
+ goto destroy_action_metadata;
+ }
+
+ return 0;
+
+destroy_action_metadata:
+ mlx5hws_action_destroy(bwc_matcher->complex->action_metadata);
+destroy_action_go_to_tbl:
+ mlx5hws_action_destroy(bwc_matcher->complex->action_go_to_tbl);
+ return ret;
+}
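Editor's note: a minimal sketch (not part of the patch) of the modify-header SET action built above. Writing length 0 is the hardware encoding for a full 32-bit set, so the whole REG_C_6 register carries the per-rule tag; the helper below is hypothetical.

/* Editor's sketch: encode "REG_C_6 = tag" as a single SET action. */
static void example_build_reg_c_6_set(u8 *modify_hdr_action, u32 tag)
{
	MLX5_SET(set_action_in, modify_hdr_action, action_type,
		 MLX5_MODIFICATION_TYPE_SET);
	MLX5_SET(set_action_in, modify_hdr_action, field,
		 MLX5_MODI_META_REG_C_6);
	MLX5_SET(set_action_in, modify_hdr_action, length, 0); /* 0 == 32 bits */
	MLX5_SET(set_action_in, modify_hdr_action, offset, 0);
	MLX5_SET(set_action_in, modify_hdr_action, data, tag);
}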
+
+static void
+hws_bwc_isolated_actions_destroy(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ mlx5hws_action_destroy(bwc_matcher->complex->action_last);
+ mlx5hws_action_destroy(bwc_matcher->complex->action_metadata);
+ mlx5hws_action_destroy(bwc_matcher->complex->action_go_to_tbl);
+}
+
int mlx5hws_bwc_matcher_create_complex(struct mlx5hws_bwc_matcher *bwc_matcher,
struct mlx5hws_table *table,
u32 priority,
u8 match_criteria_enable,
struct mlx5hws_match_parameters *mask)
{
- mlx5hws_err(table->ctx, "Complex matcher is not supported yet\n");
- return -EOPNOTSUPP;
+ enum mlx5hws_action_type complex_init_action_types[3];
+ struct mlx5hws_bwc_matcher *isolated_bwc_matcher;
+ struct mlx5hws_match_parameters mask_1 = {0};
+ struct mlx5hws_match_parameters mask_2 = {0};
+ struct mlx5hws_context *ctx = table->ctx;
+ int ret;
+
+ ret = hws_bwc_matcher_complex_params_create(table->ctx,
+ match_criteria_enable,
+ mask, &mask_1, &mask_2);
+ if (ret)
+ goto err;
+
+ bwc_matcher->complex =
+ kzalloc(sizeof(*bwc_matcher->complex), GFP_KERNEL);
+ if (!bwc_matcher->complex) {
+ ret = -ENOMEM;
+ goto free_masks;
+ }
+
+ ret = rhashtable_init(&bwc_matcher->complex->refcount_hash,
+ &hws_refcount_hash);
+ if (ret) {
+ mlx5hws_err(ctx, "Complex matcher: failed to initialize rhashtable\n");
+ goto free_complex;
+ }
+
+ mutex_init(&bwc_matcher->complex->hash_lock);
+ ida_init(&bwc_matcher->complex->metadata_ida);
+
+ /* Create initial action template for the first matcher.
+ * Usually the initial AT is just a dummy, but in the case of a complex
+ * matcher we know exactly which actions it should have.
+ */
+
+ complex_init_action_types[0] = MLX5HWS_ACTION_TYP_MODIFY_HDR;
+ complex_init_action_types[1] = MLX5HWS_ACTION_TYP_TBL;
+ complex_init_action_types[2] = MLX5HWS_ACTION_TYP_LAST;
+
+ /* Create the first matcher */
+
+ ret = mlx5hws_bwc_matcher_create_simple(bwc_matcher,
+ table,
+ priority,
+ match_criteria_enable,
+ &mask_1,
+ complex_init_action_types);
+ if (ret)
+ goto destroy_ida;
+
+ /* Create isolated table to hold the second isolated matcher */
+
+ ret = hws_bwc_isolated_table_create(bwc_matcher, table);
+ if (ret) {
+ mlx5hws_err(ctx, "Complex matcher: failed creating isolated table\n");
+ goto destroy_first_matcher;
+ }
+
+ /* Now create the second BWC matcher - the isolated one */
+
+ ret = hws_bwc_isolated_matcher_create(bwc_matcher, table,
+ match_criteria_enable, &mask_2);
+ if (ret) {
+ mlx5hws_err(ctx, "Complex matcher: failed creating isolated matcher\n");
+ goto destroy_isolated_tbl;
+ }
+
+ /* Create action for isolated matcher's rules */
+
+ ret = hws_bwc_isolated_actions_create(bwc_matcher, table);
+ if (ret) {
+ mlx5hws_err(ctx, "Complex matcher: failed creating isolated actions\n");
+ goto destroy_isolated_matcher;
+ }
+
+ hws_bwc_matcher_complex_params_destroy(&mask_1, &mask_2);
+ return 0;
+
+destroy_isolated_matcher:
+ isolated_bwc_matcher = bwc_matcher->complex->isolated_bwc_matcher;
+ hws_bwc_isolated_matcher_destroy(isolated_bwc_matcher);
+destroy_isolated_tbl:
+ hws_bwc_isolated_table_destroy(bwc_matcher->complex->isolated_tbl);
+destroy_first_matcher:
+ mlx5hws_bwc_matcher_destroy_simple(bwc_matcher);
+destroy_ida:
+ ida_destroy(&bwc_matcher->complex->metadata_ida);
+ mutex_destroy(&bwc_matcher->complex->hash_lock);
+ rhashtable_destroy(&bwc_matcher->complex->refcount_hash);
+free_complex:
+ kfree(bwc_matcher->complex);
+ bwc_matcher->complex = NULL;
+free_masks:
+ hws_bwc_matcher_complex_params_destroy(&mask_1, &mask_2);
+err:
+ return ret;
}
void
mlx5hws_bwc_matcher_destroy_complex(struct mlx5hws_bwc_matcher *bwc_matcher)
{
- /* nothing to do here */
+ struct mlx5hws_bwc_matcher *isolated_bwc_matcher =
+ bwc_matcher->complex->isolated_bwc_matcher;
+
+ hws_bwc_isolated_actions_destroy(bwc_matcher);
+ hws_bwc_isolated_matcher_destroy(isolated_bwc_matcher);
+ hws_bwc_isolated_table_destroy(bwc_matcher->complex->isolated_tbl);
+ mlx5hws_bwc_matcher_destroy_simple(bwc_matcher);
+ ida_destroy(&bwc_matcher->complex->metadata_ida);
+ mutex_destroy(&bwc_matcher->complex->hash_lock);
+ rhashtable_destroy(&bwc_matcher->complex->refcount_hash);
+ kfree(bwc_matcher->complex);
+ bwc_matcher->complex = NULL;
+}
+
+static void
+hws_bwc_matcher_complex_hash_lock(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ mutex_lock(&bwc_matcher->complex->hash_lock);
+}
+
+static void
+hws_bwc_matcher_complex_hash_unlock(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ mutex_unlock(&bwc_matcher->complex->hash_lock);
+}
+
+static int
+hws_bwc_rule_complex_hash_node_get(struct mlx5hws_bwc_rule *bwc_rule,
+ struct mlx5hws_match_parameters *params)
+{
+ struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+ struct mlx5hws_bwc_complex_rule_hash_node *node, *old_node;
+ struct rhashtable *refcount_hash;
+ int ret, i;
+
+ bwc_rule->complex_hash_node = NULL;
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (unlikely(!node))
+ return -ENOMEM;
+
+ ret = ida_alloc(&bwc_matcher->complex->metadata_ida, GFP_KERNEL);
+ if (ret < 0)
+ goto err_free_node;
+ node->tag = ret;
+
+ refcount_set(&node->refcount, 1);
+
+ /* Mask the match buffer - zero out all the fields that are not
+ * covered by the match params mask of the first matcher (the first
+ * of the two parts of the complex matcher).
+ * The resulting masked buffer is the key for the hash.
+ */
+ for (i = 0; i < MLX5_ST_SZ_DW_MATCH_PARAM; i++)
+ node->match_buf[i] = params->match_buf[i] &
+ bwc_matcher->mt->match_param[i];
+
+ refcount_hash = &bwc_matcher->complex->refcount_hash;
+ old_node = rhashtable_lookup_get_insert_fast(refcount_hash,
+ &node->hash_node,
+ hws_refcount_hash);
+ if (IS_ERR(old_node)) {
+ ret = PTR_ERR(old_node);
+ goto err_free_ida;
+ }
+
+ if (old_node) {
+ /* Rule with the same tag already exists - update refcount */
+ refcount_inc(&old_node->refcount);
+ /* Let the new rule use the same tag as the existing rule.
+ * Note that we don't have any indication for the rule creation
+ * process that a rule with similar matching params already
+ * exists - no harm done when this rule is overwritten by
+ * the same STE.
+ * There's some performance advantage in skipping such cases,
+ * so this is left for future optimizations.
+ */
+ ida_free(&bwc_matcher->complex->metadata_ida, node->tag);
+ kfree(node);
+ node = old_node;
+ }
+
+ bwc_rule->complex_hash_node = node;
+ return 0;
+
+err_free_ida:
+ ida_free(&bwc_matcher->complex->metadata_ida, node->tag);
+err_free_node:
+ kfree(node);
+ return ret;
+}
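Editor's note: a small sketch (not part of the patch) of the hash-key derivation performed above. Only the bits that the first matcher actually matches on take part in the key, so rules that differ only in second-matcher fields share a tag; the helper below is hypothetical.

/* Editor's sketch: build the hash key by masking the rule's match buffer
 * with the first matcher's match template mask.
 */
static void example_build_hash_key(u32 *key, const u32 *match_buf,
				   const u32 *mt_match_param)
{
	int i;

	for (i = 0; i < MLX5_ST_SZ_DW_MATCH_PARAM; i++)
		key[i] = match_buf[i] & mt_match_param[i];
}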
+
+static void
+hws_bwc_rule_complex_hash_node_put(struct mlx5hws_bwc_rule *bwc_rule,
+ bool *is_last_rule)
+{
+ struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+ struct mlx5hws_bwc_complex_rule_hash_node *node;
+
+ if (is_last_rule)
+ *is_last_rule = false;
+
+ node = bwc_rule->complex_hash_node;
+ if (refcount_dec_and_test(&node->refcount)) {
+ rhashtable_remove_fast(&bwc_matcher->complex->refcount_hash,
+ &node->hash_node,
+ hws_refcount_hash);
+ ida_free(&bwc_matcher->complex->metadata_ida, node->tag);
+ kfree(node);
+ if (is_last_rule)
+ *is_last_rule = true;
+ }
+
+ bwc_rule->complex_hash_node = NULL;
}
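Editor's note: a hedged sketch (not part of the patch) of the tag lifecycle implied by the get/put helpers above. The first rule with a given first-matcher key allocates an IDA tag; duplicates only bump the node's refcount, and the tag is freed when the last user is destroyed. The helper below assumes the caller holds complex->hash_lock and is hypothetical.

/* Editor's sketch: under the hash lock, a refcount of 1 means this rule
 * is the tag's last user, so its first-matcher STE must really be deleted
 * rather than skipped.
 */
static bool example_is_last_tag_user(struct mlx5hws_bwc_complex_rule_hash_node *node)
{
	return refcount_read(&node->refcount) == 1;
}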
int mlx5hws_bwc_rule_create_complex(struct mlx5hws_bwc_rule *bwc_rule,
@@ -68,19 +1159,271 @@ int mlx5hws_bwc_rule_create_complex(struct mlx5hws_bwc_rule *bwc_rule,
struct mlx5hws_rule_action rule_actions[],
u16 bwc_queue_idx)
{
- mlx5hws_err(bwc_rule->bwc_matcher->matcher->tbl->ctx,
- "Complex rule is not supported yet\n");
- return -EOPNOTSUPP;
+ struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ u8 modify_hdr_action[MLX5_ST_SZ_BYTES(set_action_in)] = {0};
+ struct mlx5hws_rule_action rule_actions_1[3] = {0};
+ struct mlx5hws_bwc_matcher *isolated_bwc_matcher;
+ u32 *match_buf_2;
+ u32 metadata_val;
+ int ret = 0;
+
+ isolated_bwc_matcher = bwc_matcher->complex->isolated_bwc_matcher;
+ bwc_rule->isolated_bwc_rule =
+ mlx5hws_bwc_rule_alloc(isolated_bwc_matcher);
+ if (unlikely(!bwc_rule->isolated_bwc_rule))
+ return -ENOMEM;
+
+ hws_bwc_matcher_complex_hash_lock(bwc_matcher);
+
+ /* Get a new hash node for this complex rule.
+ * If this is a unique set of match params for the first matcher,
+ * we will get a new hash node with a newly allocated IDA tag.
+ * Otherwise we will get an existing node with its IDA tag and an updated refcount.
+ */
+ ret = hws_bwc_rule_complex_hash_node_get(bwc_rule, params);
+ if (unlikely(ret)) {
+ mlx5hws_err(ctx, "Complex rule: failed getting RHT node for this rule\n");
+ goto free_isolated_rule;
+ }
+
+ /* There is no need to clear the match buffer's fields according to
+ * what will actually be matched by the first and second matchers.
+ * Both matchers were created with the appropriate masks,
+ * and each of them holds the appropriate field copy array,
+ * so rule creation will use only the fields that will be copied
+ * in accordance with the setters in the field copy array.
+ * We do, however, need to temporarily allocate a match buffer
+ * for the second (isolated) rule in order to avoid modifying
+ * the user's match params buffer.
+ */
+
+ match_buf_2 = kmemdup(params->match_buf,
+ MLX5_ST_SZ_BYTES(fte_match_param),
+ GFP_KERNEL);
+ if (unlikely(!match_buf_2)) {
+ mlx5hws_err(ctx, "Complex rule: failed allocating match_buf\n");
+ ret = -ENOMEM;
+ goto hash_node_put;
+ }
+
+ /* On 2nd matcher, use unique 32-bit ID as a matching tag */
+ metadata_val = bwc_rule->complex_hash_node->tag;
+ MLX5_SET(fte_match_param, match_buf_2,
+ misc_parameters_2.metadata_reg_c_6, metadata_val);
+
+ /* Isolated rule's rule_actions contain all the original actions */
+ ret = mlx5hws_bwc_rule_create_simple(bwc_rule->isolated_bwc_rule,
+ match_buf_2,
+ rule_actions,
+ flow_source,
+ bwc_queue_idx);
+ kfree(match_buf_2);
+ if (unlikely(ret)) {
+ mlx5hws_err(ctx,
+ "Complex rule: failed creating isolated BWC rule (%d)\n",
+ ret);
+ goto hash_node_put;
+ }
+
+ /* The first rule's rule_actions set the metadata and jump to the
+ * isolated table that contains the second matcher.
+ * The metadata is set to a value that is unique to this rule.
+ */
+
+ MLX5_SET(set_action_in, modify_hdr_action,
+ action_type, MLX5_MODIFICATION_TYPE_SET);
+ MLX5_SET(set_action_in, modify_hdr_action,
+ field, MLX5_MODI_META_REG_C_6);
+ MLX5_SET(set_action_in, modify_hdr_action,
+ length, 0); /* zero means length of 32 */
+ MLX5_SET(set_action_in, modify_hdr_action,
+ offset, 0);
+ MLX5_SET(set_action_in, modify_hdr_action,
+ data, metadata_val);
+
+ rule_actions_1[0].action = bwc_matcher->complex->action_metadata;
+ rule_actions_1[0].modify_header.offset = 0;
+ rule_actions_1[0].modify_header.data = modify_hdr_action;
+
+ rule_actions_1[1].action = bwc_matcher->complex->action_go_to_tbl;
+ rule_actions_1[2].action = bwc_matcher->complex->action_last;
+
+ ret = mlx5hws_bwc_rule_create_simple(bwc_rule,
+ params->match_buf,
+ rule_actions_1,
+ flow_source,
+ bwc_queue_idx);
+
+ if (unlikely(ret)) {
+ mlx5hws_err(ctx,
+ "Complex rule: failed creating first BWC rule (%d)\n",
+ ret);
+ goto destroy_isolated_rule;
+ }
+
+ hws_bwc_matcher_complex_hash_unlock(bwc_matcher);
+
+ return 0;
+
+destroy_isolated_rule:
+ mlx5hws_bwc_rule_destroy_simple(bwc_rule->isolated_bwc_rule);
+hash_node_put:
+ hws_bwc_rule_complex_hash_node_put(bwc_rule, NULL);
+free_isolated_rule:
+ hws_bwc_matcher_complex_hash_unlock(bwc_matcher);
+ mlx5hws_bwc_rule_free(bwc_rule->isolated_bwc_rule);
+ return ret;
}
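Editor's note: a minimal sketch (not part of the patch) of how the isolated rule gets its tag without touching the caller's buffer: the match params are duplicated and the tag is written into REG_C_6 of the copy only; the helper name is hypothetical.

/* Editor's sketch: duplicate the user's match buffer and stamp the tag
 * into REG_C_6 for the isolated (second) matcher.
 */
static u32 *example_build_isolated_match_buf(const u32 *match_buf, u32 tag)
{
	u32 *copy = kmemdup(match_buf, MLX5_ST_SZ_BYTES(fte_match_param),
			    GFP_KERNEL);

	if (!copy)
		return NULL;

	MLX5_SET(fte_match_param, copy,
		 misc_parameters_2.metadata_reg_c_6, tag);
	return copy;
}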
int mlx5hws_bwc_rule_destroy_complex(struct mlx5hws_bwc_rule *bwc_rule)
{
- return 0;
+ struct mlx5hws_context *ctx = bwc_rule->bwc_matcher->matcher->tbl->ctx;
+ struct mlx5hws_bwc_rule *isolated_bwc_rule;
+ int ret_isolated, ret;
+ bool is_last_rule;
+
+ hws_bwc_matcher_complex_hash_lock(bwc_rule->bwc_matcher);
+
+ hws_bwc_rule_complex_hash_node_put(bwc_rule, &is_last_rule);
+ bwc_rule->rule->skip_delete = !is_last_rule;
+
+ ret = mlx5hws_bwc_rule_destroy_simple(bwc_rule);
+ if (unlikely(ret))
+ mlx5hws_err(ctx, "BWC complex rule: failed destroying first rule\n");
+
+ isolated_bwc_rule = bwc_rule->isolated_bwc_rule;
+ ret_isolated = mlx5hws_bwc_rule_destroy_simple(isolated_bwc_rule);
+ if (unlikely(ret_isolated))
+ mlx5hws_err(ctx, "BWC complex rule: failed destroying second (isolated) rule\n");
+
+ hws_bwc_matcher_complex_hash_unlock(bwc_rule->bwc_matcher);
+
+ mlx5hws_bwc_rule_free(isolated_bwc_rule);
+
+ return ret || ret_isolated;
}
-int mlx5hws_bwc_matcher_move_all_complex(struct mlx5hws_bwc_matcher *bwc_matcher)
+static void
+hws_bwc_matcher_clear_hash_rtcs(struct mlx5hws_bwc_matcher *bwc_matcher)
{
- mlx5hws_err(bwc_matcher->matcher->tbl->ctx,
- "Moving complex rule is not supported yet\n");
- return -EOPNOTSUPP;
+ struct mlx5hws_bwc_complex_rule_hash_node *node;
+ struct rhashtable_iter iter;
+
+ rhashtable_walk_enter(&bwc_matcher->complex->refcount_hash, &iter);
+ rhashtable_walk_start(&iter);
+
+ while ((node = rhashtable_walk_next(&iter)) != NULL) {
+ if (IS_ERR(node))
+ continue;
+ node->rtc_valid = false;
+ }
+
+ rhashtable_walk_stop(&iter);
+ rhashtable_walk_exit(&iter);
+}
+
+int
+mlx5hws_bwc_matcher_move_all_complex(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ struct mlx5hws_matcher *matcher = bwc_matcher->matcher;
+ bool move_error = false, poll_error = false;
+ u16 bwc_queues = mlx5hws_bwc_queues(ctx);
+ struct mlx5hws_bwc_rule *tmp_bwc_rule;
+ struct mlx5hws_rule_attr rule_attr;
+ struct mlx5hws_table *isolated_tbl;
+ struct mlx5hws_rule *tmp_rule;
+ struct list_head *rules_list;
+ u32 expected_completions = 1;
+ u32 end_ft_id;
+ int i, ret;
+
+ /* We are rehashing the matcher that is the first part of the complex
+ * matcher. Need to update the isolated matcher to point to the end_ft
+ * of this new matcher. This needs to be done before moving any rules
+ * to prevent possible steering loops.
+ */
+ isolated_tbl = bwc_matcher->complex->isolated_tbl;
+ end_ft_id = bwc_matcher->matcher->resize_dst->end_ft_id;
+ ret = mlx5hws_matcher_update_end_ft_isolated(isolated_tbl, end_ft_id);
+ if (ret) {
+ mlx5hws_err(ctx,
+ "Failed updating end_ft of isolated matcher (%d)\n",
+ ret);
+ return ret;
+ }
+
+ hws_bwc_matcher_clear_hash_rtcs(bwc_matcher);
+
+ mlx5hws_bwc_rule_fill_attr(bwc_matcher, 0, 0, &rule_attr);
+
+ for (i = 0; i < bwc_queues; i++) {
+ rules_list = &bwc_matcher->rules[i];
+ if (list_empty(rules_list))
+ continue;
+
+ rule_attr.queue_id = mlx5hws_bwc_get_queue_id(ctx, i);
+
+ list_for_each_entry(tmp_bwc_rule, rules_list, list_node) {
+ /* Check if a rule with the same tag has already
+ * been moved.
+ */
+ if (tmp_bwc_rule->complex_hash_node->rtc_valid) {
+ /* This rule is a duplicate of a rule with the same
+ * tag that has already been moved earlier.
+ * Just update this rule's RTCs.
+ */
+ tmp_bwc_rule->rule->rtc_0 =
+ tmp_bwc_rule->complex_hash_node->rtc_0;
+ tmp_bwc_rule->rule->rtc_1 =
+ tmp_bwc_rule->complex_hash_node->rtc_1;
+ tmp_bwc_rule->rule->matcher =
+ tmp_bwc_rule->rule->matcher->resize_dst;
+ continue;
+ }
+
+ /* First time we're moving a rule with this tag.
+ * Move it for real.
+ */
+ tmp_rule = tmp_bwc_rule->rule;
+ tmp_rule->skip_delete = false;
+ ret = mlx5hws_matcher_resize_rule_move(matcher,
+ tmp_rule,
+ &rule_attr);
+ if (unlikely(ret && !move_error)) {
+ mlx5hws_err(ctx,
+ "Moving complex BWC rule failed (%d), attempting to move rest of the rules\n",
+ ret);
+ move_error = true;
+ }
+
+ expected_completions = 1;
+ ret = mlx5hws_bwc_queue_poll(ctx,
+ rule_attr.queue_id,
+ &expected_completions,
+ true);
+ if (unlikely(ret && !poll_error)) {
+ mlx5hws_err(ctx,
+ "Moving complex BWC rule: poll failed (%d), attempting to move rest of the rules\n",
+ ret);
+ poll_error = true;
+ }
+
+ /* Done moving the rule to the new matcher,
+ * now update RTCs for all the duplicated rules.
+ */
+ tmp_bwc_rule->complex_hash_node->rtc_0 =
+ tmp_bwc_rule->rule->rtc_0;
+ tmp_bwc_rule->complex_hash_node->rtc_1 =
+ tmp_bwc_rule->rule->rtc_1;
+
+ tmp_bwc_rule->complex_hash_node->rtc_valid = true;
+ }
+ }
+
+ if (move_error || poll_error)
+ ret = -EINVAL;
+
+ return ret;
}
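Editor's note: a short sketch (not part of the patch) of the duplicate handling during rehash. Rules sharing a tag occupy one STE, so only the first rule per tag is physically moved; later duplicates just adopt the RTC ids recorded in the shared hash node, as the hypothetical helper below illustrates.

/* Editor's sketch: make a duplicate rule point at the already-moved STE. */
static void example_adopt_moved_rtcs(struct mlx5hws_bwc_rule *dup,
				     struct mlx5hws_bwc_complex_rule_hash_node *node)
{
	dup->rule->rtc_0 = node->rtc_0;
	dup->rule->rtc_1 = node->rtc_1;
	dup->rule->matcher = dup->rule->matcher->resize_dst;
}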
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.h
index 340f0688e394..a6887c7e39d5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.h
@@ -4,6 +4,27 @@
#ifndef HWS_BWC_COMPLEX_H_
#define HWS_BWC_COMPLEX_H_
+struct mlx5hws_bwc_complex_rule_hash_node {
+ u32 match_buf[MLX5_ST_SZ_DW_MATCH_PARAM];
+ u32 tag;
+ refcount_t refcount;
+ bool rtc_valid;
+ u32 rtc_0;
+ u32 rtc_1;
+ struct rhash_head hash_node;
+};
+
+struct mlx5hws_bwc_matcher_complex_data {
+ struct mlx5hws_table *isolated_tbl;
+ struct mlx5hws_bwc_matcher *isolated_bwc_matcher;
+ struct mlx5hws_action *action_metadata;
+ struct mlx5hws_action *action_go_to_tbl;
+ struct mlx5hws_action *action_last;
+ struct rhashtable refcount_hash;
+ struct mutex hash_lock; /* Protect the refcount rhashtable */
+ struct ida metadata_ida;
+};
+
bool mlx5hws_bwc_match_params_is_complex(struct mlx5hws_context *ctx,
u8 match_criteria_enable,
struct mlx5hws_match_parameters *mask);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c
index c00c138c3366..9c83753e4592 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c
@@ -130,12 +130,6 @@ int mlx5hws_cmd_flow_table_destroy(struct mlx5_core_dev *mdev,
return mlx5_cmd_exec_in(mdev, destroy_flow_table, in);
}
-void mlx5hws_cmd_alias_flow_table_destroy(struct mlx5_core_dev *mdev,
- u32 table_id)
-{
- hws_cmd_general_obj_destroy(mdev, MLX5_OBJ_TYPE_FT_ALIAS, table_id);
-}
-
static int hws_cmd_flow_group_create(struct mlx5_core_dev *mdev,
struct mlx5hws_cmd_fg_attr *fg_attr,
u32 *group_id)
@@ -257,6 +251,12 @@ int mlx5hws_cmd_set_fte(struct mlx5_core_dev *mdev,
dest->ext_reformat_id);
}
break;
+ case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER:
+ MLX5_SET(dest_format, in_dests,
+ destination_type, ifc_dest_type);
+ MLX5_SET(dest_format, in_dests, destination_id,
+ dest->destination_id);
+ break;
default:
ret = -EOPNOTSUPP;
goto out;
@@ -359,7 +359,7 @@ void mlx5hws_cmd_set_attr_connect_miss_tbl(struct mlx5hws_context *ctx,
ft_attr->type = fw_ft_type;
ft_attr->table_miss_action = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION_GOTO_TBL;
- default_miss_tbl = ctx->common_res[type].default_miss->ft_id;
+ default_miss_tbl = ctx->common_res.default_miss->ft_id;
if (!default_miss_tbl) {
pr_warn("HWS: no flow table ID for default miss\n");
return;
@@ -406,7 +406,6 @@ int mlx5hws_cmd_rtc_create(struct mlx5_core_dev *mdev,
MLX5_SET(rtc, attr, match_definer_1, rtc_attr->match_definer_1);
MLX5_SET(rtc, attr, stc_id, rtc_attr->stc_base);
MLX5_SET(rtc, attr, ste_table_base_id, rtc_attr->ste_base);
- MLX5_SET(rtc, attr, ste_table_offset, rtc_attr->ste_offset);
MLX5_SET(rtc, attr, miss_flow_table_id, rtc_attr->miss_ft_id);
MLX5_SET(rtc, attr, reparse_mode, rtc_attr->reparse_mode);
@@ -622,12 +621,12 @@ int mlx5hws_cmd_arg_create(struct mlx5_core_dev *mdev,
u32 pd,
u32 *arg_id)
{
+ u32 in[MLX5_ST_SZ_DW(create_modify_header_arg_in)] = {0};
u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
- u32 in[MLX5_ST_SZ_DW(create_arg_in)] = {0};
void *attr;
int ret;
- attr = MLX5_ADDR_OF(create_arg_in, in, hdr);
+ attr = MLX5_ADDR_OF(create_modify_header_arg_in, in, hdr);
MLX5_SET(general_obj_in_cmd_hdr,
attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
MLX5_SET(general_obj_in_cmd_hdr,
@@ -635,8 +634,8 @@ int mlx5hws_cmd_arg_create(struct mlx5_core_dev *mdev,
MLX5_SET(general_obj_in_cmd_hdr,
attr, op_param.create.log_obj_range, log_obj_range);
- attr = MLX5_ADDR_OF(create_arg_in, in, arg);
- MLX5_SET(arg, attr, access_pd, pd);
+ attr = MLX5_ADDR_OF(create_modify_header_arg_in, in, arg);
+ MLX5_SET(modify_header_arg, attr, access_pd, pd);
ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
if (ret) {
@@ -812,7 +811,7 @@ int mlx5hws_cmd_packet_reformat_create(struct mlx5_core_dev *mdev,
struct mlx5hws_cmd_packet_reformat_create_attr *attr,
u32 *reformat_id)
{
- u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_out)] = {0};
+ u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_context_out)] = {0};
size_t insz, cmd_data_sz, cmd_total_sz;
void *prctx;
void *pdata;
@@ -845,7 +844,7 @@ int mlx5hws_cmd_packet_reformat_create(struct mlx5_core_dev *mdev,
goto out;
}
- *reformat_id = MLX5_GET(alloc_packet_reformat_out, out, packet_reformat_id);
+ *reformat_id = MLX5_GET(alloc_packet_reformat_context_out, out, packet_reformat_id);
out:
kfree(in);
return ret;
@@ -854,13 +853,13 @@ out:
int mlx5hws_cmd_packet_reformat_destroy(struct mlx5_core_dev *mdev,
u32 reformat_id)
{
- u32 out[MLX5_ST_SZ_DW(dealloc_packet_reformat_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_in)] = {0};
int ret;
- MLX5_SET(dealloc_packet_reformat_in, in, opcode,
+ MLX5_SET(dealloc_packet_reformat_context_in, in, opcode,
MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
- MLX5_SET(dealloc_packet_reformat_in, in,
+ MLX5_SET(dealloc_packet_reformat_context_in, in,
packet_reformat_id, reformat_id);
ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
@@ -889,73 +888,6 @@ int mlx5hws_cmd_sq_modify_rdy(struct mlx5_core_dev *mdev, u32 sqn)
return ret;
}
-int mlx5hws_cmd_allow_other_vhca_access(struct mlx5_core_dev *mdev,
- struct mlx5hws_cmd_allow_other_vhca_access_attr *attr)
-{
- u32 out[MLX5_ST_SZ_DW(allow_other_vhca_access_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(allow_other_vhca_access_in)] = {0};
- void *key;
- int ret;
-
- MLX5_SET(allow_other_vhca_access_in,
- in, opcode, MLX5_CMD_OP_ALLOW_OTHER_VHCA_ACCESS);
- MLX5_SET(allow_other_vhca_access_in,
- in, object_type_to_be_accessed, attr->obj_type);
- MLX5_SET(allow_other_vhca_access_in,
- in, object_id_to_be_accessed, attr->obj_id);
-
- key = MLX5_ADDR_OF(allow_other_vhca_access_in, in, access_key);
- memcpy(key, attr->access_key, sizeof(attr->access_key));
-
- ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
- if (ret)
- mlx5_core_err(mdev, "Failed to execute ALLOW_OTHER_VHCA_ACCESS command\n");
-
- return ret;
-}
-
-int mlx5hws_cmd_alias_obj_create(struct mlx5_core_dev *mdev,
- struct mlx5hws_cmd_alias_obj_create_attr *alias_attr,
- u32 *obj_id)
-{
- u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
- u32 in[MLX5_ST_SZ_DW(create_alias_obj_in)] = {0};
- void *attr;
- void *key;
- int ret;
-
- attr = MLX5_ADDR_OF(create_alias_obj_in, in, hdr);
- MLX5_SET(general_obj_in_cmd_hdr,
- attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
- MLX5_SET(general_obj_in_cmd_hdr,
- attr, obj_type, alias_attr->obj_type);
- MLX5_SET(general_obj_in_cmd_hdr, attr, op_param.create.alias_object, 1);
-
- attr = MLX5_ADDR_OF(create_alias_obj_in, in, alias_ctx);
- MLX5_SET(alias_context, attr, vhca_id_to_be_accessed, alias_attr->vhca_id);
- MLX5_SET(alias_context, attr, object_id_to_be_accessed, alias_attr->obj_id);
-
- key = MLX5_ADDR_OF(alias_context, attr, access_key);
- memcpy(key, alias_attr->access_key, sizeof(alias_attr->access_key));
-
- ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
- if (ret) {
- mlx5_core_err(mdev, "Failed to create ALIAS OBJ\n");
- goto out;
- }
-
- *obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
-out:
- return ret;
-}
-
-int mlx5hws_cmd_alias_obj_destroy(struct mlx5_core_dev *mdev,
- u16 obj_type,
- u32 obj_id)
-{
- return hws_cmd_general_obj_destroy(mdev, obj_type, obj_id);
-}
-
int mlx5hws_cmd_generate_wqe(struct mlx5_core_dev *mdev,
struct mlx5hws_cmd_generate_wqe_attr *attr,
struct mlx5_cqe64 *ret_cqe)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h
index 434f62b0904e..fa6bff210266 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h
@@ -63,14 +63,13 @@ struct mlx5hws_cmd_forward_tbl {
u8 type;
u32 ft_id;
u32 fg_id;
- u32 refcount;
+ u32 refcount; /* protected by context ctrl lock */
};
struct mlx5hws_cmd_rtc_create_attr {
u32 pd;
u32 stc_base;
u32 ste_base;
- u32 ste_offset;
u32 miss_ft_id;
bool fw_gen_wqe;
u8 update_index_mode;
@@ -258,9 +257,6 @@ int mlx5hws_cmd_flow_table_query(struct mlx5_core_dev *mdev,
int mlx5hws_cmd_flow_table_destroy(struct mlx5_core_dev *mdev,
u8 fw_ft_type, u32 table_id);
-void mlx5hws_cmd_alias_flow_table_destroy(struct mlx5_core_dev *mdev,
- u32 table_id);
-
int mlx5hws_cmd_rtc_create(struct mlx5_core_dev *mdev,
struct mlx5hws_cmd_rtc_create_attr *rtc_attr,
u32 *rtc_id);
@@ -334,14 +330,6 @@ mlx5hws_cmd_forward_tbl_create(struct mlx5_core_dev *mdev,
void mlx5hws_cmd_forward_tbl_destroy(struct mlx5_core_dev *mdev,
struct mlx5hws_cmd_forward_tbl *tbl);
-int mlx5hws_cmd_alias_obj_create(struct mlx5_core_dev *mdev,
- struct mlx5hws_cmd_alias_obj_create_attr *alias_attr,
- u32 *obj_id);
-
-int mlx5hws_cmd_alias_obj_destroy(struct mlx5_core_dev *mdev,
- u16 obj_type,
- u32 obj_id);
-
int mlx5hws_cmd_sq_modify_rdy(struct mlx5_core_dev *mdev, u32 sqn);
int mlx5hws_cmd_query_caps(struct mlx5_core_dev *mdev,
@@ -352,9 +340,6 @@ void mlx5hws_cmd_set_attr_connect_miss_tbl(struct mlx5hws_context *ctx,
enum mlx5hws_table_type type,
struct mlx5hws_cmd_ft_modify_attr *ft_attr);
-int mlx5hws_cmd_allow_other_vhca_access(struct mlx5_core_dev *mdev,
- struct mlx5hws_cmd_allow_other_vhca_access_attr *attr);
-
int mlx5hws_cmd_query_gvmi(struct mlx5_core_dev *mdev, bool other_function,
u16 vport_number, u16 *gvmi);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.c
index fd48b05e91e0..428dae869706 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.c
@@ -23,7 +23,6 @@ static int hws_context_pools_init(struct mlx5hws_context *ctx)
struct mlx5hws_pool_attr pool_attr = {0};
u8 max_log_sz;
int ret;
- int i;
ret = mlx5hws_pat_init_pattern_cache(&ctx->pattern_cache);
if (ret)
@@ -35,27 +34,20 @@ static int hws_context_pools_init(struct mlx5hws_context *ctx)
/* Create an STC pool per FT type */
pool_attr.pool_type = MLX5HWS_POOL_TYPE_STC;
- pool_attr.flags = MLX5HWS_POOL_FLAGS_FOR_STC_POOL;
max_log_sz = min(MLX5HWS_POOL_STC_LOG_SZ, ctx->caps->stc_alloc_log_max);
pool_attr.alloc_log_sz = max(max_log_sz, ctx->caps->stc_alloc_log_gran);
- for (i = 0; i < MLX5HWS_TABLE_TYPE_MAX; i++) {
- pool_attr.table_type = i;
- ctx->stc_pool[i] = mlx5hws_pool_create(ctx, &pool_attr);
- if (!ctx->stc_pool[i]) {
- mlx5hws_err(ctx, "Failed to allocate STC pool [%d]", i);
- ret = -ENOMEM;
- goto free_stc_pools;
- }
+ pool_attr.table_type = MLX5HWS_TABLE_TYPE_FDB;
+ ctx->stc_pool = mlx5hws_pool_create(ctx, &pool_attr);
+ if (!ctx->stc_pool) {
+ mlx5hws_err(ctx, "Failed to allocate STC pool\n");
+ ret = -ENOMEM;
+ goto uninit_cache;
}
return 0;
-free_stc_pools:
- for (i = 0; i < MLX5HWS_TABLE_TYPE_MAX; i++)
- if (ctx->stc_pool[i])
- mlx5hws_pool_destroy(ctx->stc_pool[i]);
-
+uninit_cache:
mlx5hws_definer_uninit_cache(ctx->definer_cache);
uninit_pat_cache:
mlx5hws_pat_uninit_pattern_cache(ctx->pattern_cache);
@@ -64,12 +56,8 @@ uninit_pat_cache:
static void hws_context_pools_uninit(struct mlx5hws_context *ctx)
{
- int i;
-
- for (i = 0; i < MLX5HWS_TABLE_TYPE_MAX; i++) {
- if (ctx->stc_pool[i])
- mlx5hws_pool_destroy(ctx->stc_pool[i]);
- }
+ if (ctx->stc_pool)
+ mlx5hws_pool_destroy(ctx->stc_pool);
mlx5hws_definer_uninit_cache(ctx->definer_cache);
mlx5hws_pat_uninit_pattern_cache(ctx->pattern_cache);
@@ -161,17 +149,25 @@ static int hws_context_init_hws(struct mlx5hws_context *ctx,
if (ret)
goto uninit_pd;
- if (attr->bwc)
- ctx->flags |= MLX5HWS_CONTEXT_FLAG_BWC_SUPPORT;
+ /* Context has support for backward compatible API,
+ * and does not have support for native HWS API.
+ */
+ ctx->flags |= MLX5HWS_CONTEXT_FLAG_BWC_SUPPORT;
ret = mlx5hws_send_queues_open(ctx, attr->queues, attr->queue_size);
if (ret)
goto pools_uninit;
+ ret = mlx5hws_action_ste_pool_init(ctx);
+ if (ret)
+ goto close_queues;
+
INIT_LIST_HEAD(&ctx->tbl_list);
return 0;
+close_queues:
+ mlx5hws_send_queues_close(ctx);
pools_uninit:
hws_context_pools_uninit(ctx);
uninit_pd:
@@ -184,6 +180,7 @@ static void hws_context_uninit_hws(struct mlx5hws_context *ctx)
if (!(ctx->flags & MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT))
return;
+ mlx5hws_action_ste_pool_uninit(ctx);
mlx5hws_send_queues_close(ctx);
hws_context_pools_uninit(ctx);
hws_context_uninit_pd(ctx);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.h
index 47f5cc8de73f..3f8938c73dc0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.h
@@ -8,6 +8,7 @@ enum mlx5hws_context_flags {
MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT = 1 << 0,
MLX5HWS_CONTEXT_FLAG_PRIVATE_PD = 1 << 1,
MLX5HWS_CONTEXT_FLAG_BWC_SUPPORT = 1 << 2,
+ MLX5HWS_CONTEXT_FLAG_NATIVE_SUPPORT = 1 << 3,
};
enum mlx5hws_context_shared_stc_type {
@@ -37,8 +38,10 @@ struct mlx5hws_context {
struct mlx5_core_dev *mdev;
struct mlx5hws_cmd_query_caps *caps;
u32 pd_num;
- struct mlx5hws_pool *stc_pool[MLX5HWS_TABLE_TYPE_MAX];
- struct mlx5hws_context_common_res common_res[MLX5HWS_TABLE_TYPE_MAX];
+ struct mlx5hws_pool *stc_pool;
+ struct mlx5hws_action_ste_pool *action_ste_pool; /* One per queue */
+ struct delayed_work action_ste_cleanup;
+ struct mlx5hws_context_common_res common_res;
struct mlx5hws_pattern_cache *pattern_cache;
struct mlx5hws_definer_cache *definer_cache;
struct mutex ctrl_lock; /* control lock to protect the whole context */
@@ -58,6 +61,11 @@ static inline bool mlx5hws_context_bwc_supported(struct mlx5hws_context *ctx)
return ctx->flags & MLX5HWS_CONTEXT_FLAG_BWC_SUPPORT;
}
+static inline bool mlx5hws_context_native_supported(struct mlx5hws_context *ctx)
+{
+ return ctx->flags & MLX5HWS_CONTEXT_FLAG_NATIVE_SUPPORT;
+}
+
bool mlx5hws_context_cap_dynamic_reparse(struct mlx5hws_context *ctx);
u8 mlx5hws_context_get_reparse_mode(struct mlx5hws_context *ctx);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.c
index 5b200b4bc1a8..91568d6c1dac 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.c
@@ -118,7 +118,6 @@ static int hws_debug_dump_matcher(struct seq_file *f, struct mlx5hws_matcher *ma
{
enum mlx5hws_table_type tbl_type = matcher->tbl->type;
struct mlx5hws_cmd_ft_query_attr ft_attr = {0};
- struct mlx5hws_pool_chunk *ste;
struct mlx5hws_pool *ste_pool;
u64 icm_addr_0 = 0;
u64 icm_addr_1 = 0;
@@ -134,12 +133,11 @@ static int hws_debug_dump_matcher(struct seq_file *f, struct mlx5hws_matcher *ma
matcher->end_ft_id,
matcher->col_matcher ? HWS_PTR_TO_ID(matcher->col_matcher) : 0);
- ste = &matcher->match_ste.ste;
ste_pool = matcher->match_ste.pool;
if (ste_pool) {
- ste_0_id = mlx5hws_pool_chunk_get_base_id(ste_pool, ste);
+ ste_0_id = mlx5hws_pool_get_base_id(ste_pool);
if (tbl_type == MLX5HWS_TABLE_TYPE_FDB)
- ste_1_id = mlx5hws_pool_chunk_get_base_mirror_id(ste_pool, ste);
+ ste_1_id = mlx5hws_pool_get_base_mirror_id(ste_pool);
}
seq_printf(f, ",%d,%d,%d,%d",
@@ -148,19 +146,6 @@ static int hws_debug_dump_matcher(struct seq_file *f, struct mlx5hws_matcher *ma
matcher->match_ste.rtc_1_id,
(int)ste_1_id);
- ste = &matcher->action_ste[0].ste;
- ste_pool = matcher->action_ste[0].pool;
- if (ste_pool) {
- ste_0_id = mlx5hws_pool_chunk_get_base_id(ste_pool, ste);
- if (tbl_type == MLX5HWS_TABLE_TYPE_FDB)
- ste_1_id = mlx5hws_pool_chunk_get_base_mirror_id(ste_pool, ste);
- else
- ste_1_id = -1;
- } else {
- ste_0_id = -1;
- ste_1_id = -1;
- }
-
ft_attr.type = matcher->tbl->fw_ft_type;
ret = mlx5hws_cmd_flow_table_query(matcher->tbl->ctx->mdev,
matcher->end_ft_id,
@@ -170,12 +155,7 @@ static int hws_debug_dump_matcher(struct seq_file *f, struct mlx5hws_matcher *ma
if (ret)
return ret;
- seq_printf(f, ",%d,%d,%d,%d,%d,0x%llx,0x%llx\n",
- matcher->action_ste[0].rtc_0_id,
- (int)ste_0_id,
- matcher->action_ste[0].rtc_1_id,
- (int)ste_1_id,
- 0,
+ seq_printf(f, ",-1,-1,-1,-1,0,0x%llx,0x%llx\n",
mlx5hws_debug_icm_to_idx(icm_addr_0),
mlx5hws_debug_icm_to_idx(icm_addr_1));
@@ -368,9 +348,10 @@ static int hws_debug_dump_context_info(struct seq_file *f, struct mlx5hws_contex
static int hws_debug_dump_context_stc_resource(struct seq_file *f,
struct mlx5hws_context *ctx,
- u32 tbl_type,
struct mlx5hws_pool_resource *resource)
{
+ u32 tbl_type = MLX5HWS_TABLE_TYPE_BASE + MLX5HWS_TABLE_TYPE_FDB;
+
seq_printf(f, "%d,0x%llx,%u,%u\n",
MLX5HWS_DEBUG_RES_TYPE_CONTEXT_STC,
HWS_PTR_TO_ID(ctx),
@@ -382,40 +363,65 @@ static int hws_debug_dump_context_stc_resource(struct seq_file *f,
static int hws_debug_dump_context_stc(struct seq_file *f, struct mlx5hws_context *ctx)
{
- struct mlx5hws_pool *stc_pool;
- u32 table_type;
+ struct mlx5hws_pool *stc_pool = ctx->stc_pool;
int ret;
- int i;
- for (i = 0; i < MLX5HWS_TABLE_TYPE_MAX; i++) {
- stc_pool = ctx->stc_pool[i];
- table_type = MLX5HWS_TABLE_TYPE_BASE + i;
+ if (!stc_pool)
+ return 0;
- if (!stc_pool)
- continue;
+ if (stc_pool->resource) {
+ ret = hws_debug_dump_context_stc_resource(f, ctx,
+ stc_pool->resource);
+ if (ret)
+ return ret;
+ }
- if (stc_pool->resource[0]) {
- ret = hws_debug_dump_context_stc_resource(f, ctx, table_type,
- stc_pool->resource[0]);
- if (ret)
- return ret;
- }
+ if (stc_pool->mirror_resource) {
+ struct mlx5hws_pool_resource *res = stc_pool->mirror_resource;
- if (i == MLX5HWS_TABLE_TYPE_FDB && stc_pool->mirror_resource[0]) {
- ret = hws_debug_dump_context_stc_resource(f, ctx, table_type,
- stc_pool->mirror_resource[0]);
- if (ret)
- return ret;
- }
+ ret = hws_debug_dump_context_stc_resource(f, ctx, res);
+ if (ret)
+ return ret;
}
return 0;
}
+static void
+hws_debug_dump_action_ste_table(struct seq_file *f,
+ struct mlx5hws_action_ste_table *action_tbl)
+{
+ int ste_0_id = mlx5hws_pool_get_base_id(action_tbl->pool);
+ int ste_1_id = mlx5hws_pool_get_base_mirror_id(action_tbl->pool);
+
+ seq_printf(f, "%d,0x%llx,%d,%d,%d,%d\n",
+ MLX5HWS_DEBUG_RES_TYPE_ACTION_STE_TABLE,
+ HWS_PTR_TO_ID(action_tbl),
+ action_tbl->rtc_0_id, ste_0_id,
+ action_tbl->rtc_1_id, ste_1_id);
+}
+
+static void hws_debug_dump_action_ste_pool(struct seq_file *f,
+ struct mlx5hws_action_ste_pool *pool)
+{
+ struct mlx5hws_action_ste_table *action_tbl;
+ enum mlx5hws_pool_optimize opt;
+
+ mutex_lock(&pool->lock);
+ for (opt = MLX5HWS_POOL_OPTIMIZE_NONE; opt < MLX5HWS_POOL_OPTIMIZE_MAX;
+ opt++) {
+ list_for_each_entry(action_tbl, &pool->elems[opt].available,
+ list_node) {
+ hws_debug_dump_action_ste_table(f, action_tbl);
+ }
+ }
+ mutex_unlock(&pool->lock);
+}
+
static int hws_debug_dump_context(struct seq_file *f, struct mlx5hws_context *ctx)
{
struct mlx5hws_table *tbl;
- int ret;
+ int ret, i;
ret = hws_debug_dump_context_info(f, ctx);
if (ret)
@@ -435,6 +441,9 @@ static int hws_debug_dump_context(struct seq_file *f, struct mlx5hws_context *ct
return ret;
}
+ for (i = 0; i < ctx->queues; i++)
+ hws_debug_dump_action_ste_pool(f, &ctx->action_ste_pool[i]);
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.h
index e44e7ae28f93..89c396f9f266 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.h
@@ -26,6 +26,8 @@ enum mlx5hws_debug_res_type {
MLX5HWS_DEBUG_RES_TYPE_MATCHER_TEMPLATE_HASH_DEFINER = 4205,
MLX5HWS_DEBUG_RES_TYPE_MATCHER_TEMPLATE_RANGE_DEFINER = 4206,
MLX5HWS_DEBUG_RES_TYPE_MATCHER_TEMPLATE_COMPARE_MATCH_DEFINER = 4207,
+
+ MLX5HWS_DEBUG_RES_TYPE_ACTION_STE_TABLE = 4300,
};
static inline u64
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c
index 8fe96eb76baf..d45e1145d197 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c
@@ -70,7 +70,7 @@
u32 second_dw_mask = (mask) & ((1 << _bit_off) - 1); \
_HWS_SET32(p, (v) >> _bit_off, byte_off, 0, (mask) >> _bit_off); \
_HWS_SET32(p, (v) & second_dw_mask, (byte_off) + DW_SIZE, \
- (bit_off) % BITS_IN_DW, second_dw_mask); \
+ (bit_off + BITS_IN_DW) % BITS_IN_DW, second_dw_mask); \
} else { \
_HWS_SET32(p, v, byte_off, (bit_off), (mask)); \
} \
@@ -158,6 +158,218 @@ struct mlx5hws_definer_conv_data {
u32 match_flags;
};
+#define HWS_DEFINER_ENTRY(name)[MLX5HWS_DEFINER_FNAME_##name] = #name
+
+static const char * const hws_definer_fname_to_str[] = {
+ HWS_DEFINER_ENTRY(ETH_SMAC_47_16_O),
+ HWS_DEFINER_ENTRY(ETH_SMAC_47_16_I),
+ HWS_DEFINER_ENTRY(ETH_SMAC_15_0_O),
+ HWS_DEFINER_ENTRY(ETH_SMAC_15_0_I),
+ HWS_DEFINER_ENTRY(ETH_DMAC_47_16_O),
+ HWS_DEFINER_ENTRY(ETH_DMAC_47_16_I),
+ HWS_DEFINER_ENTRY(ETH_DMAC_15_0_O),
+ HWS_DEFINER_ENTRY(ETH_DMAC_15_0_I),
+ HWS_DEFINER_ENTRY(ETH_TYPE_O),
+ HWS_DEFINER_ENTRY(ETH_TYPE_I),
+ HWS_DEFINER_ENTRY(ETH_L3_TYPE_O),
+ HWS_DEFINER_ENTRY(ETH_L3_TYPE_I),
+ HWS_DEFINER_ENTRY(VLAN_TYPE_O),
+ HWS_DEFINER_ENTRY(VLAN_TYPE_I),
+ HWS_DEFINER_ENTRY(VLAN_FIRST_PRIO_O),
+ HWS_DEFINER_ENTRY(VLAN_FIRST_PRIO_I),
+ HWS_DEFINER_ENTRY(VLAN_CFI_O),
+ HWS_DEFINER_ENTRY(VLAN_CFI_I),
+ HWS_DEFINER_ENTRY(VLAN_ID_O),
+ HWS_DEFINER_ENTRY(VLAN_ID_I),
+ HWS_DEFINER_ENTRY(VLAN_SECOND_TYPE_O),
+ HWS_DEFINER_ENTRY(VLAN_SECOND_TYPE_I),
+ HWS_DEFINER_ENTRY(VLAN_SECOND_PRIO_O),
+ HWS_DEFINER_ENTRY(VLAN_SECOND_PRIO_I),
+ HWS_DEFINER_ENTRY(VLAN_SECOND_CFI_O),
+ HWS_DEFINER_ENTRY(VLAN_SECOND_CFI_I),
+ HWS_DEFINER_ENTRY(VLAN_SECOND_ID_O),
+ HWS_DEFINER_ENTRY(VLAN_SECOND_ID_I),
+ HWS_DEFINER_ENTRY(IPV4_IHL_O),
+ HWS_DEFINER_ENTRY(IPV4_IHL_I),
+ HWS_DEFINER_ENTRY(IP_DSCP_O),
+ HWS_DEFINER_ENTRY(IP_DSCP_I),
+ HWS_DEFINER_ENTRY(IP_ECN_O),
+ HWS_DEFINER_ENTRY(IP_ECN_I),
+ HWS_DEFINER_ENTRY(IP_TTL_O),
+ HWS_DEFINER_ENTRY(IP_TTL_I),
+ HWS_DEFINER_ENTRY(IPV4_DST_O),
+ HWS_DEFINER_ENTRY(IPV4_DST_I),
+ HWS_DEFINER_ENTRY(IPV4_SRC_O),
+ HWS_DEFINER_ENTRY(IPV4_SRC_I),
+ HWS_DEFINER_ENTRY(IP_VERSION_O),
+ HWS_DEFINER_ENTRY(IP_VERSION_I),
+ HWS_DEFINER_ENTRY(IP_FRAG_O),
+ HWS_DEFINER_ENTRY(IP_FRAG_I),
+ HWS_DEFINER_ENTRY(IP_LEN_O),
+ HWS_DEFINER_ENTRY(IP_LEN_I),
+ HWS_DEFINER_ENTRY(IP_TOS_O),
+ HWS_DEFINER_ENTRY(IP_TOS_I),
+ HWS_DEFINER_ENTRY(IPV6_FLOW_LABEL_O),
+ HWS_DEFINER_ENTRY(IPV6_FLOW_LABEL_I),
+ HWS_DEFINER_ENTRY(IPV6_DST_127_96_O),
+ HWS_DEFINER_ENTRY(IPV6_DST_95_64_O),
+ HWS_DEFINER_ENTRY(IPV6_DST_63_32_O),
+ HWS_DEFINER_ENTRY(IPV6_DST_31_0_O),
+ HWS_DEFINER_ENTRY(IPV6_DST_127_96_I),
+ HWS_DEFINER_ENTRY(IPV6_DST_95_64_I),
+ HWS_DEFINER_ENTRY(IPV6_DST_63_32_I),
+ HWS_DEFINER_ENTRY(IPV6_DST_31_0_I),
+ HWS_DEFINER_ENTRY(IPV6_SRC_127_96_O),
+ HWS_DEFINER_ENTRY(IPV6_SRC_95_64_O),
+ HWS_DEFINER_ENTRY(IPV6_SRC_63_32_O),
+ HWS_DEFINER_ENTRY(IPV6_SRC_31_0_O),
+ HWS_DEFINER_ENTRY(IPV6_SRC_127_96_I),
+ HWS_DEFINER_ENTRY(IPV6_SRC_95_64_I),
+ HWS_DEFINER_ENTRY(IPV6_SRC_63_32_I),
+ HWS_DEFINER_ENTRY(IPV6_SRC_31_0_I),
+ HWS_DEFINER_ENTRY(IP_PROTOCOL_O),
+ HWS_DEFINER_ENTRY(IP_PROTOCOL_I),
+ HWS_DEFINER_ENTRY(L4_SPORT_O),
+ HWS_DEFINER_ENTRY(L4_SPORT_I),
+ HWS_DEFINER_ENTRY(L4_DPORT_O),
+ HWS_DEFINER_ENTRY(L4_DPORT_I),
+ HWS_DEFINER_ENTRY(TCP_FLAGS_I),
+ HWS_DEFINER_ENTRY(TCP_FLAGS_O),
+ HWS_DEFINER_ENTRY(TCP_SEQ_NUM),
+ HWS_DEFINER_ENTRY(TCP_ACK_NUM),
+ HWS_DEFINER_ENTRY(GTP_TEID),
+ HWS_DEFINER_ENTRY(GTP_MSG_TYPE),
+ HWS_DEFINER_ENTRY(GTP_EXT_FLAG),
+ HWS_DEFINER_ENTRY(GTP_NEXT_EXT_HDR),
+ HWS_DEFINER_ENTRY(GTP_EXT_HDR_PDU),
+ HWS_DEFINER_ENTRY(GTP_EXT_HDR_QFI),
+ HWS_DEFINER_ENTRY(GTPU_DW0),
+ HWS_DEFINER_ENTRY(GTPU_FIRST_EXT_DW0),
+ HWS_DEFINER_ENTRY(GTPU_DW2),
+ HWS_DEFINER_ENTRY(FLEX_PARSER_0),
+ HWS_DEFINER_ENTRY(FLEX_PARSER_1),
+ HWS_DEFINER_ENTRY(FLEX_PARSER_2),
+ HWS_DEFINER_ENTRY(FLEX_PARSER_3),
+ HWS_DEFINER_ENTRY(FLEX_PARSER_4),
+ HWS_DEFINER_ENTRY(FLEX_PARSER_5),
+ HWS_DEFINER_ENTRY(FLEX_PARSER_6),
+ HWS_DEFINER_ENTRY(FLEX_PARSER_7),
+ HWS_DEFINER_ENTRY(VPORT_REG_C_0),
+ HWS_DEFINER_ENTRY(VXLAN_FLAGS),
+ HWS_DEFINER_ENTRY(VXLAN_VNI),
+ HWS_DEFINER_ENTRY(VXLAN_GPE_FLAGS),
+ HWS_DEFINER_ENTRY(VXLAN_GPE_RSVD0),
+ HWS_DEFINER_ENTRY(VXLAN_GPE_PROTO),
+ HWS_DEFINER_ENTRY(VXLAN_GPE_VNI),
+ HWS_DEFINER_ENTRY(VXLAN_GPE_RSVD1),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_LEN),
+ HWS_DEFINER_ENTRY(GENEVE_OAM),
+ HWS_DEFINER_ENTRY(GENEVE_PROTO),
+ HWS_DEFINER_ENTRY(GENEVE_VNI),
+ HWS_DEFINER_ENTRY(SOURCE_QP),
+ HWS_DEFINER_ENTRY(SOURCE_GVMI),
+ HWS_DEFINER_ENTRY(REG_0),
+ HWS_DEFINER_ENTRY(REG_1),
+ HWS_DEFINER_ENTRY(REG_2),
+ HWS_DEFINER_ENTRY(REG_3),
+ HWS_DEFINER_ENTRY(REG_4),
+ HWS_DEFINER_ENTRY(REG_5),
+ HWS_DEFINER_ENTRY(REG_6),
+ HWS_DEFINER_ENTRY(REG_7),
+ HWS_DEFINER_ENTRY(REG_8),
+ HWS_DEFINER_ENTRY(REG_9),
+ HWS_DEFINER_ENTRY(REG_10),
+ HWS_DEFINER_ENTRY(REG_11),
+ HWS_DEFINER_ENTRY(REG_A),
+ HWS_DEFINER_ENTRY(REG_B),
+ HWS_DEFINER_ENTRY(GRE_KEY_PRESENT),
+ HWS_DEFINER_ENTRY(GRE_C),
+ HWS_DEFINER_ENTRY(GRE_K),
+ HWS_DEFINER_ENTRY(GRE_S),
+ HWS_DEFINER_ENTRY(GRE_PROTOCOL),
+ HWS_DEFINER_ENTRY(GRE_OPT_KEY),
+ HWS_DEFINER_ENTRY(GRE_OPT_SEQ),
+ HWS_DEFINER_ENTRY(GRE_OPT_CHECKSUM),
+ HWS_DEFINER_ENTRY(INTEGRITY_O),
+ HWS_DEFINER_ENTRY(INTEGRITY_I),
+ HWS_DEFINER_ENTRY(ICMP_DW1),
+ HWS_DEFINER_ENTRY(ICMP_DW2),
+ HWS_DEFINER_ENTRY(ICMP_DW3),
+ HWS_DEFINER_ENTRY(IPSEC_SPI),
+ HWS_DEFINER_ENTRY(IPSEC_SEQUENCE_NUMBER),
+ HWS_DEFINER_ENTRY(IPSEC_SYNDROME),
+ HWS_DEFINER_ENTRY(MPLS0_O),
+ HWS_DEFINER_ENTRY(MPLS1_O),
+ HWS_DEFINER_ENTRY(MPLS2_O),
+ HWS_DEFINER_ENTRY(MPLS3_O),
+ HWS_DEFINER_ENTRY(MPLS4_O),
+ HWS_DEFINER_ENTRY(MPLS0_I),
+ HWS_DEFINER_ENTRY(MPLS1_I),
+ HWS_DEFINER_ENTRY(MPLS2_I),
+ HWS_DEFINER_ENTRY(MPLS3_I),
+ HWS_DEFINER_ENTRY(MPLS4_I),
+ HWS_DEFINER_ENTRY(FLEX_PARSER0_OK),
+ HWS_DEFINER_ENTRY(FLEX_PARSER1_OK),
+ HWS_DEFINER_ENTRY(FLEX_PARSER2_OK),
+ HWS_DEFINER_ENTRY(FLEX_PARSER3_OK),
+ HWS_DEFINER_ENTRY(FLEX_PARSER4_OK),
+ HWS_DEFINER_ENTRY(FLEX_PARSER5_OK),
+ HWS_DEFINER_ENTRY(FLEX_PARSER6_OK),
+ HWS_DEFINER_ENTRY(FLEX_PARSER7_OK),
+ HWS_DEFINER_ENTRY(OKS2_MPLS0_O),
+ HWS_DEFINER_ENTRY(OKS2_MPLS1_O),
+ HWS_DEFINER_ENTRY(OKS2_MPLS2_O),
+ HWS_DEFINER_ENTRY(OKS2_MPLS3_O),
+ HWS_DEFINER_ENTRY(OKS2_MPLS4_O),
+ HWS_DEFINER_ENTRY(OKS2_MPLS0_I),
+ HWS_DEFINER_ENTRY(OKS2_MPLS1_I),
+ HWS_DEFINER_ENTRY(OKS2_MPLS2_I),
+ HWS_DEFINER_ENTRY(OKS2_MPLS3_I),
+ HWS_DEFINER_ENTRY(OKS2_MPLS4_I),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_OK_0),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_OK_1),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_OK_2),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_OK_3),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_OK_4),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_OK_5),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_OK_6),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_OK_7),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_DW_0),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_DW_1),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_DW_2),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_DW_3),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_DW_4),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_DW_5),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_DW_6),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_DW_7),
+ HWS_DEFINER_ENTRY(IB_L4_OPCODE),
+ HWS_DEFINER_ENTRY(IB_L4_QPN),
+ HWS_DEFINER_ENTRY(IB_L4_A),
+ HWS_DEFINER_ENTRY(RANDOM_NUM),
+ HWS_DEFINER_ENTRY(PTYPE_L2_O),
+ HWS_DEFINER_ENTRY(PTYPE_L2_I),
+ HWS_DEFINER_ENTRY(PTYPE_L3_O),
+ HWS_DEFINER_ENTRY(PTYPE_L3_I),
+ HWS_DEFINER_ENTRY(PTYPE_L4_O),
+ HWS_DEFINER_ENTRY(PTYPE_L4_I),
+ HWS_DEFINER_ENTRY(PTYPE_L4_EXT_O),
+ HWS_DEFINER_ENTRY(PTYPE_L4_EXT_I),
+ HWS_DEFINER_ENTRY(PTYPE_FRAG_O),
+ HWS_DEFINER_ENTRY(PTYPE_FRAG_I),
+ HWS_DEFINER_ENTRY(TNL_HDR_0),
+ HWS_DEFINER_ENTRY(TNL_HDR_1),
+ HWS_DEFINER_ENTRY(TNL_HDR_2),
+ HWS_DEFINER_ENTRY(TNL_HDR_3),
+ [MLX5HWS_DEFINER_FNAME_MAX] = "DEFINER_FNAME_UNKNOWN",
+};
+
+const char *mlx5hws_definer_fname_to_str(enum mlx5hws_definer_fname fname)
+{
+ if (fname > MLX5HWS_DEFINER_FNAME_MAX)
+ fname = MLX5HWS_DEFINER_FNAME_MAX;
+ return hws_definer_fname_to_str[fname];
+}
+
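The mlx5hws_definer_fname_to_str() helper added above relies on a designated-initializer string table keyed by the enum values, with a sentinel slot used as an out-of-range fallback. Below is a minimal standalone sketch of the same idiom, for illustration only; all DEMO_* names are hypothetical and not part of the patch.

#include <stdio.h>

enum demo_fname {
	DEMO_FNAME_ETH_TYPE,
	DEMO_FNAME_IP_TTL,
	DEMO_FNAME_MAX,
};

/* Expands to a designated initializer: [enum value] = "name" */
#define DEMO_ENTRY(name) [DEMO_FNAME_##name] = #name

static const char * const demo_fname_to_str[] = {
	DEMO_ENTRY(ETH_TYPE),
	DEMO_ENTRY(IP_TTL),
	[DEMO_FNAME_MAX] = "FNAME_UNKNOWN",
};

static const char *demo_fname_str(enum demo_fname fname)
{
	/* Out-of-range values fall back to the sentinel entry. */
	if (fname > DEMO_FNAME_MAX)
		fname = DEMO_FNAME_MAX;
	return demo_fname_to_str[fname];
}

int main(void)
{
	printf("%s\n", demo_fname_str(DEMO_FNAME_IP_TTL));   /* IP_TTL */
	printf("%s\n", demo_fname_str((enum demo_fname)99));  /* FNAME_UNKNOWN */
	return 0;
}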
static void
hws_definer_ones_set(struct mlx5hws_definer_fc *fc,
void *match_param,
@@ -500,7 +712,8 @@ hws_definer_check_match_flags(struct mlx5hws_definer_conv_data *cd)
return 0;
err_conflict:
- mlx5hws_err(cd->ctx, "Invalid definer fields combination\n");
+ mlx5hws_err(cd->ctx, "Invalid definer fields combination: match_flags = 0x%08x\n",
+ cd->match_flags);
return -EINVAL;
}
@@ -508,7 +721,7 @@ static int
hws_definer_conv_outer(struct mlx5hws_definer_conv_data *cd,
u32 *match_param)
{
- bool is_s_ipv6, is_d_ipv6, smac_set, dmac_set;
+ bool is_ipv6, smac_set, dmac_set, ip_addr_set, ip_ver_set;
struct mlx5hws_definer_fc *fc = cd->fc;
struct mlx5hws_definer_fc *curr_fc;
u32 *s_ipv6, *d_ipv6;
@@ -520,6 +733,20 @@ hws_definer_conv_outer(struct mlx5hws_definer_conv_data *cd,
return -EINVAL;
}
+ ip_addr_set = HWS_IS_FLD_SET_SZ(match_param,
+ outer_headers.src_ipv4_src_ipv6,
+ 0x80) ||
+ HWS_IS_FLD_SET_SZ(match_param,
+ outer_headers.dst_ipv4_dst_ipv6, 0x80);
+ ip_ver_set = HWS_IS_FLD_SET(match_param, outer_headers.ip_version) ||
+ HWS_IS_FLD_SET(match_param, outer_headers.ethertype);
+
+ if (ip_addr_set && !ip_ver_set) {
+ mlx5hws_err(cd->ctx,
+ "Unsupported match on IP address without version or ethertype\n");
+ return -EINVAL;
+ }
+
/* L2 Check ethertype */
HWS_SET_HDR(fc, match_param, ETH_TYPE_O,
outer_headers.ethertype,
@@ -558,6 +785,9 @@ hws_definer_conv_outer(struct mlx5hws_definer_conv_data *cd,
HWS_SET_HDR(fc, match_param, IP_PROTOCOL_O,
outer_headers.ip_protocol,
eth_l3_outer.protocol_next_header);
+ HWS_SET_HDR(fc, match_param, IP_VERSION_O,
+ outer_headers.ip_version,
+ eth_l3_outer.ip_version);
HWS_SET_HDR(fc, match_param, IP_TTL_O,
outer_headers.ttl_hoplimit,
eth_l3_outer.time_to_live_hop_limit);
@@ -569,10 +799,16 @@ hws_definer_conv_outer(struct mlx5hws_definer_conv_data *cd,
outer_headers.dst_ipv4_dst_ipv6.ipv6_layout);
/* Assume IPv6 is used if ipv6 bits are set */
- is_s_ipv6 = s_ipv6[0] || s_ipv6[1] || s_ipv6[2];
- is_d_ipv6 = d_ipv6[0] || d_ipv6[1] || d_ipv6[2];
+ is_ipv6 = s_ipv6[0] || s_ipv6[1] || s_ipv6[2] ||
+ d_ipv6[0] || d_ipv6[1] || d_ipv6[2];
+
+ /* IHL is an IPv4-specific field. */
+ if (is_ipv6 && HWS_IS_FLD_SET(match_param, outer_headers.ipv4_ihl)) {
+ mlx5hws_err(cd->ctx, "Unsupported match on IPv6 address and IPv4 IHL\n");
+ return -EINVAL;
+ }
- if (is_s_ipv6) {
+ if (is_ipv6) {
/* Handle IPv6 source address */
HWS_SET_HDR(fc, match_param, IPV6_SRC_127_96_O,
outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_127_96,
@@ -586,13 +822,6 @@ hws_definer_conv_outer(struct mlx5hws_definer_conv_data *cd,
HWS_SET_HDR(fc, match_param, IPV6_SRC_31_0_O,
outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0,
ipv6_src_outer.ipv6_address_31_0);
- } else {
- /* Handle IPv4 source address */
- HWS_SET_HDR(fc, match_param, IPV4_SRC_O,
- outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0,
- ipv4_src_dest_outer.source_address);
- }
- if (is_d_ipv6) {
/* Handle IPv6 destination address */
HWS_SET_HDR(fc, match_param, IPV6_DST_127_96_O,
outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_127_96,
@@ -607,6 +836,10 @@ hws_definer_conv_outer(struct mlx5hws_definer_conv_data *cd,
outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0,
ipv6_dst_outer.ipv6_address_31_0);
} else {
+ /* Handle IPv4 source address */
+ HWS_SET_HDR(fc, match_param, IPV4_SRC_O,
+ outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0,
+ ipv4_src_dest_outer.source_address);
/* Handle IPv4 destination address */
HWS_SET_HDR(fc, match_param, IPV4_DST_O,
outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0,
@@ -664,7 +897,7 @@ static int
hws_definer_conv_inner(struct mlx5hws_definer_conv_data *cd,
u32 *match_param)
{
- bool is_s_ipv6, is_d_ipv6, smac_set, dmac_set;
+ bool is_ipv6, smac_set, dmac_set, ip_addr_set, ip_ver_set;
struct mlx5hws_definer_fc *fc = cd->fc;
struct mlx5hws_definer_fc *curr_fc;
u32 *s_ipv6, *d_ipv6;
@@ -676,6 +909,20 @@ hws_definer_conv_inner(struct mlx5hws_definer_conv_data *cd,
return -EINVAL;
}
+ ip_addr_set = HWS_IS_FLD_SET_SZ(match_param,
+ inner_headers.src_ipv4_src_ipv6,
+ 0x80) ||
+ HWS_IS_FLD_SET_SZ(match_param,
+ inner_headers.dst_ipv4_dst_ipv6, 0x80);
+ ip_ver_set = HWS_IS_FLD_SET(match_param, inner_headers.ip_version) ||
+ HWS_IS_FLD_SET(match_param, inner_headers.ethertype);
+
+ if (ip_addr_set && !ip_ver_set) {
+ mlx5hws_err(cd->ctx,
+ "Unsupported match on IP address without version or ethertype\n");
+ return -EINVAL;
+ }
+
/* L2 Check ethertype */
HWS_SET_HDR(fc, match_param, ETH_TYPE_I,
inner_headers.ethertype,
@@ -727,10 +974,16 @@ hws_definer_conv_inner(struct mlx5hws_definer_conv_data *cd,
inner_headers.dst_ipv4_dst_ipv6.ipv6_layout);
/* Assume IPv6 is used if ipv6 bits are set */
- is_s_ipv6 = s_ipv6[0] || s_ipv6[1] || s_ipv6[2];
- is_d_ipv6 = d_ipv6[0] || d_ipv6[1] || d_ipv6[2];
+ is_ipv6 = s_ipv6[0] || s_ipv6[1] || s_ipv6[2] ||
+ d_ipv6[0] || d_ipv6[1] || d_ipv6[2];
- if (is_s_ipv6) {
+ /* IHL is an IPv4-specific field. */
+ if (is_ipv6 && HWS_IS_FLD_SET(match_param, inner_headers.ipv4_ihl)) {
+ mlx5hws_err(cd->ctx, "Unsupported match on IPv6 address and IPv4 IHL\n");
+ return -EINVAL;
+ }
+
+ if (is_ipv6) {
/* Handle IPv6 source address */
HWS_SET_HDR(fc, match_param, IPV6_SRC_127_96_I,
inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_127_96,
@@ -744,13 +997,6 @@ hws_definer_conv_inner(struct mlx5hws_definer_conv_data *cd,
HWS_SET_HDR(fc, match_param, IPV6_SRC_31_0_I,
inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0,
ipv6_src_inner.ipv6_address_31_0);
- } else {
- /* Handle IPv4 source address */
- HWS_SET_HDR(fc, match_param, IPV4_SRC_I,
- inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0,
- ipv4_src_dest_inner.source_address);
- }
- if (is_d_ipv6) {
/* Handle IPv6 destination address */
HWS_SET_HDR(fc, match_param, IPV6_DST_127_96_I,
inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_127_96,
@@ -765,6 +1011,10 @@ hws_definer_conv_inner(struct mlx5hws_definer_conv_data *cd,
inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0,
ipv6_dst_inner.ipv6_address_31_0);
} else {
+ /* Handle IPv4 source address */
+ HWS_SET_HDR(fc, match_param, IPV4_SRC_I,
+ inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0,
+ ipv4_src_dest_inner.source_address);
/* Handle IPv4 destination address */
HWS_SET_HDR(fc, match_param, IPV4_DST_I,
inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0,
@@ -1985,8 +2235,7 @@ int mlx5hws_definer_get_obj(struct mlx5hws_context *ctx,
continue;
/* Reuse definer and set LRU (move to be first in the list) */
- list_del_init(&cached_definer->list_node);
- list_add(&cached_definer->list_node, &cache->list_head);
+ list_move(&cached_definer->list_node, &cache->list_head);
cached_definer->refcount++;
return cached_definer->definer.obj_id;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.h
index 9432d5084def..62da55389331 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.h
@@ -785,7 +785,7 @@ struct mlx5hws_definer_cache {
struct mlx5hws_definer_cache_item {
struct mlx5hws_definer definer;
- u32 refcount;
+ u32 refcount; /* protected by context ctrl lock */
struct list_head list_node;
};
@@ -831,4 +831,6 @@ mlx5hws_definer_conv_match_params_to_compressed_fc(struct mlx5hws_context *ctx,
u32 *match_param,
int *fc_sz);
+const char *mlx5hws_definer_fname_to_str(enum mlx5hws_definer_fname fname);
+
#endif /* HWS_DEFINER_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c
new file mode 100644
index 000000000000..bf4643d0ce17
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c
@@ -0,0 +1,1648 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2025 NVIDIA Corporation & Affiliates */
+
+#include <linux/mlx5/vport.h>
+#include <mlx5_core.h>
+#include <fs_core.h>
+#include <fs_cmd.h>
+#include "fs_hws_pools.h"
+#include "mlx5hws.h"
+
+#define MLX5HWS_CTX_MAX_NUM_OF_QUEUES 16
+#define MLX5HWS_CTX_QUEUE_SIZE 256
+
+static struct mlx5hws_action *
+mlx5_fs_create_action_remove_header_vlan(struct mlx5hws_context *ctx);
+static void
+mlx5_fs_destroy_pr_pool(struct mlx5_fs_pool *pool, struct xarray *pr_pools,
+ unsigned long index);
+static void
+mlx5_fs_destroy_mh_pool(struct mlx5_fs_pool *pool, struct xarray *mh_pools,
+ unsigned long index);
+
+static int mlx5_fs_init_hws_actions_pool(struct mlx5_core_dev *dev,
+ struct mlx5_fs_hws_context *fs_ctx)
+{
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
+ struct mlx5_fs_hws_actions_pool *hws_pool = &fs_ctx->hws_pool;
+ struct mlx5hws_action_reformat_header reformat_hdr = {};
+ struct mlx5hws_context *ctx = fs_ctx->hws_ctx;
+ enum mlx5hws_action_type action_type;
+ int err = -ENOSPC;
+
+ hws_pool->tag_action = mlx5hws_action_create_tag(ctx, flags);
+ if (!hws_pool->tag_action)
+ return err;
+ hws_pool->pop_vlan_action = mlx5hws_action_create_pop_vlan(ctx, flags);
+ if (!hws_pool->pop_vlan_action)
+ goto destroy_tag;
+ hws_pool->push_vlan_action = mlx5hws_action_create_push_vlan(ctx, flags);
+ if (!hws_pool->push_vlan_action)
+ goto destroy_pop_vlan;
+ hws_pool->drop_action = mlx5hws_action_create_dest_drop(ctx, flags);
+ if (!hws_pool->drop_action)
+ goto destroy_push_vlan;
+ action_type = MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2;
+ hws_pool->decapl2_action =
+ mlx5hws_action_create_reformat(ctx, action_type, 1,
+ &reformat_hdr, 0, flags);
+ if (!hws_pool->decapl2_action)
+ goto destroy_drop;
+ hws_pool->remove_hdr_vlan_action =
+ mlx5_fs_create_action_remove_header_vlan(ctx);
+ if (!hws_pool->remove_hdr_vlan_action)
+ goto destroy_decapl2;
+ err = mlx5_fs_hws_pr_pool_init(&hws_pool->insert_hdr_pool, dev, 0,
+ MLX5HWS_ACTION_TYP_INSERT_HEADER);
+ if (err)
+ goto destroy_remove_hdr;
+ err = mlx5_fs_hws_pr_pool_init(&hws_pool->dl3tnltol2_pool, dev, 0,
+ MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2);
+ if (err)
+ goto cleanup_insert_hdr;
+ xa_init(&hws_pool->el2tol3tnl_pools);
+ xa_init(&hws_pool->el2tol2tnl_pools);
+ xa_init(&hws_pool->mh_pools);
+ xa_init(&hws_pool->table_dests);
+ xa_init(&hws_pool->vport_dests);
+ xa_init(&hws_pool->vport_vhca_dests);
+ xa_init(&hws_pool->aso_meters);
+ xa_init(&hws_pool->sample_dests);
+ return 0;
+
+cleanup_insert_hdr:
+ mlx5_fs_hws_pr_pool_cleanup(&hws_pool->insert_hdr_pool);
+destroy_remove_hdr:
+ mlx5hws_action_destroy(hws_pool->remove_hdr_vlan_action);
+destroy_decapl2:
+ mlx5hws_action_destroy(hws_pool->decapl2_action);
+destroy_drop:
+ mlx5hws_action_destroy(hws_pool->drop_action);
+destroy_push_vlan:
+ mlx5hws_action_destroy(hws_pool->push_vlan_action);
+destroy_pop_vlan:
+ mlx5hws_action_destroy(hws_pool->pop_vlan_action);
+destroy_tag:
+ mlx5hws_action_destroy(hws_pool->tag_action);
+ return err;
+}
+
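mlx5_fs_init_hws_actions_pool() above follows the usual kernel goto-unwind idiom: each acquisition step gets an error label that releases everything obtained before it, so a failure at any point tears down in reverse order. A standalone userspace sketch of the same error-handling shape, for illustration only; the demo_* names are hypothetical.

#include <stdlib.h>

struct demo_pool {
	void *a, *b, *c;
};

static int demo_pool_init(struct demo_pool *p)
{
	p->a = malloc(16);
	if (!p->a)
		return -1;
	p->b = malloc(16);
	if (!p->b)
		goto free_a;
	p->c = malloc(16);
	if (!p->c)
		goto free_b;
	return 0;

free_b:
	free(p->b);
free_a:
	free(p->a);
	return -1;
}

static void demo_pool_cleanup(struct demo_pool *p)
{
	/* Tear down in reverse order of acquisition. */
	free(p->c);
	free(p->b);
	free(p->a);
}

int main(void)
{
	struct demo_pool p;

	if (demo_pool_init(&p))
		return 1;
	demo_pool_cleanup(&p);
	return 0;
}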
+static void mlx5_fs_cleanup_hws_actions_pool(struct mlx5_fs_hws_context *fs_ctx)
+{
+ struct mlx5_fs_hws_actions_pool *hws_pool = &fs_ctx->hws_pool;
+ struct mlx5_fs_hws_data *fs_hws_data;
+ struct mlx5hws_action *action;
+ struct mlx5_fs_pool *pool;
+ unsigned long i;
+
+ xa_for_each(&hws_pool->sample_dests, i, fs_hws_data)
+ kfree(fs_hws_data);
+ xa_destroy(&hws_pool->sample_dests);
+ xa_for_each(&hws_pool->aso_meters, i, fs_hws_data)
+ kfree(fs_hws_data);
+ xa_destroy(&hws_pool->aso_meters);
+ xa_for_each(&hws_pool->vport_vhca_dests, i, action)
+ mlx5hws_action_destroy(action);
+ xa_destroy(&hws_pool->vport_vhca_dests);
+ xa_for_each(&hws_pool->vport_dests, i, action)
+ mlx5hws_action_destroy(action);
+ xa_destroy(&hws_pool->vport_dests);
+ xa_destroy(&hws_pool->table_dests);
+ xa_for_each(&hws_pool->mh_pools, i, pool)
+ mlx5_fs_destroy_mh_pool(pool, &hws_pool->mh_pools, i);
+ xa_destroy(&hws_pool->mh_pools);
+ xa_for_each(&hws_pool->el2tol2tnl_pools, i, pool)
+ mlx5_fs_destroy_pr_pool(pool, &hws_pool->el2tol2tnl_pools, i);
+ xa_destroy(&hws_pool->el2tol2tnl_pools);
+ xa_for_each(&hws_pool->el2tol3tnl_pools, i, pool)
+ mlx5_fs_destroy_pr_pool(pool, &hws_pool->el2tol3tnl_pools, i);
+ xa_destroy(&hws_pool->el2tol3tnl_pools);
+ mlx5_fs_hws_pr_pool_cleanup(&hws_pool->dl3tnltol2_pool);
+ mlx5_fs_hws_pr_pool_cleanup(&hws_pool->insert_hdr_pool);
+ mlx5hws_action_destroy(hws_pool->remove_hdr_vlan_action);
+ mlx5hws_action_destroy(hws_pool->decapl2_action);
+ mlx5hws_action_destroy(hws_pool->drop_action);
+ mlx5hws_action_destroy(hws_pool->push_vlan_action);
+ mlx5hws_action_destroy(hws_pool->pop_vlan_action);
+ mlx5hws_action_destroy(hws_pool->tag_action);
+}
+
+static int mlx5_cmd_hws_create_ns(struct mlx5_flow_root_namespace *ns)
+{
+ struct mlx5hws_context_attr hws_ctx_attr = {};
+ int err;
+
+ hws_ctx_attr.queues = min_t(int, num_online_cpus(),
+ MLX5HWS_CTX_MAX_NUM_OF_QUEUES);
+ hws_ctx_attr.queue_size = MLX5HWS_CTX_QUEUE_SIZE;
+
+ ns->fs_hws_context.hws_ctx =
+ mlx5hws_context_open(ns->dev, &hws_ctx_attr);
+ if (!ns->fs_hws_context.hws_ctx) {
+ mlx5_core_err(ns->dev, "Failed to create hws flow namespace\n");
+ return -EINVAL;
+ }
+ err = mlx5_fs_init_hws_actions_pool(ns->dev, &ns->fs_hws_context);
+ if (err) {
+ mlx5_core_err(ns->dev, "Failed to init hws actions pool\n");
+ mlx5hws_context_close(ns->fs_hws_context.hws_ctx);
+ return err;
+ }
+ return 0;
+}
+
+static int mlx5_cmd_hws_destroy_ns(struct mlx5_flow_root_namespace *ns)
+{
+ mlx5_fs_cleanup_hws_actions_pool(&ns->fs_hws_context);
+ return mlx5hws_context_close(ns->fs_hws_context.hws_ctx);
+}
+
+static int mlx5_cmd_hws_set_peer(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_root_namespace *peer_ns,
+ u16 peer_vhca_id)
+{
+ struct mlx5hws_context *peer_ctx = NULL;
+
+ if (peer_ns)
+ peer_ctx = peer_ns->fs_hws_context.hws_ctx;
+ mlx5hws_context_set_peer(ns->fs_hws_context.hws_ctx, peer_ctx,
+ peer_vhca_id);
+ return 0;
+}
+
+static int mlx5_fs_set_ft_default_miss(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_table *next_ft)
+{
+ struct mlx5hws_table *next_tbl;
+ int err;
+
+ if (!ns->fs_hws_context.hws_ctx)
+ return -EINVAL;
+
+ /* if no change required, return */
+ if (!next_ft && !ft->fs_hws_table.miss_ft_set)
+ return 0;
+
+ next_tbl = next_ft ? next_ft->fs_hws_table.hws_table : NULL;
+ err = mlx5hws_table_set_default_miss(ft->fs_hws_table.hws_table, next_tbl);
+ if (err) {
+ mlx5_core_err(ns->dev, "Failed setting FT default miss (%d)\n", err);
+ return err;
+ }
+ ft->fs_hws_table.miss_ft_set = !!next_tbl;
+ return 0;
+}
+
+static int mlx5_fs_add_flow_table_dest_action(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft)
+{
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
+ struct mlx5_fs_hws_context *fs_ctx = &ns->fs_hws_context;
+ struct mlx5hws_action *dest_ft_action;
+ struct xarray *dests_xa;
+ int err;
+
+ dest_ft_action = mlx5hws_action_create_dest_table_num(fs_ctx->hws_ctx,
+ ft->id, flags);
+ if (!dest_ft_action) {
+ mlx5_core_err(ns->dev, "Failed creating dest table action\n");
+ return -ENOMEM;
+ }
+
+ dests_xa = &fs_ctx->hws_pool.table_dests;
+ err = xa_insert(dests_xa, ft->id, dest_ft_action, GFP_KERNEL);
+ if (err)
+ mlx5hws_action_destroy(dest_ft_action);
+ return err;
+}
+
+static int mlx5_fs_del_flow_table_dest_action(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft)
+{
+ struct mlx5_fs_hws_context *fs_ctx = &ns->fs_hws_context;
+ struct mlx5hws_action *dest_ft_action;
+ struct xarray *dests_xa;
+ int err;
+
+ dests_xa = &fs_ctx->hws_pool.table_dests;
+ dest_ft_action = xa_erase(dests_xa, ft->id);
+ if (!dest_ft_action) {
+ mlx5_core_err(ns->dev, "Failed to erase dest ft action\n");
+ return -ENOENT;
+ }
+
+ err = mlx5hws_action_destroy(dest_ft_action);
+ if (err)
+ mlx5_core_err(ns->dev, "Failed to destroy dest ft action\n");
+ return err;
+}
+
+static int mlx5_cmd_hws_create_flow_table(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_table_attr *ft_attr,
+ struct mlx5_flow_table *next_ft)
+{
+ struct mlx5hws_context *ctx = ns->fs_hws_context.hws_ctx;
+ struct mlx5hws_table_attr tbl_attr = {};
+ struct mlx5hws_table *tbl;
+ int err;
+
+ if (mlx5_fs_cmd_is_fw_term_table(ft)) {
+ err = mlx5_fs_cmd_get_fw_cmds()->create_flow_table(ns, ft, ft_attr,
+ next_ft);
+ if (err)
+ return err;
+ err = mlx5_fs_add_flow_table_dest_action(ns, ft);
+ if (err)
+ mlx5_fs_cmd_get_fw_cmds()->destroy_flow_table(ns, ft);
+ return err;
+ }
+
+ if (ns->table_type != FS_FT_FDB) {
+ mlx5_core_err(ns->dev, "Table type %d not supported for HWS\n",
+ ns->table_type);
+ return -EOPNOTSUPP;
+ }
+
+ tbl_attr.type = MLX5HWS_TABLE_TYPE_FDB;
+ tbl_attr.level = ft_attr->level;
+ tbl = mlx5hws_table_create(ctx, &tbl_attr);
+ if (!tbl) {
+ mlx5_core_err(ns->dev, "Failed creating hws flow_table\n");
+ return -EINVAL;
+ }
+
+ ft->fs_hws_table.hws_table = tbl;
+ ft->id = mlx5hws_table_get_id(tbl);
+
+ if (next_ft) {
+ err = mlx5_fs_set_ft_default_miss(ns, ft, next_ft);
+ if (err)
+ goto destroy_table;
+ }
+
+ ft->max_fte = INT_MAX;
+
+ err = mlx5_fs_add_flow_table_dest_action(ns, ft);
+ if (err)
+ goto clear_ft_miss;
+ return 0;
+
+clear_ft_miss:
+ mlx5_fs_set_ft_default_miss(ns, ft, NULL);
+destroy_table:
+ mlx5hws_table_destroy(tbl);
+ ft->fs_hws_table.hws_table = NULL;
+ return err;
+}
+
+static int mlx5_cmd_hws_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft)
+{
+ int err;
+
+ err = mlx5_fs_del_flow_table_dest_action(ns, ft);
+ if (err)
+ mlx5_core_err(ns->dev, "Failed to remove dest action (%d)\n", err);
+
+ if (mlx5_fs_cmd_is_fw_term_table(ft))
+ return mlx5_fs_cmd_get_fw_cmds()->destroy_flow_table(ns, ft);
+
+ err = mlx5_fs_set_ft_default_miss(ns, ft, NULL);
+ if (err)
+ mlx5_core_err(ns->dev, "Failed to disconnect next table (%d)\n", err);
+
+ err = mlx5hws_table_destroy(ft->fs_hws_table.hws_table);
+ if (err)
+ mlx5_core_err(ns->dev, "Failed to destroy flow_table (%d)\n", err);
+
+ return err;
+}
+
+static int mlx5_cmd_hws_modify_flow_table(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_table *next_ft)
+{
+ if (mlx5_fs_cmd_is_fw_term_table(ft))
+ return mlx5_fs_cmd_get_fw_cmds()->modify_flow_table(ns, ft, next_ft);
+
+ return mlx5_fs_set_ft_default_miss(ns, ft, next_ft);
+}
+
+static int mlx5_cmd_hws_update_root_ft(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ u32 underlay_qpn,
+ bool disconnect)
+{
+ return mlx5_fs_cmd_get_fw_cmds()->update_root_ft(ns, ft, underlay_qpn,
+ disconnect);
+}
+
+static int mlx5_cmd_hws_create_flow_group(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft, u32 *in,
+ struct mlx5_flow_group *fg)
+{
+ struct mlx5hws_match_parameters mask;
+ struct mlx5hws_bwc_matcher *matcher;
+ u8 match_criteria_enable;
+ u32 priority;
+
+ if (mlx5_fs_cmd_is_fw_term_table(ft))
+ return mlx5_fs_cmd_get_fw_cmds()->create_flow_group(ns, ft, in, fg);
+
+ mask.match_buf = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+ mask.match_sz = sizeof(fg->mask.match_criteria);
+
+ match_criteria_enable = MLX5_GET(create_flow_group_in, in,
+ match_criteria_enable);
+ priority = MLX5_GET(create_flow_group_in, in, start_flow_index);
+ matcher = mlx5hws_bwc_matcher_create(ft->fs_hws_table.hws_table,
+ priority, match_criteria_enable,
+ &mask);
+ if (!matcher) {
+ mlx5_core_err(ns->dev, "Failed creating matcher\n");
+ return -EINVAL;
+ }
+
+ fg->fs_hws_matcher.matcher = matcher;
+ return 0;
+}
+
+static int mlx5_cmd_hws_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_group *fg)
+{
+ if (mlx5_fs_cmd_is_fw_term_table(ft))
+ return mlx5_fs_cmd_get_fw_cmds()->destroy_flow_group(ns, ft, fg);
+
+ return mlx5hws_bwc_matcher_destroy(fg->fs_hws_matcher.matcher);
+}
+
+static struct mlx5hws_action *
+mlx5_fs_get_dest_action_ft(struct mlx5_fs_hws_context *fs_ctx,
+ struct mlx5_flow_rule *dst)
+{
+ return xa_load(&fs_ctx->hws_pool.table_dests, dst->dest_attr.ft->id);
+}
+
+static struct mlx5hws_action *
+mlx5_fs_get_dest_action_table_num(struct mlx5_fs_hws_context *fs_ctx,
+ struct mlx5_flow_rule *dst)
+{
+ u32 table_num = dst->dest_attr.ft_num;
+
+ return xa_load(&fs_ctx->hws_pool.table_dests, table_num);
+}
+
+static struct mlx5hws_action *
+mlx5_fs_create_dest_action_table_num(struct mlx5_fs_hws_context *fs_ctx,
+ struct mlx5_flow_rule *dst)
+{
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
+ struct mlx5hws_context *ctx = fs_ctx->hws_ctx;
+ u32 table_num = dst->dest_attr.ft_num;
+
+ return mlx5hws_action_create_dest_table_num(ctx, table_num, flags);
+}
+
+static struct mlx5hws_action *
+mlx5_fs_get_dest_action_vport(struct mlx5_fs_hws_context *fs_ctx,
+ struct mlx5_flow_rule *dst,
+ bool is_dest_type_uplink)
+{
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
+ struct mlx5_flow_destination *dest_attr = &dst->dest_attr;
+ struct mlx5hws_context *ctx = fs_ctx->hws_ctx;
+ struct mlx5hws_action *dest;
+ struct xarray *dests_xa;
+ bool vhca_id_valid;
+ unsigned long idx;
+ u16 vport_num;
+ int err;
+
+ vhca_id_valid = is_dest_type_uplink ||
+ (dest_attr->vport.flags & MLX5_FLOW_DEST_VPORT_VHCA_ID);
+ vport_num = is_dest_type_uplink ? MLX5_VPORT_UPLINK : dest_attr->vport.num;
+ if (vhca_id_valid) {
+ dests_xa = &fs_ctx->hws_pool.vport_vhca_dests;
+ idx = (unsigned long)dest_attr->vport.vhca_id << 16 | vport_num;
+ } else {
+ dests_xa = &fs_ctx->hws_pool.vport_dests;
+ idx = vport_num;
+ }
+dest_load:
+ dest = xa_load(dests_xa, idx);
+ if (dest)
+ return dest;
+
+ dest = mlx5hws_action_create_dest_vport(ctx, vport_num, vhca_id_valid,
+ dest_attr->vport.vhca_id, flags);
+
+ err = xa_insert(dests_xa, idx, dest, GFP_KERNEL);
+ if (err) {
+ mlx5hws_action_destroy(dest);
+ dest = NULL;
+
+ if (err == -EBUSY)
+ /* xarray entry was already stored by another thread */
+ goto dest_load;
+ }
+
+ return dest;
+}
+
+static struct mlx5hws_action *
+mlx5_fs_create_dest_action_range(struct mlx5hws_context *ctx,
+ struct mlx5_flow_rule *dst)
+{
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
+ struct mlx5_flow_destination *dest_attr = &dst->dest_attr;
+
+ return mlx5hws_action_create_dest_match_range(ctx,
+ dest_attr->range.field,
+ dest_attr->range.hit_ft,
+ dest_attr->range.miss_ft,
+ dest_attr->range.min,
+ dest_attr->range.max,
+ flags);
+}
+
+static struct mlx5_fs_hws_data *
+mlx5_fs_get_cached_hws_data(struct xarray *cache_xa, unsigned long index)
+{
+ struct mlx5_fs_hws_data *fs_hws_data;
+ int err;
+
+ xa_lock(cache_xa);
+ fs_hws_data = xa_load(cache_xa, index);
+ if (!fs_hws_data) {
+ fs_hws_data = kzalloc(sizeof(*fs_hws_data), GFP_ATOMIC);
+ if (!fs_hws_data) {
+ xa_unlock(cache_xa);
+ return NULL;
+ }
+ refcount_set(&fs_hws_data->hws_action_refcount, 0);
+ mutex_init(&fs_hws_data->lock);
+ err = __xa_insert(cache_xa, index, fs_hws_data, GFP_ATOMIC);
+ if (err) {
+ kfree(fs_hws_data);
+ xa_unlock(cache_xa);
+ return NULL;
+ }
+ }
+ xa_unlock(cache_xa);
+
+ return fs_hws_data;
+}
+
+static struct mlx5hws_action *
+mlx5_fs_get_action_aso_meter(struct mlx5_fs_hws_context *fs_ctx,
+ struct mlx5_exe_aso *exe_aso)
+{
+ struct mlx5_fs_hws_create_action_ctx create_ctx;
+ struct mlx5hws_context *ctx = fs_ctx->hws_ctx;
+ struct mlx5_fs_hws_data *meter_hws_data;
+ u32 id = exe_aso->base_id;
+ struct xarray *meters_xa;
+
+ meters_xa = &fs_ctx->hws_pool.aso_meters;
+ meter_hws_data = mlx5_fs_get_cached_hws_data(meters_xa, id);
+ if (!meter_hws_data)
+ return NULL;
+
+ create_ctx.hws_ctx = ctx;
+ create_ctx.actions_type = MLX5HWS_ACTION_TYP_ASO_METER;
+ create_ctx.id = id;
+ create_ctx.return_reg_id = exe_aso->return_reg_id;
+
+ return mlx5_fs_get_hws_action(meter_hws_data, &create_ctx);
+}
+
+static void mlx5_fs_put_action_aso_meter(struct mlx5_fs_hws_context *fs_ctx,
+ struct mlx5_exe_aso *exe_aso)
+{
+ struct mlx5_fs_hws_data *meter_hws_data;
+ struct xarray *meters_xa;
+
+ meters_xa = &fs_ctx->hws_pool.aso_meters;
+ meter_hws_data = xa_load(meters_xa, exe_aso->base_id);
+ if (!meter_hws_data)
+ return;
+ return mlx5_fs_put_hws_action(meter_hws_data);
+}
+
+static struct mlx5hws_action *
+mlx5_fs_get_dest_action_sampler(struct mlx5_fs_hws_context *fs_ctx,
+ struct mlx5_flow_rule *dst)
+{
+ struct mlx5_fs_hws_create_action_ctx create_ctx;
+ struct mlx5hws_context *ctx = fs_ctx->hws_ctx;
+ struct mlx5_fs_hws_data *sampler_hws_data;
+ u32 id = dst->dest_attr.sampler_id;
+ struct xarray *sampler_xa;
+
+ sampler_xa = &fs_ctx->hws_pool.sample_dests;
+ sampler_hws_data = mlx5_fs_get_cached_hws_data(sampler_xa, id);
+ if (!sampler_hws_data)
+ return NULL;
+
+ create_ctx.hws_ctx = ctx;
+ create_ctx.actions_type = MLX5HWS_ACTION_TYP_SAMPLER;
+ create_ctx.id = id;
+
+ return mlx5_fs_get_hws_action(sampler_hws_data, &create_ctx);
+}
+
+static void mlx5_fs_put_dest_action_sampler(struct mlx5_fs_hws_context *fs_ctx,
+ u32 sampler_id)
+{
+ struct mlx5_fs_hws_data *sampler_hws_data;
+ struct xarray *sampler_xa;
+
+ sampler_xa = &fs_ctx->hws_pool.sample_dests;
+ sampler_hws_data = xa_load(sampler_xa, sampler_id);
+ if (!sampler_hws_data)
+ return;
+
+ mlx5_fs_put_hws_action(sampler_hws_data);
+}
+
+static struct mlx5hws_action *
+mlx5_fs_create_action_dest_array(struct mlx5hws_context *ctx,
+ struct mlx5hws_action_dest_attr *dests,
+ u32 num_of_dests, bool ignore_flow_level,
+ u32 flow_source)
+{
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
+
+ return mlx5hws_action_create_dest_array(ctx, num_of_dests, dests,
+ ignore_flow_level,
+ flow_source, flags);
+}
+
+static struct mlx5hws_action *
+mlx5_fs_get_action_push_vlan(struct mlx5_fs_hws_context *fs_ctx)
+{
+ return fs_ctx->hws_pool.push_vlan_action;
+}
+
+static u32 mlx5_fs_calc_vlan_hdr(struct mlx5_fs_vlan *vlan)
+{
+ u16 n_ethtype = vlan->ethtype;
+ u8 prio = vlan->prio;
+ u16 vid = vlan->vid;
+
+ return (u32)n_ethtype << 16 | (u32)(prio) << 12 | (u32)vid;
+}
+
+static struct mlx5hws_action *
+mlx5_fs_get_action_pop_vlan(struct mlx5_fs_hws_context *fs_ctx)
+{
+ return fs_ctx->hws_pool.pop_vlan_action;
+}
+
+static struct mlx5hws_action *
+mlx5_fs_get_action_decap_tnl_l2_to_l2(struct mlx5_fs_hws_context *fs_ctx)
+{
+ return fs_ctx->hws_pool.decapl2_action;
+}
+
+static struct mlx5hws_action *
+mlx5_fs_get_dest_action_drop(struct mlx5_fs_hws_context *fs_ctx)
+{
+ return fs_ctx->hws_pool.drop_action;
+}
+
+static struct mlx5hws_action *
+mlx5_fs_get_action_tag(struct mlx5_fs_hws_context *fs_ctx)
+{
+ return fs_ctx->hws_pool.tag_action;
+}
+
+static struct mlx5hws_action *
+mlx5_fs_create_action_last(struct mlx5hws_context *ctx)
+{
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
+
+ return mlx5hws_action_create_last(ctx, flags);
+}
+
+static struct mlx5hws_action *
+mlx5_fs_create_hws_action(struct mlx5_fs_hws_create_action_ctx *create_ctx)
+{
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
+
+ switch (create_ctx->actions_type) {
+ case MLX5HWS_ACTION_TYP_CTR:
+ return mlx5hws_action_create_counter(create_ctx->hws_ctx,
+ create_ctx->id, flags);
+ case MLX5HWS_ACTION_TYP_ASO_METER:
+ return mlx5hws_action_create_aso_meter(create_ctx->hws_ctx,
+ create_ctx->id,
+ create_ctx->return_reg_id,
+ flags);
+ case MLX5HWS_ACTION_TYP_SAMPLER:
+ return mlx5hws_action_create_flow_sampler(create_ctx->hws_ctx,
+ create_ctx->id, flags);
+ default:
+ return NULL;
+ }
+}
+
+struct mlx5hws_action *
+mlx5_fs_get_hws_action(struct mlx5_fs_hws_data *fs_hws_data,
+ struct mlx5_fs_hws_create_action_ctx *create_ctx)
+{
+ /* try avoid locking if not necessary */
+ if (refcount_inc_not_zero(&fs_hws_data->hws_action_refcount))
+ return fs_hws_data->hws_action;
+
+ mutex_lock(&fs_hws_data->lock);
+ if (refcount_inc_not_zero(&fs_hws_data->hws_action_refcount)) {
+ mutex_unlock(&fs_hws_data->lock);
+ return fs_hws_data->hws_action;
+ }
+ fs_hws_data->hws_action = mlx5_fs_create_hws_action(create_ctx);
+ if (!fs_hws_data->hws_action) {
+ mutex_unlock(&fs_hws_data->lock);
+ return NULL;
+ }
+ refcount_set(&fs_hws_data->hws_action_refcount, 1);
+ mutex_unlock(&fs_hws_data->lock);
+
+ return fs_hws_data->hws_action;
+}
+
+void mlx5_fs_put_hws_action(struct mlx5_fs_hws_data *fs_hws_data)
+{
+ if (!fs_hws_data)
+ return;
+
+ /* try avoid locking if not necessary */
+ if (refcount_dec_not_one(&fs_hws_data->hws_action_refcount))
+ return;
+
+ mutex_lock(&fs_hws_data->lock);
+ if (!refcount_dec_and_test(&fs_hws_data->hws_action_refcount)) {
+ mutex_unlock(&fs_hws_data->lock);
+ return;
+ }
+ mlx5hws_action_destroy(fs_hws_data->hws_action);
+ fs_hws_data->hws_action = NULL;
+ mutex_unlock(&fs_hws_data->lock);
+}
+
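mlx5_fs_get_hws_action()/mlx5_fs_put_hws_action() above lazily create a shared action on first use and destroy it when the last user drops it, taking refcount_inc_not_zero()/refcount_dec_not_one() as a lock-free fast path and the mutex only around creation and destruction. The following simplified userspace sketch shows the same get/put idea; it always takes the lock and therefore omits the lock-free fast path. Illustration only, with hypothetical names.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct shared_res {
	pthread_mutex_t lock;
	unsigned int refcount;  /* 0 means "not created yet" */
	void *object;
};

static void *shared_res_get(struct shared_res *r, void *(*create)(void))
{
	void *obj = NULL;

	pthread_mutex_lock(&r->lock);
	if (r->refcount == 0) {
		/* First user creates the shared object. */
		r->object = create();
		if (!r->object)
			goto unlock;
	}
	r->refcount++;
	obj = r->object;
unlock:
	pthread_mutex_unlock(&r->lock);
	return obj;
}

static void shared_res_put(struct shared_res *r, void (*destroy)(void *))
{
	pthread_mutex_lock(&r->lock);
	if (--r->refcount == 0) {
		/* Last user destroys the shared object. */
		destroy(r->object);
		r->object = NULL;
	}
	pthread_mutex_unlock(&r->lock);
}

static void *demo_create(void) { return malloc(1); }
static void demo_destroy(void *obj) { free(obj); }

int main(void)
{
	struct shared_res r = { .lock = PTHREAD_MUTEX_INITIALIZER };
	void *a = shared_res_get(&r, demo_create);  /* created here */
	void *b = shared_res_get(&r, demo_create);  /* reused, refcount = 2 */

	printf("same object: %d\n", a == b);
	shared_res_put(&r, demo_destroy);
	shared_res_put(&r, demo_destroy);           /* destroyed here */
	return 0;
}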
+static void mlx5_fs_destroy_fs_action(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_fs_hws_rule_action *fs_action)
+{
+ struct mlx5_fs_hws_context *fs_ctx = &ns->fs_hws_context;
+
+ switch (mlx5hws_action_get_type(fs_action->action)) {
+ case MLX5HWS_ACTION_TYP_CTR:
+ mlx5_fc_put_hws_action(fs_action->counter);
+ break;
+ case MLX5HWS_ACTION_TYP_ASO_METER:
+ mlx5_fs_put_action_aso_meter(fs_ctx, fs_action->exe_aso);
+ break;
+ case MLX5HWS_ACTION_TYP_SAMPLER:
+ mlx5_fs_put_dest_action_sampler(fs_ctx, fs_action->sampler_id);
+ break;
+ default:
+ mlx5hws_action_destroy(fs_action->action);
+ }
+}
+
+static void
+mlx5_fs_destroy_fs_actions(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_fs_hws_rule_action **fs_actions,
+ int *num_fs_actions)
+{
+ int i;
+
+ /* Free in reverse order to handle action dependencies */
+ for (i = *num_fs_actions - 1; i >= 0; i--)
+ mlx5_fs_destroy_fs_action(ns, *fs_actions + i);
+ *num_fs_actions = 0;
+ kfree(*fs_actions);
+ *fs_actions = NULL;
+}
+
+/* Splits FTE's actions into cached, rule and destination actions.
+ * The cached and destination actions are saved on the fte hws rule.
+ * The rule actions are returned as a parameter, together with their count.
+ * We want to support a rule with 32 destinations, which means we need to
+ * account for 32 destinations plus usually a counter plus one more action
+ * for a multi-destination flow table.
+ * 32 is SW limitation for array size, keep. HWS limitation is 16M STEs per matcher
+ */
+#define MLX5_FLOW_CONTEXT_ACTION_MAX 34
+static int mlx5_fs_fte_get_hws_actions(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_group *group,
+ struct fs_fte *fte,
+ struct mlx5hws_rule_action **ractions)
+{
+ struct mlx5_flow_act *fte_action = &fte->act_dests.action;
+ struct mlx5_fs_hws_context *fs_ctx = &ns->fs_hws_context;
+ struct mlx5hws_action_dest_attr *dest_actions;
+ struct mlx5hws_context *ctx = fs_ctx->hws_ctx;
+ struct mlx5_fs_hws_rule_action *fs_actions;
+ struct mlx5_core_dev *dev = ns->dev;
+ struct mlx5hws_action *dest_action;
+ struct mlx5hws_action *tmp_action;
+ struct mlx5_fs_hws_pr *pr_data;
+ struct mlx5_fs_hws_mh *mh_data;
+ bool delay_encap_set = false;
+ struct mlx5_flow_rule *dst;
+ int num_dest_actions = 0;
+ int num_fs_actions = 0;
+ int num_actions = 0;
+ int err;
+
+ *ractions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX, sizeof(**ractions),
+ GFP_KERNEL);
+ if (!*ractions) {
+ err = -ENOMEM;
+ goto out_err;
+ }
+
+ fs_actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX,
+ sizeof(*fs_actions), GFP_KERNEL);
+ if (!fs_actions) {
+ err = -ENOMEM;
+ goto free_actions_alloc;
+ }
+
+ dest_actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX,
+ sizeof(*dest_actions), GFP_KERNEL);
+ if (!dest_actions) {
+ err = -ENOMEM;
+ goto free_fs_actions_alloc;
+ }
+
+ /* The order of the actions are must to be kept, only the following
+ * order is supported by HW steering:
+ * HWS: decap -> remove_hdr -> pop_vlan -> modify header -> push_vlan
+ * -> reformat (insert_hdr/encap) -> ctr -> tag -> aso
+ * -> drop -> FWD:tbl/vport/sampler/tbl_num/range -> dest_array -> last
+ */
+ if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_DECAP) {
+ tmp_action = mlx5_fs_get_action_decap_tnl_l2_to_l2(fs_ctx);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_dest_actions_alloc;
+ }
+ (*ractions)[num_actions++].action = tmp_action;
+ }
+
+ if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) {
+ int reformat_type = fte_action->pkt_reformat->reformat_type;
+
+ if (fte_action->pkt_reformat->owner == MLX5_FLOW_RESOURCE_OWNER_FW) {
+ mlx5_core_err(dev, "FW-owned reformat can't be used in HWS rule\n");
+ err = -EINVAL;
+ goto free_actions;
+ }
+
+ if (reformat_type == MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2) {
+ pr_data = fte_action->pkt_reformat->fs_hws_action.pr_data;
+ (*ractions)[num_actions].reformat.offset = pr_data->offset;
+ (*ractions)[num_actions].reformat.hdr_idx = pr_data->hdr_idx;
+ (*ractions)[num_actions].reformat.data = pr_data->data;
+ (*ractions)[num_actions++].action =
+ fte_action->pkt_reformat->fs_hws_action.hws_action;
+ } else if (reformat_type == MLX5_REFORMAT_TYPE_REMOVE_HDR) {
+ (*ractions)[num_actions++].action =
+ fte_action->pkt_reformat->fs_hws_action.hws_action;
+ } else {
+ delay_encap_set = true;
+ }
+ }
+
+ if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) {
+ tmp_action = mlx5_fs_get_action_pop_vlan(fs_ctx);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ (*ractions)[num_actions++].action = tmp_action;
+ }
+
+ if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2) {
+ tmp_action = mlx5_fs_get_action_pop_vlan(fs_ctx);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ (*ractions)[num_actions++].action = tmp_action;
+ }
+
+ if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
+ mh_data = fte_action->modify_hdr->fs_hws_action.mh_data;
+ (*ractions)[num_actions].modify_header.offset = mh_data->offset;
+ (*ractions)[num_actions].modify_header.data = mh_data->data;
+ (*ractions)[num_actions++].action =
+ fte_action->modify_hdr->fs_hws_action.hws_action;
+ }
+
+ if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
+ tmp_action = mlx5_fs_get_action_push_vlan(fs_ctx);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ (*ractions)[num_actions].push_vlan.vlan_hdr =
+ htonl(mlx5_fs_calc_vlan_hdr(&fte_action->vlan[0]));
+ (*ractions)[num_actions++].action = tmp_action;
+ }
+
+ if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
+ tmp_action = mlx5_fs_get_action_push_vlan(fs_ctx);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ (*ractions)[num_actions].push_vlan.vlan_hdr =
+ htonl(mlx5_fs_calc_vlan_hdr(&fte_action->vlan[1]));
+ (*ractions)[num_actions++].action = tmp_action;
+ }
+
+ if (delay_encap_set) {
+ pr_data = fte_action->pkt_reformat->fs_hws_action.pr_data;
+ (*ractions)[num_actions].reformat.offset = pr_data->offset;
+ (*ractions)[num_actions].reformat.data = pr_data->data;
+ (*ractions)[num_actions++].action =
+ fte_action->pkt_reformat->fs_hws_action.hws_action;
+ }
+
+ if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+ list_for_each_entry(dst, &fte->node.children, node.list) {
+ struct mlx5_fc *counter;
+
+ if (dst->dest_attr.type !=
+ MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+ continue;
+
+ if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
+
+ counter = dst->dest_attr.counter;
+ tmp_action = mlx5_fc_get_hws_action(ctx, counter);
+ if (!tmp_action) {
+ err = -EINVAL;
+ goto free_actions;
+ }
+
+ (*ractions)[num_actions].counter.offset =
+ mlx5_fc_id(counter) - mlx5_fc_get_base_id(counter);
+ (*ractions)[num_actions++].action = tmp_action;
+ fs_actions[num_fs_actions].action = tmp_action;
+ fs_actions[num_fs_actions++].counter = counter;
+ }
+ }
+
+ if (fte->act_dests.flow_context.flow_tag) {
+ if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
+ tmp_action = mlx5_fs_get_action_tag(fs_ctx);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ (*ractions)[num_actions].tag.value = fte->act_dests.flow_context.flow_tag;
+ (*ractions)[num_actions++].action = tmp_action;
+ }
+
+ if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) {
+ if (fte_action->exe_aso.type != MLX5_EXE_ASO_FLOW_METER ||
+ num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
+
+ tmp_action = mlx5_fs_get_action_aso_meter(fs_ctx,
+ &fte_action->exe_aso);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ (*ractions)[num_actions].aso_meter.offset =
+ fte_action->exe_aso.flow_meter.meter_idx;
+ (*ractions)[num_actions].aso_meter.init_color =
+ fte_action->exe_aso.flow_meter.init_color;
+ (*ractions)[num_actions++].action = tmp_action;
+ fs_actions[num_fs_actions].action = tmp_action;
+ fs_actions[num_fs_actions++].exe_aso = &fte_action->exe_aso;
+ }
+
+ if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_DROP) {
+ dest_action = mlx5_fs_get_dest_action_drop(fs_ctx);
+ if (!dest_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ dest_actions[num_dest_actions++].dest = dest_action;
+ }
+
+ if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
+ list_for_each_entry(dst, &fte->node.children, node.list) {
+ struct mlx5_flow_destination *attr = &dst->dest_attr;
+ bool type_uplink =
+ attr->type == MLX5_FLOW_DESTINATION_TYPE_UPLINK;
+
+ if (num_fs_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
+ num_dest_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
+ if (attr->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+ continue;
+
+ switch (attr->type) {
+ case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
+ dest_action = mlx5_fs_get_dest_action_ft(fs_ctx, dst);
+ if (dst->dest_attr.ft->flags &
+ MLX5_FLOW_TABLE_UPLINK_VPORT)
+ dest_actions[num_dest_actions].is_wire_ft = true;
+ break;
+ case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
+ dest_action = mlx5_fs_get_dest_action_table_num(fs_ctx,
+ dst);
+ if (dest_action)
+ break;
+ dest_action = mlx5_fs_create_dest_action_table_num(fs_ctx,
+ dst);
+ fs_actions[num_fs_actions++].action = dest_action;
+ break;
+ case MLX5_FLOW_DESTINATION_TYPE_RANGE:
+ dest_action = mlx5_fs_create_dest_action_range(ctx, dst);
+ fs_actions[num_fs_actions++].action = dest_action;
+ break;
+ case MLX5_FLOW_DESTINATION_TYPE_UPLINK:
+ case MLX5_FLOW_DESTINATION_TYPE_VPORT:
+ dest_action = mlx5_fs_get_dest_action_vport(fs_ctx, dst,
+ type_uplink);
+ break;
+ case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER:
+ dest_action =
+ mlx5_fs_get_dest_action_sampler(fs_ctx,
+ dst);
+ fs_actions[num_fs_actions].action = dest_action;
+ fs_actions[num_fs_actions++].sampler_id =
+ dst->dest_attr.sampler_id;
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
+ if (!dest_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ dest_actions[num_dest_actions++].dest = dest_action;
+ }
+ }
+
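+	/* A single destination can be attached to the rule directly;
+	 * multiple destinations are folded into one dest-array action below.
+	 */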
+ if (num_dest_actions == 1) {
+ if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
+ (*ractions)[num_actions++].action = dest_actions->dest;
+ } else if (num_dest_actions > 1) {
+ u32 flow_source = fte->act_dests.flow_context.flow_source;
+ bool ignore_flow_level;
+
+ if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
+ num_fs_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
+ ignore_flow_level =
+ !!(fte_action->flags & FLOW_ACT_IGNORE_FLOW_LEVEL);
+ tmp_action = mlx5_fs_create_action_dest_array(ctx, dest_actions,
+ num_dest_actions,
+ ignore_flow_level,
+ flow_source);
+ if (!tmp_action) {
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
+ fs_actions[num_fs_actions++].action = tmp_action;
+ (*ractions)[num_actions++].action = tmp_action;
+ }
+
+ if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
+ num_fs_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
+
+ tmp_action = mlx5_fs_create_action_last(ctx);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ fs_actions[num_fs_actions++].action = tmp_action;
+ (*ractions)[num_actions++].action = tmp_action;
+
+ kfree(dest_actions);
+
+ /* Actions created specifically for this rule will be destroyed
+	 * once the rule is deleted.
+ */
+ fte->fs_hws_rule.num_fs_actions = num_fs_actions;
+ fte->fs_hws_rule.hws_fs_actions = fs_actions;
+
+ return 0;
+
+free_actions:
+ mlx5_fs_destroy_fs_actions(ns, &fs_actions, &num_fs_actions);
+free_dest_actions_alloc:
+ kfree(dest_actions);
+free_fs_actions_alloc:
+ kfree(fs_actions);
+free_actions_alloc:
+ kfree(*ractions);
+ *ractions = NULL;
+out_err:
+ return err;
+}
+
+static int mlx5_cmd_hws_create_fte(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_group *group,
+ struct fs_fte *fte)
+{
+ struct mlx5hws_match_parameters params;
+ struct mlx5hws_rule_action *ractions;
+ struct mlx5hws_bwc_rule *rule;
+ int err = 0;
+
+ if (mlx5_fs_cmd_is_fw_term_table(ft))
+ return mlx5_fs_cmd_get_fw_cmds()->create_fte(ns, ft, group, fte);
+
+ err = mlx5_fs_fte_get_hws_actions(ns, ft, group, fte, &ractions);
+ if (err)
+ goto out_err;
+
+ params.match_sz = sizeof(fte->val);
+ params.match_buf = fte->val;
+
+ rule = mlx5hws_bwc_rule_create(group->fs_hws_matcher.matcher, &params,
+ fte->act_dests.flow_context.flow_source,
+ ractions);
+ kfree(ractions);
+ if (!rule) {
+ err = -EINVAL;
+ goto free_actions;
+ }
+
+ fte->fs_hws_rule.bwc_rule = rule;
+ return 0;
+
+free_actions:
+ mlx5_fs_destroy_fs_actions(ns, &fte->fs_hws_rule.hws_fs_actions,
+ &fte->fs_hws_rule.num_fs_actions);
+out_err:
+ mlx5_core_err(ns->dev, "Failed to create hws rule err(%d)\n", err);
+ return err;
+}
+
+static int mlx5_cmd_hws_delete_fte(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct fs_fte *fte)
+{
+ struct mlx5_fs_hws_rule *rule = &fte->fs_hws_rule;
+ int err;
+
+ if (mlx5_fs_cmd_is_fw_term_table(ft))
+ return mlx5_fs_cmd_get_fw_cmds()->delete_fte(ns, ft, fte);
+
+ err = mlx5hws_bwc_rule_destroy(rule->bwc_rule);
+ rule->bwc_rule = NULL;
+
+ mlx5_fs_destroy_fs_actions(ns, &rule->hws_fs_actions,
+ &rule->num_fs_actions);
+
+ return err;
+}
+
+static int mlx5_cmd_hws_update_fte(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_group *group,
+ int modify_mask,
+ struct fs_fte *fte)
+{
+ int allowed_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) |
+ BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST) |
+ BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
+ struct mlx5_fs_hws_rule_action *saved_hws_fs_actions;
+ struct mlx5hws_rule_action *ractions;
+ int saved_num_fs_actions;
+ int ret;
+
+ if (mlx5_fs_cmd_is_fw_term_table(ft))
+ return mlx5_fs_cmd_get_fw_cmds()->update_fte(ns, ft, group,
+ modify_mask, fte);
+
+ if ((modify_mask & ~allowed_mask) != 0)
+ return -EINVAL;
+
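+	/* Save the current HWS actions so they can be restored if the
+	 * in-place rule update fails.
+	 */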
+ saved_hws_fs_actions = fte->fs_hws_rule.hws_fs_actions;
+ saved_num_fs_actions = fte->fs_hws_rule.num_fs_actions;
+
+ ret = mlx5_fs_fte_get_hws_actions(ns, ft, group, fte, &ractions);
+ if (ret)
+ return ret;
+
+ ret = mlx5hws_bwc_rule_action_update(fte->fs_hws_rule.bwc_rule, ractions);
+ kfree(ractions);
+ if (ret)
+ goto restore_actions;
+
+ mlx5_fs_destroy_fs_actions(ns, &saved_hws_fs_actions,
+ &saved_num_fs_actions);
+ return ret;
+
+restore_actions:
+ mlx5_fs_destroy_fs_actions(ns, &fte->fs_hws_rule.hws_fs_actions,
+ &fte->fs_hws_rule.num_fs_actions);
+ fte->fs_hws_rule.hws_fs_actions = saved_hws_fs_actions;
+ fte->fs_hws_rule.num_fs_actions = saved_num_fs_actions;
+ return ret;
+}
+
+static struct mlx5hws_action *
+mlx5_fs_create_action_remove_header_vlan(struct mlx5hws_context *ctx)
+{
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
+ struct mlx5hws_action_remove_header_attr remove_hdr_vlan = {};
+
+ /* MAC anchor not supported in HWS reformat, use VLAN anchor */
+ remove_hdr_vlan.anchor = MLX5_REFORMAT_CONTEXT_ANCHOR_VLAN_START;
+ remove_hdr_vlan.offset = 0;
+ remove_hdr_vlan.size = sizeof(struct vlan_hdr);
+ return mlx5hws_action_create_remove_header(ctx, &remove_hdr_vlan, flags);
+}
+
+static struct mlx5hws_action *
+mlx5_fs_get_action_remove_header_vlan(struct mlx5_fs_hws_context *fs_ctx,
+ struct mlx5_pkt_reformat_params *params)
+{
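+	/* The shared remove-header action only covers stripping a single
+	 * VLAN header right after the MAC header; any other remove-header
+	 * request is unsupported and returns NULL.
+	 */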
+ if (!params ||
+ params->param_0 != MLX5_REFORMAT_CONTEXT_ANCHOR_MAC_START ||
+ params->param_1 != offsetof(struct vlan_ethhdr, h_vlan_proto) ||
+ params->size != sizeof(struct vlan_hdr))
+ return NULL;
+
+ return fs_ctx->hws_pool.remove_hdr_vlan_action;
+}
+
+static int
+mlx5_fs_verify_insert_header_params(struct mlx5_core_dev *mdev,
+ struct mlx5_pkt_reformat_params *params)
+{
+ if ((!params->data && params->size) || (params->data && !params->size) ||
+ MLX5_CAP_GEN_2(mdev, max_reformat_insert_size) < params->size ||
+ MLX5_CAP_GEN_2(mdev, max_reformat_insert_offset) < params->param_1) {
+ mlx5_core_err(mdev, "Invalid reformat params for INSERT_HDR\n");
+ return -EINVAL;
+ }
+ if (params->param_0 != MLX5_FS_INSERT_HDR_VLAN_ANCHOR ||
+ params->param_1 != MLX5_FS_INSERT_HDR_VLAN_OFFSET ||
+ params->size != MLX5_FS_INSERT_HDR_VLAN_SIZE) {
+ mlx5_core_err(mdev, "Only vlan insert header supported\n");
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static int
+mlx5_fs_verify_encap_decap_params(struct mlx5_core_dev *dev,
+ struct mlx5_pkt_reformat_params *params)
+{
+ if (params->param_0 || params->param_1) {
+ mlx5_core_err(dev, "Invalid reformat params\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static struct mlx5_fs_pool *
+mlx5_fs_get_pr_encap_pool(struct mlx5_core_dev *dev, struct xarray *pr_pools,
+ enum mlx5hws_action_type reformat_type, size_t size)
+{
+ struct mlx5_fs_pool *pr_pool;
+ unsigned long index = size;
+ int err;
+
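+	/* Encap pools are cached in the xarray, keyed by reformat data size */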
+ pr_pool = xa_load(pr_pools, index);
+ if (pr_pool)
+ return pr_pool;
+
+ pr_pool = kzalloc(sizeof(*pr_pool), GFP_KERNEL);
+ if (!pr_pool)
+ return ERR_PTR(-ENOMEM);
+ err = mlx5_fs_hws_pr_pool_init(pr_pool, dev, size, reformat_type);
+ if (err)
+ goto free_pr_pool;
+ err = xa_insert(pr_pools, index, pr_pool, GFP_KERNEL);
+ if (err)
+ goto cleanup_pr_pool;
+ return pr_pool;
+
+cleanup_pr_pool:
+ mlx5_fs_hws_pr_pool_cleanup(pr_pool);
+free_pr_pool:
+ kfree(pr_pool);
+ return ERR_PTR(err);
+}
+
+static void
+mlx5_fs_destroy_pr_pool(struct mlx5_fs_pool *pool, struct xarray *pr_pools,
+ unsigned long index)
+{
+ xa_erase(pr_pools, index);
+ mlx5_fs_hws_pr_pool_cleanup(pool);
+ kfree(pool);
+}
+
+static int
+mlx5_cmd_hws_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_pkt_reformat_params *params,
+ enum mlx5_flow_namespace_type namespace,
+ struct mlx5_pkt_reformat *pkt_reformat)
+{
+ struct mlx5_fs_hws_context *fs_ctx = &ns->fs_hws_context;
+ struct mlx5_fs_hws_actions_pool *hws_pool;
+ struct mlx5hws_action *hws_action = NULL;
+ struct mlx5_fs_hws_pr *pr_data = NULL;
+ struct mlx5_fs_pool *pr_pool = NULL;
+ struct mlx5_core_dev *dev = ns->dev;
+ u8 hdr_idx = 0;
+ int err;
+
+ if (!params)
+ return -EINVAL;
+
+ hws_pool = &fs_ctx->hws_pool;
+
+ switch (params->type) {
+ case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
+ case MLX5_REFORMAT_TYPE_L2_TO_NVGRE:
+ case MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
+ if (mlx5_fs_verify_encap_decap_params(dev, params))
+ return -EINVAL;
+ pr_pool = mlx5_fs_get_pr_encap_pool(dev, &hws_pool->el2tol2tnl_pools,
+ MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2,
+ params->size);
+ if (IS_ERR(pr_pool))
+ return PTR_ERR(pr_pool);
+ break;
+ case MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
+ if (mlx5_fs_verify_encap_decap_params(dev, params))
+ return -EINVAL;
+ pr_pool = mlx5_fs_get_pr_encap_pool(dev, &hws_pool->el2tol3tnl_pools,
+ MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3,
+ params->size);
+ if (IS_ERR(pr_pool))
+ return PTR_ERR(pr_pool);
+ break;
+ case MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2:
+ if (mlx5_fs_verify_encap_decap_params(dev, params))
+ return -EINVAL;
+ pr_pool = &hws_pool->dl3tnltol2_pool;
+ hdr_idx = params->size == ETH_HLEN ?
+ MLX5_FS_DL3TNLTOL2_MAC_HDR_IDX :
+ MLX5_FS_DL3TNLTOL2_MAC_VLAN_HDR_IDX;
+ break;
+ case MLX5_REFORMAT_TYPE_INSERT_HDR:
+ err = mlx5_fs_verify_insert_header_params(dev, params);
+ if (err)
+ return err;
+ pr_pool = &hws_pool->insert_hdr_pool;
+ break;
+ case MLX5_REFORMAT_TYPE_REMOVE_HDR:
+ hws_action = mlx5_fs_get_action_remove_header_vlan(fs_ctx, params);
+ if (!hws_action)
+ mlx5_core_err(dev, "Only vlan remove header supported\n");
+ break;
+ default:
+ mlx5_core_err(ns->dev, "Packet-reformat not supported(%d)\n",
+ params->type);
+ return -EOPNOTSUPP;
+ }
+
+ if (pr_pool) {
+ pr_data = mlx5_fs_hws_pr_pool_acquire_pr(pr_pool);
+ if (IS_ERR_OR_NULL(pr_data))
+ return !pr_data ? -EINVAL : PTR_ERR(pr_data);
+ hws_action = pr_data->bulk->hws_action;
+ if (!hws_action) {
+ mlx5_core_err(dev,
+ "Failed allocating packet-reformat action\n");
+ err = -EINVAL;
+ goto release_pr;
+ }
+ pr_data->data = kmemdup(params->data, params->size, GFP_KERNEL);
+ if (!pr_data->data) {
+ err = -ENOMEM;
+ goto release_pr;
+ }
+ pr_data->hdr_idx = hdr_idx;
+ pr_data->data_size = params->size;
+ pkt_reformat->fs_hws_action.pr_data = pr_data;
+ }
+
+ mutex_init(&pkt_reformat->fs_hws_action.lock);
+ pkt_reformat->owner = MLX5_FLOW_RESOURCE_OWNER_HWS;
+ pkt_reformat->fs_hws_action.hws_action = hws_action;
+ return 0;
+
+release_pr:
+ if (pr_pool && pr_data)
+ mlx5_fs_hws_pr_pool_release_pr(pr_pool, pr_data);
+ return err;
+}
+
+static void mlx5_cmd_hws_packet_reformat_dealloc(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_pkt_reformat *pkt_reformat)
+{
+ struct mlx5_fs_hws_actions_pool *hws_pool = &ns->fs_hws_context.hws_pool;
+ struct mlx5_core_dev *dev = ns->dev;
+ struct mlx5_fs_hws_pr *pr_data;
+ struct mlx5_fs_pool *pr_pool;
+
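+	/* Release the FW reformat object that was lazily allocated for this
+	 * HWS reformat action, if any.
+	 */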
+ if (pkt_reformat->fs_hws_action.fw_reformat_id != 0) {
+ struct mlx5_pkt_reformat fw_pkt_reformat = { 0 };
+
+ fw_pkt_reformat.id = pkt_reformat->fs_hws_action.fw_reformat_id;
+ mlx5_fs_cmd_get_fw_cmds()->
+ packet_reformat_dealloc(ns, &fw_pkt_reformat);
+ pkt_reformat->fs_hws_action.fw_reformat_id = 0;
+ }
+
+ if (pkt_reformat->reformat_type == MLX5_REFORMAT_TYPE_REMOVE_HDR)
+ return;
+
+ if (!pkt_reformat->fs_hws_action.pr_data) {
+ mlx5_core_err(ns->dev, "Failed release packet-reformat\n");
+ return;
+ }
+ pr_data = pkt_reformat->fs_hws_action.pr_data;
+
+ switch (pkt_reformat->reformat_type) {
+ case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
+ case MLX5_REFORMAT_TYPE_L2_TO_NVGRE:
+ case MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
+ pr_pool = mlx5_fs_get_pr_encap_pool(dev, &hws_pool->el2tol2tnl_pools,
+ MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2,
+ pr_data->data_size);
+ break;
+ case MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
+		pr_pool = mlx5_fs_get_pr_encap_pool(dev, &hws_pool->el2tol3tnl_pools,
+						    MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3,
+ pr_data->data_size);
+ break;
+ case MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2:
+ pr_pool = &hws_pool->dl3tnltol2_pool;
+ break;
+ case MLX5_REFORMAT_TYPE_INSERT_HDR:
+ pr_pool = &hws_pool->insert_hdr_pool;
+ break;
+ default:
+ mlx5_core_err(ns->dev, "Unknown packet-reformat type\n");
+ return;
+ }
+ if (!pkt_reformat->fs_hws_action.pr_data || IS_ERR(pr_pool)) {
+ mlx5_core_err(ns->dev, "Failed release packet-reformat\n");
+ return;
+ }
+ kfree(pr_data->data);
+ mlx5_fs_hws_pr_pool_release_pr(pr_pool, pr_data);
+ pkt_reformat->fs_hws_action.pr_data = NULL;
+}
+
+static struct mlx5_fs_pool *
+mlx5_fs_create_mh_pool(struct mlx5_core_dev *dev,
+ struct mlx5hws_action_mh_pattern *pattern,
+ struct xarray *mh_pools, unsigned long index)
+{
+ struct mlx5_fs_pool *pool;
+ int err;
+
+ pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ return ERR_PTR(-ENOMEM);
+ err = mlx5_fs_hws_mh_pool_init(pool, dev, pattern);
+ if (err)
+ goto free_pool;
+ err = xa_insert(mh_pools, index, pool, GFP_KERNEL);
+ if (err)
+ goto cleanup_pool;
+ return pool;
+
+cleanup_pool:
+ mlx5_fs_hws_mh_pool_cleanup(pool);
+free_pool:
+ kfree(pool);
+ return ERR_PTR(err);
+}
+
+static void
+mlx5_fs_destroy_mh_pool(struct mlx5_fs_pool *pool, struct xarray *mh_pools,
+ unsigned long index)
+{
+ xa_erase(mh_pools, index);
+ mlx5_fs_hws_mh_pool_cleanup(pool);
+ kfree(pool);
+}
+
+static int mlx5_cmd_hws_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
+ u8 namespace, u8 num_actions,
+ void *modify_actions,
+ struct mlx5_modify_hdr *modify_hdr)
+{
+ struct mlx5_fs_hws_actions_pool *hws_pool = &ns->fs_hws_context.hws_pool;
+ struct mlx5hws_action_mh_pattern pattern = {};
+ struct mlx5_fs_hws_mh *mh_data = NULL;
+ struct mlx5hws_action *hws_action;
+ struct mlx5_fs_pool *pool;
+ unsigned long i, cnt = 0;
+ bool known_pattern;
+ int err;
+
+ pattern.sz = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto) * num_actions;
+ pattern.data = modify_actions;
+
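+	/* Reuse an existing pool whose modify-header pattern matches;
+	 * otherwise a new pool is created for this pattern.
+	 */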
+ known_pattern = false;
+ xa_for_each(&hws_pool->mh_pools, i, pool) {
+ if (mlx5_fs_hws_mh_pool_match(pool, &pattern)) {
+ known_pattern = true;
+ break;
+ }
+ cnt++;
+ }
+
+ if (!known_pattern) {
+ pool = mlx5_fs_create_mh_pool(ns->dev, &pattern,
+ &hws_pool->mh_pools, cnt);
+ if (IS_ERR(pool))
+ return PTR_ERR(pool);
+ }
+ mh_data = mlx5_fs_hws_mh_pool_acquire_mh(pool);
+ if (IS_ERR(mh_data)) {
+ err = PTR_ERR(mh_data);
+ goto destroy_pool;
+ }
+ hws_action = mh_data->bulk->hws_action;
+ mh_data->data = kmemdup(pattern.data, pattern.sz, GFP_KERNEL);
+ if (!mh_data->data) {
+ err = -ENOMEM;
+ goto release_mh;
+ }
+ modify_hdr->fs_hws_action.mh_data = mh_data;
+ modify_hdr->fs_hws_action.fs_pool = pool;
+ modify_hdr->owner = MLX5_FLOW_RESOURCE_OWNER_SW;
+ modify_hdr->fs_hws_action.hws_action = hws_action;
+
+ return 0;
+
+release_mh:
+ mlx5_fs_hws_mh_pool_release_mh(pool, mh_data);
+destroy_pool:
+ if (!known_pattern)
+ mlx5_fs_destroy_mh_pool(pool, &hws_pool->mh_pools, cnt);
+ return err;
+}
+
+static void mlx5_cmd_hws_modify_header_dealloc(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_modify_hdr *modify_hdr)
+{
+ struct mlx5_fs_hws_mh *mh_data;
+ struct mlx5_fs_pool *pool;
+
+ if (!modify_hdr->fs_hws_action.fs_pool || !modify_hdr->fs_hws_action.mh_data) {
+ mlx5_core_err(ns->dev, "Failed release modify-header\n");
+ return;
+ }
+
+ mh_data = modify_hdr->fs_hws_action.mh_data;
+ kfree(mh_data->data);
+ pool = modify_hdr->fs_hws_action.fs_pool;
+ mlx5_fs_hws_mh_pool_release_mh(pool, mh_data);
+ modify_hdr->fs_hws_action.mh_data = NULL;
+}
+
+int
+mlx5_fs_hws_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat,
+ u32 *reformat_id)
+{
+ enum mlx5_flow_namespace_type ns_type = pkt_reformat->ns_type;
+ struct mutex *lock = &pkt_reformat->fs_hws_action.lock;
+ u32 *id = &pkt_reformat->fs_hws_action.fw_reformat_id;
+ struct mlx5_pkt_reformat fw_pkt_reformat = { 0 };
+ struct mlx5_pkt_reformat_params params = { 0 };
+ struct mlx5_flow_root_namespace *ns;
+ struct mlx5_core_dev *dev;
+ int ret;
+
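+	/* Lazily allocate a FW reformat object the first time an ID is
+	 * requested and cache it for subsequent callers.
+	 */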
+ mutex_lock(lock);
+
+ if (*id != 0) {
+ *reformat_id = *id;
+ ret = 0;
+ goto unlock;
+ }
+
+ dev = mlx5hws_action_get_dev(pkt_reformat->fs_hws_action.hws_action);
+ if (!dev) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ ns = mlx5_get_root_namespace(dev, ns_type);
+ if (!ns) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ params.type = pkt_reformat->reformat_type;
+ params.size = pkt_reformat->fs_hws_action.pr_data->data_size;
+ params.data = pkt_reformat->fs_hws_action.pr_data->data;
+
+ ret = mlx5_fs_cmd_get_fw_cmds()->
+ packet_reformat_alloc(ns, &params, ns_type, &fw_pkt_reformat);
+ if (ret)
+ goto unlock;
+
+ *id = fw_pkt_reformat.id;
+ *reformat_id = *id;
+ ret = 0;
+
+unlock:
+ mutex_unlock(lock);
+
+ return ret;
+}
+
+static int mlx5_cmd_hws_create_match_definer(struct mlx5_flow_root_namespace *ns,
+ u16 format_id, u32 *match_mask)
+{
+ return -EOPNOTSUPP;
+}
+
+static int mlx5_cmd_hws_destroy_match_definer(struct mlx5_flow_root_namespace *ns,
+ int definer_id)
+{
+ return -EOPNOTSUPP;
+}
+
+static u32 mlx5_cmd_hws_get_capabilities(struct mlx5_flow_root_namespace *ns,
+ enum fs_flow_table_type ft_type)
+{
+ if (ft_type != FS_FT_FDB)
+ return 0;
+
+ return MLX5_FLOW_STEERING_CAP_VLAN_PUSH_ON_RX |
+ MLX5_FLOW_STEERING_CAP_VLAN_POP_ON_TX |
+ MLX5_FLOW_STEERING_CAP_MATCH_RANGES;
+}
+
+bool mlx5_fs_hws_is_supported(struct mlx5_core_dev *dev)
+{
+ return mlx5hws_is_supported(dev);
+}
+
+static const struct mlx5_flow_cmds mlx5_flow_cmds_hws = {
+ .create_flow_table = mlx5_cmd_hws_create_flow_table,
+ .destroy_flow_table = mlx5_cmd_hws_destroy_flow_table,
+ .modify_flow_table = mlx5_cmd_hws_modify_flow_table,
+ .update_root_ft = mlx5_cmd_hws_update_root_ft,
+ .create_flow_group = mlx5_cmd_hws_create_flow_group,
+ .destroy_flow_group = mlx5_cmd_hws_destroy_flow_group,
+ .create_fte = mlx5_cmd_hws_create_fte,
+ .delete_fte = mlx5_cmd_hws_delete_fte,
+ .update_fte = mlx5_cmd_hws_update_fte,
+ .packet_reformat_alloc = mlx5_cmd_hws_packet_reformat_alloc,
+ .packet_reformat_dealloc = mlx5_cmd_hws_packet_reformat_dealloc,
+ .modify_header_alloc = mlx5_cmd_hws_modify_header_alloc,
+ .modify_header_dealloc = mlx5_cmd_hws_modify_header_dealloc,
+ .create_match_definer = mlx5_cmd_hws_create_match_definer,
+ .destroy_match_definer = mlx5_cmd_hws_destroy_match_definer,
+ .create_ns = mlx5_cmd_hws_create_ns,
+ .destroy_ns = mlx5_cmd_hws_destroy_ns,
+ .set_peer = mlx5_cmd_hws_set_peer,
+ .get_capabilities = mlx5_cmd_hws_get_capabilities,
+};
+
+const struct mlx5_flow_cmds *mlx5_fs_cmd_get_hws_cmds(void)
+{
+ return &mlx5_flow_cmds_hws;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h
new file mode 100644
index 000000000000..b92d55b2d147
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h
@@ -0,0 +1,120 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2025 NVIDIA Corporation & Affiliates */
+
+#ifndef _MLX5_FS_HWS_
+#define _MLX5_FS_HWS_
+
+#include "mlx5hws.h"
+#include "fs_hws_pools.h"
+
+struct mlx5_fs_hws_actions_pool {
+ struct mlx5hws_action *tag_action;
+ struct mlx5hws_action *pop_vlan_action;
+ struct mlx5hws_action *push_vlan_action;
+ struct mlx5hws_action *drop_action;
+ struct mlx5hws_action *decapl2_action;
+ struct mlx5hws_action *remove_hdr_vlan_action;
+ struct mlx5_fs_pool insert_hdr_pool;
+ struct mlx5_fs_pool dl3tnltol2_pool;
+ struct xarray el2tol3tnl_pools;
+ struct xarray el2tol2tnl_pools;
+ struct xarray mh_pools;
+ struct xarray table_dests;
+ struct xarray vport_vhca_dests;
+ struct xarray vport_dests;
+ struct xarray aso_meters;
+ struct xarray sample_dests;
+};
+
+struct mlx5_fs_hws_context {
+ struct mlx5hws_context *hws_ctx;
+ struct mlx5_fs_hws_actions_pool hws_pool;
+};
+
+struct mlx5_fs_hws_table {
+ struct mlx5hws_table *hws_table;
+ bool miss_ft_set;
+};
+
+struct mlx5_fs_hws_action {
+ struct mlx5hws_action *hws_action;
+ struct mlx5_fs_pool *fs_pool;
+ struct mlx5_fs_hws_pr *pr_data;
+ struct mlx5_fs_hws_mh *mh_data;
+ u32 fw_reformat_id;
+ /* Protect `fw_reformat_id` against being initialized from multiple
+ * threads.
+ */
+ struct mutex lock;
+};
+
+struct mlx5_fs_hws_matcher {
+ struct mlx5hws_bwc_matcher *matcher;
+};
+
+struct mlx5_fs_hws_rule_action {
+ struct mlx5hws_action *action;
+ union {
+ struct mlx5_fc *counter;
+ struct mlx5_exe_aso *exe_aso;
+ u32 sampler_id;
+ };
+};
+
+struct mlx5_fs_hws_rule {
+ struct mlx5hws_bwc_rule *bwc_rule;
+ struct mlx5_fs_hws_rule_action *hws_fs_actions;
+ int num_fs_actions;
+};
+
+struct mlx5_fs_hws_data {
+ struct mlx5hws_action *hws_action;
+ struct mutex lock; /* protects hws_action */
+ refcount_t hws_action_refcount;
+};
+
+struct mlx5_fs_hws_create_action_ctx {
+ enum mlx5hws_action_type actions_type;
+ struct mlx5hws_context *hws_ctx;
+ u32 id;
+ union {
+ u8 return_reg_id;
+ };
+};
+
+struct mlx5hws_action *
+mlx5_fs_get_hws_action(struct mlx5_fs_hws_data *fs_hws_data,
+ struct mlx5_fs_hws_create_action_ctx *create_ctx);
+void mlx5_fs_put_hws_action(struct mlx5_fs_hws_data *fs_hws_data);
+
+#ifdef CONFIG_MLX5_HW_STEERING
+
+int
+mlx5_fs_hws_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat,
+ u32 *reformat_id);
+
+bool mlx5_fs_hws_is_supported(struct mlx5_core_dev *dev);
+
+const struct mlx5_flow_cmds *mlx5_fs_cmd_get_hws_cmds(void);
+
+#else
+
+static inline int
+mlx5_fs_hws_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat,
+ u32 *reformat_id)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline bool mlx5_fs_hws_is_supported(struct mlx5_core_dev *dev)
+{
+ return false;
+}
+
+static inline const struct mlx5_flow_cmds *mlx5_fs_cmd_get_hws_cmds(void)
+{
+ return NULL;
+}
+
+#endif /* CONFIG_MLX5_HW_STEERING */
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c
new file mode 100644
index 000000000000..f1ecdba74e1f
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c
@@ -0,0 +1,421 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2025 NVIDIA Corporation & Affiliates */
+
+#include <mlx5_core.h>
+#include "fs_hws_pools.h"
+
+#define MLX5_FS_HWS_DEFAULT_BULK_LEN 65536
+#define MLX5_FS_HWS_POOL_MAX_THRESHOLD BIT(18)
+#define MLX5_FS_HWS_POOL_USED_BUFF_RATIO 10
+
+static struct mlx5hws_action *
+mlx5_fs_dl3tnltol2_bulk_action_create(struct mlx5hws_context *ctx)
+{
+ struct mlx5hws_action_reformat_header reformat_hdr[2] = {};
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB;
+ enum mlx5hws_action_type reformat_type;
+ u32 log_bulk_size;
+
+ reformat_type = MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2;
+ reformat_hdr[MLX5_FS_DL3TNLTOL2_MAC_HDR_IDX].sz = ETH_HLEN;
+ reformat_hdr[MLX5_FS_DL3TNLTOL2_MAC_VLAN_HDR_IDX].sz = ETH_HLEN + VLAN_HLEN;
+
+ log_bulk_size = ilog2(MLX5_FS_HWS_DEFAULT_BULK_LEN);
+ return mlx5hws_action_create_reformat(ctx, reformat_type, 2,
+ reformat_hdr, log_bulk_size, flags);
+}
+
+static struct mlx5hws_action *
+mlx5_fs_el2tol3tnl_bulk_action_create(struct mlx5hws_context *ctx, size_t data_size)
+{
+ struct mlx5hws_action_reformat_header reformat_hdr = {};
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB;
+ enum mlx5hws_action_type reformat_type;
+ u32 log_bulk_size;
+
+ reformat_type = MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3;
+ reformat_hdr.sz = data_size;
+
+ log_bulk_size = ilog2(MLX5_FS_HWS_DEFAULT_BULK_LEN);
+ return mlx5hws_action_create_reformat(ctx, reformat_type, 1,
+ &reformat_hdr, log_bulk_size, flags);
+}
+
+static struct mlx5hws_action *
+mlx5_fs_el2tol2tnl_bulk_action_create(struct mlx5hws_context *ctx, size_t data_size)
+{
+ struct mlx5hws_action_reformat_header reformat_hdr = {};
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB;
+ enum mlx5hws_action_type reformat_type;
+ u32 log_bulk_size;
+
+ reformat_type = MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
+ reformat_hdr.sz = data_size;
+
+ log_bulk_size = ilog2(MLX5_FS_HWS_DEFAULT_BULK_LEN);
+ return mlx5hws_action_create_reformat(ctx, reformat_type, 1,
+ &reformat_hdr, log_bulk_size, flags);
+}
+
+static struct mlx5hws_action *
+mlx5_fs_insert_hdr_bulk_action_create(struct mlx5hws_context *ctx)
+{
+ struct mlx5hws_action_insert_header insert_hdr = {};
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB;
+ u32 log_bulk_size;
+
+ log_bulk_size = ilog2(MLX5_FS_HWS_DEFAULT_BULK_LEN);
+ insert_hdr.hdr.sz = MLX5_FS_INSERT_HDR_VLAN_SIZE;
+ insert_hdr.anchor = MLX5_FS_INSERT_HDR_VLAN_ANCHOR;
+ insert_hdr.offset = MLX5_FS_INSERT_HDR_VLAN_OFFSET;
+
+ return mlx5hws_action_create_insert_header(ctx, 1, &insert_hdr,
+ log_bulk_size, flags);
+}
+
+static struct mlx5hws_action *
+mlx5_fs_pr_bulk_action_create(struct mlx5_core_dev *dev,
+ struct mlx5_fs_hws_pr_pool_ctx *pr_pool_ctx)
+{
+ struct mlx5_flow_root_namespace *root_ns;
+ struct mlx5hws_context *ctx;
+ size_t encap_data_size;
+
+ root_ns = mlx5_get_root_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
+ if (!root_ns || root_ns->mode != MLX5_FLOW_STEERING_MODE_HMFS)
+ return NULL;
+
+ ctx = root_ns->fs_hws_context.hws_ctx;
+ if (!ctx)
+ return NULL;
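+	/* Encap was deferred above so that it is applied only after the
+	 * modify-header and VLAN push actions.
+	 */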
+
+ encap_data_size = pr_pool_ctx->encap_data_size;
+ switch (pr_pool_ctx->reformat_type) {
+ case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
+ return mlx5_fs_dl3tnltol2_bulk_action_create(ctx);
+ case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3:
+ return mlx5_fs_el2tol3tnl_bulk_action_create(ctx, encap_data_size);
+ case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2:
+ return mlx5_fs_el2tol2tnl_bulk_action_create(ctx, encap_data_size);
+ case MLX5HWS_ACTION_TYP_INSERT_HEADER:
+ return mlx5_fs_insert_hdr_bulk_action_create(ctx);
+ default:
+ return NULL;
+ }
+ return NULL;
+}
+
+static struct mlx5_fs_bulk *
+mlx5_fs_hws_pr_bulk_create(struct mlx5_core_dev *dev, void *pool_ctx)
+{
+ struct mlx5_fs_hws_pr_pool_ctx *pr_pool_ctx;
+ struct mlx5_fs_hws_pr_bulk *pr_bulk;
+ int bulk_len;
+ int i;
+
+ if (!pool_ctx)
+ return NULL;
+ pr_pool_ctx = pool_ctx;
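+			/* The counter HWS action covers the whole counter
+			 * bulk; only the offset within the bulk is stored
+			 * per rule.
+			 */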
+ bulk_len = MLX5_FS_HWS_DEFAULT_BULK_LEN;
+ pr_bulk = kvzalloc(struct_size(pr_bulk, prs_data, bulk_len), GFP_KERNEL);
+ if (!pr_bulk)
+ return NULL;
+
+ if (mlx5_fs_bulk_init(dev, &pr_bulk->fs_bulk, bulk_len))
+ goto free_pr_bulk;
+
+ for (i = 0; i < bulk_len; i++) {
+ pr_bulk->prs_data[i].bulk = pr_bulk;
+ pr_bulk->prs_data[i].offset = i;
+ }
+
+ pr_bulk->hws_action = mlx5_fs_pr_bulk_action_create(dev, pr_pool_ctx);
+ if (!pr_bulk->hws_action)
+ goto cleanup_fs_bulk;
+
+ return &pr_bulk->fs_bulk;
+
+cleanup_fs_bulk:
+ mlx5_fs_bulk_cleanup(&pr_bulk->fs_bulk);
+free_pr_bulk:
+ kvfree(pr_bulk);
+ return NULL;
+}
+
+static int
+mlx5_fs_hws_pr_bulk_destroy(struct mlx5_core_dev *dev, struct mlx5_fs_bulk *fs_bulk)
+{
+ struct mlx5_fs_hws_pr_bulk *pr_bulk;
+
+ pr_bulk = container_of(fs_bulk, struct mlx5_fs_hws_pr_bulk, fs_bulk);
+ if (mlx5_fs_bulk_get_free_amount(fs_bulk) < fs_bulk->bulk_len) {
+ mlx5_core_err(dev, "Freeing bulk before all reformats were released\n");
+ return -EBUSY;
+ }
+
+ mlx5hws_action_destroy(pr_bulk->hws_action);
+ mlx5_fs_bulk_cleanup(fs_bulk);
+ kvfree(pr_bulk);
+
+ return 0;
+}
+
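+/* Keep the free-units threshold at ~10% of the units currently in use,
+ * capped at MLX5_FS_HWS_POOL_MAX_THRESHOLD.
+ */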
+static void mlx5_hws_pool_update_threshold(struct mlx5_fs_pool *hws_pool)
+{
+ hws_pool->threshold = min_t(int, MLX5_FS_HWS_POOL_MAX_THRESHOLD,
+ hws_pool->used_units / MLX5_FS_HWS_POOL_USED_BUFF_RATIO);
+}
+
+static const struct mlx5_fs_pool_ops mlx5_fs_hws_pr_pool_ops = {
+ .bulk_create = mlx5_fs_hws_pr_bulk_create,
+ .bulk_destroy = mlx5_fs_hws_pr_bulk_destroy,
+ .update_threshold = mlx5_hws_pool_update_threshold,
+};
+
+int mlx5_fs_hws_pr_pool_init(struct mlx5_fs_pool *pr_pool,
+ struct mlx5_core_dev *dev, size_t encap_data_size,
+ enum mlx5hws_action_type reformat_type)
+{
+ struct mlx5_fs_hws_pr_pool_ctx *pr_pool_ctx;
+
+ if (reformat_type != MLX5HWS_ACTION_TYP_INSERT_HEADER &&
+ reformat_type != MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2 &&
+ reformat_type != MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3 &&
+ reformat_type != MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2)
+ return -EOPNOTSUPP;
+
+ pr_pool_ctx = kzalloc(sizeof(*pr_pool_ctx), GFP_KERNEL);
+ if (!pr_pool_ctx)
+ return -ENOMEM;
+ pr_pool_ctx->reformat_type = reformat_type;
+ pr_pool_ctx->encap_data_size = encap_data_size;
+ mlx5_fs_pool_init(pr_pool, dev, &mlx5_fs_hws_pr_pool_ops, pr_pool_ctx);
+ return 0;
+}
+
+void mlx5_fs_hws_pr_pool_cleanup(struct mlx5_fs_pool *pr_pool)
+{
+ struct mlx5_fs_hws_pr_pool_ctx *pr_pool_ctx;
+
+ mlx5_fs_pool_cleanup(pr_pool);
+ pr_pool_ctx = pr_pool->pool_ctx;
+ if (!pr_pool_ctx)
+ return;
+ kfree(pr_pool_ctx);
+}
+
+struct mlx5_fs_hws_pr *
+mlx5_fs_hws_pr_pool_acquire_pr(struct mlx5_fs_pool *pr_pool)
+{
+ struct mlx5_fs_pool_index pool_index = {};
+ struct mlx5_fs_hws_pr_bulk *pr_bulk;
+ int err;
+
+ err = mlx5_fs_pool_acquire_index(pr_pool, &pool_index);
+ if (err)
+ return ERR_PTR(err);
+ pr_bulk = container_of(pool_index.fs_bulk, struct mlx5_fs_hws_pr_bulk,
+ fs_bulk);
+ return &pr_bulk->prs_data[pool_index.index];
+}
+
+void mlx5_fs_hws_pr_pool_release_pr(struct mlx5_fs_pool *pr_pool,
+ struct mlx5_fs_hws_pr *pr_data)
+{
+ struct mlx5_fs_bulk *fs_bulk = &pr_data->bulk->fs_bulk;
+ struct mlx5_fs_pool_index pool_index = {};
+ struct mlx5_core_dev *dev = pr_pool->dev;
+
+ pool_index.fs_bulk = fs_bulk;
+ pool_index.index = pr_data->offset;
+ if (mlx5_fs_pool_release_index(pr_pool, &pool_index))
+ mlx5_core_warn(dev, "Attempted to release packet reformat which is not acquired\n");
+}
+
+struct mlx5hws_action *mlx5_fs_hws_pr_get_action(struct mlx5_fs_hws_pr *pr_data)
+{
+ return pr_data->bulk->hws_action;
+}
+
+static struct mlx5hws_action *
+mlx5_fs_mh_bulk_action_create(struct mlx5hws_context *ctx,
+ struct mlx5hws_action_mh_pattern *pattern)
+{
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB;
+ u32 log_bulk_size;
+
+ log_bulk_size = ilog2(MLX5_FS_HWS_DEFAULT_BULK_LEN);
+ return mlx5hws_action_create_modify_header(ctx, 1, pattern,
+ log_bulk_size, flags);
+}
+
+static struct mlx5_fs_bulk *
+mlx5_fs_hws_mh_bulk_create(struct mlx5_core_dev *dev, void *pool_ctx)
+{
+ struct mlx5hws_action_mh_pattern *pattern;
+ struct mlx5_flow_root_namespace *root_ns;
+ struct mlx5_fs_hws_mh_bulk *mh_bulk;
+ struct mlx5hws_context *ctx;
+ int bulk_len;
+
+ if (!pool_ctx)
+ return NULL;
+
+ root_ns = mlx5_get_root_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
+ if (!root_ns || root_ns->mode != MLX5_FLOW_STEERING_MODE_HMFS)
+ return NULL;
+
+ ctx = root_ns->fs_hws_context.hws_ctx;
+ if (!ctx)
+ return NULL;
+
+ pattern = pool_ctx;
+ bulk_len = MLX5_FS_HWS_DEFAULT_BULK_LEN;
+ mh_bulk = kvzalloc(struct_size(mh_bulk, mhs_data, bulk_len), GFP_KERNEL);
+ if (!mh_bulk)
+ return NULL;
+
+ if (mlx5_fs_bulk_init(dev, &mh_bulk->fs_bulk, bulk_len))
+ goto free_mh_bulk;
+
+ for (int i = 0; i < bulk_len; i++) {
+ mh_bulk->mhs_data[i].bulk = mh_bulk;
+ mh_bulk->mhs_data[i].offset = i;
+ }
+
+ mh_bulk->hws_action = mlx5_fs_mh_bulk_action_create(ctx, pattern);
+ if (!mh_bulk->hws_action)
+ goto cleanup_fs_bulk;
+
+ return &mh_bulk->fs_bulk;
+
+cleanup_fs_bulk:
+ mlx5_fs_bulk_cleanup(&mh_bulk->fs_bulk);
+free_mh_bulk:
+ kvfree(mh_bulk);
+ return NULL;
+}
+
+static int
+mlx5_fs_hws_mh_bulk_destroy(struct mlx5_core_dev *dev,
+ struct mlx5_fs_bulk *fs_bulk)
+{
+ struct mlx5_fs_hws_mh_bulk *mh_bulk;
+
+ mh_bulk = container_of(fs_bulk, struct mlx5_fs_hws_mh_bulk, fs_bulk);
+ if (mlx5_fs_bulk_get_free_amount(fs_bulk) < fs_bulk->bulk_len) {
+ mlx5_core_err(dev, "Freeing bulk before all modify header were released\n");
+ return -EBUSY;
+ }
+
+ mlx5hws_action_destroy(mh_bulk->hws_action);
+ mlx5_fs_bulk_cleanup(fs_bulk);
+ kvfree(mh_bulk);
+
+ return 0;
+}
+
+static const struct mlx5_fs_pool_ops mlx5_fs_hws_mh_pool_ops = {
+ .bulk_create = mlx5_fs_hws_mh_bulk_create,
+ .bulk_destroy = mlx5_fs_hws_mh_bulk_destroy,
+ .update_threshold = mlx5_hws_pool_update_threshold,
+};
+
+int mlx5_fs_hws_mh_pool_init(struct mlx5_fs_pool *fs_hws_mh_pool,
+ struct mlx5_core_dev *dev,
+ struct mlx5hws_action_mh_pattern *pattern)
+{
+ struct mlx5hws_action_mh_pattern *pool_pattern;
+
+ pool_pattern = kzalloc(sizeof(*pool_pattern), GFP_KERNEL);
+ if (!pool_pattern)
+ return -ENOMEM;
+ pool_pattern->data = kmemdup(pattern->data, pattern->sz, GFP_KERNEL);
+ if (!pool_pattern->data) {
+ kfree(pool_pattern);
+ return -ENOMEM;
+ }
+ pool_pattern->sz = pattern->sz;
+ mlx5_fs_pool_init(fs_hws_mh_pool, dev, &mlx5_fs_hws_mh_pool_ops,
+ pool_pattern);
+ return 0;
+}
+
+void mlx5_fs_hws_mh_pool_cleanup(struct mlx5_fs_pool *fs_hws_mh_pool)
+{
+ struct mlx5hws_action_mh_pattern *pool_pattern;
+
+ mlx5_fs_pool_cleanup(fs_hws_mh_pool);
+ pool_pattern = fs_hws_mh_pool->pool_ctx;
+ if (!pool_pattern)
+ return;
+ kfree(pool_pattern->data);
+ kfree(pool_pattern);
+}
+
+struct mlx5_fs_hws_mh *
+mlx5_fs_hws_mh_pool_acquire_mh(struct mlx5_fs_pool *mh_pool)
+{
+ struct mlx5_fs_pool_index pool_index = {};
+ struct mlx5_fs_hws_mh_bulk *mh_bulk;
+ int err;
+
+ err = mlx5_fs_pool_acquire_index(mh_pool, &pool_index);
+ if (err)
+ return ERR_PTR(err);
+ mh_bulk = container_of(pool_index.fs_bulk, struct mlx5_fs_hws_mh_bulk,
+ fs_bulk);
+ return &mh_bulk->mhs_data[pool_index.index];
+}
+
+void mlx5_fs_hws_mh_pool_release_mh(struct mlx5_fs_pool *mh_pool,
+ struct mlx5_fs_hws_mh *mh_data)
+{
+ struct mlx5_fs_bulk *fs_bulk = &mh_data->bulk->fs_bulk;
+ struct mlx5_fs_pool_index pool_index = {};
+ struct mlx5_core_dev *dev = mh_pool->dev;
+
+ pool_index.fs_bulk = fs_bulk;
+ pool_index.index = mh_data->offset;
+ if (mlx5_fs_pool_release_index(mh_pool, &pool_index))
+ mlx5_core_warn(dev, "Attempted to release modify header which is not acquired\n");
+}
+
+bool mlx5_fs_hws_mh_pool_match(struct mlx5_fs_pool *mh_pool,
+ struct mlx5hws_action_mh_pattern *pattern)
+{
+ struct mlx5hws_action_mh_pattern *pool_pattern;
+ int num_actions, i;
+
+ pool_pattern = mh_pool->pool_ctx;
+ if (WARN_ON_ONCE(!pool_pattern))
+ return false;
+
+ if (pattern->sz != pool_pattern->sz)
+ return false;
+ num_actions = pattern->sz / MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto);
+ for (i = 0; i < num_actions; i++) {
+ if ((__force __be32)pattern->data[i] !=
+ (__force __be32)pool_pattern->data[i])
+ return false;
+ }
+ return true;
+}
+
+struct mlx5hws_action *mlx5_fc_get_hws_action(struct mlx5hws_context *ctx,
+ struct mlx5_fc *counter)
+{
+ struct mlx5_fs_hws_create_action_ctx create_ctx;
+ struct mlx5_fc_bulk *fc_bulk = counter->bulk;
+
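+	/* The counter HWS action is shared by the whole counter bulk and
+	 * refcounted through the bulk's fs_hws_data.
+	 */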
+ create_ctx.hws_ctx = ctx;
+ create_ctx.id = fc_bulk->base_id;
+ create_ctx.actions_type = MLX5HWS_ACTION_TYP_CTR;
+
+ return mlx5_fs_get_hws_action(&fc_bulk->hws_data, &create_ctx);
+}
+
+void mlx5_fc_put_hws_action(struct mlx5_fc *counter)
+{
+ mlx5_fs_put_hws_action(&counter->bulk->hws_data);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.h
new file mode 100644
index 000000000000..34072551dd21
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2025 NVIDIA Corporation & Affiliates */
+
+#ifndef __MLX5_FS_HWS_POOLS_H__
+#define __MLX5_FS_HWS_POOLS_H__
+
+#include <linux/if_vlan.h>
+#include "fs_pool.h"
+#include "fs_core.h"
+
+#define MLX5_FS_INSERT_HDR_VLAN_ANCHOR MLX5_REFORMAT_CONTEXT_ANCHOR_MAC_START
+#define MLX5_FS_INSERT_HDR_VLAN_OFFSET offsetof(struct vlan_ethhdr, h_vlan_proto)
+#define MLX5_FS_INSERT_HDR_VLAN_SIZE sizeof(struct vlan_hdr)
+
+enum {
+ MLX5_FS_DL3TNLTOL2_MAC_HDR_IDX = 0,
+ MLX5_FS_DL3TNLTOL2_MAC_VLAN_HDR_IDX,
+};
+
+struct mlx5_fs_hws_pr {
+ struct mlx5_fs_hws_pr_bulk *bulk;
+ u32 offset;
+ u8 hdr_idx;
+ u8 *data;
+ size_t data_size;
+};
+
+struct mlx5_fs_hws_pr_bulk {
+ struct mlx5_fs_bulk fs_bulk;
+ struct mlx5hws_action *hws_action;
+ struct mlx5_fs_hws_pr prs_data[];
+};
+
+struct mlx5_fs_hws_pr_pool_ctx {
+ enum mlx5hws_action_type reformat_type;
+ size_t encap_data_size;
+};
+
+struct mlx5_fs_hws_mh {
+ struct mlx5_fs_hws_mh_bulk *bulk;
+ u32 offset;
+ u8 *data;
+};
+
+struct mlx5_fs_hws_mh_bulk {
+ struct mlx5_fs_bulk fs_bulk;
+ struct mlx5_fs_pool *mh_pool;
+ struct mlx5hws_action *hws_action;
+ struct mlx5_fs_hws_mh mhs_data[];
+};
+
+int mlx5_fs_hws_pr_pool_init(struct mlx5_fs_pool *pr_pool,
+ struct mlx5_core_dev *dev, size_t encap_data_size,
+ enum mlx5hws_action_type reformat_type);
+void mlx5_fs_hws_pr_pool_cleanup(struct mlx5_fs_pool *pr_pool);
+
+struct mlx5_fs_hws_pr *mlx5_fs_hws_pr_pool_acquire_pr(struct mlx5_fs_pool *pr_pool);
+void mlx5_fs_hws_pr_pool_release_pr(struct mlx5_fs_pool *pr_pool,
+ struct mlx5_fs_hws_pr *pr_data);
+struct mlx5hws_action *mlx5_fs_hws_pr_get_action(struct mlx5_fs_hws_pr *pr_data);
+int mlx5_fs_hws_mh_pool_init(struct mlx5_fs_pool *fs_hws_mh_pool,
+ struct mlx5_core_dev *dev,
+ struct mlx5hws_action_mh_pattern *pattern);
+void mlx5_fs_hws_mh_pool_cleanup(struct mlx5_fs_pool *fs_hws_mh_pool);
+struct mlx5_fs_hws_mh *mlx5_fs_hws_mh_pool_acquire_mh(struct mlx5_fs_pool *mh_pool);
+void mlx5_fs_hws_mh_pool_release_mh(struct mlx5_fs_pool *mh_pool,
+ struct mlx5_fs_hws_mh *mh_data);
+bool mlx5_fs_hws_mh_pool_match(struct mlx5_fs_pool *mh_pool,
+ struct mlx5hws_action_mh_pattern *pattern);
+struct mlx5hws_action *mlx5_fc_get_hws_action(struct mlx5hws_context *ctx,
+ struct mlx5_fc *counter);
+void mlx5_fc_put_hws_action(struct mlx5_fc *counter);
+#endif /* __MLX5_FS_HWS_POOLS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/internal.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/internal.h
index 3c8635f286ce..21279d503117 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/internal.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/internal.h
@@ -17,6 +17,7 @@
#include "context.h"
#include "table.h"
#include "send.h"
+#include "action_ste_pool.h"
#include "rule.h"
#include "cmd.h"
#include "action.h"
@@ -39,7 +40,6 @@
#define mlx5hws_dbg(ctx, arg...) mlx5_core_dbg((ctx)->mdev, ##arg)
#define MLX5HWS_TABLE_TYPE_BASE 2
-#define MLX5HWS_ACTION_STE_IDX_ANY 0
static inline bool is_mem_zero(const u8 *mem, size_t size)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c
index 1bb3a6f8c3cd..ce28ee1c0e41 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c
@@ -3,25 +3,6 @@
#include "internal.h"
-enum mlx5hws_matcher_rtc_type {
- HWS_MATCHER_RTC_TYPE_MATCH,
- HWS_MATCHER_RTC_TYPE_STE_ARRAY,
- HWS_MATCHER_RTC_TYPE_MAX,
-};
-
-static const char * const mlx5hws_matcher_rtc_type_str[] = {
- [HWS_MATCHER_RTC_TYPE_MATCH] = "MATCH",
- [HWS_MATCHER_RTC_TYPE_STE_ARRAY] = "STE_ARRAY",
- [HWS_MATCHER_RTC_TYPE_MAX] = "UNKNOWN",
-};
-
-static const char *hws_matcher_rtc_type_to_str(enum mlx5hws_matcher_rtc_type rtc_type)
-{
- if (rtc_type > HWS_MATCHER_RTC_TYPE_MAX)
- rtc_type = HWS_MATCHER_RTC_TYPE_MAX;
- return mlx5hws_matcher_rtc_type_str[rtc_type];
-}
-
static bool hws_matcher_requires_col_tbl(u8 log_num_of_rules)
{
/* Collision table concatenation is done only for large rule tables */
@@ -42,19 +23,199 @@ static void hws_matcher_destroy_end_ft(struct mlx5hws_matcher *matcher)
mlx5hws_table_destroy_default_ft(matcher->tbl, matcher->end_ft_id);
}
+int mlx5hws_matcher_update_end_ft_isolated(struct mlx5hws_table *tbl,
+ u32 miss_ft_id)
+{
+ struct mlx5hws_matcher *tmp_matcher;
+
+ if (list_empty(&tbl->matchers_list))
+ return -EINVAL;
+
+ /* Update isolated_matcher_end_ft_id attribute for all
+ * the matchers in isolated table.
+ */
+ list_for_each_entry(tmp_matcher, &tbl->matchers_list, list_node)
+ tmp_matcher->attr.isolated_matcher_end_ft_id = miss_ft_id;
+
+ tmp_matcher = list_last_entry(&tbl->matchers_list,
+ struct mlx5hws_matcher,
+ list_node);
+
+ return mlx5hws_table_ft_set_next_ft(tbl->ctx,
+ tmp_matcher->end_ft_id,
+ tbl->fw_ft_type,
+ miss_ft_id);
+}
+
+static int hws_matcher_connect_end_ft_isolated(struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_table *tbl = matcher->tbl;
+ u32 end_ft_id;
+ int ret;
+
+ /* Reset end_ft next RTCs */
+ ret = mlx5hws_table_ft_set_next_rtc(tbl->ctx,
+ matcher->end_ft_id,
+ matcher->tbl->fw_ft_type,
+ 0, 0);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Isolated matcher: failed to reset FT's next RTCs\n");
+ return ret;
+ }
+
+ /* Connect isolated matcher's end_ft to the complex matcher's end FT */
+ end_ft_id = matcher->attr.isolated_matcher_end_ft_id;
+ ret = mlx5hws_table_ft_set_next_ft(tbl->ctx,
+ matcher->end_ft_id,
+ matcher->tbl->fw_ft_type,
+ end_ft_id);
+
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Isolated matcher: failed to set FT's miss_ft_id\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hws_matcher_create_end_ft_isolated(struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_table *tbl = matcher->tbl;
+ int ret;
+
+ ret = mlx5hws_table_create_default_ft(tbl->ctx->mdev,
+ tbl,
+ &matcher->end_ft_id);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Isolated matcher: failed to create end flow table\n");
+ return ret;
+ }
+
+ ret = hws_matcher_connect_end_ft_isolated(matcher);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Isolated matcher: failed to connect end FT\n");
+ goto destroy_default_ft;
+ }
+
+ return 0;
+
+destroy_default_ft:
+ mlx5hws_table_destroy_default_ft(tbl, matcher->end_ft_id);
+ return ret;
+}
+
static int hws_matcher_create_end_ft(struct mlx5hws_matcher *matcher)
{
struct mlx5hws_table *tbl = matcher->tbl;
int ret;
- ret = mlx5hws_table_create_default_ft(tbl->ctx->mdev, tbl, &matcher->end_ft_id);
+ if (mlx5hws_matcher_is_isolated(matcher))
+ ret = hws_matcher_create_end_ft_isolated(matcher);
+ else
+ ret = mlx5hws_table_create_default_ft(tbl->ctx->mdev, tbl,
+ &matcher->end_ft_id);
+
if (ret) {
mlx5hws_err(tbl->ctx, "Failed to create matcher end flow table\n");
return ret;
}
+
+ return 0;
+}
+
+static int hws_matcher_connect_isolated_first(struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_table *tbl = matcher->tbl;
+ struct mlx5hws_context *ctx = tbl->ctx;
+ int ret;
+
+ /* Isolated matcher's end_ft is already pointing to the end_ft
+ * of the complex matcher - it was set at creation of end_ft,
+ * so no need to connect it.
+ * We still need to connect the isolated table's start FT to
+ * this matcher's RTC.
+ */
+ ret = mlx5hws_table_ft_set_next_rtc(ctx,
+ tbl->ft_id,
+ tbl->fw_ft_type,
+ matcher->match_ste.rtc_0_id,
+ matcher->match_ste.rtc_1_id);
+ if (ret) {
+ mlx5hws_err(ctx, "Isolated matcher: failed to connect start FT to match RTC\n");
+ return ret;
+ }
+
+ /* Reset table's FT default miss (drop refcount) */
+ ret = mlx5hws_table_ft_set_default_next_ft(tbl, tbl->ft_id);
+ if (ret) {
+ mlx5hws_err(ctx, "Isolated matcher: failed to reset table ft default miss\n");
+ return ret;
+ }
+
+ list_add(&matcher->list_node, &tbl->matchers_list);
+
+ return ret;
+}
+
+static int hws_matcher_connect_isolated_last(struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_table *tbl = matcher->tbl;
+ struct mlx5hws_context *ctx = tbl->ctx;
+ struct mlx5hws_matcher *last;
+ int ret;
+
+ last = list_last_entry(&tbl->matchers_list,
+ struct mlx5hws_matcher,
+ list_node);
+
+ /* New matcher's end_ft is already pointing to the end_ft of
+ * the complex matcher.
+ * Connect previous matcher's end_ft to this new matcher RTC.
+ */
+ ret = mlx5hws_table_ft_set_next_rtc(ctx,
+ last->end_ft_id,
+ tbl->fw_ft_type,
+ matcher->match_ste.rtc_0_id,
+ matcher->match_ste.rtc_1_id);
+ if (ret) {
+ mlx5hws_err(ctx,
+ "Isolated matcher: failed to connect matcher end_ft to new match RTC\n");
+ return ret;
+ }
+
+ /* Reset prev matcher FT default miss (drop refcount) */
+ ret = mlx5hws_table_ft_set_default_next_ft(tbl, last->end_ft_id);
+ if (ret) {
+ mlx5hws_err(ctx, "Isolated matcher: failed to reset matcher ft default miss\n");
+ return ret;
+ }
+
+ /* Insert after the last matcher */
+ list_add(&matcher->list_node, &last->list_node);
+
return 0;
}
+static int hws_matcher_connect_isolated(struct mlx5hws_matcher *matcher)
+{
+ /* Isolated matcher is expected to be the only one in its table.
+	 * However, it can have a collision matcher, and it can go through
+	 * the rehash process, in which case we will temporarily have both
+	 * the old and the new matchers in the isolated table.
+ * Check if this is the first matcher in the isolated table.
+ */
+ if (list_empty(&matcher->tbl->matchers_list))
+ return hws_matcher_connect_isolated_first(matcher);
+
+ /* If this wasn't the first matcher, then we have 3 possible cases:
+ * - this is a collision matcher for the first matcher
+ * - this is a new rehash dest matcher
+ * - this is a collision matcher for the new rehash dest matcher
+ * The logic to add new matcher is the same for all these cases.
+ */
+ return hws_matcher_connect_isolated_last(matcher);
+}
+
static int hws_matcher_connect(struct mlx5hws_matcher *matcher)
{
struct mlx5hws_table *tbl = matcher->tbl;
@@ -64,6 +225,9 @@ static int hws_matcher_connect(struct mlx5hws_matcher *matcher)
struct mlx5hws_matcher *tmp_matcher;
int ret;
+ if (mlx5hws_matcher_is_isolated(matcher))
+ return hws_matcher_connect_isolated(matcher);
+
/* Find location in matcher list */
if (list_empty(&tbl->matchers_list)) {
list_add(&matcher->list_node, &tbl->matchers_list);
@@ -140,6 +304,92 @@ remove_from_list:
return ret;
}
+static int hws_matcher_disconnect_isolated(struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_matcher *first, *last, *prev, *next;
+ struct mlx5hws_table *tbl = matcher->tbl;
+ struct mlx5hws_context *ctx = tbl->ctx;
+ u32 end_ft_id;
+ int ret;
+
+ first = list_first_entry(&tbl->matchers_list,
+ struct mlx5hws_matcher,
+ list_node);
+ last = list_last_entry(&tbl->matchers_list,
+ struct mlx5hws_matcher,
+ list_node);
+ prev = list_prev_entry(matcher, list_node);
+ next = list_next_entry(matcher, list_node);
+
+ list_del_init(&matcher->list_node);
+
+ if (first == last) {
+ /* This was the only matcher in the list.
+ * Reset isolated table FT next RTCs and connect it
+ * to the whole complex matcher end FT instead.
+ */
+ ret = mlx5hws_table_ft_set_next_rtc(ctx,
+ tbl->ft_id,
+ tbl->fw_ft_type,
+ 0, 0);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Isolated matcher: failed to reset FT's next RTCs\n");
+ return ret;
+ }
+
+ end_ft_id = matcher->attr.isolated_matcher_end_ft_id;
+ ret = mlx5hws_table_ft_set_next_ft(tbl->ctx,
+ tbl->ft_id,
+ tbl->fw_ft_type,
+ end_ft_id);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Isolated matcher: failed to set FT's miss_ft_id\n");
+ return ret;
+ }
+
+ return 0;
+ }
+
+ /* At this point we know that there are more matchers in the list */
+
+ if (matcher == first) {
+ /* We've disconnected the first matcher.
+ * Now update isolated table default FT.
+ */
+ if (!next)
+ return -EINVAL;
+ return mlx5hws_table_ft_set_next_rtc(ctx,
+ tbl->ft_id,
+ tbl->fw_ft_type,
+ next->match_ste.rtc_0_id,
+ next->match_ste.rtc_1_id);
+ }
+
+ if (matcher == last) {
+ /* If we've disconnected the last matcher - update prev
+ * matcher's end_ft to point to the complex matcher end_ft.
+ */
+ if (!prev)
+ return -EINVAL;
+ return hws_matcher_connect_end_ft_isolated(prev);
+ }
+
+ /* This wasn't the first or the last matcher, which means that it has
+ * both prev and next matchers. Note that this only happens if we're
+	 * disconnecting the collision matcher of the old matcher during rehash.
+ */
+ if (!prev || !next ||
+ !(matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION))
+ return -EINVAL;
+
+ /* Update prev end FT to point to next match RTC */
+ return mlx5hws_table_ft_set_next_rtc(ctx,
+ prev->end_ft_id,
+ tbl->fw_ft_type,
+ next->match_ste.rtc_0_id,
+ next->match_ste.rtc_1_id);
+}
+
static int hws_matcher_disconnect(struct mlx5hws_matcher *matcher)
{
struct mlx5hws_matcher *next = NULL, *prev = NULL;
@@ -147,6 +397,9 @@ static int hws_matcher_disconnect(struct mlx5hws_matcher *matcher)
u32 prev_ft_id = tbl->ft_id;
int ret;
+ if (mlx5hws_matcher_is_isolated(matcher))
+ return hws_matcher_disconnect_isolated(matcher);
+
if (!list_is_first(&matcher->list_node, &tbl->matchers_list)) {
prev = list_prev_entry(matcher, list_node);
prev_ft_id = prev->end_ft_id;
@@ -165,14 +418,14 @@ static int hws_matcher_disconnect(struct mlx5hws_matcher *matcher)
next->match_ste.rtc_0_id,
next->match_ste.rtc_1_id);
if (ret) {
- mlx5hws_err(tbl->ctx, "Failed to disconnect matcher\n");
- goto matcher_reconnect;
+ mlx5hws_err(tbl->ctx, "Fatal error, failed to disconnect matcher\n");
+ return ret;
}
} else {
ret = mlx5hws_table_connect_to_miss_table(tbl, tbl->default_miss.miss_tbl);
if (ret) {
- mlx5hws_err(tbl->ctx, "Failed to disconnect last matcher\n");
- goto matcher_reconnect;
+ mlx5hws_err(tbl->ctx, "Fatal error, failed to disconnect last matcher\n");
+ return ret;
}
}
@@ -180,169 +433,113 @@ static int hws_matcher_disconnect(struct mlx5hws_matcher *matcher)
if (prev_ft_id == tbl->ft_id) {
ret = mlx5hws_table_update_connected_miss_tables(tbl);
if (ret) {
- mlx5hws_err(tbl->ctx, "Fatal error, failed to update connected miss table\n");
- goto matcher_reconnect;
+ mlx5hws_err(tbl->ctx,
+ "Fatal error, failed to update connected miss table\n");
+ return ret;
}
}
ret = mlx5hws_table_ft_set_default_next_ft(tbl, prev_ft_id);
if (ret) {
mlx5hws_err(tbl->ctx, "Fatal error, failed to restore matcher ft default miss\n");
- goto matcher_reconnect;
+ return ret;
}
return 0;
-
-matcher_reconnect:
- if (list_empty(&tbl->matchers_list) || !prev)
- list_add(&matcher->list_node, &tbl->matchers_list);
- else
- /* insert after prev matcher */
- list_add(&matcher->list_node, &prev->list_node);
-
- return ret;
}
static void hws_matcher_set_rtc_attr_sz(struct mlx5hws_matcher *matcher,
struct mlx5hws_cmd_rtc_create_attr *rtc_attr,
- enum mlx5hws_matcher_rtc_type rtc_type,
bool is_mirror)
{
- struct mlx5hws_pool_chunk *ste = &matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].ste;
enum mlx5hws_matcher_flow_src flow_src = matcher->attr.optimize_flow_src;
- bool is_match_rtc = rtc_type == HWS_MATCHER_RTC_TYPE_MATCH;
if ((flow_src == MLX5HWS_MATCHER_FLOW_SRC_VPORT && !is_mirror) ||
(flow_src == MLX5HWS_MATCHER_FLOW_SRC_WIRE && is_mirror)) {
/* Optimize FDB RTC */
rtc_attr->log_size = 0;
rtc_attr->log_depth = 0;
- } else {
- /* Keep original values */
- rtc_attr->log_size = is_match_rtc ? matcher->attr.table.sz_row_log : ste->order;
- rtc_attr->log_depth = is_match_rtc ? matcher->attr.table.sz_col_log : 0;
}
}
-static int hws_matcher_create_rtc(struct mlx5hws_matcher *matcher,
- enum mlx5hws_matcher_rtc_type rtc_type,
- u8 action_ste_selector)
+static int hws_matcher_create_rtc(struct mlx5hws_matcher *matcher)
{
struct mlx5hws_matcher_attr *attr = &matcher->attr;
struct mlx5hws_cmd_rtc_create_attr rtc_attr = {0};
struct mlx5hws_match_template *mt = matcher->mt;
struct mlx5hws_context *ctx = matcher->tbl->ctx;
- struct mlx5hws_action_default_stc *default_stc;
- struct mlx5hws_matcher_action_ste *action_ste;
struct mlx5hws_table *tbl = matcher->tbl;
- struct mlx5hws_pool *ste_pool, *stc_pool;
- struct mlx5hws_pool_chunk *ste;
- u32 *rtc_0_id, *rtc_1_id;
u32 obj_id;
int ret;
- switch (rtc_type) {
- case HWS_MATCHER_RTC_TYPE_MATCH:
- rtc_0_id = &matcher->match_ste.rtc_0_id;
- rtc_1_id = &matcher->match_ste.rtc_1_id;
- ste_pool = matcher->match_ste.pool;
- ste = &matcher->match_ste.ste;
- ste->order = attr->table.sz_col_log + attr->table.sz_row_log;
-
- rtc_attr.log_size = attr->table.sz_row_log;
- rtc_attr.log_depth = attr->table.sz_col_log;
- rtc_attr.is_frst_jumbo = mlx5hws_matcher_mt_is_jumbo(mt);
- rtc_attr.is_scnd_range = 0;
- rtc_attr.miss_ft_id = matcher->end_ft_id;
-
- if (attr->insert_mode == MLX5HWS_MATCHER_INSERT_BY_HASH) {
- /* The usual Hash Table */
- rtc_attr.update_index_mode = MLX5_IFC_RTC_STE_UPDATE_MODE_BY_HASH;
-
- /* The first mt is used since all share the same definer */
- rtc_attr.match_definer_0 = mlx5hws_definer_get_id(mt->definer);
- } else if (attr->insert_mode == MLX5HWS_MATCHER_INSERT_BY_INDEX) {
- rtc_attr.update_index_mode = MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET;
- rtc_attr.num_hash_definer = 1;
-
- if (attr->distribute_mode == MLX5HWS_MATCHER_DISTRIBUTE_BY_HASH) {
- /* Hash Split Table */
- rtc_attr.access_index_mode = MLX5_IFC_RTC_STE_ACCESS_MODE_BY_HASH;
- rtc_attr.match_definer_0 = mlx5hws_definer_get_id(mt->definer);
- } else if (attr->distribute_mode == MLX5HWS_MATCHER_DISTRIBUTE_BY_LINEAR) {
- /* Linear Lookup Table */
- rtc_attr.access_index_mode = MLX5_IFC_RTC_STE_ACCESS_MODE_LINEAR;
- rtc_attr.match_definer_0 = ctx->caps->linear_match_definer;
- }
- }
-
- /* Match pool requires implicit allocation */
- ret = mlx5hws_pool_chunk_alloc(ste_pool, ste);
- if (ret) {
- mlx5hws_err(ctx, "Failed to allocate STE for %s RTC",
- hws_matcher_rtc_type_to_str(rtc_type));
- return ret;
+ rtc_attr.log_size = attr->table.sz_row_log;
+ rtc_attr.log_depth = attr->table.sz_col_log;
+ rtc_attr.is_frst_jumbo = mlx5hws_matcher_mt_is_jumbo(mt);
+ rtc_attr.is_scnd_range = 0;
+ rtc_attr.miss_ft_id = matcher->end_ft_id;
+
+ if (attr->insert_mode == MLX5HWS_MATCHER_INSERT_BY_HASH) {
+ /* The usual Hash Table */
+ rtc_attr.update_index_mode =
+ MLX5_IFC_RTC_STE_UPDATE_MODE_BY_HASH;
+
+ /* The first mt is used since all share the same definer */
+ rtc_attr.match_definer_0 = mlx5hws_definer_get_id(mt->definer);
+ } else if (attr->insert_mode == MLX5HWS_MATCHER_INSERT_BY_INDEX) {
+ rtc_attr.update_index_mode =
+ MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET;
+ rtc_attr.num_hash_definer = 1;
+
+ if (attr->distribute_mode ==
+ MLX5HWS_MATCHER_DISTRIBUTE_BY_HASH) {
+ /* Hash Split Table */
+ rtc_attr.access_index_mode =
+ MLX5_IFC_RTC_STE_ACCESS_MODE_BY_HASH;
+ rtc_attr.match_definer_0 =
+ mlx5hws_definer_get_id(mt->definer);
+ } else if (attr->distribute_mode ==
+ MLX5HWS_MATCHER_DISTRIBUTE_BY_LINEAR) {
+ /* Linear Lookup Table */
+ rtc_attr.access_index_mode =
+ MLX5_IFC_RTC_STE_ACCESS_MODE_LINEAR;
+ rtc_attr.match_definer_0 =
+ ctx->caps->linear_match_definer;
}
- break;
-
- case HWS_MATCHER_RTC_TYPE_STE_ARRAY:
- action_ste = &matcher->action_ste[action_ste_selector];
-
- rtc_0_id = &action_ste->rtc_0_id;
- rtc_1_id = &action_ste->rtc_1_id;
- ste_pool = action_ste->pool;
- ste = &action_ste->ste;
- ste->order = ilog2(roundup_pow_of_two(action_ste->max_stes)) +
- attr->table.sz_row_log;
- rtc_attr.log_size = ste->order;
- rtc_attr.log_depth = 0;
- rtc_attr.update_index_mode = MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET;
- /* The action STEs use the default always hit definer */
- rtc_attr.match_definer_0 = ctx->caps->trivial_match_definer;
- rtc_attr.is_frst_jumbo = false;
- rtc_attr.miss_ft_id = 0;
- break;
-
- default:
- mlx5hws_err(ctx, "HWS Invalid RTC type\n");
- return -EINVAL;
}
- obj_id = mlx5hws_pool_chunk_get_base_id(ste_pool, ste);
+ obj_id = mlx5hws_pool_get_base_id(matcher->match_ste.pool);
rtc_attr.pd = ctx->pd_num;
rtc_attr.ste_base = obj_id;
- rtc_attr.ste_offset = ste->offset;
rtc_attr.reparse_mode = mlx5hws_context_get_reparse_mode(ctx);
rtc_attr.table_type = mlx5hws_table_get_res_fw_ft_type(tbl->type, false);
- hws_matcher_set_rtc_attr_sz(matcher, &rtc_attr, rtc_type, false);
+ hws_matcher_set_rtc_attr_sz(matcher, &rtc_attr, false);
/* STC is a single resource (obj_id), use any STC for the ID */
- stc_pool = ctx->stc_pool[tbl->type];
- default_stc = ctx->common_res[tbl->type].default_stc;
- obj_id = mlx5hws_pool_chunk_get_base_id(stc_pool, &default_stc->default_hit);
+ obj_id = mlx5hws_pool_get_base_id(ctx->stc_pool);
rtc_attr.stc_base = obj_id;
- ret = mlx5hws_cmd_rtc_create(ctx->mdev, &rtc_attr, rtc_0_id);
+ ret = mlx5hws_cmd_rtc_create(ctx->mdev, &rtc_attr,
+ &matcher->match_ste.rtc_0_id);
if (ret) {
- mlx5hws_err(ctx, "Failed to create matcher RTC of type %s",
- hws_matcher_rtc_type_to_str(rtc_type));
- goto free_ste;
+ mlx5hws_err(ctx, "Failed to create matcher RTC\n");
+ return ret;
}
if (tbl->type == MLX5HWS_TABLE_TYPE_FDB) {
- obj_id = mlx5hws_pool_chunk_get_base_mirror_id(ste_pool, ste);
+ obj_id = mlx5hws_pool_get_base_mirror_id(
+ matcher->match_ste.pool);
rtc_attr.ste_base = obj_id;
rtc_attr.table_type = mlx5hws_table_get_res_fw_ft_type(tbl->type, true);
- obj_id = mlx5hws_pool_chunk_get_base_mirror_id(stc_pool, &default_stc->default_hit);
+ obj_id = mlx5hws_pool_get_base_mirror_id(ctx->stc_pool);
rtc_attr.stc_base = obj_id;
- hws_matcher_set_rtc_attr_sz(matcher, &rtc_attr, rtc_type, true);
+ hws_matcher_set_rtc_attr_sz(matcher, &rtc_attr, true);
- ret = mlx5hws_cmd_rtc_create(ctx->mdev, &rtc_attr, rtc_1_id);
+ ret = mlx5hws_cmd_rtc_create(ctx->mdev, &rtc_attr,
+ &matcher->match_ste.rtc_1_id);
if (ret) {
- mlx5hws_err(ctx, "Failed to create peer matcher RTC of type %s",
- hws_matcher_rtc_type_to_str(rtc_type));
+ mlx5hws_err(ctx, "Failed to create mirror matcher RTC\n");
goto destroy_rtc_0;
}
}
@@ -350,47 +547,18 @@ static int hws_matcher_create_rtc(struct mlx5hws_matcher *matcher,
return 0;
destroy_rtc_0:
- mlx5hws_cmd_rtc_destroy(ctx->mdev, *rtc_0_id);
-free_ste:
- if (rtc_type == HWS_MATCHER_RTC_TYPE_MATCH)
- mlx5hws_pool_chunk_free(ste_pool, ste);
+ mlx5hws_cmd_rtc_destroy(ctx->mdev, matcher->match_ste.rtc_0_id);
return ret;
}
-static void hws_matcher_destroy_rtc(struct mlx5hws_matcher *matcher,
- enum mlx5hws_matcher_rtc_type rtc_type,
- u8 action_ste_selector)
+static void hws_matcher_destroy_rtc(struct mlx5hws_matcher *matcher)
{
- struct mlx5hws_matcher_action_ste *action_ste;
- struct mlx5hws_table *tbl = matcher->tbl;
- struct mlx5hws_pool_chunk *ste;
- struct mlx5hws_pool *ste_pool;
- u32 rtc_0_id, rtc_1_id;
-
- switch (rtc_type) {
- case HWS_MATCHER_RTC_TYPE_MATCH:
- rtc_0_id = matcher->match_ste.rtc_0_id;
- rtc_1_id = matcher->match_ste.rtc_1_id;
- ste_pool = matcher->match_ste.pool;
- ste = &matcher->match_ste.ste;
- break;
- case HWS_MATCHER_RTC_TYPE_STE_ARRAY:
- action_ste = &matcher->action_ste[action_ste_selector];
- rtc_0_id = action_ste->rtc_0_id;
- rtc_1_id = action_ste->rtc_1_id;
- ste_pool = action_ste->pool;
- ste = &action_ste->ste;
- break;
- default:
- return;
- }
+ struct mlx5_core_dev *mdev = matcher->tbl->ctx->mdev;
- if (tbl->type == MLX5HWS_TABLE_TYPE_FDB)
- mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev, rtc_1_id);
+ if (matcher->tbl->type == MLX5HWS_TABLE_TYPE_FDB)
+ mlx5hws_cmd_rtc_destroy(mdev, matcher->match_ste.rtc_1_id);
- mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev, rtc_0_id);
- if (rtc_type == HWS_MATCHER_RTC_TYPE_MATCH)
- mlx5hws_pool_chunk_free(ste_pool, ste);
+ mlx5hws_cmd_rtc_destroy(mdev, matcher->match_ste.rtc_0_id);
}
static int
@@ -458,170 +626,17 @@ static int hws_matcher_check_and_process_at(struct mlx5hws_matcher *matcher,
return 0;
}
-static int hws_matcher_resize_init(struct mlx5hws_matcher *src_matcher)
-{
- struct mlx5hws_matcher_resize_data *resize_data;
-
- resize_data = kzalloc(sizeof(*resize_data), GFP_KERNEL);
- if (!resize_data)
- return -ENOMEM;
-
- resize_data->max_stes = src_matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes;
-
- resize_data->action_ste[0].stc = src_matcher->action_ste[0].stc;
- resize_data->action_ste[0].rtc_0_id = src_matcher->action_ste[0].rtc_0_id;
- resize_data->action_ste[0].rtc_1_id = src_matcher->action_ste[0].rtc_1_id;
- resize_data->action_ste[0].pool = src_matcher->action_ste[0].max_stes ?
- src_matcher->action_ste[0].pool :
- NULL;
- resize_data->action_ste[1].stc = src_matcher->action_ste[1].stc;
- resize_data->action_ste[1].rtc_0_id = src_matcher->action_ste[1].rtc_0_id;
- resize_data->action_ste[1].rtc_1_id = src_matcher->action_ste[1].rtc_1_id;
- resize_data->action_ste[1].pool = src_matcher->action_ste[1].max_stes ?
- src_matcher->action_ste[1].pool :
- NULL;
-
- /* Place the new resized matcher on the dst matcher's list */
- list_add(&resize_data->list_node, &src_matcher->resize_dst->resize_data);
-
- /* Move all the previous resized matchers to the dst matcher's list */
- while (!list_empty(&src_matcher->resize_data)) {
- resize_data = list_first_entry(&src_matcher->resize_data,
- struct mlx5hws_matcher_resize_data,
- list_node);
- list_del_init(&resize_data->list_node);
- list_add(&resize_data->list_node, &src_matcher->resize_dst->resize_data);
- }
-
- return 0;
-}
-
-static void hws_matcher_resize_uninit(struct mlx5hws_matcher *matcher)
-{
- struct mlx5hws_matcher_resize_data *resize_data;
-
- if (!mlx5hws_matcher_is_resizable(matcher))
- return;
-
- while (!list_empty(&matcher->resize_data)) {
- resize_data = list_first_entry(&matcher->resize_data,
- struct mlx5hws_matcher_resize_data,
- list_node);
- list_del_init(&resize_data->list_node);
-
- if (resize_data->max_stes) {
- mlx5hws_action_free_single_stc(matcher->tbl->ctx,
- matcher->tbl->type,
- &resize_data->action_ste[1].stc);
- mlx5hws_action_free_single_stc(matcher->tbl->ctx,
- matcher->tbl->type,
- &resize_data->action_ste[0].stc);
-
- if (matcher->tbl->type == MLX5HWS_TABLE_TYPE_FDB) {
- mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev,
- resize_data->action_ste[1].rtc_1_id);
- mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev,
- resize_data->action_ste[0].rtc_1_id);
- }
- mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev,
- resize_data->action_ste[1].rtc_0_id);
- mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev,
- resize_data->action_ste[0].rtc_0_id);
- if (resize_data->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].pool) {
- mlx5hws_pool_destroy(resize_data->action_ste[1].pool);
- mlx5hws_pool_destroy(resize_data->action_ste[0].pool);
- }
- }
-
- kfree(resize_data);
- }
-}
-
-static int
-hws_matcher_bind_at_idx(struct mlx5hws_matcher *matcher, u8 action_ste_selector)
-{
- struct mlx5hws_cmd_stc_modify_attr stc_attr = {0};
- struct mlx5hws_matcher_action_ste *action_ste;
- struct mlx5hws_table *tbl = matcher->tbl;
- struct mlx5hws_pool_attr pool_attr = {0};
- struct mlx5hws_context *ctx = tbl->ctx;
- int ret;
-
- action_ste = &matcher->action_ste[action_ste_selector];
-
- /* Allocate action STE mempool */
- pool_attr.table_type = tbl->type;
- pool_attr.pool_type = MLX5HWS_POOL_TYPE_STE;
- pool_attr.flags = MLX5HWS_POOL_FLAGS_FOR_STE_ACTION_POOL;
- pool_attr.alloc_log_sz = ilog2(roundup_pow_of_two(action_ste->max_stes)) +
- matcher->attr.table.sz_row_log;
- hws_matcher_set_pool_attr(&pool_attr, matcher);
- action_ste->pool = mlx5hws_pool_create(ctx, &pool_attr);
- if (!action_ste->pool) {
- mlx5hws_err(ctx, "Failed to create action ste pool\n");
- return -EINVAL;
- }
-
- /* Allocate action RTC */
- ret = hws_matcher_create_rtc(matcher, HWS_MATCHER_RTC_TYPE_STE_ARRAY, action_ste_selector);
- if (ret) {
- mlx5hws_err(ctx, "Failed to create action RTC\n");
- goto free_ste_pool;
- }
-
- /* Allocate STC for jumps to STE */
- stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_HIT;
- stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE;
- stc_attr.reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
- stc_attr.ste_table.ste = action_ste->ste;
- stc_attr.ste_table.ste_pool = action_ste->pool;
- stc_attr.ste_table.match_definer_id = ctx->caps->trivial_match_definer;
-
- ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, tbl->type,
- &action_ste->stc);
- if (ret) {
- mlx5hws_err(ctx, "Failed to create action jump to table STC\n");
- goto free_rtc;
- }
-
- return 0;
-
-free_rtc:
- hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_STE_ARRAY, action_ste_selector);
-free_ste_pool:
- mlx5hws_pool_destroy(action_ste->pool);
- return ret;
-}
-
-static void hws_matcher_unbind_at_idx(struct mlx5hws_matcher *matcher, u8 action_ste_selector)
-{
- struct mlx5hws_matcher_action_ste *action_ste;
- struct mlx5hws_table *tbl = matcher->tbl;
-
- action_ste = &matcher->action_ste[action_ste_selector];
-
- if (!action_ste->max_stes ||
- matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION ||
- mlx5hws_matcher_is_in_resize(matcher))
- return;
-
- mlx5hws_action_free_single_stc(tbl->ctx, tbl->type, &action_ste->stc);
- hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_STE_ARRAY, action_ste_selector);
- mlx5hws_pool_destroy(action_ste->pool);
-}
-
static int hws_matcher_bind_at(struct mlx5hws_matcher *matcher)
{
bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(matcher->mt);
- struct mlx5hws_table *tbl = matcher->tbl;
- struct mlx5hws_context *ctx = tbl->ctx;
- u32 required_stes;
- u8 max_stes = 0;
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
+ u8 required_stes, max_stes;
int i, ret;
if (matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION)
return 0;
+ max_stes = 0;
for (i = 0; i < matcher->num_of_at; i++) {
struct mlx5hws_action_template *at = &matcher->at[i];
@@ -637,32 +652,33 @@ static int hws_matcher_bind_at(struct mlx5hws_matcher *matcher)
/* Future: Optimize reparse */
}
- /* There are no additional STEs required for matcher */
- if (!max_stes)
- return 0;
-
- matcher->action_ste[0].max_stes = max_stes;
- matcher->action_ste[1].max_stes = max_stes;
-
- ret = hws_matcher_bind_at_idx(matcher, 0);
- if (ret)
- return ret;
-
- ret = hws_matcher_bind_at_idx(matcher, 1);
- if (ret)
- goto free_at_0;
+ matcher->num_of_action_stes = max_stes;
return 0;
-
-free_at_0:
- hws_matcher_unbind_at_idx(matcher, 0);
- return ret;
}
-static void hws_matcher_unbind_at(struct mlx5hws_matcher *matcher)
+static void hws_matcher_set_ip_version_match(struct mlx5hws_matcher *matcher)
{
- hws_matcher_unbind_at_idx(matcher, 1);
- hws_matcher_unbind_at_idx(matcher, 0);
+ int i;
+
+ for (i = 0; i < matcher->mt->fc_sz; i++) {
+ switch (matcher->mt->fc[i].fname) {
+ case MLX5HWS_DEFINER_FNAME_ETH_TYPE_O:
+ matcher->matches_outer_ethertype = 1;
+ break;
+ case MLX5HWS_DEFINER_FNAME_ETH_L3_TYPE_O:
+ matcher->matches_outer_ip_version = 1;
+ break;
+ case MLX5HWS_DEFINER_FNAME_ETH_TYPE_I:
+ matcher->matches_inner_ethertype = 1;
+ break;
+ case MLX5HWS_DEFINER_FNAME_ETH_L3_TYPE_I:
+ matcher->matches_inner_ip_version = 1;
+ break;
+ default:
+ break;
+ }
+ }
}
static int hws_matcher_bind_mt(struct mlx5hws_matcher *matcher)
@@ -681,10 +697,11 @@ static int hws_matcher_bind_mt(struct mlx5hws_matcher *matcher)
}
}
+ hws_matcher_set_ip_version_match(matcher);
+
/* Create an STE pool per matcher*/
pool_attr.table_type = matcher->tbl->type;
pool_attr.pool_type = MLX5HWS_POOL_TYPE_STE;
- pool_attr.flags = MLX5HWS_POOL_FLAGS_FOR_MATCHER_STE_POOL;
pool_attr.alloc_log_sz = matcher->attr.table.sz_col_log +
matcher->attr.table.sz_row_log;
hws_matcher_set_pool_attr(&pool_attr, matcher);
@@ -786,6 +803,8 @@ hws_matcher_process_attr(struct mlx5hws_cmd_query_caps *caps,
attr->table.sz_col_log = hws_matcher_rules_to_tbl_depth(attr->rule.num_log);
matcher->flags |= attr->resizable ? MLX5HWS_MATCHER_FLAGS_RESIZABLE : 0;
+ matcher->flags |= attr->isolated_matcher_end_ft_id ?
+ MLX5HWS_MATCHER_FLAGS_ISOLATED : 0;
return hws_matcher_check_attr_sz(caps, matcher);
}
@@ -807,10 +826,10 @@ static int hws_matcher_create_and_connect(struct mlx5hws_matcher *matcher)
/* Create matcher end flow table anchor */
ret = hws_matcher_create_end_ft(matcher);
if (ret)
- goto unbind_at;
+ goto unbind_mt;
/* Allocate the RTC for the new matcher */
- ret = hws_matcher_create_rtc(matcher, HWS_MATCHER_RTC_TYPE_MATCH, 0);
+ ret = hws_matcher_create_rtc(matcher);
if (ret)
goto destroy_end_ft;
@@ -822,11 +841,9 @@ static int hws_matcher_create_and_connect(struct mlx5hws_matcher *matcher)
return 0;
destroy_rtc:
- hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_MATCH, 0);
+ hws_matcher_destroy_rtc(matcher);
destroy_end_ft:
hws_matcher_destroy_end_ft(matcher);
-unbind_at:
- hws_matcher_unbind_at(matcher);
unbind_mt:
hws_matcher_unbind_mt(matcher);
return ret;
@@ -834,11 +851,9 @@ unbind_mt:
static void hws_matcher_destroy_and_disconnect(struct mlx5hws_matcher *matcher)
{
- hws_matcher_resize_uninit(matcher);
hws_matcher_disconnect(matcher);
- hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_MATCH, 0);
+ hws_matcher_destroy_rtc(matcher);
hws_matcher_destroy_end_ft(matcher);
- hws_matcher_unbind_at(matcher);
hws_matcher_unbind_mt(matcher);
}
@@ -860,8 +875,6 @@ hws_matcher_create_col_matcher(struct mlx5hws_matcher *matcher)
if (!col_matcher)
return -ENOMEM;
- INIT_LIST_HEAD(&col_matcher->resize_data);
-
col_matcher->tbl = matcher->tbl;
col_matcher->mt = matcher->mt;
col_matcher->at = matcher->at;
@@ -878,6 +891,8 @@ hws_matcher_create_col_matcher(struct mlx5hws_matcher *matcher)
col_matcher->attr.table.sz_row_log -= MLX5HWS_MATCHER_ASSURED_ROW_RATIO;
col_matcher->attr.max_num_of_at_attach = matcher->attr.max_num_of_at_attach;
+ col_matcher->attr.isolated_matcher_end_ft_id =
+ matcher->attr.isolated_matcher_end_ft_id;
ret = hws_matcher_process_attr(ctx->caps, col_matcher);
if (ret)
@@ -915,8 +930,6 @@ static int hws_matcher_init(struct mlx5hws_matcher *matcher)
struct mlx5hws_context *ctx = matcher->tbl->ctx;
int ret;
- INIT_LIST_HEAD(&matcher->resize_data);
-
mutex_lock(&ctx->ctrl_lock);
/* Allocate matcher resource and connect to the packet pipe */
@@ -951,18 +964,44 @@ static int hws_matcher_uninit(struct mlx5hws_matcher *matcher)
return 0;
}
+static int hws_matcher_grow_at_array(struct mlx5hws_matcher *matcher)
+{
+ void *p;
+
+ if (matcher->size_of_at_array >= MLX5HWS_MATCHER_MAX_AT)
+ return -ENOMEM;
+
+ matcher->size_of_at_array *= 2;
+ p = krealloc(matcher->at,
+ matcher->size_of_at_array * sizeof(*matcher->at),
+ __GFP_ZERO | GFP_KERNEL);
+ if (!p) {
+ matcher->size_of_at_array /= 2;
+ return -ENOMEM;
+ }
+
+ matcher->at = p;
+
+ return 0;
+}
+
int mlx5hws_matcher_attach_at(struct mlx5hws_matcher *matcher,
struct mlx5hws_action_template *at)
{
bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(matcher->mt);
- struct mlx5hws_context *ctx = matcher->tbl->ctx;
u32 required_stes;
int ret;
- if (!matcher->attr.max_num_of_at_attach) {
- mlx5hws_dbg(ctx, "Num of current at (%d) exceed allowed value\n",
- matcher->num_of_at);
- return -EOPNOTSUPP;
+ if (unlikely(matcher->num_of_at >= matcher->size_of_at_array)) {
+ ret = hws_matcher_grow_at_array(matcher);
+ if (ret)
+ return ret;
+
+ if (matcher->col_matcher) {
+ ret = hws_matcher_grow_at_array(matcher->col_matcher);
+ if (ret)
+ return ret;
+ }
}
ret = hws_matcher_check_and_process_at(matcher, at);
@@ -970,16 +1009,11 @@ int mlx5hws_matcher_attach_at(struct mlx5hws_matcher *matcher,
return ret;
required_stes = at->num_of_action_stes - (!is_jumbo || at->only_term);
- if (matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes < required_stes) {
- mlx5hws_dbg(ctx, "Required STEs [%d] exceeds initial action template STE [%d]\n",
- required_stes,
- matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes);
- return -ENOMEM;
- }
+ if (matcher->num_of_action_stes < required_stes)
+ matcher->num_of_action_stes = required_stes;
matcher->at[matcher->num_of_at] = *at;
matcher->num_of_at += 1;
- matcher->attr.max_num_of_at_attach -= 1;
if (matcher->col_matcher)
matcher->col_matcher->num_of_at = matcher->num_of_at;
@@ -1007,9 +1041,10 @@ hws_matcher_set_templates(struct mlx5hws_matcher *matcher,
if (!matcher->mt)
return -ENOMEM;
- matcher->at = kcalloc(num_of_at + matcher->attr.max_num_of_at_attach,
- sizeof(*matcher->at),
- GFP_KERNEL);
+ matcher->size_of_at_array =
+ num_of_at + matcher->attr.max_num_of_at_attach;
+ matcher->at = kvcalloc(matcher->size_of_at_array, sizeof(*matcher->at),
+ GFP_KERNEL);
if (!matcher->at) {
mlx5hws_err(ctx, "Failed to allocate action template array\n");
ret = -ENOMEM;
@@ -1035,7 +1070,7 @@ free_mt:
static void
hws_matcher_unset_templates(struct mlx5hws_matcher *matcher)
{
- kfree(matcher->at);
+ kvfree(matcher->at);
kfree(matcher->mt);
}
@@ -1157,8 +1192,7 @@ static int hws_matcher_resize_precheck(struct mlx5hws_matcher *src_matcher,
return -EINVAL;
}
- if (src_matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes >
- dst_matcher->action_ste[0].max_stes) {
+ if (src_matcher->num_of_action_stes > dst_matcher->num_of_action_stes) {
mlx5hws_err(ctx, "Src/dst matcher max STEs mismatch\n");
return -EINVAL;
}
@@ -1187,10 +1221,6 @@ int mlx5hws_matcher_resize_set_target(struct mlx5hws_matcher *src_matcher,
src_matcher->resize_dst = dst_matcher;
- ret = hws_matcher_resize_init(src_matcher);
- if (ret)
- src_matcher->resize_dst = NULL;
-
out:
mutex_unlock(&src_matcher->tbl->ctx->ctrl_lock);
return ret;
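For reference, the AT array growth introduced above in hws_matcher_grow_at_array() follows a capped doubling strategy. Below is a stand-alone, user-space sketch of the same idea, under the assumption that MLX5HWS_MATCHER_MAX_AT is the hard cap; the names here are illustrative and not part of the patch.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_AT 128	/* mirrors MLX5HWS_MATCHER_MAX_AT */

struct at_sketch { int placeholder; };	/* stands in for mlx5hws_action_template */

static int grow_at_array(struct at_sketch **at, size_t *size)
{
	size_t new_size = *size * 2;
	struct at_sketch *p;

	if (*size >= MAX_AT)
		return -ENOMEM;

	p = realloc(*at, new_size * sizeof(*p));
	if (!p)
		return -ENOMEM;

	/* Zero only the newly added tail. */
	memset(p + *size, 0, (new_size - *size) * sizeof(*p));
	*at = p;
	*size = new_size;
	return 0;
}

int main(void)
{
	size_t size = 2;
	struct at_sketch *at = calloc(size, sizeof(*at));

	if (at && !grow_at_array(&at, &size))
		printf("grown to %zu entries\n", size);
	free(at);
	return 0;
}

The kernel version relies on krealloc() with __GFP_ZERO instead of the explicit memset(), and halves size_of_at_array back on failure so the matcher state stays consistent.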
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.h
index 81ff487f57be..32e83cddcd60 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.h
@@ -18,6 +18,14 @@
/* Required depth of the main large table */
#define MLX5HWS_MATCHER_ASSURED_MAIN_TBL_DEPTH 2
+/* Action RTC size multiplier that is required in order
+ * to support rule update for rules with action STEs.
+ */
+#define MLX5HWS_MATCHER_ACTION_RTC_UPDATE_MULT 1
+
+/* Maximum number of action templates that can be attached to a matcher. */
+#define MLX5HWS_MATCHER_MAX_AT 128
+
enum mlx5hws_matcher_offset {
MLX5HWS_MATCHER_OFFSET_TAG_DW1 = 12,
MLX5HWS_MATCHER_OFFSET_TAG_DW0 = 13,
@@ -26,6 +34,7 @@ enum mlx5hws_matcher_offset {
enum mlx5hws_matcher_flags {
MLX5HWS_MATCHER_FLAGS_COLLISION = 1 << 2,
MLX5HWS_MATCHER_FLAGS_RESIZABLE = 1 << 3,
+ MLX5HWS_MATCHER_FLAGS_ISOLATED = 1 << 4,
};
struct mlx5hws_match_template {
@@ -37,32 +46,15 @@ struct mlx5hws_match_template {
};
struct mlx5hws_matcher_match_ste {
- struct mlx5hws_pool_chunk ste;
- u32 rtc_0_id;
- u32 rtc_1_id;
- struct mlx5hws_pool *pool;
-};
-
-struct mlx5hws_matcher_action_ste {
- struct mlx5hws_pool_chunk ste;
- struct mlx5hws_pool_chunk stc;
- u32 rtc_0_id;
- u32 rtc_1_id;
- struct mlx5hws_pool *pool;
- u8 max_stes;
-};
-
-struct mlx5hws_matcher_resize_data_node {
- struct mlx5hws_pool_chunk stc;
u32 rtc_0_id;
u32 rtc_1_id;
struct mlx5hws_pool *pool;
};
-struct mlx5hws_matcher_resize_data {
- struct mlx5hws_matcher_resize_data_node action_ste[2];
- u8 max_stes;
- struct list_head list_node;
+enum {
+ MLX5HWS_MATCHER_IPV_UNSET = 0,
+ MLX5HWS_MATCHER_IPV_4 = 1,
+ MLX5HWS_MATCHER_IPV_6 = 2,
};
struct mlx5hws_matcher {
@@ -71,16 +63,22 @@ struct mlx5hws_matcher {
struct mlx5hws_match_template *mt;
struct mlx5hws_action_template *at;
u8 num_of_at;
+ u8 size_of_at_array;
u8 num_of_mt;
+ u8 num_of_action_stes;
/* enum mlx5hws_matcher_flags */
u8 flags;
+ u8 matches_outer_ethertype:1;
+ u8 matches_outer_ip_version:1;
+ u8 matches_inner_ethertype:1;
+ u8 matches_inner_ip_version:1;
+ u8 outer_ip_version:2;
+ u8 inner_ip_version:2;
u32 end_ft_id;
struct mlx5hws_matcher *col_matcher;
struct mlx5hws_matcher *resize_dst;
struct mlx5hws_matcher_match_ste match_ste;
- struct mlx5hws_matcher_action_ste action_ste[2];
struct list_head list_node;
- struct list_head resize_data;
};
static inline bool
@@ -99,9 +97,17 @@ static inline bool mlx5hws_matcher_is_in_resize(struct mlx5hws_matcher *matcher)
return !!matcher->resize_dst;
}
+static inline bool mlx5hws_matcher_is_isolated(struct mlx5hws_matcher *matcher)
+{
+ return !!(matcher->flags & MLX5HWS_MATCHER_FLAGS_ISOLATED);
+}
+
static inline bool mlx5hws_matcher_is_insert_by_idx(struct mlx5hws_matcher *matcher)
{
return matcher->attr.insert_mode == MLX5HWS_MATCHER_INSERT_BY_INDEX;
}
+int mlx5hws_matcher_update_end_ft_isolated(struct mlx5hws_table *tbl,
+ u32 miss_ft_id);
+
#endif /* HWS_MATCHER_H_ */
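The new MLX5HWS_MATCHER_FLAGS_ISOLATED bit is derived from the matcher attribute and queried through mlx5hws_matcher_is_isolated() in the changes above. A minimal stand-alone restatement of that derivation and test, with shortened names and purely illustrative values:

#include <stdbool.h>
#include <stdio.h>

#define FLAG_COLLISION	(1 << 2)
#define FLAG_RESIZABLE	(1 << 3)
#define FLAG_ISOLATED	(1 << 4)

struct matcher_sketch {
	unsigned char flags;
};

static void process_attr(struct matcher_sketch *m, bool resizable,
			 unsigned int isolated_end_ft_id)
{
	m->flags |= resizable ? FLAG_RESIZABLE : 0;
	/* A non-zero end-FT ID is what marks the matcher as isolated. */
	m->flags |= isolated_end_ft_id ? FLAG_ISOLATED : 0;
}

static bool is_isolated(const struct matcher_sketch *m)
{
	return !!(m->flags & FLAG_ISOLATED);
}

int main(void)
{
	struct matcher_sketch m = { 0 };

	process_attr(&m, false, 42);	/* 42 is an arbitrary example FT ID */
	printf("isolated: %s\n", is_isolated(&m) ? "yes" : "no");
	return 0;
}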
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h
index f39d636ff39a..d8ac6c196211 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h
@@ -70,7 +70,6 @@ enum mlx5hws_send_queue_actions {
struct mlx5hws_context_attr {
u16 queues;
u16 queue_size;
- bool bwc; /* add support for backward compatible API*/
};
struct mlx5hws_table_attr {
@@ -120,6 +119,8 @@ struct mlx5hws_matcher_attr {
};
/* Optional AT attach configuration - Max number of additional AT */
u8 max_num_of_at_attach;
+ /* Optional end FT (miss FT ID) for match RTC (for isolated matcher) */
+ u32 isolated_matcher_end_ft_id;
};
struct mlx5hws_rule_attr {
@@ -212,6 +213,7 @@ struct mlx5hws_action_dest_attr {
struct mlx5hws_action *dest;
/* Optional reformat action */
struct mlx5hws_action *reformat;
+ bool is_wire_ft;
};
/**
@@ -503,6 +505,15 @@ enum mlx5hws_action_type
mlx5hws_action_get_type(struct mlx5hws_action *action);
/**
+ * mlx5hws_action_get_dev - Get mlx5 core device.
+ *
+ * @action: The action to get the device from.
+ *
+ * Return: mlx5 core device.
+ */
+struct mlx5_core_dev *mlx5hws_action_get_dev(struct mlx5hws_action *action);
+
+/**
* mlx5hws_action_create_dest_drop - Create a direct rule drop action.
*
* @ctx: The context in which the new action will be created.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c
index 06db5e4726ae..51e4c551e0ef 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c
@@ -153,8 +153,7 @@ mlx5hws_pat_get_existing_cached_pattern(struct mlx5hws_pattern_cache *cache,
cached_pattern = mlx5hws_pat_find_cached_pattern(cache, num_of_actions, actions);
if (cached_pattern) {
/* LRU: move it to be first in the list */
- list_del_init(&cached_pattern->ptrn_list_node);
- list_add(&cached_pattern->ptrn_list_node, &cache->ptrn_list);
+ list_move(&cached_pattern->ptrn_list_node, &cache->ptrn_list);
cached_pattern->refcount++;
}
@@ -344,7 +343,7 @@ void mlx5hws_arg_write(struct mlx5hws_send_engine *queue,
mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_ctrl, &wqe_len);
memset(wqe_ctrl, 0, wqe_len);
mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_arg, &wqe_len);
- memcpy(wqe_arg, arg_data, wqe_len);
+ memcpy(wqe_arg, arg_data, MLX5HWS_ARG_DATA_SIZE);
send_attr.id = arg_idx++;
mlx5hws_send_engine_post_end(&ctrl, &send_attr);
@@ -491,8 +490,8 @@ hws_action_modify_get_target_fields(u8 action_type, __be64 *pattern,
switch (action_type) {
case MLX5_ACTION_TYPE_SET:
case MLX5_ACTION_TYPE_ADD:
- *src_field = MLX5_GET(set_action_in, pattern, field);
- *dst_field = INVALID_FIELD;
+ *src_field = INVALID_FIELD;
+ *dst_field = MLX5_GET(set_action_in, pattern, field);
break;
case MLX5_ACTION_TYPE_COPY:
*src_field = MLX5_GET(copy_action_in, pattern, src_field);
@@ -523,57 +522,59 @@ bool mlx5hws_pat_verify_actions(struct mlx5hws_context *ctx, __be64 pattern[], s
return true;
}
-void mlx5hws_pat_calc_nope(__be64 *pattern, size_t num_actions,
- size_t max_actions, size_t *new_size,
- u32 *nope_location, __be64 *new_pat)
+int mlx5hws_pat_calc_nop(__be64 *pattern, size_t num_actions,
+ size_t max_actions, size_t *new_size,
+ u32 *nop_locations, __be64 *new_pat)
{
- u16 prev_src_field = 0, prev_dst_field = 0;
+ u16 prev_src_field = INVALID_FIELD, prev_dst_field = INVALID_FIELD;
u16 src_field, dst_field;
u8 action_type;
+ bool dependent;
size_t i, j;
*new_size = num_actions;
- *nope_location = 0;
+ *nop_locations = 0;
if (num_actions == 1)
- return;
+ return 0;
for (i = 0, j = 0; i < num_actions; i++, j++) {
- action_type = MLX5_GET(set_action_in, &pattern[i], action_type);
+ if (j >= max_actions)
+ return -EINVAL;
+ action_type = MLX5_GET(set_action_in, &pattern[i], action_type);
hws_action_modify_get_target_fields(action_type, &pattern[i],
&src_field, &dst_field);
- if (i % 2) {
- if (action_type == MLX5_ACTION_TYPE_COPY &&
- (prev_src_field == src_field ||
- prev_dst_field == dst_field)) {
- /* need Nope */
- *new_size += 1;
- *nope_location |= BIT(i);
- memset(&new_pat[j], 0, MLX5HWS_MODIFY_ACTION_SIZE);
- MLX5_SET(set_action_in, &new_pat[j],
- action_type,
- MLX5_MODIFICATION_TYPE_NOP);
- j++;
- } else if (prev_src_field == src_field) {
- /* need Nope*/
- *new_size += 1;
- *nope_location |= BIT(i);
- MLX5_SET(set_action_in, &new_pat[j],
- action_type,
- MLX5_MODIFICATION_TYPE_NOP);
- j++;
- }
- }
- memcpy(&new_pat[j], &pattern[i], MLX5HWS_MODIFY_ACTION_SIZE);
- /* check if no more space */
- if (j > max_actions) {
- *new_size = num_actions;
- *nope_location = 0;
- return;
+
+ /* For every action, look at it and the previous one. The two
+ * actions are dependent if:
+ */
+ dependent =
+ (i > 0) &&
+ /* At least one of the actions is a write and */
+ (dst_field != INVALID_FIELD ||
+ prev_dst_field != INVALID_FIELD) &&
+ /* One reads from the other's source */
+ (dst_field == prev_src_field ||
+ src_field == prev_dst_field ||
+ /* Or both write to the same destination */
+ dst_field == prev_dst_field);
+
+ if (dependent) {
+ *new_size += 1;
+ *nop_locations |= BIT(i);
+ memset(&new_pat[j], 0, MLX5HWS_MODIFY_ACTION_SIZE);
+ MLX5_SET(set_action_in, &new_pat[j], action_type,
+ MLX5_MODIFICATION_TYPE_NOP);
+ j++;
+ if (j >= max_actions)
+ return -EINVAL;
}
+ memcpy(&new_pat[j], &pattern[i], MLX5HWS_MODIFY_ACTION_SIZE);
prev_src_field = src_field;
prev_dst_field = dst_field;
}
+
+ return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.h
index 27ca93385b08..7fbd8dc7aa18 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.h
@@ -31,7 +31,7 @@ struct mlx5hws_pattern_cache_item {
u8 *data;
u16 num_of_actions;
} mh_data;
- u32 refcount;
+ u32 refcount; /* protected by pattern_cache lock */
struct list_head ptrn_list_node;
};
@@ -96,6 +96,7 @@ int mlx5hws_arg_write_inline_arg_data(struct mlx5hws_context *ctx,
u8 *arg_data,
size_t data_size);
-void mlx5hws_pat_calc_nope(__be64 *pattern, size_t num_actions, size_t max_actions,
- size_t *new_size, u32 *nope_location, __be64 *new_pat);
+int mlx5hws_pat_calc_nop(__be64 *pattern, size_t num_actions,
+ size_t max_actions, size_t *new_size,
+ u32 *nop_locations, __be64 *new_pat);
#endif /* MLX5HWS_PAT_ARG_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c
index fed2d913f3b8..7e37d6e9eb83 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c
@@ -20,15 +20,14 @@ static void hws_pool_free_one_resource(struct mlx5hws_pool_resource *resource)
kfree(resource);
}
-static void hws_pool_resource_free(struct mlx5hws_pool *pool,
- int resource_idx)
+static void hws_pool_resource_free(struct mlx5hws_pool *pool)
{
- hws_pool_free_one_resource(pool->resource[resource_idx]);
- pool->resource[resource_idx] = NULL;
+ hws_pool_free_one_resource(pool->resource);
+ pool->resource = NULL;
if (pool->tbl_type == MLX5HWS_TABLE_TYPE_FDB) {
- hws_pool_free_one_resource(pool->mirror_resource[resource_idx]);
- pool->mirror_resource[resource_idx] = NULL;
+ hws_pool_free_one_resource(pool->mirror_resource);
+ pool->mirror_resource = NULL;
}
}
@@ -61,10 +60,8 @@ hws_pool_create_one_resource(struct mlx5hws_pool *pool, u32 log_range,
ret = -EINVAL;
}
- if (ret) {
- mlx5hws_err(pool->ctx, "Failed to allocate resource objects\n");
+ if (ret)
goto free_resource;
- }
resource->pool = pool;
resource->range = 1 << log_range;
@@ -77,199 +74,114 @@ free_resource:
return NULL;
}
-static int
-hws_pool_resource_alloc(struct mlx5hws_pool *pool, u32 log_range, int idx)
+static int hws_pool_resource_alloc(struct mlx5hws_pool *pool)
{
struct mlx5hws_pool_resource *resource;
u32 fw_ft_type, opt_log_range;
fw_ft_type = mlx5hws_table_get_res_fw_ft_type(pool->tbl_type, false);
- opt_log_range = pool->opt_type == MLX5HWS_POOL_OPTIMIZE_ORIG ? 0 : log_range;
+ opt_log_range = pool->opt_type == MLX5HWS_POOL_OPTIMIZE_MIRROR ?
+ 0 : pool->alloc_log_sz;
resource = hws_pool_create_one_resource(pool, opt_log_range, fw_ft_type);
if (!resource) {
- mlx5hws_err(pool->ctx, "Failed allocating resource\n");
+ mlx5hws_err(pool->ctx, "Failed to allocate resource\n");
return -EINVAL;
}
- pool->resource[idx] = resource;
+ pool->resource = resource;
if (pool->tbl_type == MLX5HWS_TABLE_TYPE_FDB) {
struct mlx5hws_pool_resource *mirror_resource;
fw_ft_type = mlx5hws_table_get_res_fw_ft_type(pool->tbl_type, true);
- opt_log_range = pool->opt_type == MLX5HWS_POOL_OPTIMIZE_MIRROR ? 0 : log_range;
+ opt_log_range = pool->opt_type == MLX5HWS_POOL_OPTIMIZE_ORIG ?
+ 0 : pool->alloc_log_sz;
mirror_resource = hws_pool_create_one_resource(pool, opt_log_range, fw_ft_type);
if (!mirror_resource) {
- mlx5hws_err(pool->ctx, "Failed allocating mirrored resource\n");
+ mlx5hws_err(pool->ctx, "Failed to allocate mirrored resource\n");
hws_pool_free_one_resource(resource);
- pool->resource[idx] = NULL;
+ pool->resource = NULL;
return -EINVAL;
}
- pool->mirror_resource[idx] = mirror_resource;
+ pool->mirror_resource = mirror_resource;
}
return 0;
}
-static unsigned long *hws_pool_create_and_init_bitmap(u32 log_range)
-{
- unsigned long *cur_bmp;
-
- cur_bmp = bitmap_zalloc(1 << log_range, GFP_KERNEL);
- if (!cur_bmp)
- return NULL;
-
- bitmap_fill(cur_bmp, 1 << log_range);
-
- return cur_bmp;
-}
-
-static void hws_pool_buddy_db_put_chunk(struct mlx5hws_pool *pool,
- struct mlx5hws_pool_chunk *chunk)
+static int hws_pool_buddy_init(struct mlx5hws_pool *pool)
{
struct mlx5hws_buddy_mem *buddy;
- buddy = pool->db.buddy_manager->buddies[chunk->resource_idx];
+ buddy = mlx5hws_buddy_create(pool->alloc_log_sz);
if (!buddy) {
- mlx5hws_err(pool->ctx, "No such buddy (%d)\n", chunk->resource_idx);
- return;
- }
-
- mlx5hws_buddy_free_mem(buddy, chunk->offset, chunk->order);
-}
-
-static struct mlx5hws_buddy_mem *
-hws_pool_buddy_get_next_buddy(struct mlx5hws_pool *pool, int idx,
- u32 order, bool *is_new_buddy)
-{
- static struct mlx5hws_buddy_mem *buddy;
- u32 new_buddy_size;
-
- buddy = pool->db.buddy_manager->buddies[idx];
- if (buddy)
- return buddy;
-
- new_buddy_size = max(pool->alloc_log_sz, order);
- *is_new_buddy = true;
- buddy = mlx5hws_buddy_create(new_buddy_size);
- if (!buddy) {
- mlx5hws_err(pool->ctx, "Failed to create buddy order: %d index: %d\n",
- new_buddy_size, idx);
- return NULL;
+ mlx5hws_err(pool->ctx, "Failed to create buddy order: %zu\n",
+ pool->alloc_log_sz);
+ return -ENOMEM;
}
- if (hws_pool_resource_alloc(pool, new_buddy_size, idx) != 0) {
- mlx5hws_err(pool->ctx, "Failed to create resource type: %d: size %d index: %d\n",
- pool->type, new_buddy_size, idx);
+ if (hws_pool_resource_alloc(pool) != 0) {
+ mlx5hws_err(pool->ctx, "Failed to create resource type: %d size %zu\n",
+ pool->type, pool->alloc_log_sz);
mlx5hws_buddy_cleanup(buddy);
- return NULL;
+ return -ENOMEM;
}
- pool->db.buddy_manager->buddies[idx] = buddy;
+ pool->db.buddy = buddy;
- return buddy;
+ return 0;
}
-static int hws_pool_buddy_get_mem_chunk(struct mlx5hws_pool *pool,
- int order,
- u32 *buddy_idx,
- int *seg)
+static int hws_pool_buddy_db_get_chunk(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk)
{
- struct mlx5hws_buddy_mem *buddy;
- bool new_mem = false;
- int ret = 0;
- int i;
-
- *seg = -1;
-
- /* Find the next free place from the buddy array */
- while (*seg == -1) {
- for (i = 0; i < MLX5HWS_POOL_RESOURCE_ARR_SZ; i++) {
- buddy = hws_pool_buddy_get_next_buddy(pool, i,
- order,
- &new_mem);
- if (!buddy) {
- ret = -ENOMEM;
- goto out;
- }
-
- *seg = mlx5hws_buddy_alloc_mem(buddy, order);
- if (*seg != -1)
- goto found;
-
- if (pool->flags & MLX5HWS_POOL_FLAGS_ONE_RESOURCE) {
- mlx5hws_err(pool->ctx,
- "Fail to allocate seg for one resource pool\n");
- ret = -ENOMEM;
- goto out;
- }
-
- if (new_mem) {
- /* We have new memory pool, should be place for us */
- mlx5hws_err(pool->ctx,
- "No memory for order: %d with buddy no: %d\n",
- order, i);
- ret = -ENOMEM;
- goto out;
- }
- }
+ struct mlx5hws_buddy_mem *buddy = pool->db.buddy;
+
+ if (!buddy) {
+ mlx5hws_err(pool->ctx, "Bad buddy state\n");
+ return -EINVAL;
}
-found:
- *buddy_idx = i;
-out:
- return ret;
+ chunk->offset = mlx5hws_buddy_alloc_mem(buddy, chunk->order);
+ if (chunk->offset >= 0)
+ return 0;
+
+ return -ENOMEM;
}
-static int hws_pool_buddy_db_get_chunk(struct mlx5hws_pool *pool,
- struct mlx5hws_pool_chunk *chunk)
+static void hws_pool_buddy_db_put_chunk(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk)
{
- int ret = 0;
+ struct mlx5hws_buddy_mem *buddy;
- /* Go over the buddies and find next free slot */
- ret = hws_pool_buddy_get_mem_chunk(pool, chunk->order,
- &chunk->resource_idx,
- &chunk->offset);
- if (ret)
- mlx5hws_err(pool->ctx, "Failed to get free slot for chunk with order: %d\n",
- chunk->order);
+ buddy = pool->db.buddy;
+ if (!buddy) {
+ mlx5hws_err(pool->ctx, "Bad buddy state\n");
+ return;
+ }
- return ret;
+ mlx5hws_buddy_free_mem(buddy, chunk->offset, chunk->order);
}
static void hws_pool_buddy_db_uninit(struct mlx5hws_pool *pool)
{
struct mlx5hws_buddy_mem *buddy;
- int i;
-
- for (i = 0; i < MLX5HWS_POOL_RESOURCE_ARR_SZ; i++) {
- buddy = pool->db.buddy_manager->buddies[i];
- if (buddy) {
- mlx5hws_buddy_cleanup(buddy);
- kfree(buddy);
- pool->db.buddy_manager->buddies[i] = NULL;
- }
- }
- kfree(pool->db.buddy_manager);
+ buddy = pool->db.buddy;
+ if (buddy) {
+ mlx5hws_buddy_cleanup(buddy);
+ kfree(buddy);
+ pool->db.buddy = NULL;
+ }
}
-static int hws_pool_buddy_db_init(struct mlx5hws_pool *pool, u32 log_range)
+static int hws_pool_buddy_db_init(struct mlx5hws_pool *pool)
{
- pool->db.buddy_manager = kzalloc(sizeof(*pool->db.buddy_manager), GFP_KERNEL);
- if (!pool->db.buddy_manager)
- return -ENOMEM;
-
- if (pool->flags & MLX5HWS_POOL_FLAGS_ALLOC_MEM_ON_CREATE) {
- bool new_buddy;
+ int ret;
- if (!hws_pool_buddy_get_next_buddy(pool, 0, log_range, &new_buddy)) {
- mlx5hws_err(pool->ctx,
- "Failed allocating memory on create log_sz: %d\n", log_range);
- kfree(pool->db.buddy_manager);
- return -ENOMEM;
- }
- }
+ ret = hws_pool_buddy_init(pool);
+ if (ret)
+ return ret;
pool->p_db_uninit = &hws_pool_buddy_db_uninit;
pool->p_get_chunk = &hws_pool_buddy_db_get_chunk;
@@ -278,261 +190,105 @@ static int hws_pool_buddy_db_init(struct mlx5hws_pool *pool, u32 log_range)
return 0;
}
-static int hws_pool_create_resource_on_index(struct mlx5hws_pool *pool,
- u32 alloc_size, int idx)
-{
- int ret = hws_pool_resource_alloc(pool, alloc_size, idx);
-
- if (ret) {
- mlx5hws_err(pool->ctx, "Failed to create resource type: %d: size %d index: %d\n",
- pool->type, alloc_size, idx);
- return ret;
- }
-
- return 0;
-}
-
-static struct mlx5hws_pool_elements *
-hws_pool_element_create_new_elem(struct mlx5hws_pool *pool, u32 order, int idx)
+static unsigned long *hws_pool_create_and_init_bitmap(u32 log_range)
{
- struct mlx5hws_pool_elements *elem;
- u32 alloc_size;
-
- alloc_size = pool->alloc_log_sz;
+ unsigned long *bitmap;
- elem = kzalloc(sizeof(*elem), GFP_KERNEL);
- if (!elem)
+ bitmap = bitmap_zalloc(1 << log_range, GFP_KERNEL);
+ if (!bitmap)
return NULL;
- /* Sharing the same resource, also means that all the elements are with size 1 */
- if ((pool->flags & MLX5HWS_POOL_FLAGS_FIXED_SIZE_OBJECTS) &&
- !(pool->flags & MLX5HWS_POOL_FLAGS_RESOURCE_PER_CHUNK)) {
- /* Currently all chunks in size 1 */
- elem->bitmap = hws_pool_create_and_init_bitmap(alloc_size - order);
- if (!elem->bitmap) {
- mlx5hws_err(pool->ctx,
- "Failed to create bitmap type: %d: size %d index: %d\n",
- pool->type, alloc_size, idx);
- goto free_elem;
- }
-
- elem->log_size = alloc_size - order;
- }
-
- if (hws_pool_create_resource_on_index(pool, alloc_size, idx)) {
- mlx5hws_err(pool->ctx, "Failed to create resource type: %d: size %d index: %d\n",
- pool->type, alloc_size, idx);
- goto free_db;
- }
-
- pool->db.element_manager->elements[idx] = elem;
-
- return elem;
+ bitmap_fill(bitmap, 1 << log_range);
-free_db:
- bitmap_free(elem->bitmap);
-free_elem:
- kfree(elem);
- return NULL;
+ return bitmap;
}
-static int hws_pool_element_find_seg(struct mlx5hws_pool_elements *elem, int *seg)
+static int hws_pool_bitmap_init(struct mlx5hws_pool *pool)
{
- unsigned int segment, size;
-
- size = 1 << elem->log_size;
+ unsigned long *bitmap;
- segment = find_first_bit(elem->bitmap, size);
- if (segment >= size) {
- elem->is_full = true;
+ bitmap = hws_pool_create_and_init_bitmap(pool->alloc_log_sz);
+ if (!bitmap) {
+ mlx5hws_err(pool->ctx, "Failed to create bitmap order: %zu\n",
+ pool->alloc_log_sz);
return -ENOMEM;
}
- bitmap_clear(elem->bitmap, segment, 1);
- *seg = segment;
- return 0;
-}
-
-static int
-hws_pool_onesize_element_get_mem_chunk(struct mlx5hws_pool *pool, u32 order,
- u32 *idx, int *seg)
-{
- struct mlx5hws_pool_elements *elem;
-
- elem = pool->db.element_manager->elements[0];
- if (!elem)
- elem = hws_pool_element_create_new_elem(pool, order, 0);
- if (!elem)
- goto err_no_elem;
-
- if (hws_pool_element_find_seg(elem, seg) != 0) {
- mlx5hws_err(pool->ctx, "No more resources (last request order: %d)\n", order);
+ if (hws_pool_resource_alloc(pool) != 0) {
+ mlx5hws_err(pool->ctx, "Failed to create resource type: %d: size %zu\n",
+ pool->type, pool->alloc_log_sz);
+ bitmap_free(bitmap);
return -ENOMEM;
}
- *idx = 0;
- elem->num_of_elements++;
- return 0;
+ pool->db.bitmap = bitmap;
-err_no_elem:
- mlx5hws_err(pool->ctx, "Failed to allocate element for order: %d\n", order);
- return -ENOMEM;
-}
-
-static int
-hws_pool_general_element_get_mem_chunk(struct mlx5hws_pool *pool, u32 order,
- u32 *idx, int *seg)
-{
- int ret, i;
-
- for (i = 0; i < MLX5HWS_POOL_RESOURCE_ARR_SZ; i++) {
- if (!pool->resource[i]) {
- ret = hws_pool_create_resource_on_index(pool, order, i);
- if (ret)
- goto err_no_res;
- *idx = i;
- *seg = 0; /* One memory slot in that element */
- return 0;
- }
- }
-
- mlx5hws_err(pool->ctx, "No more resources (last request order: %d)\n", order);
- return -ENOMEM;
-
-err_no_res:
- mlx5hws_err(pool->ctx, "Failed to allocate element for order: %d\n", order);
- return -ENOMEM;
+ return 0;
}
-static int hws_pool_general_element_db_get_chunk(struct mlx5hws_pool *pool,
- struct mlx5hws_pool_chunk *chunk)
+static int hws_pool_bitmap_db_get_chunk(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk)
{
- int ret;
-
- /* Go over all memory elements and find/allocate free slot */
- ret = hws_pool_general_element_get_mem_chunk(pool, chunk->order,
- &chunk->resource_idx,
- &chunk->offset);
- if (ret)
- mlx5hws_err(pool->ctx, "Failed to get free slot for chunk with order: %d\n",
- chunk->order);
+ unsigned long *bitmap, size;
- return ret;
-}
+ if (chunk->order != 0) {
+ mlx5hws_err(pool->ctx, "Pool only supports order 0 allocs\n");
+ return -EINVAL;
+ }
-static void hws_pool_general_element_db_put_chunk(struct mlx5hws_pool *pool,
- struct mlx5hws_pool_chunk *chunk)
-{
- if (unlikely(!pool->resource[chunk->resource_idx]))
- pr_warn("HWS: invalid resource with index %d\n", chunk->resource_idx);
+ bitmap = pool->db.bitmap;
+ if (!bitmap) {
+ mlx5hws_err(pool->ctx, "Bad bitmap state\n");
+ return -EINVAL;
+ }
- if (pool->flags & MLX5HWS_POOL_FLAGS_RELEASE_FREE_RESOURCE)
- hws_pool_resource_free(pool, chunk->resource_idx);
-}
+ size = 1 << pool->alloc_log_sz;
-static void hws_pool_general_element_db_uninit(struct mlx5hws_pool *pool)
-{
- (void)pool;
-}
+ chunk->offset = find_first_bit(bitmap, size);
+ if (chunk->offset >= size)
+ return -ENOMEM;
-/* This memory management works as the following:
- * - At start doesn't allocate no mem at all.
- * - When new request for chunk arrived:
- * allocate resource and give it.
- * - When free that chunk:
- * the resource is freed.
- */
-static int hws_pool_general_element_db_init(struct mlx5hws_pool *pool)
-{
- pool->p_db_uninit = &hws_pool_general_element_db_uninit;
- pool->p_get_chunk = &hws_pool_general_element_db_get_chunk;
- pool->p_put_chunk = &hws_pool_general_element_db_put_chunk;
+ bitmap_clear(bitmap, chunk->offset, 1);
return 0;
}
-static void hws_onesize_element_db_destroy_element(struct mlx5hws_pool *pool,
- struct mlx5hws_pool_elements *elem,
- struct mlx5hws_pool_chunk *chunk)
-{
- if (unlikely(!pool->resource[chunk->resource_idx]))
- pr_warn("HWS: invalid resource with index %d\n", chunk->resource_idx);
-
- hws_pool_resource_free(pool, chunk->resource_idx);
- kfree(elem);
- pool->db.element_manager->elements[chunk->resource_idx] = NULL;
-}
-
-static void hws_onesize_element_db_put_chunk(struct mlx5hws_pool *pool,
- struct mlx5hws_pool_chunk *chunk)
+static void hws_pool_bitmap_db_put_chunk(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk)
{
- struct mlx5hws_pool_elements *elem;
+ unsigned long *bitmap;
- if (unlikely(chunk->resource_idx))
- pr_warn("HWS: invalid resource with index %d\n", chunk->resource_idx);
-
- elem = pool->db.element_manager->elements[chunk->resource_idx];
- if (!elem) {
- mlx5hws_err(pool->ctx, "No such element (%d)\n", chunk->resource_idx);
+ bitmap = pool->db.bitmap;
+ if (!bitmap) {
+ mlx5hws_err(pool->ctx, "Bad bitmap state\n");
return;
}
- bitmap_set(elem->bitmap, chunk->offset, 1);
- elem->is_full = false;
- elem->num_of_elements--;
-
- if (pool->flags & MLX5HWS_POOL_FLAGS_RELEASE_FREE_RESOURCE &&
- !elem->num_of_elements)
- hws_onesize_element_db_destroy_element(pool, elem, chunk);
+ bitmap_set(bitmap, chunk->offset, 1);
}
-static int hws_onesize_element_db_get_chunk(struct mlx5hws_pool *pool,
- struct mlx5hws_pool_chunk *chunk)
+static void hws_pool_bitmap_db_uninit(struct mlx5hws_pool *pool)
{
- int ret = 0;
+ unsigned long *bitmap;
- /* Go over all memory elements and find/allocate free slot */
- ret = hws_pool_onesize_element_get_mem_chunk(pool, chunk->order,
- &chunk->resource_idx,
- &chunk->offset);
- if (ret)
- mlx5hws_err(pool->ctx, "Failed to get free slot for chunk with order: %d\n",
- chunk->order);
-
- return ret;
-}
-
-static void hws_onesize_element_db_uninit(struct mlx5hws_pool *pool)
-{
- struct mlx5hws_pool_elements *elem;
- int i;
-
- for (i = 0; i < MLX5HWS_POOL_RESOURCE_ARR_SZ; i++) {
- elem = pool->db.element_manager->elements[i];
- if (elem) {
- bitmap_free(elem->bitmap);
- kfree(elem);
- pool->db.element_manager->elements[i] = NULL;
- }
+ bitmap = pool->db.bitmap;
+ if (bitmap) {
+ bitmap_free(bitmap);
+ pool->db.bitmap = NULL;
}
- kfree(pool->db.element_manager);
}
-/* This memory management works as the following:
- * - At start doesn't allocate no mem at all.
- * - When new request for chunk arrived:
- * aloocate the first and only slot of memory/resource
- * when it ended return error.
- */
-static int hws_pool_onesize_element_db_init(struct mlx5hws_pool *pool)
+static int hws_pool_bitmap_db_init(struct mlx5hws_pool *pool)
{
- pool->db.element_manager = kzalloc(sizeof(*pool->db.element_manager), GFP_KERNEL);
- if (!pool->db.element_manager)
- return -ENOMEM;
+ int ret;
- pool->p_db_uninit = &hws_onesize_element_db_uninit;
- pool->p_get_chunk = &hws_onesize_element_db_get_chunk;
- pool->p_put_chunk = &hws_onesize_element_db_put_chunk;
+ ret = hws_pool_bitmap_init(pool);
+ if (ret)
+ return ret;
+
+ pool->p_db_uninit = &hws_pool_bitmap_db_uninit;
+ pool->p_get_chunk = &hws_pool_bitmap_db_get_chunk;
+ pool->p_put_chunk = &hws_pool_bitmap_db_put_chunk;
return 0;
}
@@ -542,15 +298,14 @@ static int hws_pool_db_init(struct mlx5hws_pool *pool,
{
int ret;
- if (db_type == MLX5HWS_POOL_DB_TYPE_GENERAL_SIZE)
- ret = hws_pool_general_element_db_init(pool);
- else if (db_type == MLX5HWS_POOL_DB_TYPE_ONE_SIZE_RESOURCE)
- ret = hws_pool_onesize_element_db_init(pool);
+ if (db_type == MLX5HWS_POOL_DB_TYPE_BITMAP)
+ ret = hws_pool_bitmap_db_init(pool);
else
- ret = hws_pool_buddy_db_init(pool, pool->alloc_log_sz);
+ ret = hws_pool_buddy_db_init(pool);
if (ret) {
- mlx5hws_err(pool->ctx, "Failed to init general db : %d (ret: %d)\n", db_type, ret);
+ mlx5hws_err(pool->ctx, "Failed to init pool type: %d (ret: %d)\n",
+ db_type, ret);
return ret;
}
@@ -569,6 +324,8 @@ int mlx5hws_pool_chunk_alloc(struct mlx5hws_pool *pool,
mutex_lock(&pool->lock);
ret = pool->p_get_chunk(pool, chunk);
+ if (ret == 0)
+ pool->available_elems -= 1 << chunk->order;
mutex_unlock(&pool->lock);
return ret;
@@ -579,6 +336,7 @@ void mlx5hws_pool_chunk_free(struct mlx5hws_pool *pool,
{
mutex_lock(&pool->lock);
pool->p_put_chunk(pool, chunk);
+ pool->available_elems += 1 << chunk->order;
mutex_unlock(&pool->lock);
}
@@ -599,17 +357,13 @@ mlx5hws_pool_create(struct mlx5hws_context *ctx, struct mlx5hws_pool_attr *pool_
pool->tbl_type = pool_attr->table_type;
pool->opt_type = pool_attr->opt_type;
- /* Support general db */
- if (pool->flags == (MLX5HWS_POOL_FLAGS_RELEASE_FREE_RESOURCE |
- MLX5HWS_POOL_FLAGS_RESOURCE_PER_CHUNK))
- res_db_type = MLX5HWS_POOL_DB_TYPE_GENERAL_SIZE;
- else if (pool->flags == (MLX5HWS_POOL_FLAGS_ONE_RESOURCE |
- MLX5HWS_POOL_FLAGS_FIXED_SIZE_OBJECTS))
- res_db_type = MLX5HWS_POOL_DB_TYPE_ONE_SIZE_RESOURCE;
- else
+ if (pool->flags & MLX5HWS_POOL_FLAG_BUDDY)
res_db_type = MLX5HWS_POOL_DB_TYPE_BUDDY;
+ else
+ res_db_type = MLX5HWS_POOL_DB_TYPE_BITMAP;
pool->alloc_log_sz = pool_attr->alloc_log_sz;
+ pool->available_elems = 1 << pool_attr->alloc_log_sz;
if (hws_pool_db_init(pool, res_db_type))
goto free_pool;
@@ -623,18 +377,17 @@ free_pool:
return NULL;
}
-int mlx5hws_pool_destroy(struct mlx5hws_pool *pool)
+void mlx5hws_pool_destroy(struct mlx5hws_pool *pool)
{
- int i;
-
mutex_destroy(&pool->lock);
- for (i = 0; i < MLX5HWS_POOL_RESOURCE_ARR_SZ; i++)
- if (pool->resource[i])
- hws_pool_resource_free(pool, i);
+ if (pool->available_elems != 1 << pool->alloc_log_sz)
+ mlx5hws_err(pool->ctx, "Attempting to destroy non-empty pool\n");
+
+ if (pool->resource)
+ hws_pool_resource_free(pool);
hws_pool_db_unint(pool);
kfree(pool);
- return 0;
}
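The rewritten pool now holds a single resource, a single database (bitmap or buddy), and an available_elems counter that chunk alloc/free adjust by 1 << order so that mlx5hws_pool_destroy() can flag a non-empty pool. A stand-alone model of the bitmap path and that accounting, with made-up sizes and not taken from the driver:

#include <stdio.h>

#define LOG_SZ 4			/* plays the role of pool->alloc_log_sz */
#define POOL_SZ (1u << LOG_SZ)

static unsigned int bitmap = (1u << POOL_SZ) - 1;	/* set bit = free slot */
static unsigned int available_elems = POOL_SZ;

static int chunk_alloc(int order, int *offset)
{
	if (order != 0)
		return -1;		/* bitmap DB serves order-0 chunks only */

	for (int i = 0; i < (int)POOL_SZ; i++) {
		if (bitmap & (1u << i)) {
			bitmap &= ~(1u << i);
			available_elems -= 1u << order;
			*offset = i;
			return 0;
		}
	}
	return -1;			/* pool exhausted */
}

static void chunk_free(int offset, int order)
{
	bitmap |= 1u << offset;
	available_elems += 1u << order;
}

int main(void)
{
	int off;

	if (chunk_alloc(0, &off))
		return 1;
	printf("offset %d, available %u of %u\n", off, available_elems, POOL_SZ);
	chunk_free(off, 0);
	printf("pool full again: %s\n",
	       available_elems == POOL_SZ ? "yes" : "no");
	return 0;
}

The mlx5hws_pool_empty()/mlx5hws_pool_full() helpers added in pool.h below perform the same comparisons under the pool lock.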
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.h
index 621298b352b2..33e33d5f1fb3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.h
@@ -6,16 +6,12 @@
#define MLX5HWS_POOL_STC_LOG_SZ 15
-#define MLX5HWS_POOL_RESOURCE_ARR_SZ 100
-
enum mlx5hws_pool_type {
MLX5HWS_POOL_TYPE_STE,
MLX5HWS_POOL_TYPE_STC,
};
struct mlx5hws_pool_chunk {
- u32 resource_idx;
- /* Internal offset, relative to base index */
int offset;
int order;
};
@@ -27,35 +23,17 @@ struct mlx5hws_pool_resource {
};
enum mlx5hws_pool_flags {
- /* Only a one resource in that pool */
- MLX5HWS_POOL_FLAGS_ONE_RESOURCE = 1 << 0,
- MLX5HWS_POOL_FLAGS_RELEASE_FREE_RESOURCE = 1 << 1,
- /* No sharing resources between chunks */
- MLX5HWS_POOL_FLAGS_RESOURCE_PER_CHUNK = 1 << 2,
- /* All objects are in the same size */
- MLX5HWS_POOL_FLAGS_FIXED_SIZE_OBJECTS = 1 << 3,
- /* Managed by buddy allocator */
- MLX5HWS_POOL_FLAGS_BUDDY_MANAGED = 1 << 4,
- /* Allocate pool_type memory on pool creation */
- MLX5HWS_POOL_FLAGS_ALLOC_MEM_ON_CREATE = 1 << 5,
-
- /* These values should be used by the caller */
- MLX5HWS_POOL_FLAGS_FOR_STC_POOL =
- MLX5HWS_POOL_FLAGS_ONE_RESOURCE |
- MLX5HWS_POOL_FLAGS_FIXED_SIZE_OBJECTS,
- MLX5HWS_POOL_FLAGS_FOR_MATCHER_STE_POOL =
- MLX5HWS_POOL_FLAGS_RELEASE_FREE_RESOURCE |
- MLX5HWS_POOL_FLAGS_RESOURCE_PER_CHUNK,
- MLX5HWS_POOL_FLAGS_FOR_STE_ACTION_POOL =
- MLX5HWS_POOL_FLAGS_ONE_RESOURCE |
- MLX5HWS_POOL_FLAGS_BUDDY_MANAGED |
- MLX5HWS_POOL_FLAGS_ALLOC_MEM_ON_CREATE,
+ /* Managed by a buddy allocator. If this is not set only allocations of
+ * order 0 are supported.
+ */
+ MLX5HWS_POOL_FLAG_BUDDY = BIT(0),
};
enum mlx5hws_pool_optimize {
MLX5HWS_POOL_OPTIMIZE_NONE = 0x0,
MLX5HWS_POOL_OPTIMIZE_ORIG = 0x1,
MLX5HWS_POOL_OPTIMIZE_MIRROR = 0x2,
+ MLX5HWS_POOL_OPTIMIZE_MAX = 0x3,
};
struct mlx5hws_pool_attr {
@@ -68,34 +46,17 @@ struct mlx5hws_pool_attr {
};
enum mlx5hws_db_type {
- /* Uses for allocating chunk of big memory, each element has its own resource in the FW*/
- MLX5HWS_POOL_DB_TYPE_GENERAL_SIZE,
- /* One resource only, all the elements are with same one size */
- MLX5HWS_POOL_DB_TYPE_ONE_SIZE_RESOURCE,
- /* Many resources, the memory allocated with buddy mechanism */
+ /* Uses a bitmap, supports only allocations of order 0. */
+ MLX5HWS_POOL_DB_TYPE_BITMAP,
+ /* Entries are managed using a buddy mechanism. */
MLX5HWS_POOL_DB_TYPE_BUDDY,
};
-struct mlx5hws_buddy_manager {
- struct mlx5hws_buddy_mem *buddies[MLX5HWS_POOL_RESOURCE_ARR_SZ];
-};
-
-struct mlx5hws_pool_elements {
- u32 num_of_elements;
- unsigned long *bitmap;
- u32 log_size;
- bool is_full;
-};
-
-struct mlx5hws_element_manager {
- struct mlx5hws_pool_elements *elements[MLX5HWS_POOL_RESOURCE_ARR_SZ];
-};
-
struct mlx5hws_pool_db {
enum mlx5hws_db_type type;
union {
- struct mlx5hws_element_manager *element_manager;
- struct mlx5hws_buddy_manager *buddy_manager;
+ unsigned long *bitmap;
+ struct mlx5hws_buddy_mem *buddy;
};
};
@@ -111,11 +72,11 @@ struct mlx5hws_pool {
enum mlx5hws_pool_flags flags;
struct mutex lock; /* protect the pool */
size_t alloc_log_sz;
+ size_t available_elems;
enum mlx5hws_table_type tbl_type;
enum mlx5hws_pool_optimize opt_type;
- struct mlx5hws_pool_resource *resource[MLX5HWS_POOL_RESOURCE_ARR_SZ];
- struct mlx5hws_pool_resource *mirror_resource[MLX5HWS_POOL_RESOURCE_ARR_SZ];
- /* DB */
+ struct mlx5hws_pool_resource *resource;
+ struct mlx5hws_pool_resource *mirror_resource;
struct mlx5hws_pool_db db;
/* Functions */
mlx5hws_pool_unint_db p_db_uninit;
@@ -127,7 +88,7 @@ struct mlx5hws_pool *
mlx5hws_pool_create(struct mlx5hws_context *ctx,
struct mlx5hws_pool_attr *pool_attr);
-int mlx5hws_pool_destroy(struct mlx5hws_pool *pool);
+void mlx5hws_pool_destroy(struct mlx5hws_pool *pool);
int mlx5hws_pool_chunk_alloc(struct mlx5hws_pool *pool,
struct mlx5hws_pool_chunk *chunk);
@@ -135,17 +96,37 @@ int mlx5hws_pool_chunk_alloc(struct mlx5hws_pool *pool,
void mlx5hws_pool_chunk_free(struct mlx5hws_pool *pool,
struct mlx5hws_pool_chunk *chunk);
-static inline u32
-mlx5hws_pool_chunk_get_base_id(struct mlx5hws_pool *pool,
- struct mlx5hws_pool_chunk *chunk)
+static inline u32 mlx5hws_pool_get_base_id(struct mlx5hws_pool *pool)
{
- return pool->resource[chunk->resource_idx]->base_id;
+ return pool->resource->base_id;
}
-static inline u32
-mlx5hws_pool_chunk_get_base_mirror_id(struct mlx5hws_pool *pool,
- struct mlx5hws_pool_chunk *chunk)
+static inline u32 mlx5hws_pool_get_base_mirror_id(struct mlx5hws_pool *pool)
{
- return pool->mirror_resource[chunk->resource_idx]->base_id;
+ return pool->mirror_resource->base_id;
+}
+
+static inline bool
+mlx5hws_pool_empty(struct mlx5hws_pool *pool)
+{
+ bool ret;
+
+ mutex_lock(&pool->lock);
+ ret = pool->available_elems == 0;
+ mutex_unlock(&pool->lock);
+
+ return ret;
+}
+
+static inline bool
+mlx5hws_pool_full(struct mlx5hws_pool *pool)
+{
+ bool ret;
+
+ mutex_lock(&pool->lock);
+ ret = pool->available_elems == (1 << pool->alloc_log_sz);
+ mutex_unlock(&pool->lock);
+
+ return ret;
}
#endif /* MLX5HWS_POOL_H_ */
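On the order arithmetic implied by the new MLX5HWS_POOL_FLAG_BUDDY comment: a chunk of order n spans 1 << n entries, and callers that need several contiguous entries (such as the per-rule action STEs in rule.c further down) size their request as ilog2(roundup_pow_of_two(num)). The following are rough user-space approximations of those two kernel helpers, shown only to make the numbers concrete:

#include <stdio.h>

/* Approximations of the kernel's roundup_pow_of_two() and ilog2(),
 * valid for the small positive values used here.
 */
static unsigned int roundup_pow_of_two(unsigned int n)
{
	unsigned int p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

static unsigned int ilog2(unsigned int n)
{
	unsigned int l = 0;

	while (n >>= 1)
		l++;
	return l;
}

int main(void)
{
	for (unsigned int num = 1; num <= 5; num++)
		printf("%u STEs -> chunk order %u (%u entries)\n",
		       num, ilog2(roundup_pow_of_two(num)),
		       roundup_pow_of_two(num));
	return 0;
}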
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/prm.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/prm.h
index de92cecbeb92..271490a51b96 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/prm.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/prm.h
@@ -390,11 +390,6 @@ struct mlx5_ifc_definer_bits {
u8 match_mask[0x160];
};
-struct mlx5_ifc_arg_bits {
- u8 rsvd0[0x88];
- u8 access_pd[0x18];
-};
-
struct mlx5_ifc_header_modify_pattern_in_bits {
u8 modify_field_select[0x40];
@@ -428,11 +423,6 @@ struct mlx5_ifc_create_definer_in_bits {
struct mlx5_ifc_definer_bits definer;
};
-struct mlx5_ifc_create_arg_in_bits {
- struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
- struct mlx5_ifc_arg_bits arg;
-};
-
struct mlx5_ifc_create_header_modify_pattern_in_bits {
struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
struct mlx5_ifc_header_modify_pattern_in_bits pattern;
@@ -479,36 +469,4 @@ enum {
MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION_GOTO_TBL = 1,
};
-struct mlx5_ifc_alloc_packet_reformat_out_bits {
- u8 status[0x8];
- u8 reserved_at_8[0x18];
-
- u8 syndrome[0x20];
-
- u8 packet_reformat_id[0x20];
-
- u8 reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_dealloc_packet_reformat_in_bits {
- u8 opcode[0x10];
- u8 reserved_at_10[0x10];
-
- u8 reserved_at_20[0x10];
- u8 op_mod[0x10];
-
- u8 packet_reformat_id[0x20];
-
- u8 reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_dealloc_packet_reformat_out_bits {
- u8 status[0x8];
- u8 reserved_at_8[0x18];
-
- u8 syndrome[0x20];
-
- u8 reserved_at_40[0x40];
-};
-
#endif /* MLX5_PRM_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.c
index e20c67a04203..5342a4cc7194 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.c
@@ -129,27 +129,18 @@ static void hws_rule_gen_comp(struct mlx5hws_send_engine *queue,
static void
hws_rule_save_resize_info(struct mlx5hws_rule *rule,
- struct mlx5hws_send_ste_attr *ste_attr,
- bool is_update)
+ struct mlx5hws_send_ste_attr *ste_attr)
{
if (!mlx5hws_matcher_is_resizable(rule->matcher))
return;
- if (likely(!is_update)) {
+ /* resize_info might already exist (if we're in update flow) */
+ if (likely(!rule->resize_info)) {
rule->resize_info = kzalloc(sizeof(*rule->resize_info), GFP_KERNEL);
if (unlikely(!rule->resize_info)) {
pr_warn("HWS: resize info isn't allocated for rule\n");
return;
}
-
- rule->resize_info->max_stes =
- rule->matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes;
- rule->resize_info->action_ste_pool[0] = rule->matcher->action_ste[0].max_stes ?
- rule->matcher->action_ste[0].pool :
- NULL;
- rule->resize_info->action_ste_pool[1] = rule->matcher->action_ste[1].max_stes ?
- rule->matcher->action_ste[1].pool :
- NULL;
}
memcpy(rule->resize_info->ctrl_seg, ste_attr->wqe_ctrl,
@@ -204,84 +195,30 @@ hws_rule_load_delete_info(struct mlx5hws_rule *rule,
}
}
-static int hws_rule_alloc_action_ste_idx(struct mlx5hws_rule *rule,
- u8 action_ste_selector)
-{
- struct mlx5hws_matcher *matcher = rule->matcher;
- struct mlx5hws_matcher_action_ste *action_ste;
- struct mlx5hws_pool_chunk ste = {0};
- int ret;
-
- action_ste = &matcher->action_ste[action_ste_selector];
- ste.order = ilog2(roundup_pow_of_two(action_ste->max_stes));
- ret = mlx5hws_pool_chunk_alloc(action_ste->pool, &ste);
- if (unlikely(ret)) {
- mlx5hws_err(matcher->tbl->ctx,
- "Failed to allocate STE for rule actions");
- return ret;
- }
- rule->action_ste_idx = ste.offset;
-
- return 0;
-}
-
-static void hws_rule_free_action_ste_idx(struct mlx5hws_rule *rule,
- u8 action_ste_selector)
+static int mlx5hws_rule_alloc_action_ste(struct mlx5hws_rule *rule,
+ u16 queue_id, bool skip_rx,
+ bool skip_tx)
{
struct mlx5hws_matcher *matcher = rule->matcher;
- struct mlx5hws_pool_chunk ste = {0};
- struct mlx5hws_pool *pool;
- u8 max_stes;
-
- if (mlx5hws_matcher_is_resizable(matcher)) {
- /* Free the original action pool if rule was resized */
- max_stes = rule->resize_info->max_stes;
- pool = rule->resize_info->action_ste_pool[action_ste_selector];
- } else {
- max_stes = matcher->action_ste[action_ste_selector].max_stes;
- pool = matcher->action_ste[action_ste_selector].pool;
- }
-
- /* This release is safe only when the rule match part was deleted */
- ste.order = ilog2(roundup_pow_of_two(max_stes));
- ste.offset = rule->action_ste_idx;
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
- mlx5hws_pool_chunk_free(pool, &ste);
+ rule->action_ste.ste.order =
+ ilog2(roundup_pow_of_two(matcher->num_of_action_stes));
+ return mlx5hws_action_ste_chunk_alloc(&ctx->action_ste_pool[queue_id],
+ skip_rx, skip_tx,
+ &rule->action_ste);
}
-static int hws_rule_alloc_action_ste(struct mlx5hws_rule *rule,
- struct mlx5hws_rule_attr *attr)
+void mlx5hws_rule_free_action_ste(struct mlx5hws_action_ste_chunk *action_ste)
{
- int action_ste_idx;
- int ret;
-
- ret = hws_rule_alloc_action_ste_idx(rule, 0);
- if (unlikely(ret))
- return ret;
-
- action_ste_idx = rule->action_ste_idx;
-
- ret = hws_rule_alloc_action_ste_idx(rule, 1);
- if (unlikely(ret)) {
- hws_rule_free_action_ste_idx(rule, 0);
- return ret;
- }
-
- /* Both pools have to return the same index */
- if (unlikely(rule->action_ste_idx != action_ste_idx)) {
- pr_warn("HWS: allocation of action STE failed - pool indexes mismatch\n");
- return -EINVAL;
- }
-
- return 0;
-}
+ if (!action_ste->action_tbl)
+ return;
-void mlx5hws_rule_free_action_ste(struct mlx5hws_rule *rule)
-{
- if (rule->action_ste_idx > -1) {
- hws_rule_free_action_ste_idx(rule, 1);
- hws_rule_free_action_ste_idx(rule, 0);
- }
+ /* This release is safe only when the rule match STE was deleted
+ * (when the rule is being deleted) or replaced with the new STE that
+ * isn't pointing to old action STEs (when the rule is being updated).
+ */
+ mlx5hws_action_ste_chunk_free(action_ste);
}
static void hws_rule_create_init(struct mlx5hws_rule *rule,
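The new allocation path above sizes each rule's action-STE chunk as a power of two: the chunk order is ilog2(roundup_pow_of_two(matcher->num_of_action_stes)) and the chunk comes from the per-queue pool ctx->action_ste_pool[queue_id]. A minimal userspace sketch of that sizing arithmetic (not part of the patch; the two helpers stand in for the kernel's roundup_pow_of_two()/ilog2()):

#include <stdio.h>

static unsigned int roundup_pow_of_two(unsigned int n)
{
	unsigned int p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

static unsigned int ilog2(unsigned int n)
{
	unsigned int log = 0;

	while (n >>= 1)
		log++;
	return log;
}

int main(void)
{
	unsigned int stes;

	for (stes = 1; stes <= 8; stes++)
		printf("%u action STE(s) -> chunk order %u (%u slot(s))\n",
		       stes, ilog2(roundup_pow_of_two(stes)),
		       roundup_pow_of_two(stes));
	return 0;
}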
@@ -298,14 +235,17 @@ static void hws_rule_create_init(struct mlx5hws_rule *rule,
/* In update we use these rtc's */
rule->rtc_0 = 0;
rule->rtc_1 = 0;
- rule->action_ste_selector = 0;
+
+ rule->status = MLX5HWS_RULE_STATUS_CREATING;
} else {
- rule->action_ste_selector = !rule->action_ste_selector;
+ rule->status = MLX5HWS_RULE_STATUS_UPDATING;
+ /* Save the old action STE info so we can free it after writing
+ * new action STEs and a corresponding match STE.
+ */
+ rule->old_action_ste = rule->action_ste;
}
rule->pending_wqes = 0;
- rule->action_ste_idx = -1;
- rule->status = MLX5HWS_RULE_STATUS_CREATING;
/* Init default send STE attributes */
ste_attr->gta_opcode = MLX5HWS_WQE_GTA_OP_ACTIVATE;
@@ -315,8 +255,7 @@ static void hws_rule_create_init(struct mlx5hws_rule *rule,
/* Init default action apply */
apply->tbl_type = tbl->type;
- apply->common_res = &ctx->common_res[tbl->type];
- apply->jump_to_action_stc = matcher->action_ste[0].stc.offset;
+ apply->common_res = &ctx->common_res;
apply->require_dep = 0;
}
@@ -332,8 +271,6 @@ static void hws_rule_move_init(struct mlx5hws_rule *rule,
rule->rtc_1 = 0;
rule->pending_wqes = 0;
- rule->action_ste_idx = -1;
- rule->action_ste_selector = 0;
rule->status = MLX5HWS_RULE_STATUS_CREATING;
rule->resize_info->state = MLX5HWS_RULE_RESIZE_STATE_WRITING;
}
@@ -394,21 +331,24 @@ static int hws_rule_create_hws(struct mlx5hws_rule *rule,
if (action_stes) {
/* Allocate action STEs for rules that need more than match STE */
- if (!is_update) {
- ret = hws_rule_alloc_action_ste(rule, attr);
- if (ret) {
- mlx5hws_err(ctx, "Failed to allocate action memory %d", ret);
- mlx5hws_send_abort_new_dep_wqe(queue);
- return ret;
- }
+ ret = mlx5hws_rule_alloc_action_ste(rule, attr->queue_id,
+ !!ste_attr.rtc_0,
+ !!ste_attr.rtc_1);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to allocate action memory %d", ret);
+ mlx5hws_send_abort_new_dep_wqe(queue);
+ return ret;
}
+ apply.jump_to_action_stc =
+ rule->action_ste.action_tbl->stc.offset;
/* Skip RX/TX based on the dep_wqe init */
ste_attr.rtc_0 = dep_wqe->rtc_0 ?
- matcher->action_ste[rule->action_ste_selector].rtc_0_id : 0;
+ rule->action_ste.action_tbl->rtc_0_id : 0;
ste_attr.rtc_1 = dep_wqe->rtc_1 ?
- matcher->action_ste[rule->action_ste_selector].rtc_1_id : 0;
+ rule->action_ste.action_tbl->rtc_1_id : 0;
/* Action STEs are written to a specific index last to first */
- ste_attr.direct_index = rule->action_ste_idx + action_stes;
+ ste_attr.direct_index =
+ rule->action_ste.ste.offset + action_stes;
apply.next_direct_idx = ste_attr.direct_index;
} else {
apply.next_direct_idx = 0;
@@ -459,7 +399,7 @@ static int hws_rule_create_hws(struct mlx5hws_rule *rule,
if (!is_update)
hws_rule_save_delete_info(rule, &ste_attr);
- hws_rule_save_resize_info(rule, &ste_attr, is_update);
+ hws_rule_save_resize_info(rule, &ste_attr);
mlx5hws_send_engine_inc_rule(queue);
if (!attr->burst)
@@ -480,7 +420,10 @@ static void hws_rule_destroy_failed_hws(struct mlx5hws_rule *rule,
attr->user_data, MLX5HWS_RULE_STATUS_DELETED);
/* Rule failed now we can safely release action STEs */
- mlx5hws_rule_free_action_ste(rule);
+ mlx5hws_rule_free_action_ste(&rule->action_ste);
+
+ /* The rule may have failed while updating - release the old action STEs too */
+ mlx5hws_rule_free_action_ste(&rule->old_action_ste);
/* Clear complex tag */
hws_rule_clear_delete_info(rule);
@@ -517,7 +460,8 @@ static int hws_rule_destroy_hws(struct mlx5hws_rule *rule,
}
/* Rule is not completed yet */
- if (rule->status == MLX5HWS_RULE_STATUS_CREATING)
+ if (rule->status == MLX5HWS_RULE_STATUS_CREATING ||
+ rule->status == MLX5HWS_RULE_STATUS_UPDATING)
return -EBUSY;
/* Rule failed and doesn't require cleanup */
@@ -534,7 +478,7 @@ static int hws_rule_destroy_hws(struct mlx5hws_rule *rule,
hws_rule_gen_comp(queue, rule, false,
attr->user_data, MLX5HWS_RULE_STATUS_DELETED);
- mlx5hws_rule_free_action_ste(rule);
+ mlx5hws_rule_free_action_ste(&rule->action_ste);
mlx5hws_rule_clear_resize_info(rule);
return 0;
}
@@ -711,6 +655,124 @@ int mlx5hws_rule_move_hws_add(struct mlx5hws_rule *rule,
return 0;
}
+static u8 hws_rule_ethertype_to_matcher_ipv(u32 ethertype)
+{
+ switch (ethertype) {
+ case ETH_P_IP:
+ return MLX5HWS_MATCHER_IPV_4;
+ case ETH_P_IPV6:
+ return MLX5HWS_MATCHER_IPV_6;
+ default:
+ return MLX5HWS_MATCHER_IPV_UNSET;
+ }
+}
+
+static u8 hws_rule_ip_version_to_matcher_ipv(u32 ip_version)
+{
+ switch (ip_version) {
+ case 4:
+ return MLX5HWS_MATCHER_IPV_4;
+ case 6:
+ return MLX5HWS_MATCHER_IPV_6;
+ default:
+ return MLX5HWS_MATCHER_IPV_UNSET;
+ }
+}
+
+static int hws_rule_check_outer_ip_version(struct mlx5hws_matcher *matcher,
+ u32 *match_param)
+{
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
+ u8 outer_ipv_ether = MLX5HWS_MATCHER_IPV_UNSET;
+ u8 outer_ipv_ip = MLX5HWS_MATCHER_IPV_UNSET;
+ u8 outer_ipv, ver;
+
+ if (matcher->matches_outer_ethertype) {
+ ver = MLX5_GET(fte_match_param, match_param,
+ outer_headers.ethertype);
+ outer_ipv_ether = hws_rule_ethertype_to_matcher_ipv(ver);
+ }
+ if (matcher->matches_outer_ip_version) {
+ ver = MLX5_GET(fte_match_param, match_param,
+ outer_headers.ip_version);
+ outer_ipv_ip = hws_rule_ip_version_to_matcher_ipv(ver);
+ }
+
+ if (outer_ipv_ether != MLX5HWS_MATCHER_IPV_UNSET &&
+ outer_ipv_ip != MLX5HWS_MATCHER_IPV_UNSET &&
+ outer_ipv_ether != outer_ipv_ip) {
+ mlx5hws_err(ctx, "Rule matches on inconsistent outer ethertype and ip version\n");
+ return -EINVAL;
+ }
+
+ outer_ipv = outer_ipv_ether != MLX5HWS_MATCHER_IPV_UNSET ?
+ outer_ipv_ether : outer_ipv_ip;
+ if (outer_ipv != MLX5HWS_MATCHER_IPV_UNSET &&
+ matcher->outer_ip_version != MLX5HWS_MATCHER_IPV_UNSET &&
+ outer_ipv != matcher->outer_ip_version) {
+ mlx5hws_err(ctx, "Matcher and rule disagree on outer IP version\n");
+ return -EINVAL;
+ }
+ matcher->outer_ip_version = outer_ipv;
+
+ return 0;
+}
+
+static int hws_rule_check_inner_ip_version(struct mlx5hws_matcher *matcher,
+ u32 *match_param)
+{
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
+ u8 inner_ipv_ether = MLX5HWS_MATCHER_IPV_UNSET;
+ u8 inner_ipv_ip = MLX5HWS_MATCHER_IPV_UNSET;
+ u8 inner_ipv, ver;
+
+ if (matcher->matches_inner_ethertype) {
+ ver = MLX5_GET(fte_match_param, match_param,
+ inner_headers.ethertype);
+ inner_ipv_ether = hws_rule_ethertype_to_matcher_ipv(ver);
+ }
+ if (matcher->matches_inner_ip_version) {
+ ver = MLX5_GET(fte_match_param, match_param,
+ inner_headers.ip_version);
+ inner_ipv_ip = hws_rule_ip_version_to_matcher_ipv(ver);
+ }
+
+ if (inner_ipv_ether != MLX5HWS_MATCHER_IPV_UNSET &&
+ inner_ipv_ip != MLX5HWS_MATCHER_IPV_UNSET &&
+ inner_ipv_ether != inner_ipv_ip) {
+ mlx5hws_err(ctx, "Rule matches on inconsistent inner ethertype and ip version\n");
+ return -EINVAL;
+ }
+
+ inner_ipv = inner_ipv_ether != MLX5HWS_MATCHER_IPV_UNSET ?
+ inner_ipv_ether : inner_ipv_ip;
+ if (inner_ipv != MLX5HWS_MATCHER_IPV_UNSET &&
+ matcher->inner_ip_version != MLX5HWS_MATCHER_IPV_UNSET &&
+ inner_ipv != matcher->inner_ip_version) {
+ mlx5hws_err(ctx, "Matcher and rule disagree on inner IP version\n");
+ return -EINVAL;
+ }
+ matcher->inner_ip_version = inner_ipv;
+
+ return 0;
+}
+
+static int hws_rule_check_ip_version(struct mlx5hws_matcher *matcher,
+ u32 *match_param)
+{
+ int ret;
+
+ ret = hws_rule_check_outer_ip_version(matcher, match_param);
+ if (unlikely(ret))
+ return ret;
+
+ ret = hws_rule_check_inner_ip_version(matcher, match_param);
+ if (unlikely(ret))
+ return ret;
+
+ return 0;
+}
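A userspace sketch of the consistency rule the two helpers above enforce: a rule matching on both ethertype and ip_version must be self-consistent, and the resulting IP version must agree with what the matcher has already committed to. ETH_P_IP/ETH_P_IPV6 are the standard values; the IPV_* constants are placeholders for MLX5HWS_MATCHER_IPV_*:

#include <stdio.h>

#define ETH_P_IP   0x0800
#define ETH_P_IPV6 0x86DD

enum { IPV_UNSET = 0, IPV_4 = 4, IPV_6 = 6 }; /* placeholders for MLX5HWS_MATCHER_IPV_* */

static int ethertype_to_ipv(unsigned int ethertype)
{
	if (ethertype == ETH_P_IP)
		return IPV_4;
	if (ethertype == ETH_P_IPV6)
		return IPV_6;
	return IPV_UNSET;
}

static int ip_version_to_ipv(unsigned int ip_version)
{
	return ip_version == 4 ? IPV_4 : ip_version == 6 ? IPV_6 : IPV_UNSET;
}

/* Returns 0 if the rule is consistent with itself and the matcher, -1 otherwise. */
static int check_ip_version(int *matcher_ipv, unsigned int ethertype,
			    unsigned int ip_version)
{
	int ipv_ether = ethertype_to_ipv(ethertype);
	int ipv_ip = ip_version_to_ipv(ip_version);
	int ipv;

	if (ipv_ether != IPV_UNSET && ipv_ip != IPV_UNSET && ipv_ether != ipv_ip)
		return -1; /* rule contradicts itself */

	ipv = ipv_ether != IPV_UNSET ? ipv_ether : ipv_ip;
	if (ipv != IPV_UNSET && *matcher_ipv != IPV_UNSET && ipv != *matcher_ipv)
		return -1; /* rule disagrees with what the matcher already saw */

	*matcher_ipv = ipv;
	return 0;
}

int main(void)
{
	int matcher_ipv = IPV_UNSET;

	/* First rule pins the matcher to IPv4; a later IPv6 rule is rejected. */
	printf("IPv4 rule: %d\n", check_ip_version(&matcher_ipv, ETH_P_IP, 4));
	printf("IPv6 rule: %d\n", check_ip_version(&matcher_ipv, ETH_P_IPV6, 6));
	return 0;
}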
+
int mlx5hws_rule_create(struct mlx5hws_matcher *matcher,
u8 mt_idx,
u32 *match_param,
@@ -721,6 +783,10 @@ int mlx5hws_rule_create(struct mlx5hws_matcher *matcher,
{
int ret;
+ ret = hws_rule_check_ip_version(matcher, match_param);
+ if (unlikely(ret))
+ return ret;
+
rule_handle->matcher = matcher;
ret = hws_rule_enqueue_precheck_create(rule_handle, attr);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.h
index 495cdd17e9f3..1c47a9c11572 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.h
@@ -15,6 +15,8 @@ enum mlx5hws_rule_status {
MLX5HWS_RULE_STATUS_UNKNOWN,
MLX5HWS_RULE_STATUS_CREATING,
MLX5HWS_RULE_STATUS_CREATED,
+ MLX5HWS_RULE_STATUS_UPDATING,
+ MLX5HWS_RULE_STATUS_UPDATED,
MLX5HWS_RULE_STATUS_DELETING,
MLX5HWS_RULE_STATUS_DELETED,
MLX5HWS_RULE_STATUS_FAILING,
@@ -42,12 +44,10 @@ struct mlx5hws_rule_match_tag {
};
struct mlx5hws_rule_resize_info {
- struct mlx5hws_pool *action_ste_pool[2];
u32 rtc_0;
u32 rtc_1;
u32 rule_idx;
u8 state;
- u8 max_stes;
u8 ctrl_seg[MLX5HWS_WQE_SZ_GTA_CTRL]; /* Ctrl segment of STE: 48 bytes */
u8 data_seg[MLX5HWS_WQE_SZ_GTA_DATA]; /* Data segment of STE: 64 bytes */
};
@@ -58,18 +58,18 @@ struct mlx5hws_rule {
struct mlx5hws_rule_match_tag tag;
struct mlx5hws_rule_resize_info *resize_info;
};
+ struct mlx5hws_action_ste_chunk action_ste;
+ struct mlx5hws_action_ste_chunk old_action_ste;
u32 rtc_0; /* The RTC into which the STE was inserted */
u32 rtc_1; /* The RTC into which the STE was inserted */
- int action_ste_idx; /* STE array index */
u8 status; /* enum mlx5hws_rule_status */
- u8 action_ste_selector; /* For rule update - which action STE is in use */
u8 pending_wqes;
bool skip_delete; /* For complex rules - another rule with same tag
* still exists, so don't actually delete this rule.
*/
};
-void mlx5hws_rule_free_action_ste(struct mlx5hws_rule *rule);
+void mlx5hws_rule_free_action_ste(struct mlx5hws_action_ste_chunk *action_ste);
int mlx5hws_rule_move_hws_remove(struct mlx5hws_rule *rule,
void *queue, void *user_data);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c
index 424797b6d802..c4b22be19a9b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c
@@ -344,18 +344,133 @@ hws_send_engine_update_rule_resize(struct mlx5hws_send_engine *queue,
}
}
+static void hws_send_engine_dump_error_cqe(struct mlx5hws_send_engine *queue,
+ struct mlx5hws_send_ring_priv *priv,
+ struct mlx5_cqe64 *cqe)
+{
+ u8 wqe_opcode = cqe ? be32_to_cpu(cqe->sop_drop_qpn) >> 24 : 0;
+ struct mlx5hws_context *ctx = priv->rule->matcher->tbl->ctx;
+ u32 opcode = cqe ? get_cqe_opcode(cqe) : 0;
+ struct mlx5hws_rule *rule = priv->rule;
+
+ /* If something bad happens and lots of rules are failing, we don't
+ * want to pollute dmesg. Print only the first bad cqe per engine,
+ * the one that started the avalanche.
+ */
+ if (queue->error_cqe_printed)
+ return;
+
+ queue->error_cqe_printed = true;
+
+ if (mlx5hws_rule_move_in_progress(rule))
+ mlx5hws_err(ctx,
+ "--- rule 0x%08llx: error completion moving rule: phase %s, wqes left %d\n",
+ HWS_PTR_TO_ID(rule),
+ rule->resize_info->state ==
+ MLX5HWS_RULE_RESIZE_STATE_WRITING ? "WRITING" :
+ rule->resize_info->state ==
+ MLX5HWS_RULE_RESIZE_STATE_DELETING ? "DELETING" :
+ "UNKNOWN",
+ rule->pending_wqes);
+ else
+ mlx5hws_err(ctx,
+ "--- rule 0x%08llx: error completion %s (%d), wqes left %d\n",
+ HWS_PTR_TO_ID(rule),
+ rule->status ==
+ MLX5HWS_RULE_STATUS_CREATING ? "CREATING" :
+ rule->status ==
+ MLX5HWS_RULE_STATUS_DELETING ? "DELETING" :
+ rule->status ==
+ MLX5HWS_RULE_STATUS_FAILING ? "FAILING" :
+ rule->status ==
+ MLX5HWS_RULE_STATUS_UPDATING ? "UPDATING" : "NA",
+ rule->status,
+ rule->pending_wqes);
+
+ mlx5hws_err(ctx, " rule 0x%08llx: matcher 0x%llx %s\n",
+ HWS_PTR_TO_ID(rule),
+ HWS_PTR_TO_ID(rule->matcher),
+ (rule->matcher->flags & MLX5HWS_MATCHER_FLAGS_ISOLATED) ?
+ "(isolated)" : "");
+
+ if (!cqe) {
+ mlx5hws_err(ctx, " rule 0x%08llx: no CQE\n",
+ HWS_PTR_TO_ID(rule));
+ return;
+ }
+
+ mlx5hws_err(ctx, " rule 0x%08llx: cqe->opcode = %d %s\n",
+ HWS_PTR_TO_ID(rule), opcode,
+ opcode == MLX5_CQE_REQ ? "(MLX5_CQE_REQ)" :
+ opcode == MLX5_CQE_REQ_ERR ? "(MLX5_CQE_REQ_ERR)" : " ");
+
+ if (opcode == MLX5_CQE_REQ_ERR) {
+ struct mlx5_err_cqe *err_cqe = (struct mlx5_err_cqe *)cqe;
+
+ mlx5hws_err(ctx,
+ " rule 0x%08llx: |--- hw_error_syndrome = 0x%x\n",
+ HWS_PTR_TO_ID(rule),
+ err_cqe->rsvd1[16]);
+ mlx5hws_err(ctx,
+ " rule 0x%08llx: |--- hw_syndrome_type = 0x%x\n",
+ HWS_PTR_TO_ID(rule),
+ err_cqe->rsvd1[17] >> 4);
+ mlx5hws_err(ctx,
+ " rule 0x%08llx: |--- vendor_err_synd = 0x%x\n",
+ HWS_PTR_TO_ID(rule),
+ err_cqe->vendor_err_synd);
+ mlx5hws_err(ctx,
+ " rule 0x%08llx: |--- syndrome = 0x%x\n",
+ HWS_PTR_TO_ID(rule),
+ err_cqe->syndrome);
+ }
+
+ mlx5hws_err(ctx,
+ " rule 0x%08llx: cqe->byte_cnt = 0x%08x\n",
+ HWS_PTR_TO_ID(rule), be32_to_cpu(cqe->byte_cnt));
+ mlx5hws_err(ctx,
+ " rule 0x%08llx: |-- UPDATE STATUS = %s\n",
+ HWS_PTR_TO_ID(rule),
+ (be32_to_cpu(cqe->byte_cnt) & 0x80000000) ?
+ "FAILURE" : "SUCCESS");
+ mlx5hws_err(ctx,
+ " rule 0x%08llx: |------- SYNDROME = %s\n",
+ HWS_PTR_TO_ID(rule),
+ ((be32_to_cpu(cqe->byte_cnt) & 0x00000003) == 1) ?
+ "SET_FLOW_FAIL" :
+ ((be32_to_cpu(cqe->byte_cnt) & 0x00000003) == 2) ?
+ "DISABLE_FLOW_FAIL" : "UNKNOWN");
+ mlx5hws_err(ctx,
+ " rule 0x%08llx: cqe->sop_drop_qpn = 0x%08x\n",
+ HWS_PTR_TO_ID(rule), be32_to_cpu(cqe->sop_drop_qpn));
+ mlx5hws_err(ctx,
+ " rule 0x%08llx: |-send wqe opcode = 0x%02x %s\n",
+ HWS_PTR_TO_ID(rule), wqe_opcode,
+ wqe_opcode == MLX5HWS_WQE_OPCODE_TBL_ACCESS ?
+ "(MLX5HWS_WQE_OPCODE_TBL_ACCESS)" : "(UNKNOWN)");
+ mlx5hws_err(ctx,
+ " rule 0x%08llx: |------------ qpn = 0x%06x\n",
+ HWS_PTR_TO_ID(rule),
+ be32_to_cpu(cqe->sop_drop_qpn) & 0xffffff);
+}
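A small userspace sketch of the field decoding hws_send_engine_dump_error_cqe() prints: bit 31 of byte_cnt is the update-status flag, bits 1:0 are the syndrome (1 = SET_FLOW_FAIL, 2 = DISABLE_FLOW_FAIL), the top byte of sop_drop_qpn is the send WQE opcode and the low 24 bits are the QPN. The example words are arbitrary; ntohl()/htonl() stand in for be32_to_cpu()/cpu_to_be32():

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>	/* ntohl()/htonl() stand in for be32_to_cpu()/cpu_to_be32() */

static void decode_cqe_words(uint32_t byte_cnt_be, uint32_t sop_drop_qpn_be)
{
	uint32_t byte_cnt = ntohl(byte_cnt_be);
	uint32_t sop_drop_qpn = ntohl(sop_drop_qpn_be);

	printf("update status  : %s\n",
	       (byte_cnt & 0x80000000) ? "FAILURE" : "SUCCESS");
	printf("syndrome       : %u (1=SET_FLOW_FAIL, 2=DISABLE_FLOW_FAIL)\n",
	       byte_cnt & 0x3);
	printf("send wqe opcode: 0x%02x\n", sop_drop_qpn >> 24);
	printf("qpn            : 0x%06x\n", sop_drop_qpn & 0xffffff);
}

int main(void)
{
	/* Arbitrary example words: a failed update with syndrome 1. */
	decode_cqe_words(htonl(0x80000001), htonl(0x2a000123));
	return 0;
}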
+
static void hws_send_engine_update_rule(struct mlx5hws_send_engine *queue,
struct mlx5hws_send_ring_priv *priv,
u16 wqe_cnt,
- enum mlx5hws_flow_op_status *status)
+ enum mlx5hws_flow_op_status *status,
+ struct mlx5_cqe64 *cqe)
{
priv->rule->pending_wqes--;
- if (*status == MLX5HWS_FLOW_OP_ERROR) {
+ if (unlikely(*status == MLX5HWS_FLOW_OP_ERROR)) {
if (priv->retry_id) {
+ /* If there is a retry_id, this is not an error yet:
+ * retry inserting this rule into the collision RTC.
+ */
hws_send_engine_retry_post_send(queue, priv, wqe_cnt);
return;
}
+ hws_send_engine_dump_error_cqe(queue, priv, cqe);
/* Some part of the rule failed */
priv->rule->status = MLX5HWS_RULE_STATUS_FAILING;
*priv->used_id = 0;
@@ -377,17 +492,25 @@ static void hws_send_engine_update_rule(struct mlx5hws_send_engine *queue,
*status = MLX5HWS_FLOW_OP_ERROR;
} else {
- /* Increase the status, this only works on good flow as the enum
- * is arrange it away creating -> created -> deleting -> deleted
+ /* Increase the status; this only works on the good flow because
+ * the enum is arranged this way:
+ * - creating -> created
+ * - updating -> updated
+ * - deleting -> deleted
*/
priv->rule->status++;
*status = MLX5HWS_FLOW_OP_SUCCESS;
- /* Rule was deleted now we can safely release action STEs
- * and clear resize info
- */
if (priv->rule->status == MLX5HWS_RULE_STATUS_DELETED) {
- mlx5hws_rule_free_action_ste(priv->rule);
+ /* Rule was deleted, now we can safely release
+ * action STEs and clear resize info
+ */
+ mlx5hws_rule_free_action_ste(&priv->rule->action_ste);
mlx5hws_rule_clear_resize_info(priv->rule);
+ } else if (priv->rule->status == MLX5HWS_RULE_STATUS_UPDATED) {
+ /* Rule was updated, free the old action STEs */
+ mlx5hws_rule_free_action_ste(&priv->rule->old_action_ste);
+ /* Update completed - move the rule back to "created" */
+ priv->rule->status = MLX5HWS_RULE_STATUS_CREATED;
}
}
}
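The good-flow path above relies on the enum ordering added to rule.h further down (CREATING -> CREATED, UPDATING -> UPDATED, DELETING -> DELETED), so a successful completion is simply status++, with two follow-ups: a DELETED rule releases its action STEs and resize info, and an UPDATED rule frees the old action STEs and settles back to CREATED. A userspace sketch of that progression (FAILING omitted):

#include <stdio.h>

enum rule_status {
	STATUS_UNKNOWN,
	STATUS_CREATING,
	STATUS_CREATED,
	STATUS_UPDATING,
	STATUS_UPDATED,
	STATUS_DELETING,
	STATUS_DELETED,
};

static const char * const status_name[] = {
	"UNKNOWN", "CREATING", "CREATED", "UPDATING",
	"UPDATED", "DELETING", "DELETED",
};

/* Good-flow completion: bump the status, then handle the two special cases. */
static int complete_ok(int status)
{
	status++;
	if (status == STATUS_DELETED) {
		printf("  free action STEs, clear resize info\n");
	} else if (status == STATUS_UPDATED) {
		printf("  free old action STEs\n");
		status = STATUS_CREATED;	/* update done, rule is simply created again */
	}
	return status;
}

int main(void)
{
	int s;

	s = complete_ok(STATUS_UPDATING);
	printf("UPDATING -> %s\n", status_name[s]);

	s = complete_ok(STATUS_DELETING);
	printf("DELETING -> %s\n", status_name[s]);
	return 0;
}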
@@ -412,7 +535,8 @@ static void hws_send_engine_update(struct mlx5hws_send_engine *queue,
if (priv->user_data) {
if (priv->rule) {
- hws_send_engine_update_rule(queue, priv, wqe_cnt, &status);
+ hws_send_engine_update_rule(queue, priv, wqe_cnt,
+ &status, cqe);
/* Completion is provided on the last rule WQE */
if (priv->rule->pending_wqes)
return;
@@ -633,6 +757,7 @@ static int hws_send_ring_create_sq(struct mlx5_core_dev *mdev, u32 pdn,
MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
MLX5_SET(sqc, sqc, flush_in_error_en, 1);
+ MLX5_SET(sqc, sqc, non_wire, 1);
ts_format = mlx5_is_real_time_sq(mdev) ? MLX5_TIMESTAMP_FORMAT_REAL_TIME :
MLX5_TIMESTAMP_FORMAT_FREE_RUNNING;
@@ -896,15 +1021,18 @@ close_cq:
return err;
}
-void mlx5hws_send_queue_close(struct mlx5hws_send_engine *queue)
+static void mlx5hws_send_queue_close(struct mlx5hws_send_engine *queue)
{
+ if (!queue->num_entries)
+ return; /* this queue wasn't initialized */
+
hws_send_ring_close(queue);
kfree(queue->completed.entries);
}
-int mlx5hws_send_queue_open(struct mlx5hws_context *ctx,
- struct mlx5hws_send_engine *queue,
- u16 queue_size)
+static int mlx5hws_send_queue_open(struct mlx5hws_context *ctx,
+ struct mlx5hws_send_engine *queue,
+ u16 queue_size)
{
int err;
@@ -990,6 +1118,7 @@ static int hws_bwc_send_queues_init(struct mlx5hws_context *ctx)
for (i = 0; i < bwc_queues; i++) {
mutex_init(&ctx->bwc_send_queue_locks[i]);
lockdep_register_key(ctx->bwc_lock_class_keys + i);
+ lockdep_set_class(ctx->bwc_send_queue_locks + i, ctx->bwc_lock_class_keys + i);
}
return 0;
@@ -1004,7 +1133,7 @@ int mlx5hws_send_queues_open(struct mlx5hws_context *ctx,
u16 queue_size)
{
int err = 0;
- u32 i;
+ int i = 0;
/* Open one extra queue for control path */
ctx->queues = queues + 1;
@@ -1020,7 +1149,13 @@ int mlx5hws_send_queues_open(struct mlx5hws_context *ctx,
goto free_bwc_locks;
}
- for (i = 0; i < ctx->queues; i++) {
+ /* If native API isn't supported, skip the unused native queues:
+ * initialize BWC queues and control queue only.
+ */
+ if (!mlx5hws_context_native_supported(ctx))
+ i = mlx5hws_bwc_get_queue_id(ctx, 0);
+
+ for (; i < ctx->queues; i++) {
err = mlx5hws_send_queue_open(ctx, &ctx->send_queue[i], queue_size);
if (err)
goto close_send_queues;
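A userspace sketch of the loop change above: when the native API is unsupported, the loop starts at the first BWC queue index instead of 0, so the unused native queues are never opened. bwc_first_queue() and its return value are placeholders for mlx5hws_bwc_get_queue_id(ctx, 0):

#include <stdio.h>
#include <stdbool.h>

#define NUM_QUEUES 8	/* requested queues plus one extra control queue */

static int bwc_first_queue(void)
{
	return 5;	/* placeholder for mlx5hws_bwc_get_queue_id(ctx, 0) */
}

int main(void)
{
	bool native_supported = false;
	int i = 0;

	if (!native_supported)
		i = bwc_first_queue();	/* skip the unused native queues */

	for (; i < NUM_QUEUES; i++)
		printf("open send queue %d\n", i);
	return 0;
}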
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.h
index b50825d6dc53..3fb8e99309b2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.h
@@ -140,6 +140,7 @@ struct mlx5hws_send_engine {
u16 used_entries;
u16 num_entries;
bool err;
+ bool error_cqe_printed;
struct mutex lock; /* Protects the send engine */
};
@@ -189,12 +190,6 @@ void mlx5hws_send_abort_new_dep_wqe(struct mlx5hws_send_engine *queue);
void mlx5hws_send_all_dep_wqe(struct mlx5hws_send_engine *queue);
-void mlx5hws_send_queue_close(struct mlx5hws_send_engine *queue);
-
-int mlx5hws_send_queue_open(struct mlx5hws_context *ctx,
- struct mlx5hws_send_engine *queue,
- u16 queue_size);
-
void mlx5hws_send_queues_close(struct mlx5hws_context *ctx);
int mlx5hws_send_queues_open(struct mlx5hws_context *ctx,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c
index 9576e02d00c3..568f691733f3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c
@@ -37,6 +37,7 @@ static void hws_table_set_cap_attr(struct mlx5hws_table *tbl,
}
static int hws_table_up_default_fdb_miss_tbl(struct mlx5hws_table *tbl)
+__must_hold(&tbl->ctx->ctrl_lock)
{
struct mlx5hws_cmd_ft_create_attr ft_attr = {0};
struct mlx5hws_cmd_set_fte_attr fte_attr = {0};
@@ -48,8 +49,8 @@ static int hws_table_up_default_fdb_miss_tbl(struct mlx5hws_table *tbl)
if (tbl->type != MLX5HWS_TABLE_TYPE_FDB)
return 0;
- if (ctx->common_res[tbl_type].default_miss) {
- ctx->common_res[tbl_type].default_miss->refcount++;
+ if (ctx->common_res.default_miss) {
+ ctx->common_res.default_miss->refcount++;
return 0;
}
@@ -70,29 +71,28 @@ static int hws_table_up_default_fdb_miss_tbl(struct mlx5hws_table *tbl)
return -EINVAL;
}
- /* ctx->ctrl_lock must be held here */
- ctx->common_res[tbl_type].default_miss = default_miss;
- ctx->common_res[tbl_type].default_miss->refcount++;
+ ctx->common_res.default_miss = default_miss;
+ ctx->common_res.default_miss->refcount++;
return 0;
}
/* Called under ctx->ctrl_lock */
static void hws_table_down_default_fdb_miss_tbl(struct mlx5hws_table *tbl)
+__must_hold(&tbl->ctx->ctrl_lock)
{
struct mlx5hws_cmd_forward_tbl *default_miss;
struct mlx5hws_context *ctx = tbl->ctx;
- u8 tbl_type = tbl->type;
if (tbl->type != MLX5HWS_TABLE_TYPE_FDB)
return;
- default_miss = ctx->common_res[tbl_type].default_miss;
+ default_miss = ctx->common_res.default_miss;
if (--default_miss->refcount)
return;
mlx5hws_cmd_forward_tbl_destroy(ctx->mdev, default_miss);
- ctx->common_res[tbl_type].default_miss = NULL;
+ ctx->common_res.default_miss = NULL;
}
static int hws_table_connect_to_default_miss_tbl(struct mlx5hws_table *tbl, u32 ft_id)
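A userspace sketch of the default-miss lifetime now that common_res is no longer indexed per table type: a single shared object is created on first use, later users only take a reference, and the last put destroys it. The ctrl_lock that the patch annotates with __must_hold() is what makes the plain counter safe; it is elided here:

#include <stdio.h>
#include <stdlib.h>

struct default_miss {
	int refcount;
};

/* Stands in for ctx->common_res.default_miss; ctrl_lock is assumed held. */
static struct default_miss *shared_miss;

static int miss_get(void)
{
	if (shared_miss) {
		shared_miss->refcount++;
		return 0;
	}
	shared_miss = calloc(1, sizeof(*shared_miss));
	if (!shared_miss)
		return -1;
	shared_miss->refcount = 1;
	return 0;
}

static void miss_put(void)
{
	if (--shared_miss->refcount)
		return;
	free(shared_miss);
	shared_miss = NULL;
}

int main(void)
{
	miss_get();	/* first FDB table creates the shared miss table */
	miss_get();	/* second table only takes a reference */
	miss_put();	/* still one user left, the object stays */
	miss_put();	/* last put destroys it */
	printf("shared_miss is %s\n", shared_miss ? "alive" : "gone");
	return 0;
}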
@@ -342,10 +342,10 @@ int mlx5hws_table_ft_set_next_rtc(struct mlx5hws_context *ctx,
return mlx5hws_cmd_flow_table_modify(ctx->mdev, &ft_attr, ft_id);
}
-static int hws_table_ft_set_next_ft(struct mlx5hws_context *ctx,
- u32 ft_id,
- u32 fw_ft_type,
- u32 next_ft_id)
+int mlx5hws_table_ft_set_next_ft(struct mlx5hws_context *ctx,
+ u32 ft_id,
+ u32 fw_ft_type,
+ u32 next_ft_id)
{
struct mlx5hws_cmd_ft_modify_attr ft_attr = {0};
@@ -389,10 +389,10 @@ int mlx5hws_table_connect_to_miss_table(struct mlx5hws_table *src_tbl,
if (dst_tbl) {
if (list_empty(&dst_tbl->matchers_list)) {
/* Connect src_tbl last_ft to dst_tbl start anchor */
- ret = hws_table_ft_set_next_ft(src_tbl->ctx,
- last_ft_id,
- src_tbl->fw_ft_type,
- dst_tbl->ft_id);
+ ret = mlx5hws_table_ft_set_next_ft(src_tbl->ctx,
+ last_ft_id,
+ src_tbl->fw_ft_type,
+ dst_tbl->ft_id);
if (ret)
return ret;
@@ -478,15 +478,9 @@ int mlx5hws_table_set_default_miss(struct mlx5hws_table *tbl,
if (old_miss_tbl)
list_del_init(&tbl->default_miss.next);
- old_miss_tbl = tbl->default_miss.miss_tbl;
- if (old_miss_tbl)
- list_del_init(&old_miss_tbl->default_miss.head);
-
if (miss_tbl)
list_add(&tbl->default_miss.next, &miss_tbl->default_miss.head);
- mutex_unlock(&ctx->ctrl_lock);
- return 0;
out:
mutex_unlock(&ctx->ctrl_lock);
return ret;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.h
index dd50420eec9e..0400cce0c317 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.h
@@ -65,4 +65,9 @@ int mlx5hws_table_ft_set_next_rtc(struct mlx5hws_context *ctx,
u32 rtc_0_id,
u32 rtc_1_id);
+int mlx5hws_table_ft_set_next_ft(struct mlx5hws_context *ctx,
+ u32 ft_id,
+ u32 fw_ft_type,
+ u32 next_ft_id);
+
#endif /* MLX5HWS_TABLE_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c
index 3d74109f8230..65740bb68b09 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c
@@ -8,7 +8,7 @@
#define DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, dmn_type) \
((dmn)->info.caps.dmn_type##_sw_owner || \
((dmn)->info.caps.dmn_type##_sw_owner_v2 && \
- (dmn)->info.caps.sw_format_ver <= MLX5_STEERING_FORMAT_CONNECTX_7))
+ (dmn)->info.caps.sw_format_ver <= MLX5_STEERING_FORMAT_CONNECTX_8))
bool mlx5dr_domain_is_support_ptrn_arg(struct mlx5dr_domain *dmn)
{
@@ -297,7 +297,9 @@ dr_domain_add_vport_cap(struct mlx5dr_domain *dmn, u16 vport)
if (ret) {
mlx5dr_dbg(dmn, "Couldn't insert new vport into xarray (%d)\n", ret);
kvfree(vport_caps);
- return ERR_PTR(ret);
+ if (ret == -EBUSY)
+ return ERR_PTR(-EBUSY);
+ return NULL;
}
return vport_caps;
@@ -514,30 +516,6 @@ def_xa_destroy:
return NULL;
}
-/* Assure synchronization of the device steering tables with updates made by SW
- * insertion.
- */
-int mlx5dr_domain_sync(struct mlx5dr_domain *dmn, u32 flags)
-{
- int ret = 0;
-
- if (flags & MLX5DR_DOMAIN_SYNC_FLAGS_SW) {
- mlx5dr_domain_lock(dmn);
- ret = mlx5dr_send_ring_force_drain(dmn);
- mlx5dr_domain_unlock(dmn);
- if (ret) {
- mlx5dr_err(dmn, "Force drain failed flags: %d, ret: %d\n",
- flags, ret);
- return ret;
- }
- }
-
- if (flags & MLX5DR_DOMAIN_SYNC_FLAGS_HW)
- ret = mlx5dr_cmd_sync_steering(dmn->mdev);
-
- return ret;
-}
-
int mlx5dr_domain_destroy(struct mlx5dr_domain *dmn)
{
if (WARN_ON_ONCE(refcount_read(&dmn->refcount) > 1))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c
index 6fa06ba2d346..4fd4e8483382 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c
@@ -1067,7 +1067,6 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
int inlen, err, eqn;
void *cqc, *in;
__be64 *pas;
- int vector;
u32 i;
cq = kzalloc(sizeof(*cq), GFP_KERNEL);
@@ -1096,8 +1095,7 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
if (!in)
goto err_cqwq;
- vector = raw_smp_processor_id() % mlx5_comp_vectors_max(mdev);
- err = mlx5_comp_eqn_get(mdev, vector, &eqn);
+ err = mlx5_comp_eqn_get(mdev, 0, &eqn);
if (err) {
kvfree(in);
goto err_cqwq;
@@ -1333,36 +1331,3 @@ void mlx5dr_send_ring_free(struct mlx5dr_domain *dmn,
kfree(send_ring->sync_buff);
kfree(send_ring);
}
-
-int mlx5dr_send_ring_force_drain(struct mlx5dr_domain *dmn)
-{
- struct mlx5dr_send_ring *send_ring = dmn->send_ring;
- struct postsend_info send_info = {};
- u8 data[DR_STE_SIZE];
- int num_of_sends_req;
- int ret;
- int i;
-
- /* Sending this amount of requests makes sure we will get drain */
- num_of_sends_req = send_ring->signal_th * TH_NUMS_TO_DRAIN / 2;
-
- /* Send fake requests forcing the last to be signaled */
- send_info.write.addr = (uintptr_t)data;
- send_info.write.length = DR_STE_SIZE;
- send_info.write.lkey = 0;
- /* Using the sync_mr in order to write/read */
- send_info.remote_addr = (uintptr_t)send_ring->sync_mr->addr;
- send_info.rkey = send_ring->sync_mr->mkey;
-
- for (i = 0; i < num_of_sends_req; i++) {
- ret = dr_postsend_icm_data(dmn, &send_info);
- if (ret)
- return ret;
- }
-
- spin_lock(&send_ring->lock);
- ret = dr_handle_pending_wc(dmn, send_ring);
- spin_unlock(&send_ring->lock);
-
- return ret;
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.c
index e94fbb015efa..c8b8ff80c7c7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.c
@@ -555,7 +555,7 @@ void mlx5dr_ste_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_actions_attr *attr,
u32 *added_stes)
{
- ste_ctx->set_actions_tx(dmn, action_type_set, ste_ctx->actions_caps,
+ ste_ctx->set_actions_tx(ste_ctx, dmn, action_type_set, ste_ctx->actions_caps,
hw_ste_arr, attr, added_stes);
}
@@ -566,7 +566,7 @@ void mlx5dr_ste_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_actions_attr *attr,
u32 *added_stes)
{
- ste_ctx->set_actions_rx(dmn, action_type_set, ste_ctx->actions_caps,
+ ste_ctx->set_actions_rx(ste_ctx, dmn, action_type_set, ste_ctx->actions_caps,
hw_ste_arr, attr, added_stes);
}
@@ -1458,6 +1458,8 @@ struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx(u8 version)
return mlx5dr_ste_get_ctx_v1();
else if (version == MLX5_STEERING_FORMAT_CONNECTX_7)
return mlx5dr_ste_get_ctx_v2();
+ else if (version == MLX5_STEERING_FORMAT_CONNECTX_8)
+ return mlx5dr_ste_get_ctx_v3();
return NULL;
}
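The refactor above turns the per-format "actions bit set" helpers into ste_ctx ops: the shared v1 action-building code now calls ste_ctx->set_push_vlan() and friends, and the new ConnectX-8 context only has to override the setters whose layout changed. A userspace sketch of that dispatch pattern; the v3 behavior shown is purely hypothetical:

#include <stdio.h>

struct ste_ctx {
	const char *name;
	void (*set_push_vlan)(unsigned char *ste, unsigned int vlan_hdr);
};

static void v1_set_push_vlan(unsigned char *ste, unsigned int vlan_hdr)
{
	(void)ste;
	printf("v1: push vlan 0x%08x\n", vlan_hdr);
}

static void v3_set_push_vlan(unsigned char *ste, unsigned int vlan_hdr)
{
	(void)ste;
	printf("v3: push vlan 0x%08x (hypothetical new layout)\n", vlan_hdr);
}

static const struct ste_ctx ste_ctx_v1 = { "v1", v1_set_push_vlan };
static const struct ste_ctx ste_ctx_v3 = { "v3", v3_set_push_vlan };

/* Shared action-building path: format-specific bits go through the ops. */
static void set_actions_tx(const struct ste_ctx *ctx, unsigned char *ste)
{
	ctx->set_push_vlan(ste, 0x8100);
}

int main(void)
{
	unsigned char ste[64] = { 0 };

	set_actions_tx(&ste_ctx_v1, ste);
	set_actions_tx(&ste_ctx_v3, ste);
	return 0;
}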
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.h
index 54a6619c3ecb..3d5afc832fa5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.h
@@ -160,13 +160,15 @@ struct mlx5dr_ste_ctx {
/* Actions */
u32 actions_caps;
- void (*set_actions_rx)(struct mlx5dr_domain *dmn,
+ void (*set_actions_rx)(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_domain *dmn,
u8 *action_type_set,
u32 actions_caps,
u8 *hw_ste_arr,
struct mlx5dr_ste_actions_attr *attr,
u32 *added_stes);
- void (*set_actions_tx)(struct mlx5dr_domain *dmn,
+ void (*set_actions_tx)(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_domain *dmn,
u8 *action_type_set,
u32 actions_caps,
u8 *hw_ste_arr,
@@ -197,7 +199,21 @@ struct mlx5dr_ste_ctx {
u16 *used_hw_action_num);
int (*alloc_modify_hdr_chunk)(struct mlx5dr_action *action);
void (*dealloc_modify_hdr_chunk)(struct mlx5dr_action *action);
-
+ /* Actions bit set */
+ void (*set_encap)(u8 *hw_ste_p, u8 *d_action,
+ u32 reformat_id, int size);
+ void (*set_push_vlan)(u8 *ste, u8 *d_action,
+ u32 vlan_hdr);
+ void (*set_pop_vlan)(u8 *hw_ste_p, u8 *s_action,
+ u8 vlans_num);
+ void (*set_rx_decap)(u8 *hw_ste_p, u8 *s_action);
+ void (*set_encap_l3)(u8 *hw_ste_p, u8 *frst_s_action,
+ u8 *scnd_d_action, u32 reformat_id,
+ int size);
+ void (*set_insert_hdr)(u8 *hw_ste_p, u8 *d_action, u32 reformat_id,
+ u8 anchor, u8 offset, int size);
+ void (*set_remove_hdr)(u8 *hw_ste_p, u8 *s_action, u8 anchor,
+ u8 offset, int size);
/* Send */
void (*prepare_for_postsend)(u8 *hw_ste_p, u32 ste_size);
};
@@ -205,5 +221,6 @@ struct mlx5dr_ste_ctx {
struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v0(void);
struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v1(void);
struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v2(void);
+struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v3(void);
#endif /* _DR_STE_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v0.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v0.c
index e9f6c7ed7a7b..42536bee55e2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v0.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v0.c
@@ -406,7 +406,8 @@ static void dr_ste_v0_arr_init_next(u8 **last_ste,
}
static void
-dr_ste_v0_set_actions_tx(struct mlx5dr_domain *dmn,
+dr_ste_v0_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_domain *dmn,
u8 *action_type_set,
u32 actions_caps,
u8 *last_ste,
@@ -476,7 +477,8 @@ dr_ste_v0_set_actions_tx(struct mlx5dr_domain *dmn,
}
static void
-dr_ste_v0_set_actions_rx(struct mlx5dr_domain *dmn,
+dr_ste_v0_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_domain *dmn,
u8 *action_type_set,
u32 actions_caps,
u8 *last_ste,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c
index 1d49704b9542..6447efbae00d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c
@@ -5,136 +5,6 @@
#include "mlx5_ifc_dr_ste_v1.h"
#include "dr_ste_v1.h"
-#define DR_STE_CALC_DFNR_TYPE(lookup_type, inner) \
- ((inner) ? DR_STE_V1_LU_TYPE_##lookup_type##_I : \
- DR_STE_V1_LU_TYPE_##lookup_type##_O)
-
-enum dr_ste_v1_entry_format {
- DR_STE_V1_TYPE_BWC_BYTE = 0x0,
- DR_STE_V1_TYPE_BWC_DW = 0x1,
- DR_STE_V1_TYPE_MATCH = 0x2,
- DR_STE_V1_TYPE_MATCH_RANGES = 0x7,
-};
-
-/* Lookup type is built from 2B: [ Definer mode 1B ][ Definer index 1B ] */
-enum {
- DR_STE_V1_LU_TYPE_NOP = 0x0000,
- DR_STE_V1_LU_TYPE_ETHL2_TNL = 0x0002,
- DR_STE_V1_LU_TYPE_IBL3_EXT = 0x0102,
- DR_STE_V1_LU_TYPE_ETHL2_O = 0x0003,
- DR_STE_V1_LU_TYPE_IBL4 = 0x0103,
- DR_STE_V1_LU_TYPE_ETHL2_I = 0x0004,
- DR_STE_V1_LU_TYPE_SRC_QP_GVMI = 0x0104,
- DR_STE_V1_LU_TYPE_ETHL2_SRC_O = 0x0005,
- DR_STE_V1_LU_TYPE_ETHL2_HEADERS_O = 0x0105,
- DR_STE_V1_LU_TYPE_ETHL2_SRC_I = 0x0006,
- DR_STE_V1_LU_TYPE_ETHL2_HEADERS_I = 0x0106,
- DR_STE_V1_LU_TYPE_ETHL3_IPV4_5_TUPLE_O = 0x0007,
- DR_STE_V1_LU_TYPE_IPV6_DES_O = 0x0107,
- DR_STE_V1_LU_TYPE_ETHL3_IPV4_5_TUPLE_I = 0x0008,
- DR_STE_V1_LU_TYPE_IPV6_DES_I = 0x0108,
- DR_STE_V1_LU_TYPE_ETHL4_O = 0x0009,
- DR_STE_V1_LU_TYPE_IPV6_SRC_O = 0x0109,
- DR_STE_V1_LU_TYPE_ETHL4_I = 0x000a,
- DR_STE_V1_LU_TYPE_IPV6_SRC_I = 0x010a,
- DR_STE_V1_LU_TYPE_ETHL2_SRC_DST_O = 0x000b,
- DR_STE_V1_LU_TYPE_MPLS_O = 0x010b,
- DR_STE_V1_LU_TYPE_ETHL2_SRC_DST_I = 0x000c,
- DR_STE_V1_LU_TYPE_MPLS_I = 0x010c,
- DR_STE_V1_LU_TYPE_ETHL3_IPV4_MISC_O = 0x000d,
- DR_STE_V1_LU_TYPE_GRE = 0x010d,
- DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER = 0x000e,
- DR_STE_V1_LU_TYPE_GENERAL_PURPOSE = 0x010e,
- DR_STE_V1_LU_TYPE_ETHL3_IPV4_MISC_I = 0x000f,
- DR_STE_V1_LU_TYPE_STEERING_REGISTERS_0 = 0x010f,
- DR_STE_V1_LU_TYPE_STEERING_REGISTERS_1 = 0x0110,
- DR_STE_V1_LU_TYPE_FLEX_PARSER_OK = 0x0011,
- DR_STE_V1_LU_TYPE_FLEX_PARSER_0 = 0x0111,
- DR_STE_V1_LU_TYPE_FLEX_PARSER_1 = 0x0112,
- DR_STE_V1_LU_TYPE_ETHL4_MISC_O = 0x0113,
- DR_STE_V1_LU_TYPE_ETHL4_MISC_I = 0x0114,
- DR_STE_V1_LU_TYPE_INVALID = 0x00ff,
- DR_STE_V1_LU_TYPE_DONT_CARE = MLX5DR_STE_LU_TYPE_DONT_CARE,
-};
-
-enum dr_ste_v1_header_anchors {
- DR_STE_HEADER_ANCHOR_START_OUTER = 0x00,
- DR_STE_HEADER_ANCHOR_1ST_VLAN = 0x02,
- DR_STE_HEADER_ANCHOR_IPV6_IPV4 = 0x07,
- DR_STE_HEADER_ANCHOR_INNER_MAC = 0x13,
- DR_STE_HEADER_ANCHOR_INNER_IPV6_IPV4 = 0x19,
-};
-
-enum dr_ste_v1_action_size {
- DR_STE_ACTION_SINGLE_SZ = 4,
- DR_STE_ACTION_DOUBLE_SZ = 8,
- DR_STE_ACTION_TRIPLE_SZ = 12,
-};
-
-enum dr_ste_v1_action_insert_ptr_attr {
- DR_STE_V1_ACTION_INSERT_PTR_ATTR_NONE = 0, /* Regular push header (e.g. push vlan) */
- DR_STE_V1_ACTION_INSERT_PTR_ATTR_ENCAP = 1, /* Encapsulation / Tunneling */
- DR_STE_V1_ACTION_INSERT_PTR_ATTR_ESP = 2, /* IPsec */
-};
-
-enum dr_ste_v1_action_id {
- DR_STE_V1_ACTION_ID_NOP = 0x00,
- DR_STE_V1_ACTION_ID_COPY = 0x05,
- DR_STE_V1_ACTION_ID_SET = 0x06,
- DR_STE_V1_ACTION_ID_ADD = 0x07,
- DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE = 0x08,
- DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER = 0x09,
- DR_STE_V1_ACTION_ID_INSERT_INLINE = 0x0a,
- DR_STE_V1_ACTION_ID_INSERT_POINTER = 0x0b,
- DR_STE_V1_ACTION_ID_FLOW_TAG = 0x0c,
- DR_STE_V1_ACTION_ID_QUEUE_ID_SEL = 0x0d,
- DR_STE_V1_ACTION_ID_ACCELERATED_LIST = 0x0e,
- DR_STE_V1_ACTION_ID_MODIFY_LIST = 0x0f,
- DR_STE_V1_ACTION_ID_ASO = 0x12,
- DR_STE_V1_ACTION_ID_TRAILER = 0x13,
- DR_STE_V1_ACTION_ID_COUNTER_ID = 0x14,
- DR_STE_V1_ACTION_ID_MAX = 0x21,
- /* use for special cases */
- DR_STE_V1_ACTION_ID_SPECIAL_ENCAP_L3 = 0x22,
-};
-
-enum {
- DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_0 = 0x00,
- DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_1 = 0x01,
- DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_2 = 0x02,
- DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_0 = 0x08,
- DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_1 = 0x09,
- DR_STE_V1_ACTION_MDFY_FLD_L3_OUT_0 = 0x0e,
- DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0 = 0x18,
- DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_1 = 0x19,
- DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_0 = 0x40,
- DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_1 = 0x41,
- DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_0 = 0x44,
- DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_1 = 0x45,
- DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_2 = 0x46,
- DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_3 = 0x47,
- DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_0 = 0x4c,
- DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_1 = 0x4d,
- DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_2 = 0x4e,
- DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_3 = 0x4f,
- DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_0 = 0x5e,
- DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_1 = 0x5f,
- DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_0 = 0x6f,
- DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_1 = 0x70,
- DR_STE_V1_ACTION_MDFY_FLD_METADATA_2_CQE = 0x7b,
- DR_STE_V1_ACTION_MDFY_FLD_GNRL_PURPOSE = 0x7c,
- DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_0 = 0x8c,
- DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_1 = 0x8d,
- DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_0 = 0x8e,
- DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_1 = 0x8f,
- DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_0 = 0x90,
- DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_1 = 0x91,
-};
-
-enum dr_ste_v1_aso_ctx_type {
- DR_STE_V1_ASO_CTX_TYPE_POLICERS = 0x2,
-};
-
static const struct mlx5dr_ste_action_modify_field dr_ste_v1_action_modify_field_arr[] = {
[MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_0, .start = 0, .end = 31,
@@ -379,13 +249,12 @@ static void dr_ste_v1_set_counter_id(u8 *hw_ste_p, u32 ctr_id)
MLX5_SET(ste_match_bwc_v1, hw_ste_p, counter_id, ctr_id);
}
-static void dr_ste_v1_set_reparse(u8 *hw_ste_p)
+void dr_ste_v1_set_reparse(u8 *hw_ste_p)
{
MLX5_SET(ste_match_bwc_v1, hw_ste_p, reparse, 1);
}
-static void dr_ste_v1_set_encap(u8 *hw_ste_p, u8 *d_action,
- u32 reformat_id, int size)
+void dr_ste_v1_set_encap(u8 *hw_ste_p, u8 *d_action, u32 reformat_id, int size)
{
MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, action_id,
DR_STE_V1_ACTION_ID_INSERT_POINTER);
@@ -397,10 +266,10 @@ static void dr_ste_v1_set_encap(u8 *hw_ste_p, u8 *d_action,
dr_ste_v1_set_reparse(hw_ste_p);
}
-static void dr_ste_v1_set_insert_hdr(u8 *hw_ste_p, u8 *d_action,
- u32 reformat_id,
- u8 anchor, u8 offset,
- int size)
+void dr_ste_v1_set_insert_hdr(u8 *hw_ste_p, u8 *d_action,
+ u32 reformat_id,
+ u8 anchor, u8 offset,
+ int size)
{
MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action,
action_id, DR_STE_V1_ACTION_ID_INSERT_POINTER);
@@ -417,9 +286,9 @@ static void dr_ste_v1_set_insert_hdr(u8 *hw_ste_p, u8 *d_action,
dr_ste_v1_set_reparse(hw_ste_p);
}
-static void dr_ste_v1_set_remove_hdr(u8 *hw_ste_p, u8 *s_action,
- u8 anchor, u8 offset,
- int size)
+void dr_ste_v1_set_remove_hdr(u8 *hw_ste_p, u8 *s_action,
+ u8 anchor, u8 offset,
+ int size)
{
MLX5_SET(ste_single_action_remove_header_size_v1, s_action,
action_id, DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
@@ -432,8 +301,7 @@ static void dr_ste_v1_set_remove_hdr(u8 *hw_ste_p, u8 *s_action,
dr_ste_v1_set_reparse(hw_ste_p);
}
-static void dr_ste_v1_set_push_vlan(u8 *hw_ste_p, u8 *d_action,
- u32 vlan_hdr)
+void dr_ste_v1_set_push_vlan(u8 *hw_ste_p, u8 *d_action, u32 vlan_hdr)
{
MLX5_SET(ste_double_action_insert_with_inline_v1, d_action,
action_id, DR_STE_V1_ACTION_ID_INSERT_INLINE);
@@ -446,7 +314,7 @@ static void dr_ste_v1_set_push_vlan(u8 *hw_ste_p, u8 *d_action,
dr_ste_v1_set_reparse(hw_ste_p);
}
-static void dr_ste_v1_set_pop_vlan(u8 *hw_ste_p, u8 *s_action, u8 vlans_num)
+void dr_ste_v1_set_pop_vlan(u8 *hw_ste_p, u8 *s_action, u8 vlans_num)
{
MLX5_SET(ste_single_action_remove_header_size_v1, s_action,
action_id, DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
@@ -459,11 +327,8 @@ static void dr_ste_v1_set_pop_vlan(u8 *hw_ste_p, u8 *s_action, u8 vlans_num)
dr_ste_v1_set_reparse(hw_ste_p);
}
-static void dr_ste_v1_set_encap_l3(u8 *hw_ste_p,
- u8 *frst_s_action,
- u8 *scnd_d_action,
- u32 reformat_id,
- int size)
+void dr_ste_v1_set_encap_l3(u8 *hw_ste_p, u8 *frst_s_action, u8 *scnd_d_action,
+ u32 reformat_id, int size)
{
/* Remove L2 headers */
MLX5_SET(ste_single_action_remove_header_v1, frst_s_action, action_id,
@@ -483,7 +348,7 @@ static void dr_ste_v1_set_encap_l3(u8 *hw_ste_p,
dr_ste_v1_set_reparse(hw_ste_p);
}
-static void dr_ste_v1_set_rx_decap(u8 *hw_ste_p, u8 *s_action)
+void dr_ste_v1_set_rx_decap(u8 *hw_ste_p, u8 *s_action)
{
MLX5_SET(ste_single_action_remove_header_v1, s_action, action_id,
DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER);
@@ -620,7 +485,8 @@ static void dr_ste_v1_arr_init_next_match_range(u8 **last_ste,
dr_ste_v1_set_entry_type(*last_ste, DR_STE_V1_TYPE_MATCH_RANGES);
}
-void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
+void dr_ste_v1_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_domain *dmn,
u8 *action_type_set,
u32 actions_caps,
u8 *last_ste,
@@ -640,7 +506,7 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
last_ste, action);
action_sz = DR_STE_ACTION_TRIPLE_SZ;
}
- dr_ste_v1_set_pop_vlan(last_ste, action, attr->vlans.count);
+ ste_ctx->set_pop_vlan(last_ste, action, attr->vlans.count);
action_sz -= DR_STE_ACTION_SINGLE_SZ;
action += DR_STE_ACTION_SINGLE_SZ;
@@ -677,8 +543,8 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
action_sz = DR_STE_ACTION_TRIPLE_SZ;
allow_encap = true;
}
- dr_ste_v1_set_push_vlan(last_ste, action,
- attr->vlans.headers[i]);
+ ste_ctx->set_push_vlan(last_ste, action,
+ attr->vlans.headers[i]);
action_sz -= DR_STE_ACTION_DOUBLE_SZ;
action += DR_STE_ACTION_DOUBLE_SZ;
}
@@ -691,9 +557,9 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
action_sz = DR_STE_ACTION_TRIPLE_SZ;
allow_encap = true;
}
- dr_ste_v1_set_encap(last_ste, action,
- attr->reformat.id,
- attr->reformat.size);
+ ste_ctx->set_encap(last_ste, action,
+ attr->reformat.id,
+ attr->reformat.size);
action_sz -= DR_STE_ACTION_DOUBLE_SZ;
action += DR_STE_ACTION_DOUBLE_SZ;
} else if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3]) {
@@ -706,10 +572,10 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
}
d_action = action + DR_STE_ACTION_SINGLE_SZ;
- dr_ste_v1_set_encap_l3(last_ste,
- action, d_action,
- attr->reformat.id,
- attr->reformat.size);
+ ste_ctx->set_encap_l3(last_ste,
+ action, d_action,
+ attr->reformat.id,
+ attr->reformat.size);
action_sz -= DR_STE_ACTION_TRIPLE_SZ;
action += DR_STE_ACTION_TRIPLE_SZ;
} else if (action_type_set[DR_ACTION_TYP_INSERT_HDR]) {
@@ -718,11 +584,11 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
action_sz = DR_STE_ACTION_TRIPLE_SZ;
}
- dr_ste_v1_set_insert_hdr(last_ste, action,
- attr->reformat.id,
- attr->reformat.param_0,
- attr->reformat.param_1,
- attr->reformat.size);
+ ste_ctx->set_insert_hdr(last_ste, action,
+ attr->reformat.id,
+ attr->reformat.param_0,
+ attr->reformat.param_1,
+ attr->reformat.size);
action_sz -= DR_STE_ACTION_DOUBLE_SZ;
action += DR_STE_ACTION_DOUBLE_SZ;
} else if (action_type_set[DR_ACTION_TYP_REMOVE_HDR]) {
@@ -731,10 +597,10 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
action_sz = DR_STE_ACTION_TRIPLE_SZ;
}
- dr_ste_v1_set_remove_hdr(last_ste, action,
- attr->reformat.param_0,
- attr->reformat.param_1,
- attr->reformat.size);
+ ste_ctx->set_remove_hdr(last_ste, action,
+ attr->reformat.param_0,
+ attr->reformat.param_1,
+ attr->reformat.size);
action_sz -= DR_STE_ACTION_SINGLE_SZ;
action += DR_STE_ACTION_SINGLE_SZ;
}
@@ -776,7 +642,8 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
dr_ste_v1_set_hit_addr(last_ste, attr->final_icm_addr, 1);
}
-void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
+void dr_ste_v1_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_domain *dmn,
u8 *action_type_set,
u32 actions_caps,
u8 *last_ste,
@@ -799,7 +666,7 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
allow_modify_hdr = false;
allow_ctr = false;
} else if (action_type_set[DR_ACTION_TYP_TNL_L2_TO_L2]) {
- dr_ste_v1_set_rx_decap(last_ste, action);
+ ste_ctx->set_rx_decap(last_ste, action);
action_sz -= DR_STE_ACTION_SINGLE_SZ;
action += DR_STE_ACTION_SINGLE_SZ;
allow_modify_hdr = false;
@@ -827,7 +694,7 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
action_sz = DR_STE_ACTION_TRIPLE_SZ;
}
- dr_ste_v1_set_pop_vlan(last_ste, action, attr->vlans.count);
+ ste_ctx->set_pop_vlan(last_ste, action, attr->vlans.count);
action_sz -= DR_STE_ACTION_SINGLE_SZ;
action += DR_STE_ACTION_SINGLE_SZ;
allow_ctr = false;
@@ -868,8 +735,8 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
last_ste, action);
action_sz = DR_STE_ACTION_TRIPLE_SZ;
}
- dr_ste_v1_set_push_vlan(last_ste, action,
- attr->vlans.headers[i]);
+ ste_ctx->set_push_vlan(last_ste, action,
+ attr->vlans.headers[i]);
action_sz -= DR_STE_ACTION_DOUBLE_SZ;
action += DR_STE_ACTION_DOUBLE_SZ;
}
@@ -895,9 +762,9 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
action_sz = DR_STE_ACTION_TRIPLE_SZ;
}
- dr_ste_v1_set_encap(last_ste, action,
- attr->reformat.id,
- attr->reformat.size);
+ ste_ctx->set_encap(last_ste, action,
+ attr->reformat.id,
+ attr->reformat.size);
action_sz -= DR_STE_ACTION_DOUBLE_SZ;
action += DR_STE_ACTION_DOUBLE_SZ;
allow_modify_hdr = false;
@@ -912,10 +779,10 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
d_action = action + DR_STE_ACTION_SINGLE_SZ;
- dr_ste_v1_set_encap_l3(last_ste,
- action, d_action,
- attr->reformat.id,
- attr->reformat.size);
+ ste_ctx->set_encap_l3(last_ste,
+ action, d_action,
+ attr->reformat.id,
+ attr->reformat.size);
action_sz -= DR_STE_ACTION_TRIPLE_SZ;
allow_modify_hdr = false;
} else if (action_type_set[DR_ACTION_TYP_INSERT_HDR]) {
@@ -925,11 +792,11 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
action_sz = DR_STE_ACTION_TRIPLE_SZ;
}
- dr_ste_v1_set_insert_hdr(last_ste, action,
- attr->reformat.id,
- attr->reformat.param_0,
- attr->reformat.param_1,
- attr->reformat.size);
+ ste_ctx->set_insert_hdr(last_ste, action,
+ attr->reformat.id,
+ attr->reformat.param_0,
+ attr->reformat.param_1,
+ attr->reformat.size);
action_sz -= DR_STE_ACTION_DOUBLE_SZ;
action += DR_STE_ACTION_DOUBLE_SZ;
allow_modify_hdr = false;
@@ -941,10 +808,10 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
allow_modify_hdr = true;
allow_ctr = true;
}
- dr_ste_v1_set_remove_hdr(last_ste, action,
- attr->reformat.param_0,
- attr->reformat.param_1,
- attr->reformat.size);
+ ste_ctx->set_remove_hdr(last_ste, action,
+ attr->reformat.param_0,
+ attr->reformat.param_1,
+ attr->reformat.size);
action_sz -= DR_STE_ACTION_SINGLE_SZ;
action += DR_STE_ACTION_SINGLE_SZ;
}
@@ -1027,9 +894,6 @@ void dr_ste_v1_set_action_copy(u8 *d_action,
MLX5_SET(ste_double_action_copy_v1, d_action, source_right_shifter, src_shifter);
}
-#define DR_STE_DECAP_L3_ACTION_NUM 8
-#define DR_STE_L2_HDR_MAX_SZ 20
-
int dr_ste_v1_set_action_decap_l3_list(void *data,
u32 data_sz,
u8 *hw_action,
@@ -2330,7 +2194,14 @@ static struct mlx5dr_ste_ctx ste_ctx_v1 = {
.set_action_decap_l3_list = &dr_ste_v1_set_action_decap_l3_list,
.alloc_modify_hdr_chunk = &dr_ste_v1_alloc_modify_hdr_ptrn_arg,
.dealloc_modify_hdr_chunk = &dr_ste_v1_free_modify_hdr_ptrn_arg,
-
+ /* Actions bit set */
+ .set_encap = &dr_ste_v1_set_encap,
+ .set_push_vlan = &dr_ste_v1_set_push_vlan,
+ .set_pop_vlan = &dr_ste_v1_set_pop_vlan,
+ .set_rx_decap = &dr_ste_v1_set_rx_decap,
+ .set_encap_l3 = &dr_ste_v1_set_encap_l3,
+ .set_insert_hdr = &dr_ste_v1_set_insert_hdr,
+ .set_remove_hdr = &dr_ste_v1_set_remove_hdr,
/* Send */
.prepare_for_postsend = &dr_ste_v1_prepare_for_postsend,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.h
index e2fc69867088..591c20c95a6a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.h
@@ -7,6 +7,138 @@
#include "dr_types.h"
#include "dr_ste.h"
+#define DR_STE_DECAP_L3_ACTION_NUM 8
+#define DR_STE_L2_HDR_MAX_SZ 20
+#define DR_STE_CALC_DFNR_TYPE(lookup_type, inner) \
+ ((inner) ? DR_STE_V1_LU_TYPE_##lookup_type##_I : \
+ DR_STE_V1_LU_TYPE_##lookup_type##_O)
+
+enum dr_ste_v1_entry_format {
+ DR_STE_V1_TYPE_BWC_BYTE = 0x0,
+ DR_STE_V1_TYPE_BWC_DW = 0x1,
+ DR_STE_V1_TYPE_MATCH = 0x2,
+ DR_STE_V1_TYPE_MATCH_RANGES = 0x7,
+};
+
+/* Lookup type is built from 2B: [ Definer mode 1B ][ Definer index 1B ] */
+enum {
+ DR_STE_V1_LU_TYPE_NOP = 0x0000,
+ DR_STE_V1_LU_TYPE_ETHL2_TNL = 0x0002,
+ DR_STE_V1_LU_TYPE_IBL3_EXT = 0x0102,
+ DR_STE_V1_LU_TYPE_ETHL2_O = 0x0003,
+ DR_STE_V1_LU_TYPE_IBL4 = 0x0103,
+ DR_STE_V1_LU_TYPE_ETHL2_I = 0x0004,
+ DR_STE_V1_LU_TYPE_SRC_QP_GVMI = 0x0104,
+ DR_STE_V1_LU_TYPE_ETHL2_SRC_O = 0x0005,
+ DR_STE_V1_LU_TYPE_ETHL2_HEADERS_O = 0x0105,
+ DR_STE_V1_LU_TYPE_ETHL2_SRC_I = 0x0006,
+ DR_STE_V1_LU_TYPE_ETHL2_HEADERS_I = 0x0106,
+ DR_STE_V1_LU_TYPE_ETHL3_IPV4_5_TUPLE_O = 0x0007,
+ DR_STE_V1_LU_TYPE_IPV6_DES_O = 0x0107,
+ DR_STE_V1_LU_TYPE_ETHL3_IPV4_5_TUPLE_I = 0x0008,
+ DR_STE_V1_LU_TYPE_IPV6_DES_I = 0x0108,
+ DR_STE_V1_LU_TYPE_ETHL4_O = 0x0009,
+ DR_STE_V1_LU_TYPE_IPV6_SRC_O = 0x0109,
+ DR_STE_V1_LU_TYPE_ETHL4_I = 0x000a,
+ DR_STE_V1_LU_TYPE_IPV6_SRC_I = 0x010a,
+ DR_STE_V1_LU_TYPE_ETHL2_SRC_DST_O = 0x000b,
+ DR_STE_V1_LU_TYPE_MPLS_O = 0x010b,
+ DR_STE_V1_LU_TYPE_ETHL2_SRC_DST_I = 0x000c,
+ DR_STE_V1_LU_TYPE_MPLS_I = 0x010c,
+ DR_STE_V1_LU_TYPE_ETHL3_IPV4_MISC_O = 0x000d,
+ DR_STE_V1_LU_TYPE_GRE = 0x010d,
+ DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER = 0x000e,
+ DR_STE_V1_LU_TYPE_GENERAL_PURPOSE = 0x010e,
+ DR_STE_V1_LU_TYPE_ETHL3_IPV4_MISC_I = 0x000f,
+ DR_STE_V1_LU_TYPE_STEERING_REGISTERS_0 = 0x010f,
+ DR_STE_V1_LU_TYPE_STEERING_REGISTERS_1 = 0x0110,
+ DR_STE_V1_LU_TYPE_FLEX_PARSER_OK = 0x0011,
+ DR_STE_V1_LU_TYPE_FLEX_PARSER_0 = 0x0111,
+ DR_STE_V1_LU_TYPE_FLEX_PARSER_1 = 0x0112,
+ DR_STE_V1_LU_TYPE_ETHL4_MISC_O = 0x0113,
+ DR_STE_V1_LU_TYPE_ETHL4_MISC_I = 0x0114,
+ DR_STE_V1_LU_TYPE_INVALID = 0x00ff,
+ DR_STE_V1_LU_TYPE_DONT_CARE = MLX5DR_STE_LU_TYPE_DONT_CARE,
+};
+
+enum dr_ste_v1_header_anchors {
+ DR_STE_HEADER_ANCHOR_START_OUTER = 0x00,
+ DR_STE_HEADER_ANCHOR_1ST_VLAN = 0x02,
+ DR_STE_HEADER_ANCHOR_IPV6_IPV4 = 0x07,
+ DR_STE_HEADER_ANCHOR_INNER_MAC = 0x13,
+ DR_STE_HEADER_ANCHOR_INNER_IPV6_IPV4 = 0x19,
+};
+
+enum dr_ste_v1_action_size {
+ DR_STE_ACTION_SINGLE_SZ = 4,
+ DR_STE_ACTION_DOUBLE_SZ = 8,
+ DR_STE_ACTION_TRIPLE_SZ = 12,
+};
+
+enum dr_ste_v1_action_insert_ptr_attr {
+ DR_STE_V1_ACTION_INSERT_PTR_ATTR_NONE = 0, /* Regular push header (e.g. push vlan) */
+ DR_STE_V1_ACTION_INSERT_PTR_ATTR_ENCAP = 1, /* Encapsulation / Tunneling */
+ DR_STE_V1_ACTION_INSERT_PTR_ATTR_ESP = 2, /* IPsec */
+};
+
+enum dr_ste_v1_action_id {
+ DR_STE_V1_ACTION_ID_NOP = 0x00,
+ DR_STE_V1_ACTION_ID_COPY = 0x05,
+ DR_STE_V1_ACTION_ID_SET = 0x06,
+ DR_STE_V1_ACTION_ID_ADD = 0x07,
+ DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE = 0x08,
+ DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER = 0x09,
+ DR_STE_V1_ACTION_ID_INSERT_INLINE = 0x0a,
+ DR_STE_V1_ACTION_ID_INSERT_POINTER = 0x0b,
+ DR_STE_V1_ACTION_ID_FLOW_TAG = 0x0c,
+ DR_STE_V1_ACTION_ID_QUEUE_ID_SEL = 0x0d,
+ DR_STE_V1_ACTION_ID_ACCELERATED_LIST = 0x0e,
+ DR_STE_V1_ACTION_ID_MODIFY_LIST = 0x0f,
+ DR_STE_V1_ACTION_ID_ASO = 0x12,
+ DR_STE_V1_ACTION_ID_TRAILER = 0x13,
+ DR_STE_V1_ACTION_ID_COUNTER_ID = 0x14,
+ DR_STE_V1_ACTION_ID_MAX = 0x21,
+ /* use for special cases */
+ DR_STE_V1_ACTION_ID_SPECIAL_ENCAP_L3 = 0x22,
+};
+
+enum {
+ DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_0 = 0x00,
+ DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_1 = 0x01,
+ DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_2 = 0x02,
+ DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_0 = 0x08,
+ DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_1 = 0x09,
+ DR_STE_V1_ACTION_MDFY_FLD_L3_OUT_0 = 0x0e,
+ DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0 = 0x18,
+ DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_1 = 0x19,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_0 = 0x40,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_1 = 0x41,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_0 = 0x44,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_1 = 0x45,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_2 = 0x46,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_3 = 0x47,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_0 = 0x4c,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_1 = 0x4d,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_2 = 0x4e,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_3 = 0x4f,
+ DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_0 = 0x5e,
+ DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_1 = 0x5f,
+ DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_0 = 0x6f,
+ DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_1 = 0x70,
+ DR_STE_V1_ACTION_MDFY_FLD_METADATA_2_CQE = 0x7b,
+ DR_STE_V1_ACTION_MDFY_FLD_GNRL_PURPOSE = 0x7c,
+ DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_0 = 0x8c,
+ DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_1 = 0x8d,
+ DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_0 = 0x8e,
+ DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_1 = 0x8f,
+ DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_0 = 0x90,
+ DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_1 = 0x91,
+};
+
+enum dr_ste_v1_aso_ctx_type {
+ DR_STE_V1_ASO_CTX_TYPE_POLICERS = 0x2,
+};
+
bool dr_ste_v1_is_miss_addr_set(u8 *hw_ste_p);
void dr_ste_v1_set_miss_addr(u8 *hw_ste_p, u64 miss_addr);
u64 dr_ste_v1_get_miss_addr(u8 *hw_ste_p);
@@ -17,11 +149,22 @@ u16 dr_ste_v1_get_next_lu_type(u8 *hw_ste_p);
void dr_ste_v1_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size);
void dr_ste_v1_init(u8 *hw_ste_p, u16 lu_type, bool is_rx, u16 gvmi);
void dr_ste_v1_prepare_for_postsend(u8 *hw_ste_p, u32 ste_size);
-void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn, u8 *action_type_set,
- u32 actions_caps, u8 *last_ste,
+void dr_ste_v1_set_reparse(u8 *hw_ste_p);
+void dr_ste_v1_set_encap(u8 *hw_ste_p, u8 *d_action, u32 reformat_id, int size);
+void dr_ste_v1_set_push_vlan(u8 *hw_ste_p, u8 *d_action, u32 vlan_hdr);
+void dr_ste_v1_set_pop_vlan(u8 *hw_ste_p, u8 *s_action, u8 vlans_num);
+void dr_ste_v1_set_encap_l3(u8 *hw_ste_p, u8 *frst_s_action, u8 *scnd_d_action,
+ u32 reformat_id, int size);
+void dr_ste_v1_set_rx_decap(u8 *hw_ste_p, u8 *s_action);
+void dr_ste_v1_set_insert_hdr(u8 *hw_ste_p, u8 *d_action, u32 reformat_id,
+ u8 anchor, u8 offset, int size);
+void dr_ste_v1_set_remove_hdr(u8 *hw_ste_p, u8 *s_action, u8 anchor,
+ u8 offset, int size);
+void dr_ste_v1_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx, struct mlx5dr_domain *dmn,
+ u8 *action_type_set, u32 actions_caps, u8 *last_ste,
struct mlx5dr_ste_actions_attr *attr, u32 *added_stes);
-void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn, u8 *action_type_set,
- u32 actions_caps, u8 *last_ste,
+void dr_ste_v1_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx, struct mlx5dr_domain *dmn,
+ u8 *action_type_set, u32 actions_caps, u8 *last_ste,
struct mlx5dr_ste_actions_attr *attr, u32 *added_stes);
void dr_ste_v1_set_action_set(u8 *d_action, u8 hw_field, u8 shifter,
u8 length, u32 data);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.c
index 808b013cf48c..d0ebaf820d42 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.c
@@ -2,167 +2,7 @@
/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
#include "dr_ste_v1.h"
-
-enum {
- DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_0 = 0x00,
- DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_1 = 0x01,
- DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_2 = 0x02,
- DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_0 = 0x08,
- DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_1 = 0x09,
- DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0 = 0x0e,
- DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0 = 0x18,
- DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_1 = 0x19,
- DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_0 = 0x40,
- DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_1 = 0x41,
- DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_0 = 0x44,
- DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_1 = 0x45,
- DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_2 = 0x46,
- DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_3 = 0x47,
- DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_0 = 0x4c,
- DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_1 = 0x4d,
- DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_2 = 0x4e,
- DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_3 = 0x4f,
- DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_0 = 0x5e,
- DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_1 = 0x5f,
- DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_0 = 0x6f,
- DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_1 = 0x70,
- DR_STE_V2_ACTION_MDFY_FLD_METADATA_2_CQE = 0x7b,
- DR_STE_V2_ACTION_MDFY_FLD_GNRL_PURPOSE = 0x7c,
- DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_0 = 0x90,
- DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_1 = 0x91,
- DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_0 = 0x92,
- DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_1 = 0x93,
- DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_0 = 0x94,
- DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_1 = 0x95,
-};
-
-static const struct mlx5dr_ste_action_modify_field dr_ste_v2_action_modify_field_arr[] = {
- [MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_0, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_1, .start = 16, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_1, .start = 0, .end = 15,
- },
- [MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_0, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_1, .start = 16, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_OUT_IP_DSCP] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0, .start = 18, .end = 23,
- },
- [MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_1, .start = 16, .end = 24,
- .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
- },
- [MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31,
- .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
- },
- [MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15,
- .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
- },
- [MLX5_ACTION_IN_FIELD_OUT_IP_TTL] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15,
- .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
- },
- [MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15,
- .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31,
- .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
- },
- [MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15,
- .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
- },
- [MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_0, .start = 0, .end = 31,
- .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_1, .start = 0, .end = 31,
- .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_2, .start = 0, .end = 31,
- .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_3, .start = 0, .end = 31,
- .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_0, .start = 0, .end = 31,
- .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_1, .start = 0, .end = 31,
- .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_2, .start = 0, .end = 31,
- .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_3, .start = 0, .end = 31,
- .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_SIPV4] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_0, .start = 0, .end = 31,
- .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
- },
- [MLX5_ACTION_IN_FIELD_OUT_DIPV4] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_1, .start = 0, .end = 31,
- .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
- },
- [MLX5_ACTION_IN_FIELD_METADATA_REG_A] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_GNRL_PURPOSE, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_METADATA_REG_B] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_METADATA_2_CQE, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_METADATA_REG_C_0] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_0, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_METADATA_REG_C_1] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_1, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_METADATA_REG_C_2] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_0, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_METADATA_REG_C_3] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_1, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_METADATA_REG_C_4] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_0, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_METADATA_REG_C_5] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_1, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_0, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_1, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_OUT_FIRST_VID] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_2, .start = 0, .end = 15,
- },
- [MLX5_ACTION_IN_FIELD_OUT_EMD_31_0] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_1, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_OUT_EMD_47_32] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_0, .start = 0, .end = 15,
- },
-};
+#include "dr_ste_v2.h"
static struct mlx5dr_ste_ctx ste_ctx_v2 = {
/* Builders */
@@ -223,7 +63,14 @@ static struct mlx5dr_ste_ctx ste_ctx_v2 = {
.set_action_decap_l3_list = &dr_ste_v1_set_action_decap_l3_list,
.alloc_modify_hdr_chunk = &dr_ste_v1_alloc_modify_hdr_ptrn_arg,
.dealloc_modify_hdr_chunk = &dr_ste_v1_free_modify_hdr_ptrn_arg,
-
+ /* Actions bit set */
+ .set_encap = &dr_ste_v1_set_encap,
+ .set_push_vlan = &dr_ste_v1_set_push_vlan,
+ .set_pop_vlan = &dr_ste_v1_set_pop_vlan,
+ .set_rx_decap = &dr_ste_v1_set_rx_decap,
+ .set_encap_l3 = &dr_ste_v1_set_encap_l3,
+ .set_insert_hdr = &dr_ste_v1_set_insert_hdr,
+ .set_remove_hdr = &dr_ste_v1_set_remove_hdr,
/* Send */
.prepare_for_postsend = &dr_ste_v1_prepare_for_postsend,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.h
new file mode 100644
index 000000000000..d853fde49cfc
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.h
@@ -0,0 +1,168 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef _DR_STE_V2_
+#define _DR_STE_V2_
+
+enum {
+ DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_0 = 0x00,
+ DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_1 = 0x01,
+ DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_2 = 0x02,
+ DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_0 = 0x08,
+ DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_1 = 0x09,
+ DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0 = 0x0e,
+ DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0 = 0x18,
+ DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_1 = 0x19,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_0 = 0x40,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_1 = 0x41,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_0 = 0x44,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_1 = 0x45,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_2 = 0x46,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_3 = 0x47,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_0 = 0x4c,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_1 = 0x4d,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_2 = 0x4e,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_3 = 0x4f,
+ DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_0 = 0x5e,
+ DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_1 = 0x5f,
+ DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_0 = 0x6f,
+ DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_1 = 0x70,
+ DR_STE_V2_ACTION_MDFY_FLD_METADATA_2_CQE = 0x7b,
+ DR_STE_V2_ACTION_MDFY_FLD_GNRL_PURPOSE = 0x7c,
+ DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_0 = 0x90,
+ DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_1 = 0x91,
+ DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_0 = 0x92,
+ DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_1 = 0x93,
+ DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_0 = 0x94,
+ DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_1 = 0x95,
+};
+
+static const struct mlx5dr_ste_action_modify_field dr_ste_v2_action_modify_field_arr[] = {
+ [MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_0, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_1, .start = 16, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_1, .start = 0, .end = 15,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_0, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_1, .start = 16, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_IP_DSCP] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0, .start = 18, .end = 23,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_1, .start = 16, .end = 24,
+ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31,
+ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15,
+ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_IP_TTL] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31,
+ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15,
+ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_0, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_1, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_2, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_3, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_0, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_1, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_2, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_3, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV4] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_0, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV4] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_1, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_A] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_GNRL_PURPOSE, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_B] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_METADATA_2_CQE, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_0] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_0, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_1] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_1, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_2] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_0, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_3] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_1, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_4] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_0, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_5] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_1, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_0, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_1, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_FIRST_VID] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_2, .start = 0, .end = 15,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_EMD_31_0] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_1, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_EMD_47_32] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_0, .start = 0, .end = 15,
+ },
+};
+
+#endif /* _DR_STE_V2_ */
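For orientation: dr_ste_v2_action_modify_field_arr is a sparse, designated-initializer table indexed by MLX5_ACTION_IN_FIELD_* values; each populated slot names a hardware field plus the bit range to rewrite, and holes stay zero-initialized, i.e. unsupported. Moving it into this header lets the v3 STE context below reuse it unchanged. A self-contained sketch of how such a table is typically probed (the types, values and helper are illustrative, not taken from the driver):

#include <stdio.h>

struct fld_desc {
	unsigned int hw_field;
	unsigned int start, end;	/* bit range inside hw_field */
};

enum { FLD_SMAC_47_16 = 1, FLD_TCP_DPORT = 7, FLD_MAX };

/* Sparse table: indices that are not listed stay zero-initialized. */
static const struct fld_desc fld_arr[FLD_MAX] = {
	[FLD_SMAC_47_16] = { .hw_field = 0x08, .start = 0, .end = 31 },
	[FLD_TCP_DPORT]  = { .hw_field = 0x18, .start = 0, .end = 15 },
};

static const struct fld_desc *fld_lookup(unsigned int sw_field)
{
	if (sw_field >= FLD_MAX || !fld_arr[sw_field].end)
		return NULL;	/* hole in the table: field not supported */
	return &fld_arr[sw_field];
}

int main(void)
{
	const struct fld_desc *d = fld_lookup(FLD_TCP_DPORT);

	if (d)
		printf("hw_field 0x%x, bits %u..%u\n", d->hw_field, d->start, d->end);
	return 0;
}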
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v3.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v3.c
new file mode 100644
index 000000000000..e468a9ae44e8
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v3.c
@@ -0,0 +1,263 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include "dr_ste_v1.h"
+#include "dr_ste_v2.h"
+
+static void dr_ste_v3_set_encap(u8 *hw_ste_p, u8 *d_action,
+ u32 reformat_id, int size)
+{
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action, action_id,
+ DR_STE_V1_ACTION_ID_INSERT_POINTER);
+	/* The hardware expects the size here in words (2 bytes) */
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action, size, size / 2);
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action, pointer, reformat_id);
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action, attributes,
+ DR_STE_V1_ACTION_INSERT_PTR_ATTR_ENCAP);
+ dr_ste_v1_set_reparse(hw_ste_p);
+}
+
+static void dr_ste_v3_set_push_vlan(u8 *ste, u8 *d_action,
+ u32 vlan_hdr)
+{
+ MLX5_SET(ste_double_action_insert_with_inline_v3, d_action, action_id,
+ DR_STE_V1_ACTION_ID_INSERT_INLINE);
+	/* The hardware expects the offset to the vlan header here in words (2 bytes) */
+ MLX5_SET(ste_double_action_insert_with_inline_v3, d_action, start_offset,
+ HDR_LEN_L2_MACS >> 1);
+ MLX5_SET(ste_double_action_insert_with_inline_v3, d_action, inline_data, vlan_hdr);
+ dr_ste_v1_set_reparse(ste);
+}
+
+static void dr_ste_v3_set_pop_vlan(u8 *hw_ste_p, u8 *s_action,
+ u8 vlans_num)
+{
+ MLX5_SET(ste_single_action_remove_header_size_v3, s_action,
+ action_id, DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
+ MLX5_SET(ste_single_action_remove_header_size_v3, s_action,
+ start_anchor, DR_STE_HEADER_ANCHOR_1ST_VLAN);
+	/* The hardware expects the size here in words (2 bytes) */
+ MLX5_SET(ste_single_action_remove_header_size_v3, s_action,
+ remove_size, (HDR_LEN_L2_VLAN >> 1) * vlans_num);
+
+ dr_ste_v1_set_reparse(hw_ste_p);
+}
+
+static void dr_ste_v3_set_encap_l3(u8 *hw_ste_p,
+ u8 *frst_s_action,
+ u8 *scnd_d_action,
+ u32 reformat_id,
+ int size)
+{
+ /* Remove L2 headers */
+ MLX5_SET(ste_single_action_remove_header_v3, frst_s_action, action_id,
+ DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER);
+ MLX5_SET(ste_single_action_remove_header_v3, frst_s_action, end_anchor,
+ DR_STE_HEADER_ANCHOR_IPV6_IPV4);
+
+ /* Encapsulate with given reformat ID */
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, scnd_d_action, action_id,
+ DR_STE_V1_ACTION_ID_INSERT_POINTER);
+	/* The hardware expects the size here in words (2 bytes) */
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, scnd_d_action, size, size / 2);
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, scnd_d_action, pointer, reformat_id);
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, scnd_d_action, attributes,
+ DR_STE_V1_ACTION_INSERT_PTR_ATTR_ENCAP);
+
+ dr_ste_v1_set_reparse(hw_ste_p);
+}
+
+static void dr_ste_v3_set_rx_decap(u8 *hw_ste_p, u8 *s_action)
+{
+ MLX5_SET(ste_single_action_remove_header_v3, s_action, action_id,
+ DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER);
+ MLX5_SET(ste_single_action_remove_header_v3, s_action, decap, 1);
+ MLX5_SET(ste_single_action_remove_header_v3, s_action, vni_to_cqe, 1);
+ MLX5_SET(ste_single_action_remove_header_v3, s_action, end_anchor,
+ DR_STE_HEADER_ANCHOR_INNER_MAC);
+
+ dr_ste_v1_set_reparse(hw_ste_p);
+}
+
+static void dr_ste_v3_set_insert_hdr(u8 *hw_ste_p, u8 *d_action,
+ u32 reformat_id, u8 anchor,
+ u8 offset, int size)
+{
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action,
+ action_id, DR_STE_V1_ACTION_ID_INSERT_POINTER);
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action,
+ start_anchor, anchor);
+
+	/* The hardware expects the size and offset here in words (2 bytes) */
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action,
+ size, size / 2);
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action,
+ start_offset, offset / 2);
+
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action,
+ pointer, reformat_id);
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action,
+ attributes, DR_STE_V1_ACTION_INSERT_PTR_ATTR_NONE);
+
+ dr_ste_v1_set_reparse(hw_ste_p);
+}
+
+static void dr_ste_v3_set_remove_hdr(u8 *hw_ste_p, u8 *s_action,
+ u8 anchor, u8 offset, int size)
+{
+ MLX5_SET(ste_single_action_remove_header_size_v3, s_action,
+ action_id, DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
+ MLX5_SET(ste_single_action_remove_header_size_v3, s_action,
+ start_anchor, anchor);
+
+	/* The hardware expects the size and offset here in words (2 bytes) */
+ MLX5_SET(ste_single_action_remove_header_size_v3, s_action,
+ remove_size, size / 2);
+ MLX5_SET(ste_single_action_remove_header_size_v3, s_action,
+ start_offset, offset / 2);
+
+ dr_ste_v1_set_reparse(hw_ste_p);
+}
+
+static int
+dr_ste_v3_set_action_decap_l3_list(void *data, u32 data_sz,
+ u8 *hw_action, u32 hw_action_sz,
+ uint16_t *used_hw_action_num)
+{
+ u8 padded_data[DR_STE_L2_HDR_MAX_SZ] = {};
+ void *data_ptr = padded_data;
+ u16 used_actions = 0;
+ u32 inline_data_sz;
+ u32 i;
+
+ if (hw_action_sz / DR_STE_ACTION_DOUBLE_SZ < DR_STE_DECAP_L3_ACTION_NUM)
+ return -EINVAL;
+
+ inline_data_sz =
+ MLX5_FLD_SZ_BYTES(ste_double_action_insert_with_inline_v3, inline_data);
+
+	/* Add alignment padding */
+ memcpy(padded_data + data_sz % inline_data_sz, data, data_sz);
+
+ /* Remove L2L3 outer headers */
+ MLX5_SET(ste_single_action_remove_header_v3, hw_action, action_id,
+ DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER);
+ MLX5_SET(ste_single_action_remove_header_v3, hw_action, decap, 1);
+ MLX5_SET(ste_single_action_remove_header_v3, hw_action, vni_to_cqe, 1);
+ MLX5_SET(ste_single_action_remove_header_v3, hw_action, end_anchor,
+ DR_STE_HEADER_ANCHOR_INNER_IPV6_IPV4);
+ hw_action += DR_STE_ACTION_DOUBLE_SZ;
+ used_actions++; /* Remove and NOP are a single double action */
+
+ /* Point to the last dword of the header */
+ data_ptr += (data_sz / inline_data_sz) * inline_data_sz;
+
+	/* Add the new header using the inline action, 4 bytes at a time. The
+	 * header is added in reverse order at the beginning of the packet to
+	 * avoid incorrect parsing by the HW. Since the header is 14B or 18B,
+	 * two extra bytes are padded and later removed.
+	 */
+ for (i = 0; i < data_sz / inline_data_sz + 1; i++) {
+ void *addr_inline;
+
+ MLX5_SET(ste_double_action_insert_with_inline_v3, hw_action, action_id,
+ DR_STE_V1_ACTION_ID_INSERT_INLINE);
+		/* The hardware expects the offset here in words (2 bytes) */
+ MLX5_SET(ste_double_action_insert_with_inline_v3, hw_action, start_offset, 0);
+
+		/* Copy bytes one by one to avoid an endianness problem */
+ addr_inline = MLX5_ADDR_OF(ste_double_action_insert_with_inline_v3,
+ hw_action, inline_data);
+ memcpy(addr_inline, data_ptr - i * inline_data_sz, inline_data_sz);
+ hw_action += DR_STE_ACTION_DOUBLE_SZ;
+ used_actions++;
+ }
+
+ /* Remove first 2 extra bytes */
+ MLX5_SET(ste_single_action_remove_header_size_v3, hw_action, action_id,
+ DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
+ MLX5_SET(ste_single_action_remove_header_size_v3, hw_action, start_offset, 0);
+	/* The hardware expects the size here in words (2 bytes) */
+ MLX5_SET(ste_single_action_remove_header_size_v3, hw_action, remove_size, 1);
+ used_actions++;
+
+ *used_hw_action_num = used_actions;
+
+ return 0;
+}
+
+static struct mlx5dr_ste_ctx ste_ctx_v3 = {
+ /* Builders */
+ .build_eth_l2_src_dst_init = &dr_ste_v1_build_eth_l2_src_dst_init,
+ .build_eth_l3_ipv6_src_init = &dr_ste_v1_build_eth_l3_ipv6_src_init,
+ .build_eth_l3_ipv6_dst_init = &dr_ste_v1_build_eth_l3_ipv6_dst_init,
+ .build_eth_l3_ipv4_5_tuple_init = &dr_ste_v1_build_eth_l3_ipv4_5_tuple_init,
+ .build_eth_l2_src_init = &dr_ste_v1_build_eth_l2_src_init,
+ .build_eth_l2_dst_init = &dr_ste_v1_build_eth_l2_dst_init,
+ .build_eth_l2_tnl_init = &dr_ste_v1_build_eth_l2_tnl_init,
+ .build_eth_l3_ipv4_misc_init = &dr_ste_v1_build_eth_l3_ipv4_misc_init,
+ .build_eth_ipv6_l3_l4_init = &dr_ste_v1_build_eth_ipv6_l3_l4_init,
+ .build_mpls_init = &dr_ste_v1_build_mpls_init,
+ .build_tnl_gre_init = &dr_ste_v1_build_tnl_gre_init,
+ .build_tnl_mpls_init = &dr_ste_v1_build_tnl_mpls_init,
+ .build_tnl_mpls_over_udp_init = &dr_ste_v1_build_tnl_mpls_over_udp_init,
+ .build_tnl_mpls_over_gre_init = &dr_ste_v1_build_tnl_mpls_over_gre_init,
+ .build_icmp_init = &dr_ste_v1_build_icmp_init,
+ .build_general_purpose_init = &dr_ste_v1_build_general_purpose_init,
+ .build_eth_l4_misc_init = &dr_ste_v1_build_eth_l4_misc_init,
+ .build_tnl_vxlan_gpe_init = &dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init,
+ .build_tnl_geneve_init = &dr_ste_v1_build_flex_parser_tnl_geneve_init,
+ .build_tnl_geneve_tlv_opt_init = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init,
+ .build_tnl_geneve_tlv_opt_exist_init =
+ &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_init,
+ .build_register_0_init = &dr_ste_v1_build_register_0_init,
+ .build_register_1_init = &dr_ste_v1_build_register_1_init,
+ .build_src_gvmi_qpn_init = &dr_ste_v1_build_src_gvmi_qpn_init,
+ .build_flex_parser_0_init = &dr_ste_v1_build_flex_parser_0_init,
+ .build_flex_parser_1_init = &dr_ste_v1_build_flex_parser_1_init,
+ .build_tnl_gtpu_init = &dr_ste_v1_build_flex_parser_tnl_gtpu_init,
+ .build_tnl_header_0_1_init = &dr_ste_v1_build_tnl_header_0_1_init,
+ .build_tnl_gtpu_flex_parser_0_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_0_init,
+ .build_tnl_gtpu_flex_parser_1_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_1_init,
+
+ /* Getters and Setters */
+ .ste_init = &dr_ste_v1_init,
+ .set_next_lu_type = &dr_ste_v1_set_next_lu_type,
+ .get_next_lu_type = &dr_ste_v1_get_next_lu_type,
+ .is_miss_addr_set = &dr_ste_v1_is_miss_addr_set,
+ .set_miss_addr = &dr_ste_v1_set_miss_addr,
+ .get_miss_addr = &dr_ste_v1_get_miss_addr,
+ .set_hit_addr = &dr_ste_v1_set_hit_addr,
+ .set_byte_mask = &dr_ste_v1_set_byte_mask,
+ .get_byte_mask = &dr_ste_v1_get_byte_mask,
+
+ /* Actions */
+ .actions_caps = DR_STE_CTX_ACTION_CAP_TX_POP |
+ DR_STE_CTX_ACTION_CAP_RX_PUSH |
+ DR_STE_CTX_ACTION_CAP_RX_ENCAP,
+ .set_actions_rx = &dr_ste_v1_set_actions_rx,
+ .set_actions_tx = &dr_ste_v1_set_actions_tx,
+ .modify_field_arr_sz = ARRAY_SIZE(dr_ste_v2_action_modify_field_arr),
+ .modify_field_arr = dr_ste_v2_action_modify_field_arr,
+ .set_action_set = &dr_ste_v1_set_action_set,
+ .set_action_add = &dr_ste_v1_set_action_add,
+ .set_action_copy = &dr_ste_v1_set_action_copy,
+ .set_action_decap_l3_list = &dr_ste_v3_set_action_decap_l3_list,
+ .alloc_modify_hdr_chunk = &dr_ste_v1_alloc_modify_hdr_ptrn_arg,
+ .dealloc_modify_hdr_chunk = &dr_ste_v1_free_modify_hdr_ptrn_arg,
+ /* Actions bit set */
+ .set_encap = &dr_ste_v3_set_encap,
+ .set_push_vlan = &dr_ste_v3_set_push_vlan,
+ .set_pop_vlan = &dr_ste_v3_set_pop_vlan,
+ .set_rx_decap = &dr_ste_v3_set_rx_decap,
+ .set_encap_l3 = &dr_ste_v3_set_encap_l3,
+ .set_insert_hdr = &dr_ste_v3_set_insert_hdr,
+ .set_remove_hdr = &dr_ste_v3_set_remove_hdr,
+ /* Send */
+ .prepare_for_postsend = &dr_ste_v1_prepare_for_postsend,
+};
+
+struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v3(void)
+{
+ return &ste_ctx_v3;
+}
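dr_ste_v3_set_action_decap_l3_list() rebuilds the inner L2 header with 4-byte inline-insert actions: the 14B (or 18B, with VLAN) header is copied into a buffer with two leading padding bytes so its end lands on a 4-byte boundary, the chunks are emitted last-chunk-first with each insert at packet offset 0, and a final remove-by-size action drops the one word of padding. A standalone sketch of just that chunking arithmetic (header bytes and buffer sizes are made up):

#include <stdio.h>
#include <string.h>

#define CHUNK 4			/* one inline-insert action carries 4 bytes */

int main(void)
{
	unsigned char hdr[14];	/* inner L2 header: dmac + smac + ethertype */
	unsigned char padded[16] = { 0 };
	size_t len = sizeof(hdr), pad = len % CHUNK, i;
	const unsigned char *p;

	for (i = 0; i < len; i++)
		hdr[i] = (unsigned char)i;

	/* Right-align the header so the padding bytes sit at the front. */
	memcpy(padded + pad, hdr, len);

	/* Walk the chunks in reverse; each one is inserted at packet offset 0,
	 * so after all inserts the header ends up in the original order.
	 */
	p = padded + (len / CHUNK) * CHUNK;
	for (i = 0; i < len / CHUNK + 1; i++) {
		const unsigned char *c = p - i * CHUNK;

		printf("insert inline @0: %02x %02x %02x %02x\n",
		       c[0], c[1], c[2], c[3]);
	}

	/* The driver then removes one 2-byte word to drop the padding. */
	printf("remove %zu leading padding byte(s)\n", pad);
	return 0;
}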
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_types.h
index 7618c6147f86..cc328292bf84 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_types.h
@@ -1473,7 +1473,6 @@ struct mlx5dr_send_ring {
int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn);
void mlx5dr_send_ring_free(struct mlx5dr_domain *dmn,
struct mlx5dr_send_ring *send_ring);
-int mlx5dr_send_ring_force_drain(struct mlx5dr_domain *dmn);
int mlx5dr_send_postsend_ste(struct mlx5dr_domain *dmn,
struct mlx5dr_ste *ste,
u8 *data,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c
index 4b349d4005e4..f367997ab61e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c
@@ -521,7 +521,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
goto free_actions;
}
- id = dst->dest_attr.counter_id;
+ id = mlx5_fc_id(dst->dest_attr.counter);
tmp_action =
mlx5dr_action_create_flow_counter(id);
if (!tmp_action) {
@@ -833,15 +833,21 @@ static u32 mlx5_cmd_dr_get_capabilities(struct mlx5_flow_root_namespace *ns,
return steering_caps;
}
-int mlx5_fs_dr_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat)
+int
+mlx5_fs_dr_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat,
+ u32 *reformat_id)
{
+ struct mlx5dr_action *dr_action;
+
switch (pkt_reformat->reformat_type) {
case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
case MLX5_REFORMAT_TYPE_L2_TO_NVGRE:
case MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
case MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
case MLX5_REFORMAT_TYPE_INSERT_HDR:
- return mlx5dr_action_get_pkt_reformat_id(pkt_reformat->fs_dr_action.dr_action);
+ dr_action = pkt_reformat->fs_dr_action.dr_action;
+ *reformat_id = mlx5dr_action_get_pkt_reformat_id(dr_action);
+ return 0;
}
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.h
index 99a3b2eff6b8..f869f2daefbf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.h
@@ -38,7 +38,9 @@ struct mlx5_fs_dr_table {
bool mlx5_fs_dr_is_supported(struct mlx5_core_dev *dev);
-int mlx5_fs_dr_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat);
+int
+mlx5_fs_dr_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat,
+ u32 *reformat_id);
const struct mlx5_flow_cmds *mlx5_fs_cmd_get_dr_cmds(void);
@@ -49,9 +51,11 @@ static inline const struct mlx5_flow_cmds *mlx5_fs_cmd_get_dr_cmds(void)
return NULL;
}
-static inline u32 mlx5_fs_dr_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat)
+static inline int
+mlx5_fs_dr_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat,
+ u32 *reformat_id)
{
- return 0;
+ return -EOPNOTSUPP;
}
static inline bool mlx5_fs_dr_is_supported(struct mlx5_core_dev *dev)
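With this change the reformat-ID getter reports failure through its return value instead of returning the ID directly, so callers receive the ID via an out-parameter and must handle -EOPNOTSUPP. A hypothetical caller-side fragment (not taken from the tree) under the new convention:

	u32 reformat_id;
	int err;

	err = mlx5_fs_dr_action_get_pkt_reformat_id(pkt_reformat, &reformat_id);
	if (err)
		return err;	/* e.g. -EOPNOTSUPP for unsupported reformat types */
	/* ... use reformat_id ... */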
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5_ifc_dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5_ifc_dr.h
index fb078fa0f0cc..898c3618ff26 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5_ifc_dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5_ifc_dr.h
@@ -600,4 +600,44 @@ struct mlx5_ifc_ste_double_action_aso_v1_bits {
};
};
+struct mlx5_ifc_ste_single_action_remove_header_v3_bits {
+ u8 action_id[0x8];
+ u8 start_anchor[0x7];
+ u8 end_anchor[0x7];
+ u8 reserved_at_16[0x1];
+ u8 outer_l4_remove[0x1];
+ u8 reserved_at_18[0x4];
+ u8 decap[0x1];
+ u8 vni_to_cqe[0x1];
+ u8 qos_profile[0x2];
+};
+
+struct mlx5_ifc_ste_single_action_remove_header_size_v3_bits {
+ u8 action_id[0x8];
+ u8 start_anchor[0x7];
+ u8 start_offset[0x8];
+ u8 outer_l4_remove[0x1];
+ u8 reserved_at_18[0x2];
+ u8 remove_size[0x6];
+};
+
+struct mlx5_ifc_ste_double_action_insert_with_inline_v3_bits {
+ u8 action_id[0x8];
+ u8 start_anchor[0x7];
+ u8 start_offset[0x8];
+ u8 reserved_at_17[0x9];
+
+ u8 inline_data[0x20];
+};
+
+struct mlx5_ifc_ste_double_action_insert_with_ptr_v3_bits {
+ u8 action_id[0x8];
+ u8 start_anchor[0x7];
+ u8 start_offset[0x8];
+ u8 size[0x6];
+ u8 attributes[0x3];
+
+ u8 pointer[0x20];
+};
+
#endif /* MLX5_IFC_DR_H */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5dr.h
index 3ac7dc67509f..fc8a2169d1a1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5dr.h
@@ -45,8 +45,6 @@ mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type);
int mlx5dr_domain_destroy(struct mlx5dr_domain *domain);
-int mlx5dr_domain_sync(struct mlx5dr_domain *domain, u32 flags);
-
void mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn,
struct mlx5dr_domain *peer_dmn,
u16 peer_vhca_id);
@@ -160,7 +158,7 @@ mlx5dr_is_supported(struct mlx5_core_dev *dev)
(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner) ||
(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner_v2) &&
(MLX5_CAP_GEN(dev, steering_format_version) <=
- MLX5_STEERING_FORMAT_CONNECTX_7)));
+ MLX5_STEERING_FORMAT_CONNECTX_8)));
}
/* buddy functions & structure */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 0d5f750faa45..da5c24fc7b30 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -465,19 +465,22 @@ int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
{
u32 *out;
int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
+ int err;
out = kvzalloc(outlen, GFP_KERNEL);
if (!out)
return -ENOMEM;
- mlx5_query_nic_vport_context(mdev, 0, out);
+ err = mlx5_query_nic_vport_context(mdev, 0, out);
+ if (err)
+ goto out;
*node_guid = MLX5_GET64(query_nic_vport_context_out, out,
nic_vport_context.node_guid);
-
+out:
kvfree(out);
- return 0;
+ return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid);
@@ -519,19 +522,22 @@ int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
{
u32 *out;
int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
+ int err;
out = kvzalloc(outlen, GFP_KERNEL);
if (!out)
return -ENOMEM;
- mlx5_query_nic_vport_context(mdev, 0, out);
+ err = mlx5_query_nic_vport_context(mdev, 0, out);
+ if (err)
+ goto out;
*qkey_viol_cntr = MLX5_GET(query_nic_vport_context_out, out,
nic_vport_context.qkey_violation_counter);
-
+out:
kvfree(out);
- return 0;
+ return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_qkey_viol_cntr);
@@ -1199,6 +1205,31 @@ int mlx5_vport_get_other_func_cap(struct mlx5_core_dev *dev, u16 vport, void *ou
}
EXPORT_SYMBOL_GPL(mlx5_vport_get_other_func_cap);
+int mlx5_vport_get_vhca_id(struct mlx5_core_dev *dev, u16 vport, u16 *vhca_id)
+{
+ int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ void *query_ctx;
+ void *hca_caps;
+ int err;
+
+ *vhca_id = 0;
+
+ query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
+ if (!query_ctx)
+ return -ENOMEM;
+
+ err = mlx5_vport_get_other_func_general_cap(dev, vport, query_ctx);
+ if (err)
+ goto out_free;
+
+ hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
+ *vhca_id = MLX5_GET(cmd_hca_cap, hca_caps, vhca_id);
+
+out_free:
+ kfree(query_ctx);
+ return err;
+}
+
int mlx5_vport_set_other_func_cap(struct mlx5_core_dev *dev, const void *hca_cap,
u16 vport, u16 opmod)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wc.c b/drivers/net/ethernet/mellanox/mlx5/core/wc.c
index 1bed75eca97d..740b719e7072 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wc.c
@@ -382,6 +382,7 @@ err_alloc_bfreg:
bool mlx5_wc_support_get(struct mlx5_core_dev *mdev)
{
+ struct mutex *wc_state_lock = &mdev->wc_state_lock;
struct mlx5_core_dev *parent = NULL;
if (!MLX5_CAP_GEN(mdev, bf)) {
@@ -400,32 +401,31 @@ bool mlx5_wc_support_get(struct mlx5_core_dev *mdev)
*/
goto out;
- mutex_lock(&mdev->wc_state_lock);
-
- if (mdev->wc_state != MLX5_WC_STATE_UNINITIALIZED)
- goto unlock;
-
#ifdef CONFIG_MLX5_SF
- if (mlx5_core_is_sf(mdev))
+ if (mlx5_core_is_sf(mdev)) {
parent = mdev->priv.parent_mdev;
+ wc_state_lock = &parent->wc_state_lock;
+ }
#endif
- if (parent) {
- mutex_lock(&parent->wc_state_lock);
+ mutex_lock(wc_state_lock);
+ if (mdev->wc_state != MLX5_WC_STATE_UNINITIALIZED)
+ goto unlock;
+
+ if (parent) {
mlx5_core_test_wc(parent);
mlx5_core_dbg(mdev, "parent set wc_state=%d\n",
parent->wc_state);
mdev->wc_state = parent->wc_state;
- mutex_unlock(&parent->wc_state_lock);
+ } else {
+ mlx5_core_test_wc(mdev);
}
- mlx5_core_test_wc(mdev);
-
unlock:
- mutex_unlock(&mdev->wc_state_lock);
+ mutex_unlock(wc_state_lock);
out:
mlx5_core_dbg(mdev, "wc_state=%d\n", mdev->wc_state);
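The write-combining rework above replaces the nested parent/child locking with a single lock pointer chosen up front: an SF serializes on its parent's wc_state_lock, so only one mutex is ever taken. A generic sketch of that pattern (names are illustrative):

	struct mutex *lock = &dev->state_lock;

	if (device_is_subfunction(dev))	/* illustrative predicate */
		lock = &dev->parent->state_lock;

	mutex_lock(lock);
	/* ... test and cache the shared write-combining state ... */
	mutex_unlock(lock);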
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
index fb2e5b844c15..d76d7a945899 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
@@ -447,8 +447,10 @@ static int mlxbf_gige_probe(struct platform_device *pdev)
priv->llu_plu_irq = platform_get_irq(pdev, MLXBF_GIGE_LLU_PLU_INTR_IDX);
phy_irq = acpi_dev_gpio_irq_get_by(ACPI_COMPANION(&pdev->dev), "phy", 0);
- if (phy_irq < 0) {
- dev_err(&pdev->dev, "Error getting PHY irq. Use polling instead");
+ if (phy_irq == -EPROBE_DEFER) {
+ err = -EPROBE_DEFER;
+ goto out;
+ } else if (phy_irq < 0) {
phy_irq = PHY_POLL;
}
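The probe fix distinguishes deferral from failure: -EPROBE_DEFER is propagated so the probe is retried once the GPIO/IRQ provider is ready, while any other error keeps the old fallback to PHY polling. The pattern in isolation (helper name is illustrative):

	irq = example_get_optional_phy_irq(dev);
	if (irq == -EPROBE_DEFER)
		return -EPROBE_DEFER;	/* provider not bound yet, retry later */
	if (irq < 0)
		irq = PHY_POLL;		/* no IRQ wired up, poll the PHY instead */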
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
index 46245e0b2462..43c84900369a 100644
--- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
@@ -14,7 +14,6 @@
#define MLXFW_FSM_STATE_WAIT_TIMEOUT_MS 30000
#define MLXFW_FSM_STATE_WAIT_ROUNDS \
(MLXFW_FSM_STATE_WAIT_TIMEOUT_MS / MLXFW_FSM_STATE_WAIT_CYCLE_MS)
-#define MLXFW_FSM_MAX_COMPONENT_SIZE (10 * (1 << 20))
static const int mlxfw_fsm_state_errno[] = {
[MLXFW_FSM_STATE_ERR_ERROR] = -EIO,
@@ -229,7 +228,6 @@ static int mlxfw_flash_component(struct mlxfw_dev *mlxfw_dev,
return err;
}
- comp_max_size = min_t(u32, comp_max_size, MLXFW_FSM_MAX_COMPONENT_SIZE);
if (comp->data_size > comp_max_size) {
MLXFW_ERR_MSG(mlxfw_dev, extack,
"Component size is bigger than limit", -EINVAL);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 4a79c0d7e7ad..2bb2b77351bd 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -35,6 +35,7 @@
#include "reg.h"
#include "resources.h"
#include "../mlxfw/mlxfw.h"
+#include "txheader.h"
static LIST_HEAD(mlxsw_core_driver_list);
static DEFINE_SPINLOCK(mlxsw_core_driver_list_lock);
@@ -677,7 +678,7 @@ struct mlxsw_reg_trans {
struct list_head bulk_list;
struct mlxsw_core *core;
struct sk_buff *tx_skb;
- struct mlxsw_tx_info tx_info;
+ struct mlxsw_txhdr_info txhdr_info;
struct delayed_work timeout_dw;
unsigned int retries;
u64 tid;
@@ -737,12 +738,11 @@ static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
if (!skb)
return -ENOMEM;
- trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), false, 0,
- skb->data + mlxsw_core->driver->txhdr_len,
- skb->len - mlxsw_core->driver->txhdr_len);
+ trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), false, 0, skb->data,
+ skb->len);
atomic_set(&trans->active, 1);
- err = mlxsw_core_skb_transmit(mlxsw_core, skb, &trans->tx_info);
+ err = mlxsw_core_skb_transmit(mlxsw_core, skb, &trans->txhdr_info);
if (err) {
dev_kfree_skb(skb);
return err;
@@ -944,7 +944,7 @@ static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
emad_len = (reg_len + sizeof(u32) + MLXSW_EMAD_ETH_HDR_LEN +
(MLXSW_EMAD_OP_TLV_LEN + MLXSW_EMAD_END_TLV_LEN) *
- sizeof(u32) + mlxsw_core->driver->txhdr_len);
+ sizeof(u32) + MLXSW_TXHDR_LEN);
if (mlxsw_core->emad.enable_string_tlv)
emad_len += MLXSW_EMAD_STRING_TLV_LEN * sizeof(u32);
if (mlxsw_core->emad.enable_latency_tlv)
@@ -984,8 +984,8 @@ static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core,
list_add_tail(&trans->bulk_list, bulk_list);
trans->core = mlxsw_core;
trans->tx_skb = skb;
- trans->tx_info.local_port = MLXSW_PORT_CPU_PORT;
- trans->tx_info.is_emad = true;
+ trans->txhdr_info.tx_info.local_port = MLXSW_PORT_CPU_PORT;
+ trans->txhdr_info.tx_info.is_emad = true;
INIT_DELAYED_WORK(&trans->timeout_dw, mlxsw_emad_trans_timeout_work);
trans->tid = tid;
init_completion(&trans->completion);
@@ -995,7 +995,6 @@ static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core,
trans->type = type;
mlxsw_emad_construct(mlxsw_core, skb, reg, payload, type, trans->tid);
- mlxsw_core->driver->txhdr_construct(skb, &trans->tx_info);
spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
list_add_tail_rcu(&trans->list, &mlxsw_core->emad.trans_list);
@@ -2330,10 +2329,10 @@ bool mlxsw_core_skb_transmit_busy(struct mlxsw_core *mlxsw_core,
EXPORT_SYMBOL(mlxsw_core_skb_transmit_busy);
int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info)
+ const struct mlxsw_txhdr_info *txhdr_info)
{
return mlxsw_core->bus->skb_transmit(mlxsw_core->bus_priv, skb,
- tx_info);
+ txhdr_info);
}
EXPORT_SYMBOL(mlxsw_core_skb_transmit);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index 6d11225594dd..1a871397a6df 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -72,7 +72,14 @@ struct mlxsw_tx_info {
bool is_emad;
};
+struct mlxsw_txhdr_info {
+ struct mlxsw_tx_info tx_info;
+ bool data;
+ u16 max_fid; /* Used for PTP packets which are sent as data. */
+};
+
struct mlxsw_rx_md_info {
+ struct napi_struct *napi;
u32 cookie_index;
u32 latency;
u32 tx_congestion;
@@ -94,7 +101,7 @@ struct mlxsw_rx_md_info {
bool mlxsw_core_skb_transmit_busy(struct mlxsw_core *mlxsw_core,
const struct mlxsw_tx_info *tx_info);
int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info);
+ const struct mlxsw_txhdr_info *txhdr_info);
void mlxsw_core_ptp_transmitted(struct mlxsw_core *mlxsw_core,
struct sk_buff *skb, u16 local_port);
@@ -425,8 +432,6 @@ struct mlxsw_driver {
int (*trap_policer_counter_get)(struct mlxsw_core *mlxsw_core,
const struct devlink_trap_policer *policer,
u64 *p_drops);
- void (*txhdr_construct)(struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info);
int (*resources_register)(struct mlxsw_core *mlxsw_core);
int (*kvd_sizes_get)(struct mlxsw_core *mlxsw_core,
const struct mlxsw_config_profile *profile,
@@ -439,7 +444,6 @@ struct mlxsw_driver {
void (*ptp_transmitted)(struct mlxsw_core *mlxsw_core,
struct sk_buff *skb, u16 local_port);
- u8 txhdr_len;
const struct mlxsw_config_profile *profile;
bool sdq_supports_cqe_v2;
};
@@ -486,7 +490,7 @@ struct mlxsw_bus {
bool (*skb_transmit_busy)(void *bus_priv,
const struct mlxsw_tx_info *tx_info);
int (*skb_transmit)(void *bus_priv, struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info);
+ const struct mlxsw_txhdr_info *txhdr_info);
int (*cmd_exec)(void *bus_priv, u16 opcode, u8 opcode_mod,
u32 in_mod, bool out_mbox_direct,
char *in_mbox, size_t in_mbox_size,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
index e746cd9c68ed..eac9a14a6058 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
@@ -205,11 +205,11 @@ static int mlxsw_thermal_get_temp(struct thermal_zone_device *tzdev,
return 0;
}
-static struct thermal_zone_params mlxsw_thermal_params = {
+static const struct thermal_zone_params mlxsw_thermal_params = {
.no_hwmon = true,
};
-static struct thermal_zone_device_ops mlxsw_thermal_ops = {
+static const struct thermal_zone_device_ops mlxsw_thermal_ops = {
.should_bind = mlxsw_thermal_should_bind,
.get_temp = mlxsw_thermal_get_temp,
};
@@ -252,7 +252,7 @@ static int mlxsw_thermal_module_temp_get(struct thermal_zone_device *tzdev,
return 0;
}
-static struct thermal_zone_device_ops mlxsw_thermal_module_ops = {
+static const struct thermal_zone_device_ops mlxsw_thermal_module_ops = {
.should_bind = mlxsw_thermal_module_should_bind,
.get_temp = mlxsw_thermal_module_temp_get,
};
@@ -280,7 +280,7 @@ static int mlxsw_thermal_gearbox_temp_get(struct thermal_zone_device *tzdev,
return 0;
}
-static struct thermal_zone_device_ops mlxsw_thermal_gearbox_ops = {
+static const struct thermal_zone_device_ops mlxsw_thermal_gearbox_ops = {
.should_bind = mlxsw_thermal_module_should_bind,
.get_temp = mlxsw_thermal_gearbox_temp_get,
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/i2c.c b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
index 1e150ce1c73a..f9f565c1036d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/i2c.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
@@ -516,7 +516,7 @@ static bool mlxsw_i2c_skb_transmit_busy(void *bus_priv,
}
static int mlxsw_i2c_skb_transmit(void *bus_priv, struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info)
+ const struct mlxsw_txhdr_info *txhdr_info)
{
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index d6f37456fb31..058dcabfaa2e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -21,6 +21,7 @@
#include "cmd.h"
#include "port.h"
#include "resources.h"
+#include "txheader.h"
#define mlxsw_pci_write32(mlxsw_pci, reg, val) \
iowrite32be(val, (mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))
@@ -737,6 +738,7 @@ static void mlxsw_pci_cqe_rdq_md_init(struct sk_buff *skb, const char *cqe)
}
static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
+ struct napi_struct *napi,
struct mlxsw_pci_queue *q,
u16 consumer_counter_limit,
enum mlxsw_pci_cqe_v cqe_v, char *cqe)
@@ -807,6 +809,7 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
}
mlxsw_pci_skb_cb_ts_set(mlxsw_pci, skb, cqe_v, cqe);
+ mlxsw_skb_cb(skb)->rx_md_info.napi = napi;
mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info);
@@ -869,7 +872,7 @@ static int mlxsw_pci_napi_poll_cq_rx(struct napi_struct *napi, int budget)
continue;
}
- mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq,
+ mlxsw_pci_cqe_rdq_handle(mlxsw_pci, napi, rdq,
wqe_counter, q->u.cq.v, cqe);
if (++work_done == budget)
@@ -2093,6 +2096,39 @@ static void mlxsw_pci_fini(void *bus_priv)
mlxsw_pci_free_irq_vectors(mlxsw_pci);
}
+static int mlxsw_pci_txhdr_construct(struct sk_buff *skb,
+ const struct mlxsw_txhdr_info *txhdr_info)
+{
+ const struct mlxsw_tx_info tx_info = txhdr_info->tx_info;
+ char *txhdr;
+
+ if (skb_cow_head(skb, MLXSW_TXHDR_LEN))
+ return -ENOMEM;
+
+ txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
+ memset(txhdr, 0, MLXSW_TXHDR_LEN);
+
+ mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
+ mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
+ mlxsw_tx_hdr_swid_set(txhdr, 0);
+
+ if (unlikely(txhdr_info->data)) {
+ u16 fid = txhdr_info->max_fid + tx_info.local_port - 1;
+
+ mlxsw_tx_hdr_rx_is_router_set(txhdr, true);
+ mlxsw_tx_hdr_fid_valid_set(txhdr, true);
+ mlxsw_tx_hdr_fid_set(txhdr, fid);
+ mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_DATA);
+ } else {
+ mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
+ mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
+ mlxsw_tx_hdr_port_mid_set(txhdr, tx_info.local_port);
+ mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
+ }
+
+ return 0;
+}
+
static struct mlxsw_pci_queue *
mlxsw_pci_sdq_pick(struct mlxsw_pci *mlxsw_pci,
const struct mlxsw_tx_info *tx_info)
@@ -2120,7 +2156,7 @@ static bool mlxsw_pci_skb_transmit_busy(void *bus_priv,
}
static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info)
+ const struct mlxsw_txhdr_info *txhdr_info)
{
struct mlxsw_pci *mlxsw_pci = bus_priv;
struct mlxsw_pci_queue *q;
@@ -2129,13 +2165,17 @@ static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb,
int i;
int err;
+ err = mlxsw_pci_txhdr_construct(skb, txhdr_info);
+ if (err)
+ return err;
+
if (skb_shinfo(skb)->nr_frags > MLXSW_PCI_WQE_SG_ENTRIES - 1) {
err = skb_linearize(skb);
if (err)
return err;
}
- q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);
+ q = mlxsw_pci_sdq_pick(mlxsw_pci, &txhdr_info->tx_info);
spin_lock_bh(&q->lock);
elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
if (!elem_info) {
@@ -2143,7 +2183,7 @@ static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb,
err = -EAGAIN;
goto unlock;
}
- mlxsw_skb_cb(skb)->tx_info = *tx_info;
+ mlxsw_skb_cb(skb)->tx_info = txhdr_info->tx_info;
elem_info->sdq.skb = skb;
wqe = elem_info->elem;
@@ -2174,6 +2214,8 @@ static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb,
for (i++; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
mlxsw_pci_wqe_byte_count_set(wqe, i, 0);
+ mlxsw_pci_wqe_ipcs_set(wqe, skb->ip_summed == CHECKSUM_PARTIAL);
+
/* Everything is set up, ring producer doorbell to get HW going */
q->producer_counter++;
mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
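In the new mlxsw_pci_txhdr_construct(), packets flagged as data (PTP events) carry a FID derived from the FID resource limit plus the local port, so every port maps to a distinct forwarding ID, while control packets keep the control TClass path. A quick worked example with made-up numbers:

	/* Hypothetical values: FID resource limit 4096, local port 5. */
	u16 max_fid = 4096, local_port = 5;
	u16 fid = max_fid + local_port - 1;	/* 4100: distinct per local port */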
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
index 6bed495dcf0f..7fa94e5828de 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
@@ -90,6 +90,11 @@ MLXSW_ITEM32(pci, wqe, lp, 0x00, 30, 1);
*/
MLXSW_ITEM32(pci, wqe, type, 0x00, 23, 4);
+/* pci_wqe_ipcs
+ * Calculate IPv4 and TCP / UDP checksums.
+ */
+MLXSW_ITEM32(pci, wqe, ipcs, 0x00, 14, 1);
+
/* pci_wqe_byte_count
* Size of i-th scatter/gather entry, 0 if entry is unused.
*/
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 3f5e5d99251b..618957d65663 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -107,74 +107,6 @@ static const unsigned char mlxsw_sp2_mac_mask[ETH_ALEN] = {
0xff, 0xff, 0xff, 0xff, 0xf0, 0x00
};
-/* tx_hdr_version
- * Tx header version.
- * Must be set to 1.
- */
-MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);
-
-/* tx_hdr_ctl
- * Packet control type.
- * 0 - Ethernet control (e.g. EMADs, LACP)
- * 1 - Ethernet data
- */
-MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);
-
-/* tx_hdr_proto
- * Packet protocol type. Must be set to 1 (Ethernet).
- */
-MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);
-
-/* tx_hdr_rx_is_router
- * Packet is sent from the router. Valid for data packets only.
- */
-MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);
-
-/* tx_hdr_fid_valid
- * Indicates if the 'fid' field is valid and should be used for
- * forwarding lookup. Valid for data packets only.
- */
-MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);
-
-/* tx_hdr_swid
- * Switch partition ID. Must be set to 0.
- */
-MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);
-
-/* tx_hdr_control_tclass
- * Indicates if the packet should use the control TClass and not one
- * of the data TClasses.
- */
-MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);
-
-/* tx_hdr_etclass
- * Egress TClass to be used on the egress device on the egress port.
- */
-MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);
-
-/* tx_hdr_port_mid
- * Destination local port for unicast packets.
- * Destination multicast ID for multicast packets.
- *
- * Control packets are directed to a specific egress port, while data
- * packets are transmitted through the CPU port (0) into the switch partition,
- * where forwarding rules are applied.
- */
-MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);
-
-/* tx_hdr_fid
- * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
- * set, otherwise calculated based on the packet's VID using VID to FID mapping.
- * Valid for data packets only.
- */
-MLXSW_ITEM32(tx, hdr, fid, 0x08, 16, 16);
-
-/* tx_hdr_type
- * 0 - Data packets
- * 6 - Control packets
- */
-MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
-
int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
unsigned int counter_index, bool clear,
u64 *packets, u64 *bytes)
@@ -233,61 +165,6 @@ void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
counter_index);
}
-void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info)
-{
- char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
-
- memset(txhdr, 0, MLXSW_TXHDR_LEN);
-
- mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
- mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
- mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
- mlxsw_tx_hdr_swid_set(txhdr, 0);
- mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
- mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
- mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
-}
-
-int
-mlxsw_sp_txhdr_ptp_data_construct(struct mlxsw_core *mlxsw_core,
- struct mlxsw_sp_port *mlxsw_sp_port,
- struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info)
-{
- char *txhdr;
- u16 max_fid;
- int err;
-
- if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
- err = -ENOMEM;
- goto err_skb_cow_head;
- }
-
- if (!MLXSW_CORE_RES_VALID(mlxsw_core, FID)) {
- err = -EIO;
- goto err_res_valid;
- }
- max_fid = MLXSW_CORE_RES_GET(mlxsw_core, FID);
-
- txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
- memset(txhdr, 0, MLXSW_TXHDR_LEN);
-
- mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
- mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
- mlxsw_tx_hdr_rx_is_router_set(txhdr, true);
- mlxsw_tx_hdr_fid_valid_set(txhdr, true);
- mlxsw_tx_hdr_fid_set(txhdr, max_fid + tx_info->local_port - 1);
- mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_DATA);
- return 0;
-
-err_res_valid:
-err_skb_cow_head:
- this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
- dev_kfree_skb_any(skb);
- return err;
-}
-
static bool mlxsw_sp_skb_requires_ts(struct sk_buff *skb)
{
unsigned int type;
@@ -299,30 +176,49 @@ static bool mlxsw_sp_skb_requires_ts(struct sk_buff *skb)
return !!ptp_parse_header(skb, type);
}
-static int mlxsw_sp_txhdr_handle(struct mlxsw_core *mlxsw_core,
- struct mlxsw_sp_port *mlxsw_sp_port,
- struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info)
+static void mlxsw_sp_txhdr_info_data_init(struct mlxsw_core *mlxsw_core,
+ struct sk_buff *skb,
+ struct mlxsw_txhdr_info *txhdr_info)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ /* Resource validation was done as part of PTP init. */
+ u16 max_fid = MLXSW_CORE_RES_GET(mlxsw_core, FID);
+
+ txhdr_info->data = true;
+ txhdr_info->max_fid = max_fid;
+}
- /* In Spectrum-2 and Spectrum-3, PTP events that require a time stamp
- * need special handling and cannot be transmitted as regular control
- * packets.
+static struct sk_buff *
+mlxsw_sp_vlan_tag_push(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb)
+{
+ /* In some Spectrum ASICs, in order for PTP event packets to have their
+ * correction field correctly set on the egress port they must be
+ * transmitted as data packets. Such packets ingress the ASIC via the
+ * CPU port and must have a VLAN tag, as the CPU port is not configured
+ * with a PVID. Push the default VLAN (4095), which is configured as
+ * egress untagged on all the ports.
*/
- if (unlikely(mlxsw_sp_skb_requires_ts(skb)))
- return mlxsw_sp->ptp_ops->txhdr_construct(mlxsw_core,
- mlxsw_sp_port, skb,
- tx_info);
+ if (skb_vlan_tagged(skb))
+ return skb;
- if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
- this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
- dev_kfree_skb_any(skb);
- return -ENOMEM;
- }
+ return vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
+ MLXSW_SP_DEFAULT_VID);
+}
- mlxsw_sp_txhdr_construct(skb, tx_info);
- return 0;
+static struct sk_buff *
+mlxsw_sp_txhdr_preparations(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
+ struct mlxsw_txhdr_info *txhdr_info)
+{
+ if (likely(!mlxsw_sp_skb_requires_ts(skb)))
+ return skb;
+
+ if (!mlxsw_sp->ptp_ops->tx_as_data)
+ return skb;
+
+ /* Special handling for PTP events that require a time stamp and cannot
+ * be transmitted as regular control packets.
+ */
+ mlxsw_sp_txhdr_info_data_init(mlxsw_sp->core, skb, txhdr_info);
+ return mlxsw_sp_vlan_tag_push(mlxsw_sp, skb);
}
enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state)
@@ -721,16 +617,16 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
- const struct mlxsw_tx_info tx_info = {
- .local_port = mlxsw_sp_port->local_port,
- .is_emad = false,
+ struct mlxsw_txhdr_info txhdr_info = {
+ .tx_info.local_port = mlxsw_sp_port->local_port,
+ .tx_info.is_emad = false,
};
u64 len;
int err;
memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));
- if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
+ if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &txhdr_info.tx_info))
return NETDEV_TX_BUSY;
if (eth_skb_pad(skb)) {
@@ -738,10 +634,11 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
- err = mlxsw_sp_txhdr_handle(mlxsw_sp->core, mlxsw_sp_port, skb,
- &tx_info);
- if (err)
+ skb = mlxsw_sp_txhdr_preparations(mlxsw_sp, skb, &txhdr_info);
+ if (!skb) {
+ this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
return NETDEV_TX_OK;
+ }
/* TX header is consumed by HW on the way so we shouldn't count its
* bytes as being sent.
@@ -751,7 +648,7 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
/* Due to a race we might fail here because of a full queue. In that
* unlikely case we simply drop the packet.
*/
- err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);
+ err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &txhdr_info);
if (!err) {
pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
@@ -1262,63 +1159,31 @@ static int mlxsw_sp_set_features(struct net_device *dev,
return 0;
}
-static int mlxsw_sp_port_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct ifreq *ifr)
+static int mlxsw_sp_port_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
- struct hwtstamp_config config;
- int err;
-
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
- err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port,
- &config);
- if (err)
- return err;
-
- if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
- return -EFAULT;
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
- return 0;
+ return mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port,
+ config, extack);
}
-static int mlxsw_sp_port_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
- struct ifreq *ifr)
+static int mlxsw_sp_port_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *config)
{
- struct hwtstamp_config config;
- int err;
-
- err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_get(mlxsw_sp_port,
- &config);
- if (err)
- return err;
-
- if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
- return -EFAULT;
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
- return 0;
+ return mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_get(mlxsw_sp_port,
+ config);
}
static inline void mlxsw_sp_port_ptp_clear(struct mlxsw_sp_port *mlxsw_sp_port)
{
- struct hwtstamp_config config = {0};
+ struct kernel_hwtstamp_config config = {};
- mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config);
-}
-
-static int
-mlxsw_sp_port_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
- struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
-
- switch (cmd) {
- case SIOCSHWTSTAMP:
- return mlxsw_sp_port_hwtstamp_set(mlxsw_sp_port, ifr);
- case SIOCGHWTSTAMP:
- return mlxsw_sp_port_hwtstamp_get(mlxsw_sp_port, ifr);
- default:
- return -EOPNOTSUPP;
- }
+ mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config,
+ NULL);
}
static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
@@ -1335,7 +1200,8 @@ static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
.ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid,
.ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid,
.ndo_set_features = mlxsw_sp_set_features,
- .ndo_eth_ioctl = mlxsw_sp_port_ioctl,
+ .ndo_hwtstamp_get = mlxsw_sp_port_hwtstamp_get,
+ .ndo_hwtstamp_set = mlxsw_sp_port_hwtstamp_set,
};
static int
@@ -1677,10 +1543,12 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u16 local_port,
netif_carrier_off(dev);
dev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_FILTER |
- NETIF_F_HW_TC;
- dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK;
+ NETIF_F_HW_TC | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ dev->vlan_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
dev->lltx = true;
- dev->netns_local = true;
+ dev->netns_immutable = true;
dev->min_mtu = ETH_MIN_MTU;
dev->max_mtu = MLXSW_PORT_MAX_MTU - MLXSW_PORT_ETH_FRAME_HDR;
@@ -2449,7 +2317,7 @@ void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
u64_stats_update_end(&pcpu_stats->syncp);
skb->protocol = eth_type_trans(skb, skb->dev);
- netif_receive_skb(skb);
+ napi_gro_receive(mlxsw_skb_cb(skb)->rx_md_info.napi, skb);
}
static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u16 local_port,
@@ -2510,8 +2378,6 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
/* Multicast Router Traps */
MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
- /* NVE traps */
- MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, NEIGH_DISCOVERY, false),
};
static const struct mlxsw_listener mlxsw_sp1_listener[] = {
@@ -2792,7 +2658,6 @@ static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = {
.get_stats_count = mlxsw_sp1_get_stats_count,
.get_stats_strings = mlxsw_sp1_get_stats_strings,
.get_stats = mlxsw_sp1_get_stats,
- .txhdr_construct = mlxsw_sp_ptp_txhdr_construct,
};
static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
@@ -2811,7 +2676,7 @@ static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
.get_stats_count = mlxsw_sp2_get_stats_count,
.get_stats_strings = mlxsw_sp2_get_stats_strings,
.get_stats = mlxsw_sp2_get_stats,
- .txhdr_construct = mlxsw_sp2_ptp_txhdr_construct,
+ .tx_as_data = true,
};
static const struct mlxsw_sp_ptp_ops mlxsw_sp4_ptp_ops = {
@@ -2830,7 +2695,6 @@ static const struct mlxsw_sp_ptp_ops mlxsw_sp4_ptp_ops = {
.get_stats_count = mlxsw_sp2_get_stats_count,
.get_stats_strings = mlxsw_sp2_get_stats_strings,
.get_stats = mlxsw_sp2_get_stats,
- .txhdr_construct = mlxsw_sp_ptp_txhdr_construct,
};
struct mlxsw_sp_sample_trigger_node {
@@ -3992,11 +3856,9 @@ static struct mlxsw_driver mlxsw_sp1_driver = {
.trap_policer_fini = mlxsw_sp_trap_policer_fini,
.trap_policer_set = mlxsw_sp_trap_policer_set,
.trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
- .txhdr_construct = mlxsw_sp_txhdr_construct,
.resources_register = mlxsw_sp1_resources_register,
.kvd_sizes_get = mlxsw_sp_kvd_sizes_get,
.ptp_transmitted = mlxsw_sp_ptp_transmitted,
- .txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp1_config_profile,
.sdq_supports_cqe_v2 = false,
};
@@ -4030,10 +3892,8 @@ static struct mlxsw_driver mlxsw_sp2_driver = {
.trap_policer_fini = mlxsw_sp_trap_policer_fini,
.trap_policer_set = mlxsw_sp_trap_policer_set,
.trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
- .txhdr_construct = mlxsw_sp_txhdr_construct,
.resources_register = mlxsw_sp2_resources_register,
.ptp_transmitted = mlxsw_sp_ptp_transmitted,
- .txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp2_config_profile,
.sdq_supports_cqe_v2 = true,
};
@@ -4067,10 +3927,8 @@ static struct mlxsw_driver mlxsw_sp3_driver = {
.trap_policer_fini = mlxsw_sp_trap_policer_fini,
.trap_policer_set = mlxsw_sp_trap_policer_set,
.trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
- .txhdr_construct = mlxsw_sp_txhdr_construct,
.resources_register = mlxsw_sp2_resources_register,
.ptp_transmitted = mlxsw_sp_ptp_transmitted,
- .txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp2_config_profile,
.sdq_supports_cqe_v2 = true,
};
@@ -4102,10 +3960,8 @@ static struct mlxsw_driver mlxsw_sp4_driver = {
.trap_policer_fini = mlxsw_sp_trap_policer_fini,
.trap_policer_set = mlxsw_sp_trap_policer_set,
.trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
- .txhdr_construct = mlxsw_sp_txhdr_construct,
.resources_register = mlxsw_sp2_resources_register,
.ptp_transmitted = mlxsw_sp_ptp_transmitted,
- .txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp4_config_profile,
.sdq_supports_cqe_v2 = true,
};
@@ -5343,25 +5199,13 @@ static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp,
return 0;
if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
return -EOPNOTSUPP;
- if (cu_info->linking) {
- if (!netif_running(dev))
- return 0;
- /* When the bridge is VLAN-aware, the VNI of the VxLAN
- * device needs to be mapped to a VLAN, but at this
- * point no VLANs are configured on the VxLAN device
- */
- if (br_vlan_enabled(upper_dev))
- return 0;
+ if (!netif_running(dev))
+ return 0;
+ if (cu_info->linking)
return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev,
dev, 0, extack);
- } else {
- /* VLANs were already flushed, which triggered the
- * necessary cleanup
- */
- if (br_vlan_enabled(upper_dev))
- return 0;
+ else
mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
- }
break;
case NETDEV_PRE_UP:
upper_dev = netdev_master_upper_dev_get(dev);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 8d3c61287696..b03ff9e044f9 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -233,9 +233,10 @@ struct mlxsw_sp_ptp_ops {
u16 local_port);
int (*hwtstamp_get)(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config *config);
+ struct kernel_hwtstamp_config *config);
int (*hwtstamp_set)(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config *config);
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
void (*shaper_work)(struct work_struct *work);
int (*get_ts_info)(struct mlxsw_sp *mlxsw_sp,
struct kernel_ethtool_ts_info *info);
@@ -243,10 +244,7 @@ struct mlxsw_sp_ptp_ops {
void (*get_stats_strings)(u8 **p);
void (*get_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
u64 *data, int data_index);
- int (*txhdr_construct)(struct mlxsw_core *mlxsw_core,
- struct mlxsw_sp_port *mlxsw_sp_port,
- struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info);
+ bool tx_as_data;
};
struct mlxsw_sp_fid_core_ops {
@@ -354,7 +352,7 @@ struct mlxsw_sp_port {
struct mlxsw_sp_flow_block *eg_flow_block;
struct {
struct delayed_work shaper_dw;
- struct hwtstamp_config hwtstamp_config;
+ struct kernel_hwtstamp_config hwtstamp_config;
u16 ing_types;
u16 egr_types;
struct mlxsw_sp_ptp_port_stats stats;
@@ -664,10 +662,10 @@ bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
const struct net_device *br_dev);
int mlxsw_sp_bridge_vxlan_join(struct mlxsw_sp *mlxsw_sp,
const struct net_device *br_dev,
- const struct net_device *vxlan_dev, u16 vid,
+ struct net_device *vxlan_dev, u16 vid,
struct netlink_ext_ack *extack);
void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp,
- const struct net_device *vxlan_dev);
+ struct net_device *vxlan_dev);
extern struct notifier_block mlxsw_sp_switchdev_notifier;
/* spectrum.c */
@@ -711,12 +709,6 @@ int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
unsigned int *p_counter_index);
void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
unsigned int counter_index);
-void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info);
-int mlxsw_sp_txhdr_ptp_data_construct(struct mlxsw_core *mlxsw_core,
- struct mlxsw_sp_port *mlxsw_sp_port,
- struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info);
bool mlxsw_sp_port_dev_check(const struct net_device *dev);
struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev);
struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev);
@@ -763,9 +755,6 @@ void
mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
struct net_device *dev);
-bool mlxsw_sp_rif_exists(struct mlxsw_sp *mlxsw_sp,
- const struct net_device *dev);
-u16 mlxsw_sp_rif_vid(struct mlxsw_sp *mlxsw_sp, const struct net_device *dev);
u16 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_router_nve_promote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
enum mlxsw_sp_l3proto ul_proto,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
index a54eedb69a3f..067f0055a55a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
@@ -212,7 +212,22 @@ static const u8 mlxsw_sp4_acl_bf_crc6_tab[256] = {
* This array defines key offsets for easy access when copying key blocks from
* entry key to Bloom filter chunk.
*/
-static const u8 chunk_key_offsets[MLXSW_BLOOM_KEY_CHUNKS] = {2, 20, 38};
+static char *
+mlxsw_sp_acl_bf_enc_key_get(struct mlxsw_sp_acl_atcam_entry *aentry,
+ u8 chunk_index)
+{
+ switch (chunk_index) {
+ case 0:
+ return &aentry->ht_key.enc_key[2];
+ case 1:
+ return &aentry->ht_key.enc_key[20];
+ case 2:
+ return &aentry->ht_key.enc_key[38];
+ default:
+ WARN_ON_ONCE(1);
+ return &aentry->ht_key.enc_key[0];
+ }
+}
static u16 mlxsw_sp2_acl_bf_crc16_byte(u16 crc, u8 c)
{
@@ -235,9 +250,10 @@ __mlxsw_sp_acl_bf_key_encode(struct mlxsw_sp_acl_atcam_region *aregion,
u8 key_offset, u8 chunk_key_len, u8 chunk_len)
{
struct mlxsw_afk_key_info *key_info = aregion->region->key_info;
- u8 chunk_index, chunk_count, block_count;
+ u8 chunk_index, chunk_count;
char *chunk = output;
__be16 erp_region_id;
+ u32 block_count;
block_count = mlxsw_afk_key_info_blocks_count_get(key_info);
chunk_count = 1 + ((block_count - 1) >> 2);
@@ -245,12 +261,13 @@ __mlxsw_sp_acl_bf_key_encode(struct mlxsw_sp_acl_atcam_region *aregion,
(aregion->region->id << 4));
for (chunk_index = max_chunks - chunk_count; chunk_index < max_chunks;
chunk_index++) {
+ char *enc_key;
+
memset(chunk, 0, pad_bytes);
memcpy(chunk + pad_bytes, &erp_region_id,
sizeof(erp_region_id));
- memcpy(chunk + key_offset,
- &aentry->ht_key.enc_key[chunk_key_offsets[chunk_index]],
- chunk_key_len);
+ enc_key = mlxsw_sp_acl_bf_enc_key_get(aentry, chunk_index);
+ memcpy(chunk + key_offset, enc_key, chunk_key_len);
chunk += chunk_len;
}
*len = chunk_count * chunk_len;
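A quick worked example of the chunk arithmetic above (illustrative only): chunk_count = 1 + ((block_count - 1) >> 2) rounds the key-block count up to whole 4-block chunks, so block_count = 5 yields 1 + (4 >> 2) = 2 chunks, and block_count = 12 yields 1 + (11 >> 2) = 3 chunks, the maximum handled by mlxsw_sp_acl_bf_enc_key_get().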
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
index 6fe185ea6732..1850a975b380 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
@@ -324,6 +324,10 @@ static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_5b[] =
MLXSW_AFK_ELEMENT_INST_EXT_U32(SRC_SYS_PORT, 0x04, 0, 9, -1, true), /* RX_ACL_SYSTEM_PORT */
};
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_1b[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_0_31, 0x04, 4),
+};
+
static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_5b[] = {
MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER, 0x04, 20, 12),
};
@@ -341,7 +345,7 @@ static const struct mlxsw_afk_block mlxsw_sp4_afk_blocks[] = {
MLXSW_AFK_BLOCK(0x14, mlxsw_sp_afk_element_info_mac_4),
MLXSW_AFK_BLOCK_HIGH_ENTROPY(0x1A, mlxsw_sp_afk_element_info_mac_5b),
MLXSW_AFK_BLOCK_HIGH_ENTROPY(0x38, mlxsw_sp_afk_element_info_ipv4_0),
- MLXSW_AFK_BLOCK_HIGH_ENTROPY(0x39, mlxsw_sp_afk_element_info_ipv4_1),
+ MLXSW_AFK_BLOCK_HIGH_ENTROPY(0x3F, mlxsw_sp_afk_element_info_ipv4_1b),
MLXSW_AFK_BLOCK(0x3A, mlxsw_sp_afk_element_info_ipv4_2),
MLXSW_AFK_BLOCK(0x36, mlxsw_sp_afk_element_info_ipv4_5b),
MLXSW_AFK_BLOCK(0x40, mlxsw_sp_afk_element_info_ipv6_0),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
index 2bed8c86b7cf..0a8fb9c842d3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
@@ -262,7 +262,7 @@ err_port_pause_configure:
}
struct mlxsw_sp_port_hw_stats {
- char str[ETH_GSTRING_LEN];
+ char str[ETH_GSTRING_LEN] __nonstring;
u64 (*getter)(const char *payload);
bool cells_bytes;
};
@@ -768,7 +768,9 @@ static void __mlxsw_sp_port_get_stats(struct net_device *dev,
err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp);
if (err)
return;
- mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl);
+ err = mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl);
+ if (err)
+ return;
for (i = 0; i < len; i++) {
data[data_index + i] = hw_stats[i].getter(ppcnt_pl);
if (!hw_stats[i].cells_bytes)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index f07955b5439f..6a4a81c63451 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -192,6 +192,11 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
return -EOPNOTSUPP;
}
+ if (sample_act_count) {
+ NL_SET_ERR_MSG_MOD(extack, "Mirror action after sample action is not supported");
+ return -EOPNOTSUPP;
+ }
+
err = mlxsw_sp_acl_rulei_act_mirror(mlxsw_sp, rulei,
block, out_dev,
extack);
@@ -265,6 +270,11 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
return -EOPNOTSUPP;
}
+ if (mirror_act_count) {
+ NL_SET_ERR_MSG_MOD(extack, "Sample action after mirror action is not supported");
+ return -EOPNOTSUPP;
+ }
+
err = mlxsw_sp_acl_rulei_act_sample(mlxsw_sp, rulei,
block,
act->sample.psample_group,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
index 69cd689dbc83..5afe6b155ef0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
@@ -1003,10 +1003,10 @@ static void mlxsw_sp_mr_route_stats_update(struct mlxsw_sp *mlxsw_sp,
mr->mr_ops->route_stats(mlxsw_sp, mr_route->route_priv, &packets,
&bytes);
- if (mr_route->mfc->mfc_un.res.pkt != packets)
- mr_route->mfc->mfc_un.res.lastuse = jiffies;
- mr_route->mfc->mfc_un.res.pkt = packets;
- mr_route->mfc->mfc_un.res.bytes = bytes;
+ if (atomic_long_read(&mr_route->mfc->mfc_un.res.pkt) != packets)
+ WRITE_ONCE(mr_route->mfc->mfc_un.res.lastuse, jiffies);
+ atomic_long_set(&mr_route->mfc->mfc_un.res.pkt, packets);
+ atomic_long_set(&mr_route->mfc->mfc_un.res.bytes, bytes);
}
static void mlxsw_sp_mr_stats_update(struct work_struct *work)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
index d94081c7658e..e8182dd76c7d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
@@ -46,7 +46,7 @@ struct mlxsw_sp2_ptp_state {
refcount_t ptp_port_enabled_ref; /* Number of ports with time stamping
* enabled.
*/
- struct hwtstamp_config config;
+ struct kernel_hwtstamp_config config;
struct mutex lock; /* Protects 'config' and HW configuration. */
};
@@ -1083,14 +1083,14 @@ void mlxsw_sp1_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state_common)
}
int mlxsw_sp1_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config)
{
*config = mlxsw_sp_port->ptp.hwtstamp_config;
return 0;
}
static int
-mlxsw_sp1_ptp_get_message_types(const struct hwtstamp_config *config,
+mlxsw_sp1_ptp_get_message_types(const struct kernel_hwtstamp_config *config,
u16 *p_ing_types, u16 *p_egr_types,
enum hwtstamp_rx_filters *p_rx_filter)
{
@@ -1246,7 +1246,8 @@ void mlxsw_sp1_ptp_shaper_work(struct work_struct *work)
}
int mlxsw_sp1_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
enum hwtstamp_rx_filters rx_filter;
u16 ing_types;
@@ -1270,7 +1271,7 @@ int mlxsw_sp1_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
if (err)
return err;
- /* Notify the ioctl caller what we are actually timestamping. */
+ /* Notify the caller what we are actually timestamping. */
config->rx_filter = rx_filter;
return 0;
@@ -1353,6 +1354,10 @@ struct mlxsw_sp_ptp_state *mlxsw_sp2_ptp_init(struct mlxsw_sp *mlxsw_sp)
struct mlxsw_sp2_ptp_state *ptp_state;
int err;
+ /* Max FID will be used in data path, check validity as part of init. */
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, FID))
+ return ERR_PTR(-EIO);
+
ptp_state = kzalloc(sizeof(*ptp_state), GFP_KERNEL);
if (!ptp_state)
return ERR_PTR(-ENOMEM);
@@ -1447,7 +1452,7 @@ void mlxsw_sp2_ptp_transmitted(struct mlxsw_sp *mlxsw_sp,
}
int mlxsw_sp2_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config)
{
struct mlxsw_sp2_ptp_state *ptp_state;
@@ -1461,7 +1466,7 @@ int mlxsw_sp2_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int
-mlxsw_sp2_ptp_get_message_types(const struct hwtstamp_config *config,
+mlxsw_sp2_ptp_get_message_types(const struct kernel_hwtstamp_config *config,
u16 *p_ing_types, u16 *p_egr_types,
enum hwtstamp_rx_filters *p_rx_filter)
{
@@ -1538,7 +1543,7 @@ static int mlxsw_sp2_ptp_mtpcpc_set(struct mlxsw_sp *mlxsw_sp, bool ptp_trap_en,
static int mlxsw_sp2_ptp_enable(struct mlxsw_sp *mlxsw_sp, u16 ing_types,
u16 egr_types,
- struct hwtstamp_config new_config)
+ struct kernel_hwtstamp_config new_config)
{
struct mlxsw_sp2_ptp_state *ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp);
int err;
@@ -1552,7 +1557,7 @@ static int mlxsw_sp2_ptp_enable(struct mlxsw_sp *mlxsw_sp, u16 ing_types,
}
static int mlxsw_sp2_ptp_disable(struct mlxsw_sp *mlxsw_sp,
- struct hwtstamp_config new_config)
+ struct kernel_hwtstamp_config new_config)
{
struct mlxsw_sp2_ptp_state *ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp);
int err;
@@ -1567,7 +1572,7 @@ static int mlxsw_sp2_ptp_disable(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp2_ptp_configure_port(struct mlxsw_sp_port *mlxsw_sp_port,
u16 ing_types, u16 egr_types,
- struct hwtstamp_config new_config)
+ struct kernel_hwtstamp_config new_config)
{
struct mlxsw_sp2_ptp_state *ptp_state;
int err;
@@ -1588,7 +1593,7 @@ static int mlxsw_sp2_ptp_configure_port(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int mlxsw_sp2_ptp_deconfigure_port(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config new_config)
+ struct kernel_hwtstamp_config new_config)
{
struct mlxsw_sp2_ptp_state *ptp_state;
int err;
@@ -1610,11 +1615,12 @@ err_ptp_disable:
}
int mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
+ struct kernel_hwtstamp_config new_config;
struct mlxsw_sp2_ptp_state *ptp_state;
enum hwtstamp_rx_filters rx_filter;
- struct hwtstamp_config new_config;
u16 new_ing_types, new_egr_types;
bool ptp_enabled;
int err;
@@ -1648,7 +1654,7 @@ int mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_port->ptp.ing_types = new_ing_types;
mlxsw_sp_port->ptp.egr_types = new_egr_types;
- /* Notify the ioctl caller what we are actually timestamping. */
+ /* Notify the caller what we are actually timestamping. */
config->rx_filter = rx_filter;
mutex_unlock(&ptp_state->lock);
@@ -1679,43 +1685,3 @@ int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
return 0;
}
-
-int mlxsw_sp_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
- struct mlxsw_sp_port *mlxsw_sp_port,
- struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info)
-{
- if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
- this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
- dev_kfree_skb_any(skb);
- return -ENOMEM;
- }
-
- mlxsw_sp_txhdr_construct(skb, tx_info);
- return 0;
-}
-
-int mlxsw_sp2_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
- struct mlxsw_sp_port *mlxsw_sp_port,
- struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info)
-{
- /* In Spectrum-2 and Spectrum-3, in order for PTP event packets to have
- * their correction field correctly set on the egress port they must be
- * transmitted as data packets. Such packets ingress the ASIC via the
- * CPU port and must have a VLAN tag, as the CPU port is not configured
- * with a PVID. Push the default VLAN (4095), which is configured as
- * egress untagged on all the ports.
- */
- if (!skb_vlan_tagged(skb)) {
- skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
- MLXSW_SP_DEFAULT_VID);
- if (!skb) {
- this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
- return -ENOMEM;
- }
- }
-
- return mlxsw_sp_txhdr_ptp_data_construct(mlxsw_core, mlxsw_sp_port, skb,
- tx_info);
-}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
index c8aa1452fbb9..df37f1470830 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
@@ -34,10 +34,11 @@ void mlxsw_sp1_ptp_got_timestamp(struct mlxsw_sp *mlxsw_sp, bool ingress,
u64 timestamp);
int mlxsw_sp1_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config *config);
+ struct kernel_hwtstamp_config *config);
int mlxsw_sp1_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config *config);
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
void mlxsw_sp1_ptp_shaper_work(struct work_struct *work);
@@ -49,11 +50,6 @@ void mlxsw_sp1_get_stats_strings(u8 **p);
void mlxsw_sp1_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
u64 *data, int data_index);
-int mlxsw_sp_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
- struct mlxsw_sp_port *mlxsw_sp_port,
- struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info);
-
struct mlxsw_sp_ptp_clock *
mlxsw_sp2_ptp_clock_init(struct mlxsw_sp *mlxsw_sp, struct device *dev);
@@ -70,19 +66,15 @@ void mlxsw_sp2_ptp_transmitted(struct mlxsw_sp *mlxsw_sp,
struct sk_buff *skb, u16 local_port);
int mlxsw_sp2_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config *config);
+ struct kernel_hwtstamp_config *config);
int mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config *config);
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
struct kernel_ethtool_ts_info *info);
-int mlxsw_sp2_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
- struct mlxsw_sp_port *mlxsw_sp_port,
- struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info);
-
#else
static inline struct mlxsw_sp_ptp_clock *
@@ -127,14 +119,15 @@ mlxsw_sp1_ptp_got_timestamp(struct mlxsw_sp *mlxsw_sp, bool ingress,
static inline int
mlxsw_sp1_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config)
{
return -EOPNOTSUPP;
}
static inline int
mlxsw_sp1_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
return -EOPNOTSUPP;
}
@@ -157,15 +150,6 @@ static inline void mlxsw_sp1_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
{
}
-static inline int
-mlxsw_sp_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
- struct mlxsw_sp_port *mlxsw_sp_port,
- struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info)
-{
- return -EOPNOTSUPP;
-}
-
static inline struct mlxsw_sp_ptp_clock *
mlxsw_sp2_ptp_clock_init(struct mlxsw_sp *mlxsw_sp, struct device *dev)
{
@@ -200,23 +184,15 @@ static inline void mlxsw_sp2_ptp_transmitted(struct mlxsw_sp *mlxsw_sp,
static inline int
mlxsw_sp2_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config)
{
return -EOPNOTSUPP;
}
static inline int
mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config *config)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int
-mlxsw_sp2_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
- struct mlxsw_sp_port *mlxsw_sp_port,
- struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info)
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 7d6d859cef3f..a2033837182e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -3014,6 +3014,9 @@ static int mlxsw_sp_neigh_rif_made_sync(struct mlxsw_sp *mlxsw_sp,
.rif = rif,
};
+ if (!mlxsw_sp_dev_lower_is_port(mlxsw_sp_rif_dev(rif)))
+ return 0;
+
neigh_for_each(&arp_tbl, mlxsw_sp_neigh_rif_made_sync_each, &rms);
if (rms.err)
goto err_arp;
@@ -8184,41 +8187,6 @@ mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
return NULL;
}
-bool mlxsw_sp_rif_exists(struct mlxsw_sp *mlxsw_sp,
- const struct net_device *dev)
-{
- struct mlxsw_sp_rif *rif;
-
- mutex_lock(&mlxsw_sp->router->lock);
- rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
- mutex_unlock(&mlxsw_sp->router->lock);
-
- return rif;
-}
-
-u16 mlxsw_sp_rif_vid(struct mlxsw_sp *mlxsw_sp, const struct net_device *dev)
-{
- struct mlxsw_sp_rif *rif;
- u16 vid = 0;
-
- mutex_lock(&mlxsw_sp->router->lock);
- rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
- if (!rif)
- goto out;
-
- /* We only return the VID for VLAN RIFs. Otherwise we return an
- * invalid value (0).
- */
- if (rif->ops->type != MLXSW_SP_RIF_TYPE_VLAN)
- goto out;
-
- vid = mlxsw_sp_fid_8021q_vid(rif->fid);
-
-out:
- mutex_unlock(&mlxsw_sp->router->lock);
- return vid;
-}
-
static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
{
char ritr_pl[MLXSW_REG_RITR_LEN];
@@ -8417,19 +8385,6 @@ u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
return lb_rif->common.rif_index;
}
-u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
-{
- struct net_device *dev = mlxsw_sp_rif_dev(&lb_rif->common);
- u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(dev);
- struct mlxsw_sp_vr *ul_vr;
-
- ul_vr = mlxsw_sp_vr_get(lb_rif->common.mlxsw_sp, ul_tb_id, NULL);
- if (WARN_ON(IS_ERR(ul_vr)))
- return 0;
-
- return ul_vr->id;
-}
-
u16 mlxsw_sp_ipip_lb_ul_rif_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
{
return lb_rif->ul_rif_id;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
index 0432c7cc6b07..313efab5c324 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
@@ -90,7 +90,6 @@ struct mlxsw_sp_ipip_entry;
struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
u16 rif_index);
u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *rif);
-u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *rif);
u16 mlxsw_sp_ipip_lb_ul_rif_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif);
u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev);
int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
index 4b5fd71c897d..32d2e61f2b82 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -423,8 +423,7 @@ mlxsw_sp_span_gretap4_route(const struct net_device *to_dev,
parms = mlxsw_sp_ipip_netdev_parms4(to_dev);
ip_tunnel_init_flow(&fl4, parms.iph.protocol, *daddrp, *saddrp,
- 0, 0, dev_net(to_dev), parms.link, tun->fwmark, 0,
- 0);
+ 0, 0, tun->net, parms.link, tun->fwmark, 0, 0);
rt = ip_route_output_key(tun->net, &fl4);
if (IS_ERR(rt))
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 6397ff0dc951..a48bf342084d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -2929,23 +2929,8 @@ void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
}
-int mlxsw_sp_bridge_vxlan_join(struct mlxsw_sp *mlxsw_sp,
- const struct net_device *br_dev,
- const struct net_device *vxlan_dev, u16 vid,
- struct netlink_ext_ack *extack)
-{
- struct mlxsw_sp_bridge_device *bridge_device;
-
- bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
- if (WARN_ON(!bridge_device))
- return -EINVAL;
-
- return bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, vid,
- extack);
-}
-
-void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp,
- const struct net_device *vxlan_dev)
+static void __mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *vxlan_dev)
{
struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
struct mlxsw_sp_fid *fid;
@@ -2963,6 +2948,47 @@ void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_fid_put(fid);
}
+int mlxsw_sp_bridge_vxlan_join(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *br_dev,
+ struct net_device *vxlan_dev, u16 vid,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_bridge_device *bridge_device;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ int err;
+
+ bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
+ if (WARN_ON(!bridge_device))
+ return -EINVAL;
+
+ mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(bridge_device->dev);
+ if (!mlxsw_sp_port)
+ return -EINVAL;
+
+ err = bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, vid,
+ extack);
+ if (err)
+ return err;
+
+ err = switchdev_bridge_port_offload(vxlan_dev, mlxsw_sp_port->dev,
+ NULL, NULL, NULL, false, extack);
+ if (err)
+ goto err_bridge_port_offload;
+
+ return 0;
+
+err_bridge_port_offload:
+ __mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
+ return err;
+}
+
+void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *vxlan_dev)
+{
+ switchdev_bridge_port_unoffload(vxlan_dev, NULL, NULL, NULL);
+ __mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
+}
+
static void
mlxsw_sp_switchdev_vxlan_addr_convert(const union vxlan_addr *vxlan_addr,
enum mlxsw_sp_l3proto *proto,
@@ -3867,7 +3893,7 @@ mlxsw_sp_switchdev_vxlan_vlan_add(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_fid_put(fid);
return -EINVAL;
}
- mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
+ __mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
mlxsw_sp_fid_put(fid);
return 0;
}
@@ -3883,7 +3909,7 @@ mlxsw_sp_switchdev_vxlan_vlan_add(struct mlxsw_sp *mlxsw_sp,
/* Fourth case: The new VLAN is PVID, which means the VLAN currently
* mapped to the VNI should be unmapped
*/
- mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
+ __mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
mlxsw_sp_fid_put(fid);
/* Fifth case: The new VLAN is also egress untagged, which means the
@@ -3923,7 +3949,7 @@ mlxsw_sp_switchdev_vxlan_vlan_del(struct mlxsw_sp *mlxsw_sp,
if (mlxsw_sp_fid_8021q_vid(fid) != vid)
goto out;
- mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
+ __mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
out:
mlxsw_sp_fid_put(fid);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
index 899c954e0e5f..b5c3f789c685 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
@@ -173,7 +173,7 @@ static void mlxsw_sp_rx_no_mark_listener(struct sk_buff *skb, u16 local_port,
if (err)
return;
- netif_receive_skb(skb);
+ napi_gro_receive(mlxsw_skb_cb(skb)->rx_md_info.napi, skb);
}
static void mlxsw_sp_rx_mark_listener(struct sk_buff *skb, u16 local_port,
@@ -959,18 +959,18 @@ static const struct mlxsw_sp_trap_item mlxsw_sp_trap_items_arr[] = {
},
{
.trap = MLXSW_SP_TRAP_CONTROL(ARP_REQUEST, NEIGH_DISCOVERY,
- MIRROR),
+ TRAP),
.listeners_arr = {
- MLXSW_SP_RXL_MARK(ROUTER_ARPBC, NEIGH_DISCOVERY,
- TRAP_TO_CPU, false),
+ MLXSW_SP_RXL_NO_MARK(ARPBC, NEIGH_DISCOVERY,
+ TRAP_TO_CPU, false),
},
},
{
.trap = MLXSW_SP_TRAP_CONTROL(ARP_RESPONSE, NEIGH_DISCOVERY,
- MIRROR),
+ TRAP),
.listeners_arr = {
- MLXSW_SP_RXL_MARK(ROUTER_ARPUC, NEIGH_DISCOVERY,
- TRAP_TO_CPU, false),
+ MLXSW_SP_RXL_NO_MARK(ARPUC, NEIGH_DISCOVERY,
+ TRAP_TO_CPU, false),
},
},
{
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
index 83477c8e6971..80ee5c4825dc 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -29,6 +29,8 @@ enum {
MLXSW_TRAP_ID_FDB_MISMATCH = 0x3B,
MLXSW_TRAP_ID_FID_MISS = 0x3D,
MLXSW_TRAP_ID_DECAP_ECN0 = 0x40,
+ MLXSW_TRAP_ID_ARPBC = 0x50,
+ MLXSW_TRAP_ID_ARPUC = 0x51,
MLXSW_TRAP_ID_MTUERROR = 0x52,
MLXSW_TRAP_ID_TTLERROR = 0x53,
MLXSW_TRAP_ID_LBERROR = 0x54,
@@ -66,13 +68,10 @@ enum {
MLXSW_TRAP_ID_HOST_MISS_IPV6 = 0x92,
MLXSW_TRAP_ID_IPIP_DECAP_ERROR = 0xB1,
MLXSW_TRAP_ID_NVE_DECAP_ARP = 0xB8,
- MLXSW_TRAP_ID_NVE_ENCAP_ARP = 0xBD,
MLXSW_TRAP_ID_IPV4_BFD = 0xD0,
MLXSW_TRAP_ID_IPV6_BFD = 0xD1,
MLXSW_TRAP_ID_ROUTER_ALERT_IPV4 = 0xD6,
MLXSW_TRAP_ID_ROUTER_ALERT_IPV6 = 0xD7,
- MLXSW_TRAP_ID_ROUTER_ARPBC = 0xE0,
- MLXSW_TRAP_ID_ROUTER_ARPUC = 0xE1,
MLXSW_TRAP_ID_DISCARD_NON_ROUTABLE = 0x11A,
MLXSW_TRAP_ID_DISCARD_ROUTER2 = 0x130,
MLXSW_TRAP_ID_DISCARD_ROUTER3 = 0x131,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/txheader.h b/drivers/net/ethernet/mellanox/mlxsw/txheader.h
index da51dd9d5e44..e78cba5821b6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/txheader.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/txheader.h
@@ -4,6 +4,69 @@
#ifndef _MLXSW_TXHEADER_H
#define _MLXSW_TXHEADER_H
+/* tx_hdr_version
+ * Tx header version.
+ * Must be set to 1.
+ */
+MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);
+
+/* tx_hdr_ctl
+ * Packet control type.
+ * 0 - Ethernet control (e.g. EMADs, LACP)
+ * 1 - Ethernet data
+ */
+MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);
+
+/* tx_hdr_proto
+ * Packet protocol type. Must be set to 1 (Ethernet).
+ */
+MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);
+
+/* tx_hdr_rx_is_router
+ * Packet is sent from the router. Valid for data packets only.
+ */
+MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);
+
+/* tx_hdr_fid_valid
+ * Indicates if the 'fid' field is valid and should be used for
+ * forwarding lookup. Valid for data packets only.
+ */
+MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);
+
+/* tx_hdr_swid
+ * Switch partition ID. Must be set to 0.
+ */
+MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);
+
+/* tx_hdr_control_tclass
+ * Indicates if the packet should use the control TClass and not one
+ * of the data TClasses.
+ */
+MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);
+
+/* tx_hdr_port_mid
+ * Destination local port for unicast packets.
+ * Destination multicast ID for multicast packets.
+ *
+ * Control packets are directed to a specific egress port, while data
+ * packets are transmitted through the CPU port (0) into the switch partition,
+ * where forwarding rules are applied.
+ */
+MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);
+
+/* tx_hdr_fid
+ * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
+ * set, otherwise calculated based on the packet's VID using VID to FID mapping.
+ * Valid for data packets only.
+ */
+MLXSW_ITEM32(tx, hdr, fid, 0x08, 16, 16);
+
+/* tx_hdr_type
+ * 0 - Data packets
+ * 6 - Control packets
+ */
+MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
+
#define MLXSW_TXHDR_LEN 0x10
#define MLXSW_TXHDR_VERSION_0 0
#define MLXSW_TXHDR_VERSION_1 1
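For illustration only (not part of this change): a control-packet Tx header is built with the setters generated by the MLXSW_ITEM32(tx, hdr, ...) definitions above (mlxsw_tx_hdr_<field>_set()), using the existing MLXSW_TXHDR_* constants from this header. The helper below is a hypothetical sketch, not the helper the core actually uses:

        static void example_tx_hdr_construct(struct sk_buff *skb,
                                             const struct mlxsw_tx_info *tx_info)
        {
                char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

                memset(txhdr, 0, MLXSW_TXHDR_LEN);
                mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
                mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);      /* control packet */
                mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);  /* Ethernet */
                mlxsw_tx_hdr_swid_set(txhdr, 0);
                mlxsw_tx_hdr_control_tclass_set(txhdr, 1);             /* use control TClass */
                mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port); /* egress port */
                mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
        }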
diff --git a/drivers/net/ethernet/meta/Kconfig b/drivers/net/ethernet/meta/Kconfig
index 831921b9d4d5..3ba527514f1e 100644
--- a/drivers/net/ethernet/meta/Kconfig
+++ b/drivers/net/ethernet/meta/Kconfig
@@ -27,6 +27,7 @@ config FBNIC
select NET_DEVLINK
select PAGE_POOL
select PHYLINK
+ select PLDMFW
help
This driver supports Meta Platforms Host Network Interface.
diff --git a/drivers/net/ethernet/meta/fbnic/Makefile b/drivers/net/ethernet/meta/fbnic/Makefile
index 239b2258ec65..0dbc634adb4b 100644
--- a/drivers/net/ethernet/meta/fbnic/Makefile
+++ b/drivers/net/ethernet/meta/fbnic/Makefile
@@ -20,6 +20,7 @@ fbnic-y := fbnic_csr.o \
fbnic_pci.o \
fbnic_phylink.o \
fbnic_rpc.o \
+ fbnic_time.o \
fbnic_tlv.o \
- fbnic_txrx.o \
- fbnic_time.o
+ fbnic_txrx.o
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic.h b/drivers/net/ethernet/meta/fbnic/fbnic.h
index 706ae6104c8e..65815d4f379e 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic.h
@@ -16,6 +16,11 @@
#include "fbnic_mac.h"
#include "fbnic_rpc.h"
+struct fbnic_napi_vector;
+
+#define FBNIC_MAX_NAPI_VECTORS 128u
+#define FBNIC_MBX_CMPL_SLOTS 4
+
struct fbnic_dev {
struct device *dev;
struct net_device *netdev;
@@ -29,11 +34,16 @@ struct fbnic_dev {
unsigned int pcs_msix_vector;
unsigned short num_irqs;
+ struct {
+ u8 users;
+ char name[IFNAMSIZ + 9];
+ } napi_irq[FBNIC_MAX_NAPI_VECTORS];
+
struct delayed_work service_task;
struct fbnic_fw_mbx mbx[FBNIC_IPC_MBX_INDICES];
struct fbnic_fw_cap fw_cap;
- struct fbnic_fw_completion *cmpl_data;
+ struct fbnic_fw_completion *cmpl_data[FBNIC_MBX_CMPL_SLOTS];
/* Lock protecting Tx Mailbox queue to prevent possible races */
spinlock_t fw_tx_lock;
@@ -51,6 +61,12 @@ struct fbnic_dev {
u8 mac_addr_boundary;
u8 tce_tcam_last;
+ /* IP TCAM */
+ struct fbnic_ip_addr ip_src[FBNIC_RPC_TCAM_IP_ADDR_NUM_ENTRIES];
+ struct fbnic_ip_addr ip_dst[FBNIC_RPC_TCAM_IP_ADDR_NUM_ENTRIES];
+ struct fbnic_ip_addr ipo_src[FBNIC_RPC_TCAM_IP_ADDR_NUM_ENTRIES];
+ struct fbnic_ip_addr ipo_dst[FBNIC_RPC_TCAM_IP_ADDR_NUM_ENTRIES];
+
/* Number of TCQs/RCQs available on hardware */
u16 max_num_queues;
@@ -66,6 +82,9 @@ struct fbnic_dev {
/* Local copy of hardware statistics */
struct fbnic_hw_stats hw_stats;
+
+ /* Lock protecting access to hw_stats */
+ spinlock_t hw_stats_lock;
};
/* Reserve entry 0 in the MSI-X "others" array until we have filled all
@@ -139,15 +158,21 @@ struct fbnic_dev *fbnic_devlink_alloc(struct pci_dev *pdev);
void fbnic_devlink_register(struct fbnic_dev *fbd);
void fbnic_devlink_unregister(struct fbnic_dev *fbd);
-int fbnic_fw_enable_mbx(struct fbnic_dev *fbd);
-void fbnic_fw_disable_mbx(struct fbnic_dev *fbd);
+int fbnic_fw_request_mbx(struct fbnic_dev *fbd);
+void fbnic_fw_free_mbx(struct fbnic_dev *fbd);
void fbnic_hwmon_register(struct fbnic_dev *fbd);
void fbnic_hwmon_unregister(struct fbnic_dev *fbd);
-int fbnic_pcs_irq_enable(struct fbnic_dev *fbd);
-void fbnic_pcs_irq_disable(struct fbnic_dev *fbd);
+int fbnic_pcs_request_irq(struct fbnic_dev *fbd);
+void fbnic_pcs_free_irq(struct fbnic_dev *fbd);
+void fbnic_napi_name_irqs(struct fbnic_dev *fbd);
+int fbnic_napi_request_irq(struct fbnic_dev *fbd,
+ struct fbnic_napi_vector *nv);
+void fbnic_napi_free_irq(struct fbnic_dev *fbd,
+ struct fbnic_napi_vector *nv);
+void fbnic_synchronize_irq(struct fbnic_dev *fbd, int nr);
int fbnic_request_irq(struct fbnic_dev *dev, int nr, irq_handler_t handler,
unsigned long flags, const char *name, void *data);
void fbnic_free_irq(struct fbnic_dev *dev, int nr, void *data);
@@ -165,6 +190,9 @@ void fbnic_dbg_exit(void);
void fbnic_csr_get_regs(struct fbnic_dev *fbd, u32 *data, u32 *regs_version);
int fbnic_csr_regs_len(struct fbnic_dev *fbd);
+void fbnic_config_txrx_usecs(struct fbnic_napi_vector *nv, u32 arm);
+void fbnic_config_rx_frames(struct fbnic_napi_vector *nv);
+
enum fbnic_boards {
fbnic_board_asic
};
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_csr.c b/drivers/net/ethernet/meta/fbnic/fbnic_csr.c
index 2118901b25e9..d9c0dc1c2af9 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_csr.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_csr.c
@@ -30,6 +30,7 @@ static const struct fbnic_csr_bounds fbnic_csr_sects[] = {
FBNIC_BOUNDS(RSFEC),
FBNIC_BOUNDS(MAC_MAC),
FBNIC_BOUNDS(SIG),
+ FBNIC_BOUNDS(PCIE_SS_COMPHY),
FBNIC_BOUNDS(PUL_USER),
FBNIC_BOUNDS(QUEUE),
FBNIC_BOUNDS(RPC_RAM),
@@ -64,7 +65,7 @@ static void fbnic_csr_get_regs_rpc_ram(struct fbnic_dev *fbd, u32 **data_p)
u32 i, j;
*(data++) = start;
- *(data++) = end - 1;
+ *(data++) = end;
/* FBNIC_RPC_TCAM_ACT */
for (i = 0; i < FBNIC_RPC_TCAM_ACT_NUM_ENTRIES; i++) {
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_csr.h b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
index 02bb81b3c506..36393a17d92d 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
@@ -397,6 +397,15 @@ enum {
#define FBNIC_TCE_DROP_CTRL_TTI_FRM_DROP_EN CSR_BIT(1)
#define FBNIC_TCE_DROP_CTRL_TTI_TBI_DROP_EN CSR_BIT(2)
+#define FBNIC_TCE_TTI_CM_DROP_PKTS 0x0403e /* 0x100f8 */
+#define FBNIC_TCE_TTI_CM_DROP_BYTE_L 0x0403f /* 0x100fc */
+#define FBNIC_TCE_TTI_CM_DROP_BYTE_H 0x04040 /* 0x10100 */
+#define FBNIC_TCE_TTI_FRAME_DROP_PKTS 0x04041 /* 0x10104 */
+#define FBNIC_TCE_TTI_FRAME_DROP_BYTE_L 0x04042 /* 0x10108 */
+#define FBNIC_TCE_TTI_FRAME_DROP_BYTE_H 0x04043 /* 0x1010c */
+#define FBNIC_TCE_TBI_DROP_PKTS 0x04044 /* 0x10110 */
+#define FBNIC_TCE_TBI_DROP_BYTE_L 0x04045 /* 0x10114 */
+
#define FBNIC_TCE_TCAM_IDX2DEST_MAP 0x0404A /* 0x10128 */
#define FBNIC_TCE_TCAM_IDX2DEST_MAP_DEST_ID_0 CSR_GENMASK(3, 0)
enum {
@@ -432,6 +441,11 @@ enum {
#define FBNIC_TMI_SOP_PROT_CTRL 0x04400 /* 0x11000 */
#define FBNIC_TMI_DROP_CTRL 0x04401 /* 0x11004 */
#define FBNIC_TMI_DROP_CTRL_EN CSR_BIT(0)
+#define FBNIC_TMI_DROP_PKTS 0x04402 /* 0x11008 */
+#define FBNIC_TMI_DROP_BYTE_L 0x04403 /* 0x1100c */
+#define FBNIC_TMI_ILLEGAL_PTP_REQS 0x04409 /* 0x11024 */
+#define FBNIC_TMI_GOOD_PTP_TS 0x0440a /* 0x11028 */
+#define FBNIC_TMI_BAD_PTP_TS 0x0440b /* 0x1102c */
#define FBNIC_CSR_END_TMI 0x0443f /* CSR section delimiter */
/* Precision Time Protocol Registers */
@@ -485,6 +499,14 @@ enum {
FBNIC_RXB_FIFO_INDICES = 8
};
+enum {
+ FBNIC_RXB_INTF_NET = 0,
+ FBNIC_RXB_INTF_RBT = 1,
+ /* Unused */
+ /* Unused */
+ FBNIC_RXB_INTF_INDICES = 4
+};
+
#define FBNIC_RXB_CT_SIZE(n) (0x08000 + (n)) /* 0x20000 + 4*n */
#define FBNIC_RXB_CT_SIZE_CNT 8
#define FBNIC_RXB_CT_SIZE_HEADER CSR_GENMASK(5, 0)
@@ -605,8 +627,11 @@ enum {
FBNIC_RPC_ACT_TBL0_DEST_EI = 4,
};
+#define FBNIC_RPC_ACT_TBL0_Q_SEL CSR_BIT(4)
+#define FBNIC_RPC_ACT_TBL0_Q_ID CSR_GENMASK(15, 8)
#define FBNIC_RPC_ACT_TBL0_DMA_HINT CSR_GENMASK(24, 16)
#define FBNIC_RPC_ACT_TBL0_TS_ENA CSR_BIT(28)
+#define FBNIC_RPC_ACT_TBL0_ACT_TBL_IDX CSR_BIT(29)
#define FBNIC_RPC_ACT_TBL0_RSS_CTXT_ID CSR_BIT(30)
#define FBNIC_RPC_ACT_TBL1_DEFAULT 0x0840b /* 0x2102c */
@@ -677,6 +702,9 @@ enum {
#define FBNIC_RPC_TCAM_OUTER_IPSRC(m, n)\
(0x08c00 + 0x08 * (n) + (m)) /* 0x023000 + 32*n + 4*m */
+#define FBNIC_RPC_TCAM_IP_ADDR_VALUE CSR_GENMASK(15, 0)
+#define FBNIC_RPC_TCAM_IP_ADDR_MASK CSR_GENMASK(31, 16)
+
#define FBNIC_RPC_TCAM_OUTER_IPDST(m, n)\
(0x08c48 + 0x08 * (n) + (m)) /* 0x023120 + 32*n + 4*m */
#define FBNIC_RPC_TCAM_IPSRC(m, n)\
@@ -782,13 +810,54 @@ enum {
#define FBNIC_MAC_STAT_TX_MULTICAST_H 0x11a4b /* 0x4692c */
#define FBNIC_MAC_STAT_TX_BROADCAST_L 0x11a4c /* 0x46930 */
#define FBNIC_MAC_STAT_TX_BROADCAST_H 0x11a4d /* 0x46934 */
+
+/* PCIE Comphy Registers */
+#define FBNIC_CSR_START_PCIE_SS_COMPHY 0x2442e /* CSR section delimiter */
+#define FBNIC_CSR_END_PCIE_SS_COMPHY 0x279d7 /* CSR section delimiter */
+
/* PUL User Registers */
#define FBNIC_CSR_START_PUL_USER 0x31000 /* CSR section delimiter */
#define FBNIC_PUL_OB_TLP_HDR_AW_CFG 0x3103d /* 0xc40f4 */
+#define FBNIC_PUL_OB_TLP_HDR_AW_CFG_FLUSH CSR_BIT(19)
#define FBNIC_PUL_OB_TLP_HDR_AW_CFG_BME CSR_BIT(18)
#define FBNIC_PUL_OB_TLP_HDR_AR_CFG 0x3103e /* 0xc40f8 */
+#define FBNIC_PUL_OB_TLP_HDR_AR_CFG_FLUSH CSR_BIT(19)
#define FBNIC_PUL_OB_TLP_HDR_AR_CFG_BME CSR_BIT(18)
-#define FBNIC_CSR_END_PUL_USER 0x31080 /* CSR section delimiter */
+#define FBNIC_PUL_USER_OB_RD_TLP_CNT_31_0 \
+ 0x3106e /* 0xc41b8 */
+#define FBNIC_PUL_USER_OB_RD_DWORD_CNT_31_0 \
+ 0x31070 /* 0xc41c0 */
+#define FBNIC_PUL_USER_OB_RD_DWORD_CNT_63_32 \
+ 0x31071 /* 0xc41c4 */
+#define FBNIC_PUL_USER_OB_WR_TLP_CNT_31_0 \
+ 0x31072 /* 0xc41c8 */
+#define FBNIC_PUL_USER_OB_WR_TLP_CNT_63_32 \
+ 0x31073 /* 0xc41cc */
+#define FBNIC_PUL_USER_OB_WR_DWORD_CNT_31_0 \
+ 0x31074 /* 0xc41d0 */
+#define FBNIC_PUL_USER_OB_WR_DWORD_CNT_63_32 \
+ 0x31075 /* 0xc41d4 */
+#define FBNIC_PUL_USER_OB_CPL_TLP_CNT_31_0 \
+ 0x31076 /* 0xc41d8 */
+#define FBNIC_PUL_USER_OB_CPL_TLP_CNT_63_32 \
+ 0x31077 /* 0xc41dc */
+#define FBNIC_PUL_USER_OB_CPL_DWORD_CNT_31_0 \
+ 0x31078 /* 0xc41e0 */
+#define FBNIC_PUL_USER_OB_CPL_DWORD_CNT_63_32 \
+ 0x31079 /* 0xc41e4 */
+#define FBNIC_PUL_USER_OB_RD_DBG_CNT_CPL_CRED_31_0 \
+ 0x3107a /* 0xc41e8 */
+#define FBNIC_PUL_USER_OB_RD_DBG_CNT_CPL_CRED_63_32 \
+ 0x3107b /* 0xc41ec */
+#define FBNIC_PUL_USER_OB_RD_DBG_CNT_TAG_31_0 \
+ 0x3107c /* 0xc41f0 */
+#define FBNIC_PUL_USER_OB_RD_DBG_CNT_TAG_63_32 \
+ 0x3107d /* 0xc41f4 */
+#define FBNIC_PUL_USER_OB_RD_DBG_CNT_NP_CRED_31_0 \
+ 0x3107e /* 0xc41f8 */
+#define FBNIC_PUL_USER_OB_RD_DBG_CNT_NP_CRED_63_32 \
+ 0x3107f /* 0xc41fc */
+#define FBNIC_CSR_END_PUL_USER 0x310ea /* CSR section delimiter */
/* Queue Registers
*
@@ -819,6 +888,12 @@ enum {
#define FBNIC_QUEUE_TWQ1_BAL 0x022 /* 0x088 */
#define FBNIC_QUEUE_TWQ1_BAH 0x023 /* 0x08c */
+/* Tx Work Queue Statistics Registers */
+#define FBNIC_QUEUE_TWQ0_PKT_CNT 0x062 /* 0x188 */
+#define FBNIC_QUEUE_TWQ0_ERR_CNT 0x063 /* 0x18c */
+#define FBNIC_QUEUE_TWQ1_PKT_CNT 0x072 /* 0x1c8 */
+#define FBNIC_QUEUE_TWQ1_ERR_CNT 0x073 /* 0x1cc */
+
/* Tx Completion Queue Registers */
#define FBNIC_QUEUE_TCQ_CTL 0x080 /* 0x200 */
#define FBNIC_QUEUE_TCQ_CTL_RESET CSR_BIT(0)
@@ -908,6 +983,12 @@ enum {
FBNIC_QUEUE_RDE_CTL1_PAYLD_PACK_RSS = 2,
};
+/* Rx Per CQ Statistics Counters */
+#define FBNIC_QUEUE_RDE_PKT_CNT 0x2a2 /* 0xa88 */
+#define FBNIC_QUEUE_RDE_PKT_ERR_CNT 0x2a3 /* 0xa8c */
+#define FBNIC_QUEUE_RDE_CQ_DROP_CNT 0x2a4 /* 0xa90 */
+#define FBNIC_QUEUE_RDE_BDQ_DROP_CNT 0x2a5 /* 0xa94 */
+
/* Rx Interrupt Manager Registers */
#define FBNIC_QUEUE_RIM_CTL 0x2c0 /* 0xb00 */
#define FBNIC_QUEUE_RIM_CTL_MSIX_MASK CSR_GENMASK(7, 0)
@@ -928,43 +1009,6 @@ enum {
#define FBNIC_MAX_QUEUES 128
#define FBNIC_CSR_END_QUEUE (0x40000 + 0x400 * FBNIC_MAX_QUEUES - 1)
-/* PUL User Registers*/
-#define FBNIC_PUL_USER_OB_RD_TLP_CNT_31_0 \
- 0x3106e /* 0xc41b8 */
-#define FBNIC_PUL_USER_OB_RD_DWORD_CNT_31_0 \
- 0x31070 /* 0xc41c0 */
-#define FBNIC_PUL_USER_OB_RD_DWORD_CNT_63_32 \
- 0x31071 /* 0xc41c4 */
-#define FBNIC_PUL_USER_OB_WR_TLP_CNT_31_0 \
- 0x31072 /* 0xc41c8 */
-#define FBNIC_PUL_USER_OB_WR_TLP_CNT_63_32 \
- 0x31073 /* 0xc41cc */
-#define FBNIC_PUL_USER_OB_WR_DWORD_CNT_31_0 \
- 0x31074 /* 0xc41d0 */
-#define FBNIC_PUL_USER_OB_WR_DWORD_CNT_63_32 \
- 0x31075 /* 0xc41d4 */
-#define FBNIC_PUL_USER_OB_CPL_TLP_CNT_31_0 \
- 0x31076 /* 0xc41d8 */
-#define FBNIC_PUL_USER_OB_CPL_TLP_CNT_63_32 \
- 0x31077 /* 0xc41dc */
-#define FBNIC_PUL_USER_OB_CPL_DWORD_CNT_31_0 \
- 0x31078 /* 0xc41e0 */
-#define FBNIC_PUL_USER_OB_CPL_DWORD_CNT_63_32 \
- 0x31079 /* 0xc41e4 */
-#define FBNIC_PUL_USER_OB_RD_DBG_CNT_CPL_CRED_31_0 \
- 0x3107a /* 0xc41e8 */
-#define FBNIC_PUL_USER_OB_RD_DBG_CNT_CPL_CRED_63_32 \
- 0x3107b /* 0xc41ec */
-#define FBNIC_PUL_USER_OB_RD_DBG_CNT_TAG_31_0 \
- 0x3107c /* 0xc41f0 */
-#define FBNIC_PUL_USER_OB_RD_DBG_CNT_TAG_63_32 \
- 0x3107d /* 0xc41f4 */
-#define FBNIC_PUL_USER_OB_RD_DBG_CNT_NP_CRED_31_0 \
- 0x3107e /* 0xc41f8 */
-#define FBNIC_PUL_USER_OB_RD_DBG_CNT_NP_CRED_63_32 \
- 0x3107f /* 0xc41fc */
-#define FBNIC_CSR_END_PUL_USER 0x31080 /* CSR section delimiter */
-
/* BAR 4 CSRs */
/* The IPC mailbox consists of 32 mailboxes, with each mailbox consisting
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_debugfs.c b/drivers/net/ethernet/meta/fbnic/fbnic_debugfs.c
index 59951b5abdb7..e8f2d7f2d962 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_debugfs.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_debugfs.c
@@ -10,6 +10,166 @@
static struct dentry *fbnic_dbg_root;
+static void fbnic_dbg_desc_break(struct seq_file *s, int i)
+{
+ while (i--)
+ seq_putc(s, '-');
+
+ seq_putc(s, '\n');
+}
+
+static int fbnic_dbg_mac_addr_show(struct seq_file *s, void *v)
+{
+ struct fbnic_dev *fbd = s->private;
+ char hdr[80];
+ int i;
+
+ /* Generate Header */
+ snprintf(hdr, sizeof(hdr), "%3s %s %-17s %s\n",
+ "Idx", "S", "TCAM Bitmap", "Addr/Mask");
+ seq_puts(s, hdr);
+ fbnic_dbg_desc_break(s, strnlen(hdr, sizeof(hdr)));
+
+ for (i = 0; i < FBNIC_RPC_TCAM_MACDA_NUM_ENTRIES; i++) {
+ struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i];
+
+ seq_printf(s, "%02d %d %64pb %pm\n",
+ i, mac_addr->state, mac_addr->act_tcam,
+ mac_addr->value.addr8);
+ seq_printf(s, " %pm\n",
+ mac_addr->mask.addr8);
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(fbnic_dbg_mac_addr);
+
+static int fbnic_dbg_tce_tcam_show(struct seq_file *s, void *v)
+{
+ struct fbnic_dev *fbd = s->private;
+ int i, tcam_idx = 0;
+ char hdr[80];
+
+ /* Generate Header */
+ snprintf(hdr, sizeof(hdr), "%3s %s %-17s %s\n",
+ "Idx", "S", "TCAM Bitmap", "Addr/Mask");
+ seq_puts(s, hdr);
+ fbnic_dbg_desc_break(s, strnlen(hdr, sizeof(hdr)));
+
+ for (i = 0; i < ARRAY_SIZE(fbd->mac_addr); i++) {
+ struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i];
+
+ /* Verify BMC bit is set */
+ if (!test_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam))
+ continue;
+
+ if (tcam_idx == FBNIC_TCE_TCAM_NUM_ENTRIES)
+ break;
+
+ seq_printf(s, "%02d %d %64pb %pm\n",
+ tcam_idx, mac_addr->state, mac_addr->act_tcam,
+ mac_addr->value.addr8);
+ seq_printf(s, " %pm\n",
+ mac_addr->mask.addr8);
+ tcam_idx++;
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(fbnic_dbg_tce_tcam);
+
+static int fbnic_dbg_act_tcam_show(struct seq_file *s, void *v)
+{
+ struct fbnic_dev *fbd = s->private;
+ char hdr[80];
+ int i;
+
+ /* Generate Header */
+ snprintf(hdr, sizeof(hdr), "%3s %s %-55s %-4s %s\n",
+ "Idx", "S", "Value/Mask", "RSS", "Dest");
+ seq_puts(s, hdr);
+ fbnic_dbg_desc_break(s, strnlen(hdr, sizeof(hdr)));
+
+ for (i = 0; i < FBNIC_RPC_TCAM_ACT_NUM_ENTRIES; i++) {
+ struct fbnic_act_tcam *act_tcam = &fbd->act_tcam[i];
+
+ seq_printf(s, "%02d %d %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %08x\n",
+ i, act_tcam->state,
+ act_tcam->value.tcam[10], act_tcam->value.tcam[9],
+ act_tcam->value.tcam[8], act_tcam->value.tcam[7],
+ act_tcam->value.tcam[6], act_tcam->value.tcam[5],
+ act_tcam->value.tcam[4], act_tcam->value.tcam[3],
+ act_tcam->value.tcam[2], act_tcam->value.tcam[1],
+ act_tcam->value.tcam[0], act_tcam->rss_en_mask,
+ act_tcam->dest);
+ seq_printf(s, " %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x\n",
+ act_tcam->mask.tcam[10], act_tcam->mask.tcam[9],
+ act_tcam->mask.tcam[8], act_tcam->mask.tcam[7],
+ act_tcam->mask.tcam[6], act_tcam->mask.tcam[5],
+ act_tcam->mask.tcam[4], act_tcam->mask.tcam[3],
+ act_tcam->mask.tcam[2], act_tcam->mask.tcam[1],
+ act_tcam->mask.tcam[0]);
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(fbnic_dbg_act_tcam);
+
+static int fbnic_dbg_ip_addr_show(struct seq_file *s,
+ struct fbnic_ip_addr *ip_addr)
+{
+ char hdr[80];
+ int i;
+
+ /* Generate Header */
+ snprintf(hdr, sizeof(hdr), "%3s %s %-17s %s %s\n",
+ "Idx", "S", "TCAM Bitmap", "V", "Addr/Mask");
+ seq_puts(s, hdr);
+ fbnic_dbg_desc_break(s, strnlen(hdr, sizeof(hdr)));
+
+ for (i = 0; i < FBNIC_RPC_TCAM_IP_ADDR_NUM_ENTRIES; i++, ip_addr++) {
+ seq_printf(s, "%02d %d %64pb %d %pi6\n",
+ i, ip_addr->state, ip_addr->act_tcam,
+ ip_addr->version, &ip_addr->value);
+ seq_printf(s, " %pi6\n",
+ &ip_addr->mask);
+ }
+
+ return 0;
+}
+
+static int fbnic_dbg_ip_src_show(struct seq_file *s, void *v)
+{
+ struct fbnic_dev *fbd = s->private;
+
+ return fbnic_dbg_ip_addr_show(s, fbd->ip_src);
+}
+DEFINE_SHOW_ATTRIBUTE(fbnic_dbg_ip_src);
+
+static int fbnic_dbg_ip_dst_show(struct seq_file *s, void *v)
+{
+ struct fbnic_dev *fbd = s->private;
+
+ return fbnic_dbg_ip_addr_show(s, fbd->ip_dst);
+}
+DEFINE_SHOW_ATTRIBUTE(fbnic_dbg_ip_dst);
+
+static int fbnic_dbg_ipo_src_show(struct seq_file *s, void *v)
+{
+ struct fbnic_dev *fbd = s->private;
+
+ return fbnic_dbg_ip_addr_show(s, fbd->ipo_src);
+}
+DEFINE_SHOW_ATTRIBUTE(fbnic_dbg_ipo_src);
+
+static int fbnic_dbg_ipo_dst_show(struct seq_file *s, void *v)
+{
+ struct fbnic_dev *fbd = s->private;
+
+ return fbnic_dbg_ip_addr_show(s, fbd->ipo_dst);
+}
+DEFINE_SHOW_ATTRIBUTE(fbnic_dbg_ipo_dst);
+
static int fbnic_dbg_pcie_stats_show(struct seq_file *s, void *v)
{
struct fbnic_dev *fbd = s->private;
@@ -48,6 +208,20 @@ void fbnic_dbg_fbd_init(struct fbnic_dev *fbd)
fbd->dbg_fbd = debugfs_create_dir(name, fbnic_dbg_root);
debugfs_create_file("pcie_stats", 0400, fbd->dbg_fbd, fbd,
&fbnic_dbg_pcie_stats_fops);
+ debugfs_create_file("mac_addr", 0400, fbd->dbg_fbd, fbd,
+ &fbnic_dbg_mac_addr_fops);
+ debugfs_create_file("tce_tcam", 0400, fbd->dbg_fbd, fbd,
+ &fbnic_dbg_tce_tcam_fops);
+ debugfs_create_file("act_tcam", 0400, fbd->dbg_fbd, fbd,
+ &fbnic_dbg_act_tcam_fops);
+ debugfs_create_file("ip_src", 0400, fbd->dbg_fbd, fbd,
+ &fbnic_dbg_ip_src_fops);
+ debugfs_create_file("ip_dst", 0400, fbd->dbg_fbd, fbd,
+ &fbnic_dbg_ip_dst_fops);
+ debugfs_create_file("ipo_src", 0400, fbd->dbg_fbd, fbd,
+ &fbnic_dbg_ipo_src_fops);
+ debugfs_create_file("ipo_dst", 0400, fbd->dbg_fbd, fbd,
+ &fbnic_dbg_ipo_dst_fops);
}
void fbnic_dbg_fbd_exit(struct fbnic_dev *fbd)
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c b/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c
index 0072d612215e..4c4938eedd7b 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c
@@ -3,10 +3,12 @@
#include <linux/unaligned.h>
#include <linux/pci.h>
+#include <linux/pldmfw.h>
#include <linux/types.h>
#include <net/devlink.h>
#include "fbnic.h"
+#include "fbnic_tlv.h"
#define FBNIC_SN_STR_LEN 24
@@ -109,8 +111,262 @@ static int fbnic_devlink_info_get(struct devlink *devlink,
return 0;
}
+static bool
+fbnic_pldm_match_record(struct pldmfw *context, struct pldmfw_record *record)
+{
+ struct pldmfw_desc_tlv *desc;
+ u32 anti_rollback_ver = 0;
+ struct devlink *devlink;
+ struct fbnic_dev *fbd;
+ struct pci_dev *pdev;
+
+ /* First, use the standard PCI matching function */
+ if (!pldmfw_op_pci_match_record(context, record))
+ return false;
+
+ pdev = to_pci_dev(context->dev);
+ fbd = pci_get_drvdata(pdev);
+ devlink = priv_to_devlink(fbd);
+
+ /* If PCI match is successful, check for vendor-specific descriptors */
+ list_for_each_entry(desc, &record->descs, entry) {
+ if (desc->type != PLDM_DESC_ID_VENDOR_DEFINED)
+ continue;
+
+ if (desc->size < 21 || desc->data[0] != 1 ||
+ desc->data[1] != 15)
+ continue;
+
+ if (memcmp(desc->data + 2, "AntiRollbackVer", 15) != 0)
+ continue;
+
+ anti_rollback_ver = get_unaligned_le32(desc->data + 17);
+ break;
+ }
+
+ /* Compare versions and return error if they do not match */
+ if (anti_rollback_ver < fbd->fw_cap.anti_rollback_version) {
+ char buf[128];
+
+ snprintf(buf, sizeof(buf),
+ "New firmware anti-rollback version (0x%x) is older than device version (0x%x)!",
+ anti_rollback_ver, fbd->fw_cap.anti_rollback_version);
+ devlink_flash_update_status_notify(devlink, buf,
+ "Anti-Rollback", 0, 0);
+
+ return false;
+ }
+
+ return true;
+}
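The descriptor checks above decode a vendor-defined PLDM descriptor whose payload, judging from the offsets used, is laid out roughly as the hypothetical struct below (illustration only; 21 bytes total, hence the desc->size < 21 rejection and the get_unaligned_le32(desc->data + 17) read):

        struct fbnic_anti_rollback_desc {       /* hypothetical, mirrors the offsets above */
                u8 title_type;                  /* data[0], must be 1 (ASCII title string) */
                u8 title_len;                   /* data[1], must be 15 */
                char title[15];                 /* data[2..16], "AntiRollbackVer" */
                __le32 anti_rollback_ver;       /* data[17..20], little-endian version */
        } __packed;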
+
+static int
+fbnic_flash_start(struct fbnic_dev *fbd, struct pldmfw_component *component)
+{
+ struct fbnic_fw_completion *cmpl;
+ int err;
+
+ cmpl = fbnic_fw_alloc_cmpl(FBNIC_TLV_MSG_ID_FW_START_UPGRADE_REQ);
+ if (!cmpl)
+ return -ENOMEM;
+
+ err = fbnic_fw_xmit_fw_start_upgrade(fbd, cmpl,
+ component->identifier,
+ component->component_size);
+ if (err)
+ goto cmpl_free;
+
+ /* Wait for firmware to ack firmware upgrade start */
+ if (wait_for_completion_timeout(&cmpl->done, 10 * HZ))
+ err = cmpl->result;
+ else
+ err = -ETIMEDOUT;
+
+ fbnic_fw_clear_cmpl(fbd, cmpl);
+cmpl_free:
+ fbnic_fw_put_cmpl(cmpl);
+
+ return err;
+}
+
+static int
+fbnic_flash_component(struct pldmfw *context,
+ struct pldmfw_component *component)
+{
+ const u8 *data = component->component_data;
+ const u32 size = component->component_size;
+ struct fbnic_fw_completion *cmpl;
+ const char *component_name;
+ struct devlink *devlink;
+ struct fbnic_dev *fbd;
+ struct pci_dev *pdev;
+ u32 offset = 0;
+ u32 length = 0;
+ char buf[32];
+ int err;
+
+ pdev = to_pci_dev(context->dev);
+ fbd = pci_get_drvdata(pdev);
+ devlink = priv_to_devlink(fbd);
+
+ switch (component->identifier) {
+ case QSPI_SECTION_CMRT:
+ component_name = "boot1";
+ break;
+ case QSPI_SECTION_CONTROL_FW:
+ component_name = "boot2";
+ break;
+ case QSPI_SECTION_OPTION_ROM:
+ component_name = "option-rom";
+ break;
+ default:
+ snprintf(buf, sizeof(buf), "Unknown component ID %u!",
+ component->identifier);
+ devlink_flash_update_status_notify(devlink, buf, NULL, 0,
+ size);
+ return -EINVAL;
+ }
+
+ /* Once firmware receives the request to start upgrading it responds
+ * with two messages:
+ * 1. An ACK that it received the message and possible error code
+ * indicating that an upgrade is not currently possible.
+ * 2. A request for the first chunk of data
+ *
+ * Setup completions for write before issuing the start message so
+ * the driver can catch both messages.
+ */
+ cmpl = fbnic_fw_alloc_cmpl(FBNIC_TLV_MSG_ID_FW_WRITE_CHUNK_REQ);
+ if (!cmpl)
+ return -ENOMEM;
+
+ err = fbnic_mbx_set_cmpl(fbd, cmpl);
+ if (err)
+ goto cmpl_free;
+
+ devlink_flash_update_timeout_notify(devlink, "Initializing",
+ component_name, 15);
+ err = fbnic_flash_start(fbd, component);
+ if (err)
+ goto err_no_msg;
+
+ while (offset < size) {
+ if (!wait_for_completion_timeout(&cmpl->done, 15 * HZ)) {
+ err = -ETIMEDOUT;
+ break;
+ }
+
+ err = cmpl->result;
+ if (err)
+ break;
+
+ /* Verify firmware is requesting the next chunk in the seq. */
+ if (cmpl->u.fw_update.offset != offset + length) {
+ err = -EFAULT;
+ break;
+ }
+
+ offset = cmpl->u.fw_update.offset;
+ length = cmpl->u.fw_update.length;
+
+ if (length > TLV_MAX_DATA || offset + length > size) {
+ err = -EFAULT;
+ break;
+ }
+
+ devlink_flash_update_status_notify(devlink, "Flashing",
+ component_name,
+ offset, size);
+
+ /* Mailbox will set length to 0 once it receives the finish
+ * message.
+ */
+ if (!length)
+ continue;
+
+ reinit_completion(&cmpl->done);
+ err = fbnic_fw_xmit_fw_write_chunk(fbd, data, offset, length,
+ 0);
+ if (err)
+ break;
+ }
+
+ if (err) {
+ fbnic_fw_xmit_fw_write_chunk(fbd, NULL, 0, 0, err);
+err_no_msg:
+ snprintf(buf, sizeof(buf), "Mailbox encountered error %d!",
+ err);
+ devlink_flash_update_status_notify(devlink, buf,
+ component_name, 0, 0);
+ }
+
+ fbnic_fw_clear_cmpl(fbd, cmpl);
+cmpl_free:
+ fbnic_fw_put_cmpl(cmpl);
+
+ return err;
+}
+
+static const struct pldmfw_ops fbnic_pldmfw_ops = {
+ .match_record = fbnic_pldm_match_record,
+ .flash_component = fbnic_flash_component,
+};
+
+static int
+fbnic_devlink_flash_update(struct devlink *devlink,
+ struct devlink_flash_update_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct fbnic_dev *fbd = devlink_priv(devlink);
+ const struct firmware *fw = params->fw;
+ struct device *dev = fbd->dev;
+ struct pldmfw context;
+ const char *err_msg;
+ int err;
+
+ context.ops = &fbnic_pldmfw_ops;
+ context.dev = dev;
+
+ err = pldmfw_flash_image(&context, fw);
+ if (err) {
+ switch (err) {
+ case -EINVAL:
+ err_msg = "Invalid image";
+ break;
+ case -EOPNOTSUPP:
+ err_msg = "Unsupported image";
+ break;
+ case -ENOMEM:
+ err_msg = "Out of memory";
+ break;
+ case -EFAULT:
+ err_msg = "Invalid header";
+ break;
+ case -ENOENT:
+ err_msg = "No matching record";
+ break;
+ case -ENODEV:
+ err_msg = "No matching device";
+ break;
+ case -ETIMEDOUT:
+ err_msg = "Timed out waiting for reply";
+ break;
+ default:
+ err_msg = "Unknown error";
+ break;
+ }
+
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Failed to flash PLDM Image: %s (error: %d)",
+ err_msg, err);
+ }
+
+ return err;
+}
+
static const struct devlink_ops fbnic_devlink_ops = {
- .info_get = fbnic_devlink_info_get,
+ .info_get = fbnic_devlink_info_get,
+ .flash_update = fbnic_devlink_flash_update,
};
void fbnic_devlink_free(struct fbnic_dev *fbd)
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c b/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
index cc8ca94529ca..5c7556c8c4c5 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
@@ -4,6 +4,7 @@
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
+#include <net/ipv6.h>
#include "fbnic.h"
#include "fbnic_netdev.h"
@@ -26,6 +27,19 @@ struct fbnic_stat {
FBNIC_STAT_FIELDS(fbnic_hw_stats, name, stat)
static const struct fbnic_stat fbnic_gstrings_hw_stats[] = {
+ /* TTI */
+ FBNIC_HW_STAT("tti_cm_drop_frames", tti.cm_drop.frames),
+ FBNIC_HW_STAT("tti_cm_drop_bytes", tti.cm_drop.bytes),
+ FBNIC_HW_STAT("tti_frame_drop_frames", tti.frame_drop.frames),
+ FBNIC_HW_STAT("tti_frame_drop_bytes", tti.frame_drop.bytes),
+ FBNIC_HW_STAT("tti_tbi_drop_frames", tti.tbi_drop.frames),
+ FBNIC_HW_STAT("tti_tbi_drop_bytes", tti.tbi_drop.bytes),
+
+ /* TMI */
+ FBNIC_HW_STAT("ptp_illegal_req", tmi.ptp_illegal_req),
+ FBNIC_HW_STAT("ptp_good_ts", tmi.ptp_good_ts),
+ FBNIC_HW_STAT("ptp_bad_ts", tmi.ptp_bad_ts),
+
/* RPC */
FBNIC_HW_STAT("rpc_unkn_etype", rpc.unkn_etype),
FBNIC_HW_STAT("rpc_unkn_ext_hdr", rpc.unkn_ext_hdr),
@@ -38,36 +52,64 @@ static const struct fbnic_stat fbnic_gstrings_hw_stats[] = {
};
#define FBNIC_HW_FIXED_STATS_LEN ARRAY_SIZE(fbnic_gstrings_hw_stats)
-#define FBNIC_HW_STATS_LEN FBNIC_HW_FIXED_STATS_LEN
-static int
-fbnic_get_ts_info(struct net_device *netdev,
- struct kernel_ethtool_ts_info *tsinfo)
-{
- struct fbnic_net *fbn = netdev_priv(netdev);
+#define FBNIC_RXB_ENQUEUE_STAT(name, stat) \
+ FBNIC_STAT_FIELDS(fbnic_rxb_enqueue_stats, name, stat)
- tsinfo->phc_index = ptp_clock_index(fbn->fbd->ptp);
+static const struct fbnic_stat fbnic_gstrings_rxb_enqueue_stats[] = {
+ FBNIC_RXB_ENQUEUE_STAT("rxb_integrity_err%u", integrity_err),
+ FBNIC_RXB_ENQUEUE_STAT("rxb_mac_err%u", mac_err),
+ FBNIC_RXB_ENQUEUE_STAT("rxb_parser_err%u", parser_err),
+ FBNIC_RXB_ENQUEUE_STAT("rxb_frm_err%u", frm_err),
- tsinfo->so_timestamping =
- SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_TX_HARDWARE |
- SOF_TIMESTAMPING_RX_HARDWARE |
- SOF_TIMESTAMPING_RAW_HARDWARE;
+ FBNIC_RXB_ENQUEUE_STAT("rxb_drbo%u_frames", drbo.frames),
+ FBNIC_RXB_ENQUEUE_STAT("rxb_drbo%u_bytes", drbo.bytes),
+};
- tsinfo->tx_types =
- BIT(HWTSTAMP_TX_OFF) |
- BIT(HWTSTAMP_TX_ON);
+#define FBNIC_HW_RXB_ENQUEUE_STATS_LEN \
+ ARRAY_SIZE(fbnic_gstrings_rxb_enqueue_stats)
- tsinfo->rx_filters =
- BIT(HWTSTAMP_FILTER_NONE) |
- BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
- BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
- BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
- BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
- BIT(HWTSTAMP_FILTER_ALL);
+#define FBNIC_RXB_FIFO_STAT(name, stat) \
+ FBNIC_STAT_FIELDS(fbnic_rxb_fifo_stats, name, stat)
- return 0;
-}
+static const struct fbnic_stat fbnic_gstrings_rxb_fifo_stats[] = {
+ FBNIC_RXB_FIFO_STAT("rxb_fifo%u_drop", trans_drop),
+ FBNIC_RXB_FIFO_STAT("rxb_fifo%u_dropped_frames", drop.frames),
+ FBNIC_RXB_FIFO_STAT("rxb_fifo%u_ecn", trans_ecn),
+ FBNIC_RXB_FIFO_STAT("rxb_fifo%u_level", level),
+};
+
+#define FBNIC_HW_RXB_FIFO_STATS_LEN ARRAY_SIZE(fbnic_gstrings_rxb_fifo_stats)
+
+#define FBNIC_RXB_DEQUEUE_STAT(name, stat) \
+ FBNIC_STAT_FIELDS(fbnic_rxb_dequeue_stats, name, stat)
+
+static const struct fbnic_stat fbnic_gstrings_rxb_dequeue_stats[] = {
+ FBNIC_RXB_DEQUEUE_STAT("rxb_intf%u_frames", intf.frames),
+ FBNIC_RXB_DEQUEUE_STAT("rxb_intf%u_bytes", intf.bytes),
+ FBNIC_RXB_DEQUEUE_STAT("rxb_pbuf%u_frames", pbuf.frames),
+ FBNIC_RXB_DEQUEUE_STAT("rxb_pbuf%u_bytes", pbuf.bytes),
+};
+
+#define FBNIC_HW_RXB_DEQUEUE_STATS_LEN \
+ ARRAY_SIZE(fbnic_gstrings_rxb_dequeue_stats)
+
+#define FBNIC_HW_Q_STAT(name, stat) \
+ FBNIC_STAT_FIELDS(fbnic_hw_q_stats, name, stat.value)
+
+static const struct fbnic_stat fbnic_gstrings_hw_q_stats[] = {
+ FBNIC_HW_Q_STAT("rde_%u_pkt_err", rde_pkt_err),
+ FBNIC_HW_Q_STAT("rde_%u_pkt_cq_drop", rde_pkt_cq_drop),
+ FBNIC_HW_Q_STAT("rde_%u_pkt_bdq_drop", rde_pkt_bdq_drop),
+};
+
+#define FBNIC_HW_Q_STATS_LEN ARRAY_SIZE(fbnic_gstrings_hw_q_stats)
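+/* Total ethtool stats count: the fixed stats plus each per-index group
+ * (RXB enqueue/fifo/dequeue and per-queue stats) repeated for every
+ * index. Must stay in sync with the strings emitted below.
+ */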
+#define FBNIC_HW_STATS_LEN \
+ (FBNIC_HW_FIXED_STATS_LEN + \
+ FBNIC_HW_RXB_ENQUEUE_STATS_LEN * FBNIC_RXB_ENQUEUE_INDICES + \
+ FBNIC_HW_RXB_FIFO_STATS_LEN * FBNIC_RXB_FIFO_INDICES + \
+ FBNIC_HW_RXB_DEQUEUE_STATS_LEN * FBNIC_RXB_DEQUEUE_INDICES + \
+ FBNIC_HW_Q_STATS_LEN * FBNIC_MAX_QUEUES)
static void
fbnic_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
@@ -79,24 +121,374 @@ fbnic_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
sizeof(drvinfo->fw_version));
}
-static void fbnic_set_counter(u64 *stat, struct fbnic_stat_counter *counter)
+static int fbnic_get_regs_len(struct net_device *netdev)
{
- if (counter->reported)
- *stat = counter->value;
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ return fbnic_csr_regs_len(fbn->fbd) * sizeof(u32);
}
-static void fbnic_get_strings(struct net_device *dev, u32 sset, u8 *data)
+static void fbnic_get_regs(struct net_device *netdev,
+ struct ethtool_regs *regs, void *data)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ fbnic_csr_get_regs(fbn->fbd, data, &regs->version);
+}
+
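+/* Duplicate the netdev private data so a new ring/queue configuration
+ * can be staged on the copy and swapped in only once all allocations
+ * have succeeded. Ring and NAPI pointers are cleared so the clone
+ * allocates its own.
+ */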
+static struct fbnic_net *fbnic_clone_create(struct fbnic_net *orig)
+{
+ struct fbnic_net *clone;
+
+ clone = kmemdup(orig, sizeof(*orig), GFP_KERNEL);
+ if (!clone)
+ return NULL;
+
+ memset(clone->tx, 0, sizeof(clone->tx));
+ memset(clone->rx, 0, sizeof(clone->rx));
+ memset(clone->napi, 0, sizeof(clone->napi));
+ return clone;
+}
+
+static void fbnic_clone_swap_cfg(struct fbnic_net *orig,
+ struct fbnic_net *clone)
+{
+ swap(clone->rcq_size, orig->rcq_size);
+ swap(clone->hpq_size, orig->hpq_size);
+ swap(clone->ppq_size, orig->ppq_size);
+ swap(clone->txq_size, orig->txq_size);
+ swap(clone->num_rx_queues, orig->num_rx_queues);
+ swap(clone->num_tx_queues, orig->num_tx_queues);
+ swap(clone->num_napi, orig->num_napi);
+}
+
+static void fbnic_aggregate_vector_counters(struct fbnic_net *fbn,
+ struct fbnic_napi_vector *nv)
+{
+ int i, j;
+
+ for (i = 0; i < nv->txt_count; i++) {
+ fbnic_aggregate_ring_tx_counters(fbn, &nv->qt[i].sub0);
+ fbnic_aggregate_ring_tx_counters(fbn, &nv->qt[i].sub1);
+ fbnic_aggregate_ring_tx_counters(fbn, &nv->qt[i].cmpl);
+ }
+
+ for (j = 0; j < nv->rxt_count; j++, i++) {
+ fbnic_aggregate_ring_rx_counters(fbn, &nv->qt[i].sub0);
+ fbnic_aggregate_ring_rx_counters(fbn, &nv->qt[i].sub1);
+ fbnic_aggregate_ring_rx_counters(fbn, &nv->qt[i].cmpl);
+ }
+}
+
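+/* Swap the staged configuration into place: quiesce the NAPI IRQs,
+ * fold the per-ring counters into the netdev totals, then exchange
+ * the config fields and the ring/NAPI pointers between the two
+ * structures.
+ */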
+static void fbnic_clone_swap(struct fbnic_net *orig,
+ struct fbnic_net *clone)
+{
+ struct fbnic_dev *fbd = orig->fbd;
+ unsigned int i;
+
+ for (i = 0; i < max(clone->num_napi, orig->num_napi); i++)
+ fbnic_synchronize_irq(fbd, FBNIC_NON_NAPI_VECTORS + i);
+ for (i = 0; i < orig->num_napi; i++)
+ fbnic_aggregate_vector_counters(orig, orig->napi[i]);
+
+ fbnic_clone_swap_cfg(orig, clone);
+
+ for (i = 0; i < ARRAY_SIZE(orig->napi); i++)
+ swap(clone->napi[i], orig->napi[i]);
+ for (i = 0; i < ARRAY_SIZE(orig->tx); i++)
+ swap(clone->tx[i], orig->tx[i]);
+ for (i = 0; i < ARRAY_SIZE(orig->rx); i++)
+ swap(clone->rx[i], orig->rx[i]);
+}
+
+static void fbnic_clone_free(struct fbnic_net *clone)
+{
+ kfree(clone);
+}
+
+static int fbnic_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ ec->tx_coalesce_usecs = fbn->tx_usecs;
+ ec->rx_coalesce_usecs = fbn->rx_usecs;
+ ec->rx_max_coalesced_frames = fbn->rx_max_frames;
+
+ return 0;
+}
+
+static int fbnic_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ /* Verify against hardware limits */
+ if (ec->rx_coalesce_usecs > FIELD_MAX(FBNIC_INTR_CQ_REARM_RCQ_TIMEOUT)) {
+ NL_SET_ERR_MSG_MOD(extack, "rx_usecs is above device max");
+ return -EINVAL;
+ }
+ if (ec->tx_coalesce_usecs > FIELD_MAX(FBNIC_INTR_CQ_REARM_TCQ_TIMEOUT)) {
+ NL_SET_ERR_MSG_MOD(extack, "tx_usecs is above device max");
+ return -EINVAL;
+ }
+ if (ec->rx_max_coalesced_frames >
+ FIELD_MAX(FBNIC_QUEUE_RIM_THRESHOLD_RCD_MASK) /
+ FBNIC_MIN_RXD_PER_FRAME) {
+ NL_SET_ERR_MSG_MOD(extack, "rx_frames is above device max");
+ return -EINVAL;
+ }
+
+ fbn->tx_usecs = ec->tx_coalesce_usecs;
+ fbn->rx_usecs = ec->rx_coalesce_usecs;
+ fbn->rx_max_frames = ec->rx_max_coalesced_frames;
+
+ if (netif_running(netdev)) {
+ int i;
+
+ for (i = 0; i < fbn->num_napi; i++) {
+ struct fbnic_napi_vector *nv = fbn->napi[i];
+
+ fbnic_config_txrx_usecs(nv, 0);
+ fbnic_config_rx_frames(nv);
+ }
+ }
+
+ return 0;
+}
+
+static void
+fbnic_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ ring->rx_max_pending = FBNIC_QUEUE_SIZE_MAX;
+ ring->rx_mini_max_pending = FBNIC_QUEUE_SIZE_MAX;
+ ring->rx_jumbo_max_pending = FBNIC_QUEUE_SIZE_MAX;
+ ring->tx_max_pending = FBNIC_QUEUE_SIZE_MAX;
+
+ ring->rx_pending = fbn->rcq_size;
+ ring->rx_mini_pending = fbn->hpq_size;
+ ring->rx_jumbo_pending = fbn->ppq_size;
+ ring->tx_pending = fbn->txq_size;
+}
+
+static void fbnic_set_rings(struct fbnic_net *fbn,
+ struct ethtool_ringparam *ring)
+{
+ fbn->rcq_size = ring->rx_pending;
+ fbn->hpq_size = ring->rx_mini_pending;
+ fbn->ppq_size = ring->rx_jumbo_pending;
+ fbn->txq_size = ring->tx_pending;
+}
+
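+/* Ring size changes on a running device are applied via a staged
+ * reconfiguration: resources are allocated on a clone first, the
+ * device is quiesced, and then the old and new configurations are
+ * swapped, so a failure leaves the original setup untouched.
+ */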
+static int
+fbnic_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_net *clone;
+ int err;
+
+ ring->rx_pending = roundup_pow_of_two(ring->rx_pending);
+ ring->rx_mini_pending = roundup_pow_of_two(ring->rx_mini_pending);
+ ring->rx_jumbo_pending = roundup_pow_of_two(ring->rx_jumbo_pending);
+ ring->tx_pending = roundup_pow_of_two(ring->tx_pending);
+
+ /* These are the absolute minimums that allow the device and driver to
+ * operate, but they do not necessarily guarantee reasonable performance.
+ * An Rx queue size below 128 or BDQs smaller than 64 are likely
+ * suboptimal at best.
+ */
+ if (ring->rx_pending < max(FBNIC_QUEUE_SIZE_MIN, FBNIC_RX_DESC_MIN) ||
+ ring->rx_mini_pending < FBNIC_QUEUE_SIZE_MIN ||
+ ring->rx_jumbo_pending < FBNIC_QUEUE_SIZE_MIN ||
+ ring->tx_pending < max(FBNIC_QUEUE_SIZE_MIN, FBNIC_TX_DESC_MIN)) {
+ NL_SET_ERR_MSG_MOD(extack, "requested ring size too small");
+ return -EINVAL;
+ }
+
+ if (!netif_running(netdev)) {
+ fbnic_set_rings(fbn, ring);
+ return 0;
+ }
+
+ clone = fbnic_clone_create(fbn);
+ if (!clone)
+ return -ENOMEM;
+
+ fbnic_set_rings(clone, ring);
+
+ err = fbnic_alloc_napi_vectors(clone);
+ if (err)
+ goto err_free_clone;
+
+ err = fbnic_alloc_resources(clone);
+ if (err)
+ goto err_free_napis;
+
+ fbnic_down_noidle(fbn);
+ err = fbnic_wait_all_queues_idle(fbn->fbd, true);
+ if (err)
+ goto err_start_stack;
+
+ err = fbnic_set_netif_queues(clone);
+ if (err)
+ goto err_start_stack;
+
+ /* Nothing can fail past this point */
+ fbnic_flush(fbn);
+
+ fbnic_clone_swap(fbn, clone);
+
+ fbnic_up(fbn);
+
+ fbnic_free_resources(clone);
+ fbnic_free_napi_vectors(clone);
+ fbnic_clone_free(clone);
+
+ return 0;
+
+err_start_stack:
+ fbnic_flush(fbn);
+ fbnic_up(fbn);
+ fbnic_free_resources(clone);
+err_free_napis:
+ fbnic_free_napi_vectors(clone);
+err_free_clone:
+ fbnic_clone_free(clone);
+ return err;
+}
+
+static void fbnic_get_rxb_enqueue_strings(u8 **data, unsigned int idx)
+{
+ const struct fbnic_stat *stat;
+ int i;
+
+ stat = fbnic_gstrings_rxb_enqueue_stats;
+ for (i = 0; i < FBNIC_HW_RXB_ENQUEUE_STATS_LEN; i++, stat++)
+ ethtool_sprintf(data, stat->string, idx);
+}
+
+static void fbnic_get_rxb_fifo_strings(u8 **data, unsigned int idx)
{
+ const struct fbnic_stat *stat;
int i;
+ stat = fbnic_gstrings_rxb_fifo_stats;
+ for (i = 0; i < FBNIC_HW_RXB_FIFO_STATS_LEN; i++, stat++)
+ ethtool_sprintf(data, stat->string, idx);
+}
+
+static void fbnic_get_rxb_dequeue_strings(u8 **data, unsigned int idx)
+{
+ const struct fbnic_stat *stat;
+ int i;
+
+ stat = fbnic_gstrings_rxb_dequeue_stats;
+ for (i = 0; i < FBNIC_HW_RXB_DEQUEUE_STATS_LEN; i++, stat++)
+ ethtool_sprintf(data, stat->string, idx);
+}
+
+static void fbnic_get_strings(struct net_device *dev, u32 sset, u8 *data)
+{
+ const struct fbnic_stat *stat;
+ int i, idx;
+
switch (sset) {
case ETH_SS_STATS:
- for (i = 0; i < FBNIC_HW_STATS_LEN; i++)
+ for (i = 0; i < FBNIC_HW_FIXED_STATS_LEN; i++)
ethtool_puts(&data, fbnic_gstrings_hw_stats[i].string);
+
+ for (i = 0; i < FBNIC_RXB_ENQUEUE_INDICES; i++)
+ fbnic_get_rxb_enqueue_strings(&data, i);
+
+ for (i = 0; i < FBNIC_RXB_FIFO_INDICES; i++)
+ fbnic_get_rxb_fifo_strings(&data, i);
+
+ for (i = 0; i < FBNIC_RXB_DEQUEUE_INDICES; i++)
+ fbnic_get_rxb_dequeue_strings(&data, i);
+
+ for (idx = 0; idx < FBNIC_MAX_QUEUES; idx++) {
+ stat = fbnic_gstrings_hw_q_stats;
+
+ for (i = 0; i < FBNIC_HW_Q_STATS_LEN; i++, stat++)
+ ethtool_sprintf(&data, stat->string, idx);
+ }
break;
}
}
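+/* Copy 'len' 64-bit counters out of a stats structure into the ethtool
+ * data array, using the byte offset recorded for each stat entry.
+ */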
+static void fbnic_report_hw_stats(const struct fbnic_stat *stat,
+ const void *base, int len, u64 **data)
+{
+ while (len--) {
+ u8 *curr = (u8 *)base + stat->offset;
+
+ **data = *(u64 *)curr;
+
+ stat++;
+ (*data)++;
+ }
+}
+
+static void fbnic_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct fbnic_net *fbn = netdev_priv(dev);
+ struct fbnic_dev *fbd = fbn->fbd;
+ int i;
+
+ fbnic_get_hw_stats(fbn->fbd);
+
+ spin_lock(&fbd->hw_stats_lock);
+ fbnic_report_hw_stats(fbnic_gstrings_hw_stats, &fbd->hw_stats,
+ FBNIC_HW_FIXED_STATS_LEN, &data);
+
+ for (i = 0; i < FBNIC_RXB_ENQUEUE_INDICES; i++) {
+ const struct fbnic_rxb_enqueue_stats *enq;
+
+ enq = &fbd->hw_stats.rxb.enq[i];
+ fbnic_report_hw_stats(fbnic_gstrings_rxb_enqueue_stats,
+ enq, FBNIC_HW_RXB_ENQUEUE_STATS_LEN,
+ &data);
+ }
+
+ for (i = 0; i < FBNIC_RXB_FIFO_INDICES; i++) {
+ const struct fbnic_rxb_fifo_stats *fifo;
+
+ fifo = &fbd->hw_stats.rxb.fifo[i];
+ fbnic_report_hw_stats(fbnic_gstrings_rxb_fifo_stats,
+ fifo, FBNIC_HW_RXB_FIFO_STATS_LEN,
+ &data);
+ }
+
+ for (i = 0; i < FBNIC_RXB_DEQUEUE_INDICES; i++) {
+ const struct fbnic_rxb_dequeue_stats *deq;
+
+ deq = &fbd->hw_stats.rxb.deq[i];
+ fbnic_report_hw_stats(fbnic_gstrings_rxb_dequeue_stats,
+ deq, FBNIC_HW_RXB_DEQUEUE_STATS_LEN,
+ &data);
+ }
+
+ for (i = 0; i < FBNIC_MAX_QUEUES; i++) {
+ const struct fbnic_hw_q_stats *hw_q = &fbd->hw_stats.hw_q[i];
+
+ fbnic_report_hw_stats(fbnic_gstrings_hw_q_stats, hw_q,
+ FBNIC_HW_Q_STATS_LEN, &data);
+ }
+ spin_unlock(&fbd->hw_stats_lock);
+}
+
static int fbnic_get_sset_count(struct net_device *dev, int sset)
{
switch (sset) {
@@ -107,19 +499,1075 @@ static int fbnic_get_sset_count(struct net_device *dev, int sset)
}
}
-static void fbnic_get_ethtool_stats(struct net_device *dev,
- struct ethtool_stats *stats, u64 *data)
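+/* Map an ethtool flow type onto the driver's RSS hash option index,
+ * or -1 if the flow type is not supported.
+ */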
+static int fbnic_get_rss_hash_idx(u32 flow_type)
+{
+ switch (flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS)) {
+ case TCP_V4_FLOW:
+ return FBNIC_TCP4_HASH_OPT;
+ case TCP_V6_FLOW:
+ return FBNIC_TCP6_HASH_OPT;
+ case UDP_V4_FLOW:
+ return FBNIC_UDP4_HASH_OPT;
+ case UDP_V6_FLOW:
+ return FBNIC_UDP6_HASH_OPT;
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ case IPV4_FLOW:
+ case IPV4_USER_FLOW:
+ return FBNIC_IPV4_HASH_OPT;
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case AH_ESP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ case IPV6_FLOW:
+ case IPV6_USER_FLOW:
+ return FBNIC_IPV6_HASH_OPT;
+ case ETHER_FLOW:
+ return FBNIC_ETHER_HASH_OPT;
+ }
+
+ return -1;
+}
+
+static int
+fbnic_get_rss_hash_opts(struct fbnic_net *fbn, struct ethtool_rxnfc *cmd)
+{
+ int hash_opt_idx = fbnic_get_rss_hash_idx(cmd->flow_type);
+
+ if (hash_opt_idx < 0)
+ return -EINVAL;
+
+ /* Report options from rss_en table in fbn */
+ cmd->data = fbn->rss_flow_hash[hash_opt_idx];
+
+ return 0;
+}
+
+static int fbnic_get_cls_rule_all(struct fbnic_net *fbn,
+ struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct fbnic_dev *fbd = fbn->fbd;
+ int i, cnt = 0;
+
+ /* Report maximum rule count */
+ cmd->data = FBNIC_RPC_ACT_TBL_NFC_ENTRIES;
+
+ for (i = 0; i < FBNIC_RPC_ACT_TBL_NFC_ENTRIES; i++) {
+ int idx = i + FBNIC_RPC_ACT_TBL_NFC_OFFSET;
+ struct fbnic_act_tcam *act_tcam;
+
+ act_tcam = &fbd->act_tcam[idx];
+ if (act_tcam->state != FBNIC_TCAM_S_VALID)
+ continue;
+
+ if (rule_locs) {
+ if (cnt == cmd->rule_cnt)
+ return -EMSGSIZE;
+
+ rule_locs[cnt] = i;
+ }
+
+ cnt++;
+ }
+
+ return cnt;
+}
+
+static int fbnic_get_cls_rule(struct fbnic_net *fbn, struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp;
+ struct fbnic_dev *fbd = fbn->fbd;
+ struct fbnic_act_tcam *act_tcam;
+ int idx;
+
+ fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
+
+ if (fsp->location >= FBNIC_RPC_ACT_TBL_NFC_ENTRIES)
+ return -EINVAL;
+
+ idx = fsp->location + FBNIC_RPC_ACT_TBL_NFC_OFFSET;
+ act_tcam = &fbd->act_tcam[idx];
+
+ if (act_tcam->state != FBNIC_TCAM_S_VALID)
+ return -EINVAL;
+
+ /* Report maximum rule count */
+ cmd->data = FBNIC_RPC_ACT_TBL_NFC_ENTRIES;
+
+ /* Set flow type field */
+ if (!(act_tcam->value.tcam[1] & FBNIC_RPC_TCAM_ACT1_IP_VALID)) {
+ fsp->flow_type = ETHER_FLOW;
+ if (!FIELD_GET(FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX,
+ act_tcam->mask.tcam[1])) {
+ struct fbnic_mac_addr *mac_addr;
+
+ idx = FIELD_GET(FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX,
+ act_tcam->value.tcam[1]);
+ mac_addr = &fbd->mac_addr[idx];
+
+ ether_addr_copy(fsp->h_u.ether_spec.h_dest,
+ mac_addr->value.addr8);
+ eth_broadcast_addr(fsp->m_u.ether_spec.h_dest);
+ }
+ } else if (act_tcam->value.tcam[1] &
+ FBNIC_RPC_TCAM_ACT1_OUTER_IP_VALID) {
+ fsp->flow_type = IPV6_USER_FLOW;
+ fsp->h_u.usr_ip6_spec.l4_proto = IPPROTO_IPV6;
+ fsp->m_u.usr_ip6_spec.l4_proto = 0xff;
+
+ if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX,
+ act_tcam->mask.tcam[0])) {
+ struct fbnic_ip_addr *ip_addr;
+ int i;
+
+ idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX,
+ act_tcam->value.tcam[0]);
+ ip_addr = &fbd->ipo_src[idx];
+
+ for (i = 0; i < 4; i++) {
+ fsp->h_u.usr_ip6_spec.ip6src[i] =
+ ip_addr->value.s6_addr32[i];
+ fsp->m_u.usr_ip6_spec.ip6src[i] =
+ ~ip_addr->mask.s6_addr32[i];
+ }
+ }
+
+ if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX,
+ act_tcam->mask.tcam[0])) {
+ struct fbnic_ip_addr *ip_addr;
+ int i;
+
+ idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX,
+ act_tcam->value.tcam[0]);
+ ip_addr = &fbd->ipo_dst[idx];
+
+ for (i = 0; i < 4; i++) {
+ fsp->h_u.usr_ip6_spec.ip6dst[i] =
+ ip_addr->value.s6_addr32[i];
+ fsp->m_u.usr_ip6_spec.ip6dst[i] =
+ ~ip_addr->mask.s6_addr32[i];
+ }
+ }
+ } else if ((act_tcam->value.tcam[1] & FBNIC_RPC_TCAM_ACT1_IP_IS_V6)) {
+ if (act_tcam->value.tcam[1] & FBNIC_RPC_TCAM_ACT1_L4_VALID) {
+ if (act_tcam->value.tcam[1] &
+ FBNIC_RPC_TCAM_ACT1_L4_IS_UDP)
+ fsp->flow_type = UDP_V6_FLOW;
+ else
+ fsp->flow_type = TCP_V6_FLOW;
+ fsp->h_u.tcp_ip6_spec.psrc =
+ cpu_to_be16(act_tcam->value.tcam[3]);
+ fsp->m_u.tcp_ip6_spec.psrc =
+ cpu_to_be16(~act_tcam->mask.tcam[3]);
+ fsp->h_u.tcp_ip6_spec.pdst =
+ cpu_to_be16(act_tcam->value.tcam[4]);
+ fsp->m_u.tcp_ip6_spec.pdst =
+ cpu_to_be16(~act_tcam->mask.tcam[4]);
+ } else {
+ fsp->flow_type = IPV6_USER_FLOW;
+ }
+
+ if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
+ act_tcam->mask.tcam[0])) {
+ struct fbnic_ip_addr *ip_addr;
+ int i;
+
+ idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
+ act_tcam->value.tcam[0]);
+ ip_addr = &fbd->ip_src[idx];
+
+ for (i = 0; i < 4; i++) {
+ fsp->h_u.usr_ip6_spec.ip6src[i] =
+ ip_addr->value.s6_addr32[i];
+ fsp->m_u.usr_ip6_spec.ip6src[i] =
+ ~ip_addr->mask.s6_addr32[i];
+ }
+ }
+
+ if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
+ act_tcam->mask.tcam[0])) {
+ struct fbnic_ip_addr *ip_addr;
+ int i;
+
+ idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
+ act_tcam->value.tcam[0]);
+ ip_addr = &fbd->ip_dst[idx];
+
+ for (i = 0; i < 4; i++) {
+ fsp->h_u.usr_ip6_spec.ip6dst[i] =
+ ip_addr->value.s6_addr32[i];
+ fsp->m_u.usr_ip6_spec.ip6dst[i] =
+ ~ip_addr->mask.s6_addr32[i];
+ }
+ }
+ } else {
+ if (act_tcam->value.tcam[1] & FBNIC_RPC_TCAM_ACT1_L4_VALID) {
+ if (act_tcam->value.tcam[1] &
+ FBNIC_RPC_TCAM_ACT1_L4_IS_UDP)
+ fsp->flow_type = UDP_V4_FLOW;
+ else
+ fsp->flow_type = TCP_V4_FLOW;
+ fsp->h_u.tcp_ip4_spec.psrc =
+ cpu_to_be16(act_tcam->value.tcam[3]);
+ fsp->m_u.tcp_ip4_spec.psrc =
+ cpu_to_be16(~act_tcam->mask.tcam[3]);
+ fsp->h_u.tcp_ip4_spec.pdst =
+ cpu_to_be16(act_tcam->value.tcam[4]);
+ fsp->m_u.tcp_ip4_spec.pdst =
+ cpu_to_be16(~act_tcam->mask.tcam[4]);
+ } else {
+ fsp->flow_type = IPV4_USER_FLOW;
+ fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
+ }
+
+ if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
+ act_tcam->mask.tcam[0])) {
+ struct fbnic_ip_addr *ip_addr;
+
+ idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
+ act_tcam->value.tcam[0]);
+ ip_addr = &fbd->ip_src[idx];
+
+ fsp->h_u.usr_ip4_spec.ip4src =
+ ip_addr->value.s6_addr32[3];
+ fsp->m_u.usr_ip4_spec.ip4src =
+ ~ip_addr->mask.s6_addr32[3];
+ }
+
+ if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
+ act_tcam->mask.tcam[0])) {
+ struct fbnic_ip_addr *ip_addr;
+
+ idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
+ act_tcam->value.tcam[0]);
+ ip_addr = &fbd->ip_dst[idx];
+
+ fsp->h_u.usr_ip4_spec.ip4dst =
+ ip_addr->value.s6_addr32[3];
+ fsp->m_u.usr_ip4_spec.ip4dst =
+ ~ip_addr->mask.s6_addr32[3];
+ }
+ }
+
+ /* Record action */
+ if (act_tcam->dest & FBNIC_RPC_ACT_TBL0_DROP)
+ fsp->ring_cookie = RX_CLS_FLOW_DISC;
+ else if (act_tcam->dest & FBNIC_RPC_ACT_TBL0_Q_SEL)
+ fsp->ring_cookie = FIELD_GET(FBNIC_RPC_ACT_TBL0_Q_ID,
+ act_tcam->dest);
+ else
+ fsp->flow_type |= FLOW_RSS;
+
+ cmd->rss_context = FIELD_GET(FBNIC_RPC_ACT_TBL0_RSS_CTXT_ID,
+ act_tcam->dest);
+
+ return 0;
+}
+
+static int fbnic_get_rxnfc(struct net_device *netdev,
+ struct ethtool_rxnfc *cmd, u32 *rule_locs)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ int ret = -EOPNOTSUPP;
+ u32 special = 0;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = fbn->num_rx_queues;
+ ret = 0;
+ break;
+ case ETHTOOL_GRXFH:
+ ret = fbnic_get_rss_hash_opts(fbn, cmd);
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ ret = fbnic_get_cls_rule(fbn, cmd);
+ break;
+ case ETHTOOL_GRXCLSRLCNT:
+ rule_locs = NULL;
+ special = RX_CLS_LOC_SPECIAL;
+ fallthrough;
+ case ETHTOOL_GRXCLSRLALL:
+ ret = fbnic_get_cls_rule_all(fbn, cmd, rule_locs);
+ if (ret < 0)
+ break;
+
+ cmd->data |= special;
+ cmd->rule_cnt = ret;
+ ret = 0;
+ break;
+ }
+
+ return ret;
+}
+
+#define FBNIC_L2_HASH_OPTIONS \
+ (RXH_L2DA | RXH_DISCARD)
+#define FBNIC_L3_HASH_OPTIONS \
+ (FBNIC_L2_HASH_OPTIONS | RXH_IP_SRC | RXH_IP_DST)
+#define FBNIC_L4_HASH_OPTIONS \
+ (FBNIC_L3_HASH_OPTIONS | RXH_L4_B_0_1 | RXH_L4_B_2_3)
+
+static int
+fbnic_set_rss_hash_opts(struct fbnic_net *fbn, const struct ethtool_rxnfc *cmd)
+{
+ int hash_opt_idx;
+
+ /* Verify the type requested is correct */
+ hash_opt_idx = fbnic_get_rss_hash_idx(cmd->flow_type);
+ if (hash_opt_idx < 0)
+ return -EINVAL;
+
+ /* Verify the fields asked for can actually be assigned based on type */
+ if (cmd->data & ~FBNIC_L4_HASH_OPTIONS ||
+ (hash_opt_idx > FBNIC_L4_HASH_OPT &&
+ cmd->data & ~FBNIC_L3_HASH_OPTIONS) ||
+ (hash_opt_idx > FBNIC_IP_HASH_OPT &&
+ cmd->data & ~FBNIC_L2_HASH_OPTIONS))
+ return -EINVAL;
+
+ fbn->rss_flow_hash[hash_opt_idx] = cmd->data;
+
+ if (netif_running(fbn->netdev)) {
+ fbnic_rss_reinit(fbn->fbd, fbn);
+ fbnic_write_rules(fbn->fbd);
+ }
+
+ return 0;
+}
+
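+/* Scan the NFC region of the action TCAM from the top down and return
+ * the index of a free rule slot, or -ENOSPC if the table is full.
+ */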
+static int fbnic_cls_rule_any_loc(struct fbnic_dev *fbd)
{
- struct fbnic_net *fbn = netdev_priv(dev);
- const struct fbnic_stat *stat;
int i;
- fbnic_get_hw_stats(fbn->fbd);
+ for (i = FBNIC_RPC_ACT_TBL_NFC_ENTRIES; i--;) {
+ int idx = i + FBNIC_RPC_ACT_TBL_NFC_OFFSET;
- for (i = 0; i < FBNIC_HW_STATS_LEN; i++) {
- stat = &fbnic_gstrings_hw_stats[i];
- data[i] = *(u64 *)((u8 *)&fbn->fbd->hw_stats + stat->offset);
+ if (fbd->act_tcam[idx].state != FBNIC_TCAM_S_VALID)
+ return i;
}
+
+ return -ENOSPC;
+}
+
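+/* Insert an ethtool NFC rule into the action TCAM. Masks start as
+ * all-ones ("don't care") and bits are cleared for every field the
+ * rule must match; IP and MAC addresses are synced into their own
+ * TCAM tables and referenced here by index.
+ */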
+static int fbnic_set_cls_rule_ins(struct fbnic_net *fbn,
+ const struct ethtool_rxnfc *cmd)
+{
+ u16 flow_value = 0, flow_mask = 0xffff, ip_value = 0, ip_mask = 0xffff;
+ u16 sport = 0, sport_mask = ~0, dport = 0, dport_mask = ~0;
+ u16 misc = 0, misc_mask = ~0;
+ u32 dest = FIELD_PREP(FBNIC_RPC_ACT_TBL0_DEST_MASK,
+ FBNIC_RPC_ACT_TBL0_DEST_HOST);
+ struct fbnic_ip_addr *ip_src = NULL, *ip_dst = NULL;
+ struct fbnic_mac_addr *mac_addr = NULL;
+ struct ethtool_rx_flow_spec *fsp;
+ struct fbnic_dev *fbd = fbn->fbd;
+ struct fbnic_act_tcam *act_tcam;
+ struct in6_addr *addr6, *mask6;
+ struct in_addr *addr4, *mask4;
+ int hash_idx, location;
+ u32 flow_type;
+ int idx, j;
+
+ fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
+
+ if (fsp->location != RX_CLS_LOC_ANY)
+ return -EINVAL;
+ location = fbnic_cls_rule_any_loc(fbd);
+ if (location < 0)
+ return location;
+
+ if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
+ dest = FBNIC_RPC_ACT_TBL0_DROP;
+ } else if (fsp->flow_type & FLOW_RSS) {
+ if (cmd->rss_context == 1)
+ dest |= FBNIC_RPC_ACT_TBL0_RSS_CTXT_ID;
+ } else {
+ u32 ring_idx = ethtool_get_flow_spec_ring(fsp->ring_cookie);
+
+ if (ring_idx >= fbn->num_rx_queues)
+ return -EINVAL;
+
+ dest |= FBNIC_RPC_ACT_TBL0_Q_SEL |
+ FIELD_PREP(FBNIC_RPC_ACT_TBL0_Q_ID, ring_idx);
+ }
+
+ idx = location + FBNIC_RPC_ACT_TBL_NFC_OFFSET;
+ act_tcam = &fbd->act_tcam[idx];
+
+ /* Do not allow overwriting for now.
+ * To support overwriting rules we will need to add logic to free
+ * any IP or MACDA TCAMs that may be associated with the old rule.
+ */
+ if (act_tcam->state != FBNIC_TCAM_S_DISABLED)
+ return -EBUSY;
+
+ flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_RSS);
+ hash_idx = fbnic_get_rss_hash_idx(flow_type);
+
+ switch (flow_type) {
+ case UDP_V4_FLOW:
+udp4_flow:
+ flow_value |= FBNIC_RPC_TCAM_ACT1_L4_IS_UDP;
+ fallthrough;
+ case TCP_V4_FLOW:
+tcp4_flow:
+ flow_value |= FBNIC_RPC_TCAM_ACT1_L4_VALID;
+ flow_mask &= ~(FBNIC_RPC_TCAM_ACT1_L4_IS_UDP |
+ FBNIC_RPC_TCAM_ACT1_L4_VALID);
+
+ sport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.psrc);
+ sport_mask = ~be16_to_cpu(fsp->m_u.tcp_ip4_spec.psrc);
+ dport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.pdst);
+ dport_mask = ~be16_to_cpu(fsp->m_u.tcp_ip4_spec.pdst);
+ goto ip4_flow;
+ case IP_USER_FLOW:
+ if (!fsp->m_u.usr_ip4_spec.proto)
+ goto ip4_flow;
+ if (fsp->m_u.usr_ip4_spec.proto != 0xff)
+ return -EINVAL;
+ if (fsp->h_u.usr_ip4_spec.proto == IPPROTO_UDP)
+ goto udp4_flow;
+ if (fsp->h_u.usr_ip4_spec.proto == IPPROTO_TCP)
+ goto tcp4_flow;
+ return -EINVAL;
+ip4_flow:
+ addr4 = (struct in_addr *)&fsp->h_u.usr_ip4_spec.ip4src;
+ mask4 = (struct in_addr *)&fsp->m_u.usr_ip4_spec.ip4src;
+ if (mask4->s_addr) {
+ ip_src = __fbnic_ip4_sync(fbd, fbd->ip_src,
+ addr4, mask4);
+ if (!ip_src)
+ return -ENOSPC;
+
+ set_bit(idx, ip_src->act_tcam);
+ ip_value |= FBNIC_RPC_TCAM_ACT0_IPSRC_VALID |
+ FIELD_PREP(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
+ ip_src - fbd->ip_src);
+ ip_mask &= ~(FBNIC_RPC_TCAM_ACT0_IPSRC_VALID |
+ FBNIC_RPC_TCAM_ACT0_IPSRC_IDX);
+ }
+
+ addr4 = (struct in_addr *)&fsp->h_u.usr_ip4_spec.ip4dst;
+ mask4 = (struct in_addr *)&fsp->m_u.usr_ip4_spec.ip4dst;
+ if (mask4->s_addr) {
+ ip_dst = __fbnic_ip4_sync(fbd, fbd->ip_dst,
+ addr4, mask4);
+ if (!ip_dst) {
+ if (ip_src && ip_src->state == FBNIC_TCAM_S_ADD)
+ memset(ip_src, 0, sizeof(*ip_src));
+ return -ENOSPC;
+ }
+
+ set_bit(idx, ip_dst->act_tcam);
+ ip_value |= FBNIC_RPC_TCAM_ACT0_IPDST_VALID |
+ FIELD_PREP(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
+ ip_dst - fbd->ip_dst);
+ ip_mask &= ~(FBNIC_RPC_TCAM_ACT0_IPDST_VALID |
+ FBNIC_RPC_TCAM_ACT0_IPDST_IDX);
+ }
+ flow_value |= FBNIC_RPC_TCAM_ACT1_IP_VALID |
+ FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
+ flow_mask &= ~(FBNIC_RPC_TCAM_ACT1_IP_IS_V6 |
+ FBNIC_RPC_TCAM_ACT1_IP_VALID |
+ FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID);
+ break;
+ case UDP_V6_FLOW:
+udp6_flow:
+ flow_value |= FBNIC_RPC_TCAM_ACT1_L4_IS_UDP;
+ fallthrough;
+ case TCP_V6_FLOW:
+tcp6_flow:
+ flow_value |= FBNIC_RPC_TCAM_ACT1_L4_VALID;
+ flow_mask &= ~(FBNIC_RPC_TCAM_ACT1_L4_IS_UDP |
+ FBNIC_RPC_TCAM_ACT1_L4_VALID);
+
+ sport = be16_to_cpu(fsp->h_u.tcp_ip6_spec.psrc);
+ sport_mask = ~be16_to_cpu(fsp->m_u.tcp_ip6_spec.psrc);
+ dport = be16_to_cpu(fsp->h_u.tcp_ip6_spec.pdst);
+ dport_mask = ~be16_to_cpu(fsp->m_u.tcp_ip6_spec.pdst);
+ goto ipv6_flow;
+ case IPV6_USER_FLOW:
+ if (!fsp->m_u.usr_ip6_spec.l4_proto)
+ goto ipv6_flow;
+
+ if (fsp->m_u.usr_ip6_spec.l4_proto != 0xff)
+ return -EINVAL;
+ if (fsp->h_u.usr_ip6_spec.l4_proto == IPPROTO_UDP)
+ goto udp6_flow;
+ if (fsp->h_u.usr_ip6_spec.l4_proto == IPPROTO_TCP)
+ goto tcp6_flow;
+ if (fsp->h_u.usr_ip6_spec.l4_proto != IPPROTO_IPV6)
+ return -EINVAL;
+
+ addr6 = (struct in6_addr *)fsp->h_u.usr_ip6_spec.ip6src;
+ mask6 = (struct in6_addr *)fsp->m_u.usr_ip6_spec.ip6src;
+ if (!ipv6_addr_any(mask6)) {
+ ip_src = __fbnic_ip6_sync(fbd, fbd->ipo_src,
+ addr6, mask6);
+ if (!ip_src)
+ return -ENOSPC;
+
+ set_bit(idx, ip_src->act_tcam);
+ ip_value |=
+ FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_VALID |
+ FIELD_PREP(FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX,
+ ip_src - fbd->ipo_src);
+ ip_mask &=
+ ~(FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_VALID |
+ FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX);
+ }
+
+ addr6 = (struct in6_addr *)fsp->h_u.usr_ip6_spec.ip6dst;
+ mask6 = (struct in6_addr *)fsp->m_u.usr_ip6_spec.ip6dst;
+ if (!ipv6_addr_any(mask6)) {
+ ip_dst = __fbnic_ip6_sync(fbd, fbd->ipo_dst,
+ addr6, mask6);
+ if (!ip_dst) {
+ if (ip_src && ip_src->state == FBNIC_TCAM_S_ADD)
+ memset(ip_src, 0, sizeof(*ip_src));
+ return -ENOSPC;
+ }
+
+ set_bit(idx, ip_dst->act_tcam);
+ ip_value |=
+ FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_VALID |
+ FIELD_PREP(FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX,
+ ip_dst - fbd->ipo_dst);
+ ip_mask &= ~(FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_VALID |
+ FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX);
+ }
+
+ flow_value |= FBNIC_RPC_TCAM_ACT1_OUTER_IP_VALID;
+ flow_mask &= ~FBNIC_RPC_TCAM_ACT1_OUTER_IP_VALID;
+ipv6_flow:
+ addr6 = (struct in6_addr *)fsp->h_u.usr_ip6_spec.ip6src;
+ mask6 = (struct in6_addr *)fsp->m_u.usr_ip6_spec.ip6src;
+ if (!ip_src && !ipv6_addr_any(mask6)) {
+ ip_src = __fbnic_ip6_sync(fbd, fbd->ip_src,
+ addr6, mask6);
+ if (!ip_src)
+ return -ENOSPC;
+
+ set_bit(idx, ip_src->act_tcam);
+ ip_value |= FBNIC_RPC_TCAM_ACT0_IPSRC_VALID |
+ FIELD_PREP(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
+ ip_src - fbd->ip_src);
+ ip_mask &= ~(FBNIC_RPC_TCAM_ACT0_IPSRC_VALID |
+ FBNIC_RPC_TCAM_ACT0_IPSRC_IDX);
+ }
+
+ addr6 = (struct in6_addr *)fsp->h_u.usr_ip6_spec.ip6dst;
+ mask6 = (struct in6_addr *)fsp->m_u.usr_ip6_spec.ip6dst;
+ if (!ip_dst && !ipv6_addr_any(mask6)) {
+ ip_dst = __fbnic_ip6_sync(fbd, fbd->ip_dst,
+ addr6, mask6);
+ if (!ip_dst) {
+ if (ip_src && ip_src->state == FBNIC_TCAM_S_ADD)
+ memset(ip_src, 0, sizeof(*ip_src));
+ return -ENOSPC;
+ }
+
+ set_bit(idx, ip_dst->act_tcam);
+ ip_value |= FBNIC_RPC_TCAM_ACT0_IPDST_VALID |
+ FIELD_PREP(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
+ ip_dst - fbd->ip_dst);
+ ip_mask &= ~(FBNIC_RPC_TCAM_ACT0_IPDST_VALID |
+ FBNIC_RPC_TCAM_ACT0_IPDST_IDX);
+ }
+
+ flow_value |= FBNIC_RPC_TCAM_ACT1_IP_IS_V6 |
+ FBNIC_RPC_TCAM_ACT1_IP_VALID |
+ FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
+ flow_mask &= ~(FBNIC_RPC_TCAM_ACT1_IP_IS_V6 |
+ FBNIC_RPC_TCAM_ACT1_IP_VALID |
+ FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID);
+ break;
+ case ETHER_FLOW:
+ if (!is_zero_ether_addr(fsp->m_u.ether_spec.h_dest)) {
+ u8 *addr = fsp->h_u.ether_spec.h_dest;
+ u8 *mask = fsp->m_u.ether_spec.h_dest;
+
+ /* Do not allow MAC addr of 0 */
+ if (is_zero_ether_addr(addr))
+ return -EINVAL;
+
+ /* Only support full MAC address to avoid
+ * conflicts with other MAC addresses.
+ */
+ if (!is_broadcast_ether_addr(mask))
+ return -EINVAL;
+
+ if (is_multicast_ether_addr(addr))
+ mac_addr = __fbnic_mc_sync(fbd, addr);
+ else
+ mac_addr = __fbnic_uc_sync(fbd, addr);
+
+ if (!mac_addr)
+ return -ENOSPC;
+
+ set_bit(idx, mac_addr->act_tcam);
+ flow_value |=
+ FIELD_PREP(FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX,
+ mac_addr - fbd->mac_addr);
+ flow_mask &= ~FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX;
+ }
+
+ flow_value |= FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
+ flow_mask &= ~FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Write action table values */
+ act_tcam->dest = dest;
+ act_tcam->rss_en_mask = fbnic_flow_hash_2_rss_en_mask(fbn, hash_idx);
+
+ /* Write IP Match value/mask to action_tcam[0] */
+ act_tcam->value.tcam[0] = ip_value;
+ act_tcam->mask.tcam[0] = ip_mask;
+
+ /* Write flow type value/mask to action_tcam[1] */
+ act_tcam->value.tcam[1] = flow_value;
+ act_tcam->mask.tcam[1] = flow_mask;
+
+ /* Write error, DSCP, extra L4 matches to action_tcam[2] */
+ act_tcam->value.tcam[2] = misc;
+ act_tcam->mask.tcam[2] = misc_mask;
+
+ /* Write source/destination port values */
+ act_tcam->value.tcam[3] = sport;
+ act_tcam->mask.tcam[3] = sport_mask;
+ act_tcam->value.tcam[4] = dport;
+ act_tcam->mask.tcam[4] = dport_mask;
+
+ for (j = 5; j < FBNIC_RPC_TCAM_ACT_WORD_LEN; j++)
+ act_tcam->mask.tcam[j] = 0xffff;
+
+ act_tcam->state = FBNIC_TCAM_S_UPDATE;
+ fsp->location = location;
+
+ if (netif_running(fbn->netdev)) {
+ fbnic_write_rules(fbd);
+ if (ip_src || ip_dst)
+ fbnic_write_ip_addr(fbd);
+ if (mac_addr)
+ fbnic_write_macda(fbd);
+ }
+
+ return 0;
+}
+
+static void fbnic_clear_nfc_macda(struct fbnic_net *fbn,
+ unsigned int tcam_idx)
+{
+ struct fbnic_dev *fbd = fbn->fbd;
+ int idx;
+
+ for (idx = ARRAY_SIZE(fbd->mac_addr); idx--;)
+ __fbnic_xc_unsync(&fbd->mac_addr[idx], tcam_idx);
+
+ /* Write updates to hardware */
+ if (netif_running(fbn->netdev))
+ fbnic_write_macda(fbd);
+}
+
+static void fbnic_clear_nfc_ip_addr(struct fbnic_net *fbn,
+ unsigned int tcam_idx)
+{
+ struct fbnic_dev *fbd = fbn->fbd;
+ int idx;
+
+ for (idx = ARRAY_SIZE(fbd->ip_src); idx--;)
+ __fbnic_ip_unsync(&fbd->ip_src[idx], tcam_idx);
+ for (idx = ARRAY_SIZE(fbd->ip_dst); idx--;)
+ __fbnic_ip_unsync(&fbd->ip_dst[idx], tcam_idx);
+ for (idx = ARRAY_SIZE(fbd->ipo_src); idx--;)
+ __fbnic_ip_unsync(&fbd->ipo_src[idx], tcam_idx);
+ for (idx = ARRAY_SIZE(fbd->ipo_dst); idx--;)
+ __fbnic_ip_unsync(&fbd->ipo_dst[idx], tcam_idx);
+
+ /* Write updates to hardware */
+ if (netif_running(fbn->netdev))
+ fbnic_write_ip_addr(fbd);
+}
+
+static int fbnic_set_cls_rule_del(struct fbnic_net *fbn,
+ const struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp;
+ struct fbnic_dev *fbd = fbn->fbd;
+ struct fbnic_act_tcam *act_tcam;
+ int idx;
+
+ fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
+
+ if (fsp->location >= FBNIC_RPC_ACT_TBL_NFC_ENTRIES)
+ return -EINVAL;
+
+ idx = fsp->location + FBNIC_RPC_ACT_TBL_NFC_OFFSET;
+ act_tcam = &fbd->act_tcam[idx];
+
+ if (act_tcam->state != FBNIC_TCAM_S_VALID)
+ return -EINVAL;
+
+ act_tcam->state = FBNIC_TCAM_S_DELETE;
+
+ if ((act_tcam->value.tcam[1] & FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID) &&
+ (~act_tcam->mask.tcam[1] & FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX))
+ fbnic_clear_nfc_macda(fbn, idx);
+
+ if ((act_tcam->value.tcam[0] &
+ (FBNIC_RPC_TCAM_ACT0_IPSRC_VALID |
+ FBNIC_RPC_TCAM_ACT0_IPDST_VALID |
+ FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_VALID |
+ FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_VALID)) &&
+ (~act_tcam->mask.tcam[0] &
+ (FBNIC_RPC_TCAM_ACT0_IPSRC_IDX |
+ FBNIC_RPC_TCAM_ACT0_IPDST_IDX |
+ FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX |
+ FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX)))
+ fbnic_clear_nfc_ip_addr(fbn, idx);
+
+ if (netif_running(fbn->netdev))
+ fbnic_write_rules(fbd);
+
+ return 0;
+}
+
+static int fbnic_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXFH:
+ ret = fbnic_set_rss_hash_opts(fbn, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLINS:
+ ret = fbnic_set_cls_rule_ins(fbn, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ ret = fbnic_set_cls_rule_del(fbn, cmd);
+ break;
+ }
+
+ return ret;
+}
+
+static u32 fbnic_get_rxfh_key_size(struct net_device *netdev)
+{
+ return FBNIC_RPC_RSS_KEY_BYTE_LEN;
+}
+
+static u32 fbnic_get_rxfh_indir_size(struct net_device *netdev)
+{
+ return FBNIC_RPC_RSS_TBL_SIZE;
+}
+
+static int
+fbnic_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ unsigned int i;
+
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
+
+ if (rxfh->key) {
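+ /* Each 32-bit rss_key word holds four key bytes in big-endian
+ * order; shift the byte of interest into the top 8 bits before
+ * extracting it.
+ */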
+ for (i = 0; i < FBNIC_RPC_RSS_KEY_BYTE_LEN; i++) {
+ u32 rss_key = fbn->rss_key[i / 4] << ((i % 4) * 8);
+
+ rxfh->key[i] = rss_key >> 24;
+ }
+ }
+
+ if (rxfh->indir) {
+ for (i = 0; i < FBNIC_RPC_RSS_TBL_SIZE; i++)
+ rxfh->indir[i] = fbn->indir_tbl[0][i];
+ }
+
+ return 0;
+}
+
+static unsigned int
+fbnic_set_indir(struct fbnic_net *fbn, unsigned int idx, const u32 *indir)
+{
+ unsigned int i, changes = 0;
+
+ for (i = 0; i < FBNIC_RPC_RSS_TBL_SIZE; i++) {
+ if (fbn->indir_tbl[idx][i] == indir[i])
+ continue;
+
+ fbn->indir_tbl[idx][i] = indir[i];
+ changes++;
+ }
+
+ return changes;
+}
+
+static int
+fbnic_set_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ unsigned int i, changes = 0;
+
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP)
+ return -EINVAL;
+
+ if (rxfh->key) {
+ u32 rss_key = 0;
+
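+ /* Walk the key from the last byte back to the first, accumulating
+ * four bytes at a time; each completed 32-bit word is compared
+ * against the current value and written only if it changed.
+ */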
+ for (i = FBNIC_RPC_RSS_KEY_BYTE_LEN; i--;) {
+ rss_key >>= 8;
+ rss_key |= (u32)(rxfh->key[i]) << 24;
+
+ if (i % 4)
+ continue;
+
+ if (fbn->rss_key[i / 4] == rss_key)
+ continue;
+
+ fbn->rss_key[i / 4] = rss_key;
+ changes++;
+ }
+ }
+
+ if (rxfh->indir)
+ changes += fbnic_set_indir(fbn, 0, rxfh->indir);
+
+ if (changes && netif_running(netdev))
+ fbnic_rss_reinit_hw(fbn->fbd, fbn);
+
+ return 0;
+}
+
+static int
+fbnic_modify_rxfh_context(struct net_device *netdev,
+ struct ethtool_rxfh_context *ctx,
+ const struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ const u32 *indir = rxfh->indir;
+ unsigned int changes;
+
+ if (!indir)
+ indir = ethtool_rxfh_context_indir(ctx);
+
+ changes = fbnic_set_indir(fbn, rxfh->rss_context, indir);
+ if (changes && netif_running(netdev))
+ fbnic_rss_reinit_hw(fbn->fbd, fbn);
+
+ return 0;
+}
+
+static int
+fbnic_create_rxfh_context(struct net_device *netdev,
+ struct ethtool_rxfh_context *ctx,
+ const struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP) {
+ NL_SET_ERR_MSG_MOD(extack, "RSS hash function not supported");
+ return -EOPNOTSUPP;
+ }
+ ctx->hfunc = ETH_RSS_HASH_TOP;
+
+ if (!rxfh->indir) {
+ u32 *indir = ethtool_rxfh_context_indir(ctx);
+ unsigned int num_rx = fbn->num_rx_queues;
+ unsigned int i;
+
+ for (i = 0; i < FBNIC_RPC_RSS_TBL_SIZE; i++)
+ indir[i] = ethtool_rxfh_indir_default(i, num_rx);
+ }
+
+ return fbnic_modify_rxfh_context(netdev, ctx, rxfh, extack);
+}
+
+static int
+fbnic_remove_rxfh_context(struct net_device *netdev,
+ struct ethtool_rxfh_context *ctx, u32 rss_context,
+ struct netlink_ext_ack *extack)
+{
+ /* Nothing to do, contexts are allocated statically */
+ return 0;
+}
+
+static void fbnic_get_channels(struct net_device *netdev,
+ struct ethtool_channels *ch)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_dev *fbd = fbn->fbd;
+
+ ch->max_rx = fbd->max_num_queues;
+ ch->max_tx = fbd->max_num_queues;
+ ch->max_combined = min(ch->max_rx, ch->max_tx);
+ ch->max_other = FBNIC_NON_NAPI_VECTORS;
+
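+ /* Queues that share a NAPI vector are reported as combined
+ * channels; any remainder shows up as standalone rx or tx
+ * channels.
+ */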
+ if (fbn->num_rx_queues > fbn->num_napi ||
+ fbn->num_tx_queues > fbn->num_napi)
+ ch->combined_count = min(fbn->num_rx_queues,
+ fbn->num_tx_queues);
+ else
+ ch->combined_count =
+ fbn->num_rx_queues + fbn->num_tx_queues - fbn->num_napi;
+ ch->rx_count = fbn->num_rx_queues - ch->combined_count;
+ ch->tx_count = fbn->num_tx_queues - ch->combined_count;
+ ch->other_count = FBNIC_NON_NAPI_VECTORS;
+}
+
+static void fbnic_set_queues(struct fbnic_net *fbn, struct ethtool_channels *ch,
+ unsigned int max_napis)
+{
+ fbn->num_rx_queues = ch->rx_count + ch->combined_count;
+ fbn->num_tx_queues = ch->tx_count + ch->combined_count;
+ fbn->num_napi = min(ch->rx_count + ch->tx_count + ch->combined_count,
+ max_napis);
+}
+
+static int fbnic_set_channels(struct net_device *netdev,
+ struct ethtool_channels *ch)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ unsigned int max_napis, standalone;
+ struct fbnic_dev *fbd = fbn->fbd;
+ struct fbnic_net *clone;
+ int err;
+
+ max_napis = fbd->num_irqs - FBNIC_NON_NAPI_VECTORS;
+ standalone = ch->rx_count + ch->tx_count;
+
+ /* Limits for standalone queues:
+ * - each queue has its own NAPI (num_napi >= rx + tx + combined)
+ * - combining queues (combined not 0, rx or tx must be 0)
+ */
+ if ((ch->rx_count && ch->tx_count && ch->combined_count) ||
+ (standalone && standalone + ch->combined_count > max_napis) ||
+ ch->rx_count + ch->combined_count > fbd->max_num_queues ||
+ ch->tx_count + ch->combined_count > fbd->max_num_queues ||
+ ch->other_count != FBNIC_NON_NAPI_VECTORS)
+ return -EINVAL;
+
+ if (!netif_running(netdev)) {
+ fbnic_set_queues(fbn, ch, max_napis);
+ fbnic_reset_indir_tbl(fbn);
+ return 0;
+ }
+
+ clone = fbnic_clone_create(fbn);
+ if (!clone)
+ return -ENOMEM;
+
+ fbnic_set_queues(clone, ch, max_napis);
+
+ err = fbnic_alloc_napi_vectors(clone);
+ if (err)
+ goto err_free_clone;
+
+ err = fbnic_alloc_resources(clone);
+ if (err)
+ goto err_free_napis;
+
+ fbnic_down_noidle(fbn);
+ err = fbnic_wait_all_queues_idle(fbn->fbd, true);
+ if (err)
+ goto err_start_stack;
+
+ err = fbnic_set_netif_queues(clone);
+ if (err)
+ goto err_start_stack;
+
+ /* Nothing can fail past this point */
+ fbnic_flush(fbn);
+
+ fbnic_clone_swap(fbn, clone);
+
+ /* Reset RSS indirection table */
+ fbnic_reset_indir_tbl(fbn);
+
+ fbnic_up(fbn);
+
+ fbnic_free_resources(clone);
+ fbnic_free_napi_vectors(clone);
+ fbnic_clone_free(clone);
+
+ return 0;
+
+err_start_stack:
+ fbnic_flush(fbn);
+ fbnic_up(fbn);
+ fbnic_free_resources(clone);
+err_free_napis:
+ fbnic_free_napi_vectors(clone);
+err_free_clone:
+ fbnic_clone_free(clone);
+ return err;
+}
+
+static int
+fbnic_get_ts_info(struct net_device *netdev,
+ struct kernel_ethtool_ts_info *tsinfo)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ tsinfo->phc_index = ptp_clock_index(fbn->fbd->ptp);
+
+ tsinfo->so_timestamping =
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ tsinfo->tx_types =
+ BIT(HWTSTAMP_TX_OFF) |
+ BIT(HWTSTAMP_TX_ON);
+
+ tsinfo->rx_filters =
+ BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
+ BIT(HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
+static void fbnic_get_ts_stats(struct net_device *netdev,
+ struct ethtool_ts_stats *ts_stats)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ u64 ts_packets, ts_lost;
+ struct fbnic_ring *ring;
+ unsigned int start;
+ int i;
+
+ ts_stats->pkts = fbn->tx_stats.twq.ts_packets;
+ ts_stats->lost = fbn->tx_stats.twq.ts_lost;
+ for (i = 0; i < fbn->num_tx_queues; i++) {
+ ring = fbn->tx[i];
+ do {
+ start = u64_stats_fetch_begin(&ring->stats.syncp);
+ ts_packets = ring->stats.twq.ts_packets;
+ ts_lost = ring->stats.twq.ts_lost;
+ } while (u64_stats_fetch_retry(&ring->stats.syncp, start));
+ ts_stats->pkts += ts_packets;
+ ts_stats->lost += ts_lost;
+ }
+}
+
+static void fbnic_set_counter(u64 *stat, struct fbnic_stat_counter *counter)
+{
+ if (counter->reported)
+ *stat = counter->value;
}
static void
@@ -164,51 +1612,32 @@ fbnic_get_eth_mac_stats(struct net_device *netdev,
&mac_stats->eth_mac.FrameTooLongErrors);
}
-static void fbnic_get_ts_stats(struct net_device *netdev,
- struct ethtool_ts_stats *ts_stats)
-{
- struct fbnic_net *fbn = netdev_priv(netdev);
- u64 ts_packets, ts_lost;
- struct fbnic_ring *ring;
- unsigned int start;
- int i;
-
- ts_stats->pkts = fbn->tx_stats.ts_packets;
- ts_stats->lost = fbn->tx_stats.ts_lost;
- for (i = 0; i < fbn->num_tx_queues; i++) {
- ring = fbn->tx[i];
- do {
- start = u64_stats_fetch_begin(&ring->stats.syncp);
- ts_packets = ring->stats.ts_packets;
- ts_lost = ring->stats.ts_lost;
- } while (u64_stats_fetch_retry(&ring->stats.syncp, start));
- ts_stats->pkts += ts_packets;
- ts_stats->lost += ts_lost;
- }
-}
-
-static void fbnic_get_regs(struct net_device *netdev,
- struct ethtool_regs *regs, void *data)
-{
- struct fbnic_net *fbn = netdev_priv(netdev);
-
- fbnic_csr_get_regs(fbn->fbd, data, &regs->version);
-}
-
-static int fbnic_get_regs_len(struct net_device *netdev)
-{
- struct fbnic_net *fbn = netdev_priv(netdev);
-
- return fbnic_csr_regs_len(fbn->fbd) * sizeof(u32);
-}
-
static const struct ethtool_ops fbnic_ethtool_ops = {
+ .supported_coalesce_params =
+ ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_RX_MAX_FRAMES,
+ .rxfh_max_num_contexts = FBNIC_RPC_RSS_TBL_COUNT,
.get_drvinfo = fbnic_get_drvinfo,
.get_regs_len = fbnic_get_regs_len,
.get_regs = fbnic_get_regs,
+ .get_coalesce = fbnic_get_coalesce,
+ .set_coalesce = fbnic_set_coalesce,
+ .get_ringparam = fbnic_get_ringparam,
+ .set_ringparam = fbnic_set_ringparam,
.get_strings = fbnic_get_strings,
.get_ethtool_stats = fbnic_get_ethtool_stats,
.get_sset_count = fbnic_get_sset_count,
+ .get_rxnfc = fbnic_get_rxnfc,
+ .set_rxnfc = fbnic_set_rxnfc,
+ .get_rxfh_key_size = fbnic_get_rxfh_key_size,
+ .get_rxfh_indir_size = fbnic_get_rxfh_indir_size,
+ .get_rxfh = fbnic_get_rxfh,
+ .set_rxfh = fbnic_set_rxfh,
+ .create_rxfh_context = fbnic_create_rxfh_context,
+ .modify_rxfh_context = fbnic_modify_rxfh_context,
+ .remove_rxfh_context = fbnic_remove_rxfh_context,
+ .get_channels = fbnic_get_channels,
+ .set_channels = fbnic_set_channels,
.get_ts_info = fbnic_get_ts_info,
.get_ts_stats = fbnic_get_ts_stats,
.get_eth_mac_stats = fbnic_get_eth_mac_stats,
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
index 8f7a2a19ddf8..4521d0483d18 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
@@ -17,11 +17,29 @@ static void __fbnic_mbx_wr_desc(struct fbnic_dev *fbd, int mbx_idx,
{
u32 desc_offset = FBNIC_IPC_MBX(mbx_idx, desc_idx);
+ /* Write the upper 32b and then the lower 32b. This way the FW can
+ * read lower, upper, lower to verify that the state of the
+ * descriptor wasn't changed mid-transaction.
+ */
fw_wr32(fbd, desc_offset + 1, upper_32_bits(desc));
fw_wrfl(fbd);
fw_wr32(fbd, desc_offset, lower_32_bits(desc));
}
+static void __fbnic_mbx_invalidate_desc(struct fbnic_dev *fbd, int mbx_idx,
+ int desc_idx, u32 desc)
+{
+ u32 desc_offset = FBNIC_IPC_MBX(mbx_idx, desc_idx);
+
+ /* For initialization we write the lower 32b of the descriptor first.
+ * This way we can set the state to mark it invalid before we clear the
+ * upper 32b.
+ */
+ fw_wr32(fbd, desc_offset, desc);
+ fw_wrfl(fbd);
+ fw_wr32(fbd, desc_offset + 1, 0);
+}
+
static u64 __fbnic_mbx_rd_desc(struct fbnic_dev *fbd, int mbx_idx, int desc_idx)
{
u32 desc_offset = FBNIC_IPC_MBX(mbx_idx, desc_idx);
@@ -33,29 +51,41 @@ static u64 __fbnic_mbx_rd_desc(struct fbnic_dev *fbd, int mbx_idx, int desc_idx)
return desc;
}
-static void fbnic_mbx_init_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
+static void fbnic_mbx_reset_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
{
int desc_idx;
+ /* Disable DMA transactions from the device,
+ * and flush any transactions triggered during cleaning
+ */
+ switch (mbx_idx) {
+ case FBNIC_IPC_MBX_RX_IDX:
+ wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AW_CFG,
+ FBNIC_PUL_OB_TLP_HDR_AW_CFG_FLUSH);
+ break;
+ case FBNIC_IPC_MBX_TX_IDX:
+ wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AR_CFG,
+ FBNIC_PUL_OB_TLP_HDR_AR_CFG_FLUSH);
+ break;
+ }
+
+ wrfl(fbd);
+
/* Initialize first descriptor to all 0s. Doing this gives us a
* solid stop for the firmware to hit when it is done looping
* through the ring.
*/
- __fbnic_mbx_wr_desc(fbd, mbx_idx, 0, 0);
-
- fw_wrfl(fbd);
+ __fbnic_mbx_invalidate_desc(fbd, mbx_idx, 0, 0);
/* We then fill the rest of the ring starting at the end and moving
* back toward descriptor 0 with skip descriptors that have no
* length nor address, and tell the firmware that they can skip
* them and just move past them to the one we initialized to 0.
*/
- for (desc_idx = FBNIC_IPC_MBX_DESC_LEN; --desc_idx;) {
- __fbnic_mbx_wr_desc(fbd, mbx_idx, desc_idx,
- FBNIC_IPC_MBX_DESC_FW_CMPL |
- FBNIC_IPC_MBX_DESC_HOST_CMPL);
- fw_wrfl(fbd);
- }
+ for (desc_idx = FBNIC_IPC_MBX_DESC_LEN; --desc_idx;)
+ __fbnic_mbx_invalidate_desc(fbd, mbx_idx, desc_idx,
+ FBNIC_IPC_MBX_DESC_FW_CMPL |
+ FBNIC_IPC_MBX_DESC_HOST_CMPL);
}
void fbnic_mbx_init(struct fbnic_dev *fbd)
@@ -76,7 +106,7 @@ void fbnic_mbx_init(struct fbnic_dev *fbd)
wr32(fbd, FBNIC_INTR_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY);
for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
- fbnic_mbx_init_desc_ring(fbd, i);
+ fbnic_mbx_reset_desc_ring(fbd, i);
}
static int fbnic_mbx_map_msg(struct fbnic_dev *fbd, int mbx_idx,
@@ -97,11 +127,8 @@ static int fbnic_mbx_map_msg(struct fbnic_dev *fbd, int mbx_idx,
return -EBUSY;
addr = dma_map_single(fbd->dev, msg, PAGE_SIZE, direction);
- if (dma_mapping_error(fbd->dev, addr)) {
- free_page((unsigned long)msg);
-
+ if (dma_mapping_error(fbd->dev, addr))
return -ENOSPC;
- }
mbx->buf_info[tail].msg = msg;
mbx->buf_info[tail].addr = addr;
@@ -141,7 +168,7 @@ static void fbnic_mbx_clean_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
{
int i;
- fbnic_mbx_init_desc_ring(fbd, mbx_idx);
+ fbnic_mbx_reset_desc_ring(fbd, mbx_idx);
for (i = FBNIC_IPC_MBX_DESC_LEN; i--;)
fbnic_mbx_unmap_and_free_msg(fbd, mbx_idx, i);
@@ -207,6 +234,44 @@ static int fbnic_mbx_map_tlv_msg(struct fbnic_dev *fbd,
return err;
}
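+/* Reserve a completion slot keyed by message type. Only one
+ * outstanding completion per message type is allowed (-EEXIST), and
+ * -EXFULL is returned when every slot is in use.
+ */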
+static int fbnic_mbx_set_cmpl_slot(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *cmpl_data)
+{
+ struct fbnic_fw_mbx *tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];
+ int free = -EXFULL;
+ int i;
+
+ if (!tx_mbx->ready)
+ return -ENODEV;
+
+ for (i = 0; i < FBNIC_MBX_CMPL_SLOTS; i++) {
+ if (!fbd->cmpl_data[i])
+ free = i;
+ else if (fbd->cmpl_data[i]->msg_type == cmpl_data->msg_type)
+ return -EEXIST;
+ }
+
+ if (free == -EXFULL)
+ return -EXFULL;
+
+ fbd->cmpl_data[free] = cmpl_data;
+
+ return 0;
+}
+
+static void fbnic_mbx_clear_cmpl_slot(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *cmpl_data)
+{
+ int i;
+
+ for (i = 0; i < FBNIC_MBX_CMPL_SLOTS; i++) {
+ if (fbd->cmpl_data[i] == cmpl_data) {
+ fbd->cmpl_data[i] = NULL;
+ break;
+ }
+ }
+}
+
static void fbnic_mbx_process_tx_msgs(struct fbnic_dev *fbd)
{
struct fbnic_fw_mbx *tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];
@@ -228,6 +293,79 @@ static void fbnic_mbx_process_tx_msgs(struct fbnic_dev *fbd)
tx_mbx->head = head;
}
+int fbnic_mbx_set_cmpl(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *cmpl_data)
+{
+ unsigned long flags;
+ int err;
+
+ spin_lock_irqsave(&fbd->fw_tx_lock, flags);
+ err = fbnic_mbx_set_cmpl_slot(fbd, cmpl_data);
+ spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);
+
+ return err;
+}
+
+static int fbnic_mbx_map_req_w_cmpl(struct fbnic_dev *fbd,
+ struct fbnic_tlv_msg *msg,
+ struct fbnic_fw_completion *cmpl_data)
+{
+ unsigned long flags;
+ int err;
+
+ spin_lock_irqsave(&fbd->fw_tx_lock, flags);
+ if (cmpl_data) {
+ err = fbnic_mbx_set_cmpl_slot(fbd, cmpl_data);
+ if (err)
+ goto unlock_mbx;
+ }
+
+ err = fbnic_mbx_map_msg(fbd, FBNIC_IPC_MBX_TX_IDX, msg,
+ le16_to_cpu(msg->hdr.len) * sizeof(u32), 1);
+
+ /* If we successfully reserved a completion slot but mapping the msg
+ * failed, clear the completion data for the next caller.
+ */
+ if (err && cmpl_data)
+ fbnic_mbx_clear_cmpl_slot(fbd, cmpl_data);
+
+unlock_mbx:
+ spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);
+
+ return err;
+}
+
+static void fbnic_fw_release_cmpl_data(struct kref *kref)
+{
+ struct fbnic_fw_completion *cmpl_data;
+
+ cmpl_data = container_of(kref, struct fbnic_fw_completion,
+ ref_count);
+ kfree(cmpl_data);
+}
+
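+/* Look up an outstanding completion by message type and take a
+ * reference on it; the caller is expected to release it via
+ * fbnic_fw_put_cmpl() when finished.
+ */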
+static struct fbnic_fw_completion *
+fbnic_fw_get_cmpl_by_type(struct fbnic_dev *fbd, u32 msg_type)
+{
+ struct fbnic_fw_completion *cmpl_data = NULL;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&fbd->fw_tx_lock, flags);
+ for (i = 0; i < FBNIC_MBX_CMPL_SLOTS; i++) {
+ if (fbd->cmpl_data[i] &&
+ fbd->cmpl_data[i]->msg_type == msg_type) {
+ cmpl_data = fbd->cmpl_data[i];
+ kref_get(&cmpl_data->ref_count);
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);
+
+ return cmpl_data;
+}
+
/**
* fbnic_fw_xmit_simple_msg - Transmit a simple single TLV message w/o data
* @fbd: FBNIC device structure
@@ -265,67 +403,41 @@ static int fbnic_fw_xmit_simple_msg(struct fbnic_dev *fbd, u32 msg_type)
return err;
}
-/**
- * fbnic_fw_xmit_cap_msg - Allocate and populate a FW capabilities message
- * @fbd: FBNIC device structure
- *
- * Return: NULL on failure to allocate, error pointer on error, or pointer
- * to new TLV test message.
- *
- * Sends a single TLV header indicating the host wants the firmware to
- * confirm the capabilities and version.
- **/
-static int fbnic_fw_xmit_cap_msg(struct fbnic_dev *fbd)
-{
- int err = fbnic_fw_xmit_simple_msg(fbd, FBNIC_TLV_MSG_ID_HOST_CAP_REQ);
-
- /* Return 0 if we are not calling this on ASIC */
- return (err == -EOPNOTSUPP) ? 0 : err;
-}
-
-static void fbnic_mbx_postinit_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
+static void fbnic_mbx_init_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
{
struct fbnic_fw_mbx *mbx = &fbd->mbx[mbx_idx];
- /* This is a one time init, so just exit if it is completed */
- if (mbx->ready)
- return;
-
mbx->ready = true;
switch (mbx_idx) {
case FBNIC_IPC_MBX_RX_IDX:
+ /* Enable DMA writes from the device */
+ wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AW_CFG,
+ FBNIC_PUL_OB_TLP_HDR_AW_CFG_BME);
+
/* Make sure we have a page for the FW to write to */
fbnic_mbx_alloc_rx_msgs(fbd);
break;
case FBNIC_IPC_MBX_TX_IDX:
- /* Force version to 1 if we successfully requested an update
- * from the firmware. This should be overwritten once we get
- * the actual version from the firmware in the capabilities
- * request message.
- */
- if (!fbnic_fw_xmit_cap_msg(fbd) &&
- !fbd->fw_cap.running.mgmt.version)
- fbd->fw_cap.running.mgmt.version = 1;
+ /* Enable DMA reads from the device */
+ wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AR_CFG,
+ FBNIC_PUL_OB_TLP_HDR_AR_CFG_BME);
break;
}
}
-static void fbnic_mbx_postinit(struct fbnic_dev *fbd)
+static bool fbnic_mbx_event(struct fbnic_dev *fbd)
{
- int i;
-
- /* We only need to do this on the first interrupt following init.
+ /* We only need to do this on the first interrupt following reset.
* this primes the mailbox so that we will have cleared all the
* skip descriptors.
*/
if (!(rd32(fbd, FBNIC_INTR_STATUS(0)) & (1u << FBNIC_FW_MSIX_ENTRY)))
- return;
+ return false;
wr32(fbd, FBNIC_INTR_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY);
- for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
- fbnic_mbx_postinit_desc_ring(fbd, i);
+ return true;
}
/**
@@ -403,6 +515,7 @@ static const struct fbnic_tlv_index fbnic_fw_cap_resp_index[] = {
FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_UEFI_VERSION),
FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_UEFI_COMMIT_STR,
FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_ANTI_ROLLBACK_VERSION),
FBNIC_TLV_ATTR_LAST
};
@@ -437,16 +550,13 @@ static int fbnic_fw_parse_bmc_addrs(u8 bmc_mac_addr[][ETH_ALEN],
static int fbnic_fw_parse_cap_resp(void *opaque, struct fbnic_tlv_msg **results)
{
- u32 active_slot = 0, all_multi = 0;
+ u32 all_multi = 0, version = 0;
struct fbnic_dev *fbd = opaque;
- u32 speed = 0, fec = 0;
- size_t commit_size = 0;
bool bmc_present;
int err;
- get_unsigned_result(FBNIC_FW_CAP_RESP_VERSION,
- fbd->fw_cap.running.mgmt.version);
-
+ version = fta_get_uint(results, FBNIC_FW_CAP_RESP_VERSION);
+ fbd->fw_cap.running.mgmt.version = version;
if (!fbd->fw_cap.running.mgmt.version)
return -EINVAL;
@@ -467,43 +577,41 @@ static int fbnic_fw_parse_cap_resp(void *opaque, struct fbnic_tlv_msg **results)
return -EINVAL;
}
- get_string_result(FBNIC_FW_CAP_RESP_VERSION_COMMIT_STR, commit_size,
- fbd->fw_cap.running.mgmt.commit,
- FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);
- if (!commit_size)
+ if (fta_get_str(results, FBNIC_FW_CAP_RESP_VERSION_COMMIT_STR,
+ fbd->fw_cap.running.mgmt.commit,
+ FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE) <= 0)
dev_warn(fbd->dev, "Firmware did not send mgmt commit!\n");
- get_unsigned_result(FBNIC_FW_CAP_RESP_STORED_VERSION,
- fbd->fw_cap.stored.mgmt.version);
- get_string_result(FBNIC_FW_CAP_RESP_STORED_COMMIT_STR, commit_size,
- fbd->fw_cap.stored.mgmt.commit,
- FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);
-
- get_unsigned_result(FBNIC_FW_CAP_RESP_CMRT_VERSION,
- fbd->fw_cap.running.bootloader.version);
- get_string_result(FBNIC_FW_CAP_RESP_CMRT_COMMIT_STR, commit_size,
- fbd->fw_cap.running.bootloader.commit,
- FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);
-
- get_unsigned_result(FBNIC_FW_CAP_RESP_STORED_CMRT_VERSION,
- fbd->fw_cap.stored.bootloader.version);
- get_string_result(FBNIC_FW_CAP_RESP_STORED_CMRT_COMMIT_STR, commit_size,
- fbd->fw_cap.stored.bootloader.commit,
- FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);
-
- get_unsigned_result(FBNIC_FW_CAP_RESP_UEFI_VERSION,
- fbd->fw_cap.stored.undi.version);
- get_string_result(FBNIC_FW_CAP_RESP_UEFI_COMMIT_STR, commit_size,
- fbd->fw_cap.stored.undi.commit,
- FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);
-
- get_unsigned_result(FBNIC_FW_CAP_RESP_ACTIVE_FW_SLOT, active_slot);
- fbd->fw_cap.active_slot = active_slot;
-
- get_unsigned_result(FBNIC_FW_CAP_RESP_FW_LINK_SPEED, speed);
- get_unsigned_result(FBNIC_FW_CAP_RESP_FW_LINK_FEC, fec);
- fbd->fw_cap.link_speed = speed;
- fbd->fw_cap.link_fec = fec;
+ version = fta_get_uint(results, FBNIC_FW_CAP_RESP_STORED_VERSION);
+ fbd->fw_cap.stored.mgmt.version = version;
+ fta_get_str(results, FBNIC_FW_CAP_RESP_STORED_COMMIT_STR,
+ fbd->fw_cap.stored.mgmt.commit,
+ FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);
+
+ version = fta_get_uint(results, FBNIC_FW_CAP_RESP_CMRT_VERSION);
+ fbd->fw_cap.running.bootloader.version = version;
+ fta_get_str(results, FBNIC_FW_CAP_RESP_CMRT_COMMIT_STR,
+ fbd->fw_cap.running.bootloader.commit,
+ FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);
+
+ version = fta_get_uint(results, FBNIC_FW_CAP_RESP_STORED_CMRT_VERSION);
+ fbd->fw_cap.stored.bootloader.version = version;
+ fta_get_str(results, FBNIC_FW_CAP_RESP_STORED_CMRT_COMMIT_STR,
+ fbd->fw_cap.stored.bootloader.commit,
+ FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);
+
+ version = fta_get_uint(results, FBNIC_FW_CAP_RESP_UEFI_VERSION);
+ fbd->fw_cap.stored.undi.version = version;
+ fta_get_str(results, FBNIC_FW_CAP_RESP_UEFI_COMMIT_STR,
+ fbd->fw_cap.stored.undi.commit,
+ FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);
+
+ fbd->fw_cap.active_slot =
+ fta_get_uint(results, FBNIC_FW_CAP_RESP_ACTIVE_FW_SLOT);
+ fbd->fw_cap.link_speed =
+ fta_get_uint(results, FBNIC_FW_CAP_RESP_FW_LINK_SPEED);
+ fbd->fw_cap.link_fec =
+ fta_get_uint(results, FBNIC_FW_CAP_RESP_FW_LINK_FEC);
bmc_present = !!results[FBNIC_FW_CAP_RESP_BMC_PRESENT];
if (bmc_present) {
@@ -518,7 +626,8 @@ static int fbnic_fw_parse_cap_resp(void *opaque, struct fbnic_tlv_msg **results)
if (err)
return err;
- get_unsigned_result(FBNIC_FW_CAP_RESP_BMC_ALL_MULTI, all_multi);
+ all_multi =
+ fta_get_uint(results, FBNIC_FW_CAP_RESP_BMC_ALL_MULTI);
} else {
memset(fbd->fw_cap.bmc_mac_addr, 0,
sizeof(fbd->fw_cap.bmc_mac_addr));
@@ -529,6 +638,9 @@ static int fbnic_fw_parse_cap_resp(void *opaque, struct fbnic_tlv_msg **results)
if (results[FBNIC_FW_CAP_RESP_BMC_ALL_MULTI] || !bmc_present)
fbd->fw_cap.all_multi = all_multi;
+ fbd->fw_cap.anti_rollback_version =
+ fta_get_uint(results, FBNIC_FW_CAP_RESP_ANTI_ROLLBACK_VERSION);
+
return 0;
}
@@ -651,6 +763,265 @@ void fbnic_fw_check_heartbeat(struct fbnic_dev *fbd)
dev_warn(fbd->dev, "Failed to send heartbeat message\n");
}
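+/**
+ * fbnic_fw_xmit_fw_start_upgrade - Ask firmware to prepare for an update
+ * @fbd: FBNIC device structure
+ * @cmpl_data: Completion data structure to associate with the request
+ * @id: Flash section to be written
+ * @len: Total length of the component image
+ *
+ * Sends a start upgrade request carrying the target section and image
+ * length and registers @cmpl_data so the firmware response can be matched
+ * back to the caller.
+ *
+ * Return: 0 on success, negative error value on failure
+ */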
+int fbnic_fw_xmit_fw_start_upgrade(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *cmpl_data,
+ unsigned int id, unsigned int len)
+{
+ struct fbnic_tlv_msg *msg;
+ int err;
+
+ if (!fbnic_fw_present(fbd))
+ return -ENODEV;
+
+ if (!len)
+ return -EINVAL;
+
+ msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_FW_START_UPGRADE_REQ);
+ if (!msg)
+ return -ENOMEM;
+
+ err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_START_UPGRADE_SECTION, id);
+ if (err)
+ goto free_message;
+
+ err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_START_UPGRADE_IMAGE_LENGTH,
+ len);
+ if (err)
+ goto free_message;
+
+ err = fbnic_mbx_map_req_w_cmpl(fbd, msg, cmpl_data);
+ if (err)
+ goto free_message;
+
+ return 0;
+
+free_message:
+ free_page((unsigned long)msg);
+ return err;
+}
+
+static const struct fbnic_tlv_index fbnic_fw_start_upgrade_resp_index[] = {
+ FBNIC_TLV_ATTR_S32(FBNIC_FW_START_UPGRADE_ERROR),
+ FBNIC_TLV_ATTR_LAST
+};
+
+static int fbnic_fw_parse_fw_start_upgrade_resp(void *opaque,
+ struct fbnic_tlv_msg **results)
+{
+ struct fbnic_fw_completion *cmpl_data;
+ struct fbnic_dev *fbd = opaque;
+ u32 msg_type;
+ s32 err;
+
+ /* Verify we have a completion pointer */
+ msg_type = FBNIC_TLV_MSG_ID_FW_START_UPGRADE_REQ;
+ cmpl_data = fbnic_fw_get_cmpl_by_type(fbd, msg_type);
+ if (!cmpl_data)
+ return -ENOSPC;
+
+ /* Check for errors */
+ err = fta_get_sint(results, FBNIC_FW_START_UPGRADE_ERROR);
+
+ cmpl_data->result = err;
+ complete(&cmpl_data->done);
+ fbnic_fw_put_cmpl(cmpl_data);
+
+ return 0;
+}
+
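+/**
+ * fbnic_fw_xmit_fw_write_chunk - Reply to a firmware write chunk request
+ * @fbd: FBNIC device structure
+ * @data: Start of the component image, or NULL if only reporting an error
+ * @offset: Offset of the requested chunk within @data
+ * @length: Length of the requested chunk
+ * @cancel_error: Non-zero error code to ask the firmware to cancel the upgrade
+ *
+ * Return: 0 on success, negative error value on failure
+ */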
+int fbnic_fw_xmit_fw_write_chunk(struct fbnic_dev *fbd,
+ const u8 *data, u32 offset, u16 length,
+ int cancel_error)
+{
+ struct fbnic_tlv_msg *msg;
+ int err;
+
+ msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_FW_WRITE_CHUNK_RESP);
+ if (!msg)
+ return -ENOMEM;
+
+ /* Report error to FW to cancel upgrade */
+ if (cancel_error) {
+ err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_WRITE_CHUNK_ERROR,
+ cancel_error);
+ if (err)
+ goto free_message;
+ }
+
+ if (data) {
+ err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_WRITE_CHUNK_OFFSET,
+ offset);
+ if (err)
+ goto free_message;
+
+ err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_WRITE_CHUNK_LENGTH,
+ length);
+ if (err)
+ goto free_message;
+
+ err = fbnic_tlv_attr_put_value(msg, FBNIC_FW_WRITE_CHUNK_DATA,
+ data + offset, length);
+ if (err)
+ goto free_message;
+ }
+
+ err = fbnic_mbx_map_tlv_msg(fbd, msg);
+ if (err)
+ goto free_message;
+
+ return 0;
+
+free_message:
+ free_page((unsigned long)msg);
+ return err;
+}
+
+static const struct fbnic_tlv_index fbnic_fw_write_chunk_req_index[] = {
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_WRITE_CHUNK_OFFSET),
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_WRITE_CHUNK_LENGTH),
+ FBNIC_TLV_ATTR_LAST
+};
+
+static int fbnic_fw_parse_fw_write_chunk_req(void *opaque,
+ struct fbnic_tlv_msg **results)
+{
+ struct fbnic_fw_completion *cmpl_data;
+ struct fbnic_dev *fbd = opaque;
+ u32 msg_type;
+ u32 offset;
+ u32 length;
+
+ /* Verify we have a completion pointer */
+ msg_type = FBNIC_TLV_MSG_ID_FW_WRITE_CHUNK_REQ;
+ cmpl_data = fbnic_fw_get_cmpl_by_type(fbd, msg_type);
+ if (!cmpl_data)
+ return -ENOSPC;
+
+ /* Pull length/offset pair and mark it as complete */
+ offset = fta_get_uint(results, FBNIC_FW_WRITE_CHUNK_OFFSET);
+ length = fta_get_uint(results, FBNIC_FW_WRITE_CHUNK_LENGTH);
+ cmpl_data->u.fw_update.offset = offset;
+ cmpl_data->u.fw_update.length = length;
+
+ complete(&cmpl_data->done);
+ fbnic_fw_put_cmpl(cmpl_data);
+
+ return 0;
+}
+
+static const struct fbnic_tlv_index fbnic_fw_finish_upgrade_req_index[] = {
+ FBNIC_TLV_ATTR_S32(FBNIC_FW_FINISH_UPGRADE_ERROR),
+ FBNIC_TLV_ATTR_LAST
+};
+
+static int fbnic_fw_parse_fw_finish_upgrade_req(void *opaque,
+ struct fbnic_tlv_msg **results)
+{
+ struct fbnic_fw_completion *cmpl_data;
+ struct fbnic_dev *fbd = opaque;
+ u32 msg_type;
+ s32 err;
+
+ /* Verify we have a completion pointer */
+ msg_type = FBNIC_TLV_MSG_ID_FW_WRITE_CHUNK_REQ;
+ cmpl_data = fbnic_fw_get_cmpl_by_type(fbd, msg_type);
+ if (!cmpl_data)
+ return -ENOSPC;
+
+ /* Check for errors */
+ err = fta_get_sint(results, FBNIC_FW_FINISH_UPGRADE_ERROR);
+
+ /* Close out update by incrementing offset by length which should
+ * match the total size of the component. Set length to 0 since no
+ * new chunks will be requested.
+ */
+ cmpl_data->u.fw_update.offset += cmpl_data->u.fw_update.length;
+ cmpl_data->u.fw_update.length = 0;
+
+ cmpl_data->result = err;
+ complete(&cmpl_data->done);
+ fbnic_fw_put_cmpl(cmpl_data);
+
+ return 0;
+}
+
+/**
+ * fbnic_fw_xmit_tsene_read_msg - Create and transmit a sensor read request
+ * @fbd: FBNIC device structure
+ * @cmpl_data: Completion data structure to store sensor response
+ *
+ * Asks the firmware to provide an update with the latest sensor data.
+ * The response will contain temperature and voltage readings.
+ *
+ * Return: 0 on success, negative error value on failure
+ */
+int fbnic_fw_xmit_tsene_read_msg(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *cmpl_data)
+{
+ struct fbnic_tlv_msg *msg;
+ int err;
+
+ if (!fbnic_fw_present(fbd))
+ return -ENODEV;
+
+ msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_TSENE_READ_REQ);
+ if (!msg)
+ return -ENOMEM;
+
+ err = fbnic_mbx_map_req_w_cmpl(fbd, msg, cmpl_data);
+ if (err)
+ goto free_message;
+
+ return 0;
+
+free_message:
+ free_page((unsigned long)msg);
+ return err;
+}
+
+static const struct fbnic_tlv_index fbnic_tsene_read_resp_index[] = {
+ FBNIC_TLV_ATTR_S32(FBNIC_FW_TSENE_THERM),
+ FBNIC_TLV_ATTR_S32(FBNIC_FW_TSENE_VOLT),
+ FBNIC_TLV_ATTR_S32(FBNIC_FW_TSENE_ERROR),
+ FBNIC_TLV_ATTR_LAST
+};
+
+static int fbnic_fw_parse_tsene_read_resp(void *opaque,
+ struct fbnic_tlv_msg **results)
+{
+ struct fbnic_fw_completion *cmpl_data;
+ struct fbnic_dev *fbd = opaque;
+ s32 err_resp;
+ int err = 0;
+
+ /* Verify we have a completion pointer to provide with data */
+ cmpl_data = fbnic_fw_get_cmpl_by_type(fbd,
+ FBNIC_TLV_MSG_ID_TSENE_READ_RESP);
+ if (!cmpl_data)
+ return -ENOSPC;
+
+ err_resp = fta_get_sint(results, FBNIC_FW_TSENE_ERROR);
+ if (err_resp)
+ goto msg_err;
+
+ if (!results[FBNIC_FW_TSENE_THERM] || !results[FBNIC_FW_TSENE_VOLT]) {
+ err = -EINVAL;
+ goto msg_err;
+ }
+
+ cmpl_data->u.tsene.millidegrees =
+ fta_get_sint(results, FBNIC_FW_TSENE_THERM);
+ cmpl_data->u.tsene.millivolts =
+ fta_get_sint(results, FBNIC_FW_TSENE_VOLT);
+
+msg_err:
+ cmpl_data->result = err_resp ? : err;
+ complete(&cmpl_data->done);
+ fbnic_fw_put_cmpl(cmpl_data);
+
+ return err;
+}
+
static const struct fbnic_tlv_parser fbnic_fw_tlv_parser[] = {
FBNIC_TLV_PARSER(FW_CAP_RESP, fbnic_fw_cap_resp_index,
fbnic_fw_parse_cap_resp),
@@ -658,6 +1029,18 @@ static const struct fbnic_tlv_parser fbnic_fw_tlv_parser[] = {
fbnic_fw_parse_ownership_resp),
FBNIC_TLV_PARSER(HEARTBEAT_RESP, fbnic_heartbeat_resp_index,
fbnic_fw_parse_heartbeat_resp),
+ FBNIC_TLV_PARSER(FW_START_UPGRADE_RESP,
+ fbnic_fw_start_upgrade_resp_index,
+ fbnic_fw_parse_fw_start_upgrade_resp),
+ FBNIC_TLV_PARSER(FW_WRITE_CHUNK_REQ,
+ fbnic_fw_write_chunk_req_index,
+ fbnic_fw_parse_fw_write_chunk_req),
+ FBNIC_TLV_PARSER(FW_FINISH_UPGRADE_REQ,
+ fbnic_fw_finish_upgrade_req_index,
+ fbnic_fw_parse_fw_finish_upgrade_req),
+ FBNIC_TLV_PARSER(TSENE_READ_RESP,
+ fbnic_tsene_read_resp_index,
+ fbnic_fw_parse_tsene_read_resp),
FBNIC_TLV_MSG_ERROR
};
@@ -726,7 +1109,7 @@ next_page:
void fbnic_mbx_poll(struct fbnic_dev *fbd)
{
- fbnic_mbx_postinit(fbd);
+ fbnic_mbx_event(fbd);
fbnic_mbx_process_tx_msgs(fbd);
fbnic_mbx_process_rx_msgs(fbd);
@@ -734,60 +1117,103 @@ void fbnic_mbx_poll(struct fbnic_dev *fbd)
int fbnic_mbx_poll_tx_ready(struct fbnic_dev *fbd)
{
- struct fbnic_fw_mbx *tx_mbx;
- int attempts = 50;
+ unsigned long timeout = jiffies + 10 * HZ + 1;
+ int err, i;
- /* Immediate fail if BAR4 isn't there */
- if (!fbnic_fw_present(fbd))
- return -ENODEV;
+ do {
+ if (!time_is_after_jiffies(timeout))
+ return -ETIMEDOUT;
- tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];
- while (!tx_mbx->ready && --attempts) {
/* Force the firmware to trigger an interrupt response to
* avoid the mailbox getting stuck closed if the interrupt
* is reset.
*/
- fbnic_mbx_init_desc_ring(fbd, FBNIC_IPC_MBX_TX_IDX);
+ fbnic_mbx_reset_desc_ring(fbd, FBNIC_IPC_MBX_TX_IDX);
- msleep(200);
+ /* Immediate fail if BAR4 went away */
+ if (!fbnic_fw_present(fbd))
+ return -ENODEV;
+
+ msleep(20);
+ } while (!fbnic_mbx_event(fbd));
+
+ /* FW has shown signs of life. Enable DMA and start Tx/Rx */
+ for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
+ fbnic_mbx_init_desc_ring(fbd, i);
+
+ /* Request an update from the firmware. This should overwrite
+ * mgmt.version once we get the actual version from the firmware
+ * in the capabilities request message.
+ */
+ err = fbnic_fw_xmit_simple_msg(fbd, FBNIC_TLV_MSG_ID_HOST_CAP_REQ);
+ if (err)
+ goto clean_mbx;
+
+ /* Use "1" to indicate we entered the state waiting for a response */
+ fbd->fw_cap.running.mgmt.version = 1;
+
+ return 0;
+clean_mbx:
+ /* Cleanup Rx buffers and disable mailbox */
+ fbnic_mbx_clean(fbd);
+ return err;
+}
+
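+/* Fail a pending completion with -EPIPE and wake any waiter; used when the
+ * mailbox is being shut down and no response will ever be processed.
+ */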
+static void __fbnic_fw_evict_cmpl(struct fbnic_fw_completion *cmpl_data)
+{
+ cmpl_data->result = -EPIPE;
+ complete(&cmpl_data->done);
+}
+
+static void fbnic_mbx_evict_all_cmpl(struct fbnic_dev *fbd)
+{
+ int i;
+
+ for (i = 0; i < FBNIC_MBX_CMPL_SLOTS; i++) {
+ struct fbnic_fw_completion *cmpl_data = fbd->cmpl_data[i];
- fbnic_mbx_poll(fbd);
+ if (cmpl_data)
+ __fbnic_fw_evict_cmpl(cmpl_data);
}
- return attempts ? 0 : -ETIMEDOUT;
+ memset(fbd->cmpl_data, 0, sizeof(fbd->cmpl_data));
}
void fbnic_mbx_flush_tx(struct fbnic_dev *fbd)
{
+ unsigned long timeout = jiffies + 10 * HZ + 1;
struct fbnic_fw_mbx *tx_mbx;
- int attempts = 50;
- u8 count = 0;
-
- /* Nothing to do if there is no mailbox */
- if (!fbnic_fw_present(fbd))
- return;
+ u8 tail;
/* Record current Tx mailbox state */
tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];
- /* Nothing to do if mailbox never got to ready */
- if (!tx_mbx->ready)
- return;
+ spin_lock_irq(&fbd->fw_tx_lock);
+
+ /* Clear ready to prevent any further attempts to transmit */
+ tx_mbx->ready = false;
+
+ /* Read tail to determine the last tail state for the ring */
+ tail = tx_mbx->tail;
+
+ /* Flush any completions as we are no longer processing Rx */
+ fbnic_mbx_evict_all_cmpl(fbd);
+
+ spin_unlock_irq(&fbd->fw_tx_lock);
/* Give firmware time to process packet,
- * we will wait up to 10 seconds which is 50 waits of 200ms.
+ * we will wait up to 10 seconds which is 500 waits of 20ms.
*/
do {
u8 head = tx_mbx->head;
- if (head == tx_mbx->tail)
+ /* Tx ring is empty once head == tail */
+ if (head == tail)
break;
- msleep(200);
+ msleep(20);
fbnic_mbx_process_tx_msgs(fbd);
-
- count += (tx_mbx->head - head) % FBNIC_IPC_MBX_DESC_LEN;
- } while (count < FBNIC_IPC_MBX_DESC_LEN && --attempts);
+ } while (time_is_after_jiffies(timeout));
}
void fbnic_get_fw_ver_commit_str(struct fbnic_dev *fbd, char *fw_version,
@@ -802,3 +1228,33 @@ void fbnic_get_fw_ver_commit_str(struct fbnic_dev *fbd, char *fw_version,
fbnic_mk_full_fw_ver_str(mgmt->version, delim, mgmt->commit,
fw_version, str_sz);
}
+
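+/* Allocate a completion tracker for @msg_type with one reference held by
+ * the caller; release it with fbnic_fw_put_cmpl().
+ */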
+struct fbnic_fw_completion *fbnic_fw_alloc_cmpl(u32 msg_type)
+{
+ struct fbnic_fw_completion *cmpl;
+
+ cmpl = kzalloc(sizeof(*cmpl), GFP_KERNEL);
+ if (!cmpl)
+ return NULL;
+
+ cmpl->msg_type = msg_type;
+ init_completion(&cmpl->done);
+ kref_init(&cmpl->ref_count);
+
+ return cmpl;
+}
+
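+/* Detach the completion from its mailbox slot while holding the Tx lock */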
+void fbnic_fw_clear_cmpl(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *fw_cmpl)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&fbd->fw_tx_lock, flags);
+ fbnic_mbx_clear_cmpl_slot(fbd, fw_cmpl);
+ spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);
+}
+
+void fbnic_fw_put_cmpl(struct fbnic_fw_completion *fw_cmpl)
+{
+ kref_put(&fw_cmpl->ref_count, fbnic_fw_release_cmpl_data);
+}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.h b/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
index 7cd8841920e4..08bc4b918de7 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
@@ -42,23 +42,48 @@ struct fbnic_fw_cap {
u8 all_multi : 1;
u8 link_speed;
u8 link_fec;
+ u32 anti_rollback_version;
};
struct fbnic_fw_completion {
- struct {
- s32 millivolts;
- s32 millidegrees;
- } tsene;
+ u32 msg_type;
+ struct completion done;
+ struct kref ref_count;
+ int result;
+ union {
+ struct {
+ u32 offset;
+ u32 length;
+ } fw_update;
+ struct {
+ s32 millivolts;
+ s32 millidegrees;
+ } tsene;
+ } u;
};
void fbnic_mbx_init(struct fbnic_dev *fbd);
void fbnic_mbx_clean(struct fbnic_dev *fbd);
+int fbnic_mbx_set_cmpl(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *cmpl_data);
void fbnic_mbx_poll(struct fbnic_dev *fbd);
int fbnic_mbx_poll_tx_ready(struct fbnic_dev *fbd);
void fbnic_mbx_flush_tx(struct fbnic_dev *fbd);
int fbnic_fw_xmit_ownership_msg(struct fbnic_dev *fbd, bool take_ownership);
int fbnic_fw_init_heartbeat(struct fbnic_dev *fbd, bool poll);
void fbnic_fw_check_heartbeat(struct fbnic_dev *fbd);
+int fbnic_fw_xmit_fw_start_upgrade(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *cmpl_data,
+ unsigned int id, unsigned int len);
+int fbnic_fw_xmit_fw_write_chunk(struct fbnic_dev *fbd,
+ const u8 *data, u32 offset, u16 length,
+ int cancel_error);
+int fbnic_fw_xmit_tsene_read_msg(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *cmpl_data);
+struct fbnic_fw_completion *fbnic_fw_alloc_cmpl(u32 msg_type);
+void fbnic_fw_clear_cmpl(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *cmpl_data);
+void fbnic_fw_put_cmpl(struct fbnic_fw_completion *cmpl_data);
#define fbnic_mk_full_fw_ver_str(_rev_id, _delim, _commit, _str, _str_sz) \
do { \
@@ -74,6 +99,15 @@ do { \
#define fbnic_mk_fw_ver_str(_rev_id, _str) \
fbnic_mk_full_fw_ver_str(_rev_id, "", "", _str, sizeof(_str))
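+/* Flash (QSPI) sections that can be targeted by a start upgrade request */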
+enum {
+ QSPI_SECTION_CMRT = 0,
+ QSPI_SECTION_CONTROL_FW = 1,
+ QSPI_SECTION_UCODE = 2,
+ QSPI_SECTION_OPTION_ROM = 3,
+ QSPI_SECTION_USER = 4,
+ QSPI_SECTION_INVALID,
+};
+
#define FW_HEARTBEAT_PERIOD (10 * HZ)
enum {
@@ -83,6 +117,14 @@ enum {
FBNIC_TLV_MSG_ID_OWNERSHIP_RESP = 0x13,
FBNIC_TLV_MSG_ID_HEARTBEAT_REQ = 0x14,
FBNIC_TLV_MSG_ID_HEARTBEAT_RESP = 0x15,
+ FBNIC_TLV_MSG_ID_FW_START_UPGRADE_REQ = 0x22,
+ FBNIC_TLV_MSG_ID_FW_START_UPGRADE_RESP = 0x23,
+ FBNIC_TLV_MSG_ID_FW_WRITE_CHUNK_REQ = 0x24,
+ FBNIC_TLV_MSG_ID_FW_WRITE_CHUNK_RESP = 0x25,
+ FBNIC_TLV_MSG_ID_FW_FINISH_UPGRADE_REQ = 0x28,
+ FBNIC_TLV_MSG_ID_FW_FINISH_UPGRADE_RESP = 0x29,
+ FBNIC_TLV_MSG_ID_TSENE_READ_REQ = 0x3C,
+ FBNIC_TLV_MSG_ID_TSENE_READ_RESP = 0x3D,
};
#define FBNIC_FW_CAP_RESP_VERSION_MAJOR CSR_GENMASK(31, 24)
@@ -108,6 +150,7 @@ enum {
FBNIC_FW_CAP_RESP_STORED_CMRT_COMMIT_STR = 0x10,
FBNIC_FW_CAP_RESP_UEFI_VERSION = 0x11,
FBNIC_FW_CAP_RESP_UEFI_COMMIT_STR = 0x12,
+ FBNIC_FW_CAP_RESP_ANTI_ROLLBACK_VERSION = 0x15,
FBNIC_FW_CAP_RESP_MSG_MAX
};
@@ -125,7 +168,35 @@ enum {
};
enum {
+ FBNIC_FW_TSENE_THERM = 0x0,
+ FBNIC_FW_TSENE_VOLT = 0x1,
+ FBNIC_FW_TSENE_ERROR = 0x2,
+ FBNIC_FW_TSENE_MSG_MAX
+};
+
+enum {
FBNIC_FW_OWNERSHIP_FLAG = 0x0,
FBNIC_FW_OWNERSHIP_MSG_MAX
};
+
+enum {
+ FBNIC_FW_START_UPGRADE_ERROR = 0x0,
+ FBNIC_FW_START_UPGRADE_SECTION = 0x1,
+ FBNIC_FW_START_UPGRADE_IMAGE_LENGTH = 0x2,
+ FBNIC_FW_START_UPGRADE_MSG_MAX
+};
+
+enum {
+ FBNIC_FW_WRITE_CHUNK_OFFSET = 0x0,
+ FBNIC_FW_WRITE_CHUNK_LENGTH = 0x1,
+ FBNIC_FW_WRITE_CHUNK_DATA = 0x2,
+ FBNIC_FW_WRITE_CHUNK_ERROR = 0x3,
+ FBNIC_FW_WRITE_CHUNK_MSG_MAX
+};
+
+enum {
+ FBNIC_FW_FINISH_UPGRADE_ERROR = 0x0,
+ FBNIC_FW_FINISH_UPGRADE_MSG_MAX
+};
+
#endif /* _FBNIC_FW_H_ */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.c b/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.c
index 89ac6bc8c7fc..4223d8100e64 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.c
@@ -70,6 +70,100 @@ static void fbnic_hw_stat_rd64(struct fbnic_dev *fbd, u32 reg, s32 offset,
stat->u.old_reg_value_64 = new_reg_value;
}
+static void fbnic_reset_tmi_stats(struct fbnic_dev *fbd,
+ struct fbnic_tmi_stats *tmi)
+{
+ fbnic_hw_stat_rst32(fbd, FBNIC_TMI_DROP_PKTS, &tmi->drop.frames);
+ fbnic_hw_stat_rst64(fbd, FBNIC_TMI_DROP_BYTE_L, 1, &tmi->drop.bytes);
+
+ fbnic_hw_stat_rst32(fbd,
+ FBNIC_TMI_ILLEGAL_PTP_REQS,
+ &tmi->ptp_illegal_req);
+ fbnic_hw_stat_rst32(fbd, FBNIC_TMI_GOOD_PTP_TS, &tmi->ptp_good_ts);
+ fbnic_hw_stat_rst32(fbd, FBNIC_TMI_BAD_PTP_TS, &tmi->ptp_bad_ts);
+}
+
+static void fbnic_get_tmi_stats32(struct fbnic_dev *fbd,
+ struct fbnic_tmi_stats *tmi)
+{
+ fbnic_hw_stat_rd32(fbd, FBNIC_TMI_DROP_PKTS, &tmi->drop.frames);
+
+ fbnic_hw_stat_rd32(fbd,
+ FBNIC_TMI_ILLEGAL_PTP_REQS,
+ &tmi->ptp_illegal_req);
+ fbnic_hw_stat_rd32(fbd, FBNIC_TMI_GOOD_PTP_TS, &tmi->ptp_good_ts);
+ fbnic_hw_stat_rd32(fbd, FBNIC_TMI_BAD_PTP_TS, &tmi->ptp_bad_ts);
+}
+
+static void fbnic_get_tmi_stats(struct fbnic_dev *fbd,
+ struct fbnic_tmi_stats *tmi)
+{
+ fbnic_hw_stat_rd64(fbd, FBNIC_TMI_DROP_BYTE_L, 1, &tmi->drop.bytes);
+}
+
+static void fbnic_reset_tti_stats(struct fbnic_dev *fbd,
+ struct fbnic_tti_stats *tti)
+{
+ fbnic_hw_stat_rst32(fbd,
+ FBNIC_TCE_TTI_CM_DROP_PKTS,
+ &tti->cm_drop.frames);
+ fbnic_hw_stat_rst64(fbd,
+ FBNIC_TCE_TTI_CM_DROP_BYTE_L,
+ 1,
+ &tti->cm_drop.bytes);
+
+ fbnic_hw_stat_rst32(fbd,
+ FBNIC_TCE_TTI_FRAME_DROP_PKTS,
+ &tti->frame_drop.frames);
+ fbnic_hw_stat_rst64(fbd,
+ FBNIC_TCE_TTI_FRAME_DROP_BYTE_L,
+ 1,
+ &tti->frame_drop.bytes);
+
+ fbnic_hw_stat_rst32(fbd,
+ FBNIC_TCE_TBI_DROP_PKTS,
+ &tti->tbi_drop.frames);
+ fbnic_hw_stat_rst64(fbd,
+ FBNIC_TCE_TBI_DROP_BYTE_L,
+ 1,
+ &tti->tbi_drop.bytes);
+}
+
+static void fbnic_get_tti_stats32(struct fbnic_dev *fbd,
+ struct fbnic_tti_stats *tti)
+{
+ fbnic_hw_stat_rd32(fbd,
+ FBNIC_TCE_TTI_CM_DROP_PKTS,
+ &tti->cm_drop.frames);
+
+ fbnic_hw_stat_rd32(fbd,
+ FBNIC_TCE_TTI_FRAME_DROP_PKTS,
+ &tti->frame_drop.frames);
+
+ fbnic_hw_stat_rd32(fbd,
+ FBNIC_TCE_TBI_DROP_PKTS,
+ &tti->tbi_drop.frames);
+}
+
+static void fbnic_get_tti_stats(struct fbnic_dev *fbd,
+ struct fbnic_tti_stats *tti)
+{
+ fbnic_hw_stat_rd64(fbd,
+ FBNIC_TCE_TTI_CM_DROP_BYTE_L,
+ 1,
+ &tti->cm_drop.bytes);
+
+ fbnic_hw_stat_rd64(fbd,
+ FBNIC_TCE_TTI_FRAME_DROP_BYTE_L,
+ 1,
+ &tti->frame_drop.bytes);
+
+ fbnic_hw_stat_rd64(fbd,
+ FBNIC_TCE_TBI_DROP_BYTE_L,
+ 1,
+ &tti->tbi_drop.bytes);
+}
+
static void fbnic_reset_rpc_stats(struct fbnic_dev *fbd,
struct fbnic_rpc_stats *rpc)
{
@@ -117,6 +211,221 @@ static void fbnic_get_rpc_stats32(struct fbnic_dev *fbd,
&rpc->ovr_size_err);
}
+static void fbnic_reset_rxb_fifo_stats(struct fbnic_dev *fbd, int i,
+ struct fbnic_rxb_fifo_stats *fifo)
+{
+ fbnic_hw_stat_rst32(fbd, FBNIC_RXB_DROP_FRMS_STS(i),
+ &fifo->drop.frames);
+ fbnic_hw_stat_rst64(fbd, FBNIC_RXB_DROP_BYTES_STS_L(i), 1,
+ &fifo->drop.bytes);
+
+ fbnic_hw_stat_rst32(fbd, FBNIC_RXB_TRUN_FRMS_STS(i),
+ &fifo->trunc.frames);
+ fbnic_hw_stat_rst64(fbd, FBNIC_RXB_TRUN_BYTES_STS_L(i), 1,
+ &fifo->trunc.bytes);
+
+ fbnic_hw_stat_rst32(fbd, FBNIC_RXB_TRANS_DROP_STS(i),
+ &fifo->trans_drop);
+ fbnic_hw_stat_rst32(fbd, FBNIC_RXB_TRANS_ECN_STS(i),
+ &fifo->trans_ecn);
+
+ fifo->level.u.old_reg_value_32 = 0;
+}
+
+static void fbnic_reset_rxb_enq_stats(struct fbnic_dev *fbd, int i,
+ struct fbnic_rxb_enqueue_stats *enq)
+{
+ fbnic_hw_stat_rst32(fbd, FBNIC_RXB_DRBO_FRM_CNT_SRC(i),
+ &enq->drbo.frames);
+ fbnic_hw_stat_rst64(fbd, FBNIC_RXB_DRBO_BYTE_CNT_SRC_L(i), 4,
+ &enq->drbo.bytes);
+
+ fbnic_hw_stat_rst32(fbd, FBNIC_RXB_INTEGRITY_ERR(i),
+ &enq->integrity_err);
+ fbnic_hw_stat_rst32(fbd, FBNIC_RXB_MAC_ERR(i),
+ &enq->mac_err);
+ fbnic_hw_stat_rst32(fbd, FBNIC_RXB_PARSER_ERR(i),
+ &enq->parser_err);
+ fbnic_hw_stat_rst32(fbd, FBNIC_RXB_FRM_ERR(i),
+ &enq->frm_err);
+}
+
+static void fbnic_reset_rxb_deq_stats(struct fbnic_dev *fbd, int i,
+ struct fbnic_rxb_dequeue_stats *deq)
+{
+ fbnic_hw_stat_rst32(fbd, FBNIC_RXB_INTF_FRM_CNT_DST(i),
+ &deq->intf.frames);
+ fbnic_hw_stat_rst64(fbd, FBNIC_RXB_INTF_BYTE_CNT_DST_L(i), 4,
+ &deq->intf.bytes);
+
+ fbnic_hw_stat_rst32(fbd, FBNIC_RXB_PBUF_FRM_CNT_DST(i),
+ &deq->pbuf.frames);
+ fbnic_hw_stat_rst64(fbd, FBNIC_RXB_PBUF_BYTE_CNT_DST_L(i), 4,
+ &deq->pbuf.bytes);
+}
+
+static void fbnic_reset_rxb_stats(struct fbnic_dev *fbd,
+ struct fbnic_rxb_stats *rxb)
+{
+ int i;
+
+ for (i = 0; i < FBNIC_RXB_FIFO_INDICES; i++)
+ fbnic_reset_rxb_fifo_stats(fbd, i, &rxb->fifo[i]);
+
+ for (i = 0; i < FBNIC_RXB_INTF_INDICES; i++) {
+ fbnic_reset_rxb_enq_stats(fbd, i, &rxb->enq[i]);
+ fbnic_reset_rxb_deq_stats(fbd, i, &rxb->deq[i]);
+ }
+}
+
+static void fbnic_get_rxb_fifo_stats32(struct fbnic_dev *fbd, int i,
+ struct fbnic_rxb_fifo_stats *fifo)
+{
+ fbnic_hw_stat_rd32(fbd, FBNIC_RXB_DROP_FRMS_STS(i),
+ &fifo->drop.frames);
+ fbnic_hw_stat_rd32(fbd, FBNIC_RXB_TRUN_FRMS_STS(i),
+ &fifo->trunc.frames);
+
+ fbnic_hw_stat_rd32(fbd, FBNIC_RXB_TRANS_DROP_STS(i),
+ &fifo->trans_drop);
+ fbnic_hw_stat_rd32(fbd, FBNIC_RXB_TRANS_ECN_STS(i),
+ &fifo->trans_ecn);
+
+ fifo->level.value = rd32(fbd, FBNIC_RXB_PBUF_FIFO_LEVEL(i));
+}
+
+static void fbnic_get_rxb_fifo_stats(struct fbnic_dev *fbd, int i,
+ struct fbnic_rxb_fifo_stats *fifo)
+{
+ fbnic_hw_stat_rd64(fbd, FBNIC_RXB_DROP_BYTES_STS_L(i), 1,
+ &fifo->drop.bytes);
+ fbnic_hw_stat_rd64(fbd, FBNIC_RXB_TRUN_BYTES_STS_L(i), 1,
+ &fifo->trunc.bytes);
+
+ fbnic_get_rxb_fifo_stats32(fbd, i, fifo);
+}
+
+static void fbnic_get_rxb_enq_stats32(struct fbnic_dev *fbd, int i,
+ struct fbnic_rxb_enqueue_stats *enq)
+{
+ fbnic_hw_stat_rd32(fbd, FBNIC_RXB_DRBO_FRM_CNT_SRC(i),
+ &enq->drbo.frames);
+
+ fbnic_hw_stat_rd32(fbd, FBNIC_RXB_INTEGRITY_ERR(i),
+ &enq->integrity_err);
+ fbnic_hw_stat_rd32(fbd, FBNIC_RXB_MAC_ERR(i),
+ &enq->mac_err);
+ fbnic_hw_stat_rd32(fbd, FBNIC_RXB_PARSER_ERR(i),
+ &enq->parser_err);
+ fbnic_hw_stat_rd32(fbd, FBNIC_RXB_FRM_ERR(i),
+ &enq->frm_err);
+}
+
+static void fbnic_get_rxb_enq_stats(struct fbnic_dev *fbd, int i,
+ struct fbnic_rxb_enqueue_stats *enq)
+{
+ fbnic_hw_stat_rd64(fbd, FBNIC_RXB_DRBO_BYTE_CNT_SRC_L(i), 4,
+ &enq->drbo.bytes);
+
+ fbnic_get_rxb_enq_stats32(fbd, i, enq);
+}
+
+static void fbnic_get_rxb_deq_stats32(struct fbnic_dev *fbd, int i,
+ struct fbnic_rxb_dequeue_stats *deq)
+{
+ fbnic_hw_stat_rd32(fbd, FBNIC_RXB_INTF_FRM_CNT_DST(i),
+ &deq->intf.frames);
+ fbnic_hw_stat_rd32(fbd, FBNIC_RXB_PBUF_FRM_CNT_DST(i),
+ &deq->pbuf.frames);
+}
+
+static void fbnic_get_rxb_deq_stats(struct fbnic_dev *fbd, int i,
+ struct fbnic_rxb_dequeue_stats *deq)
+{
+ fbnic_hw_stat_rd64(fbd, FBNIC_RXB_INTF_BYTE_CNT_DST_L(i), 4,
+ &deq->intf.bytes);
+ fbnic_hw_stat_rd64(fbd, FBNIC_RXB_PBUF_BYTE_CNT_DST_L(i), 4,
+ &deq->pbuf.bytes);
+
+ fbnic_get_rxb_deq_stats32(fbd, i, deq);
+}
+
+static void fbnic_get_rxb_stats32(struct fbnic_dev *fbd,
+ struct fbnic_rxb_stats *rxb)
+{
+ int i;
+
+ for (i = 0; i < FBNIC_RXB_FIFO_INDICES; i++)
+ fbnic_get_rxb_fifo_stats32(fbd, i, &rxb->fifo[i]);
+
+ for (i = 0; i < FBNIC_RXB_INTF_INDICES; i++) {
+ fbnic_get_rxb_enq_stats32(fbd, i, &rxb->enq[i]);
+ fbnic_get_rxb_deq_stats32(fbd, i, &rxb->deq[i]);
+ }
+}
+
+static void fbnic_get_rxb_stats(struct fbnic_dev *fbd,
+ struct fbnic_rxb_stats *rxb)
+{
+ int i;
+
+ for (i = 0; i < FBNIC_RXB_FIFO_INDICES; i++)
+ fbnic_get_rxb_fifo_stats(fbd, i, &rxb->fifo[i]);
+
+ for (i = 0; i < FBNIC_RXB_INTF_INDICES; i++) {
+ fbnic_get_rxb_enq_stats(fbd, i, &rxb->enq[i]);
+ fbnic_get_rxb_deq_stats(fbd, i, &rxb->deq[i]);
+ }
+}
+
+static void fbnic_reset_hw_rxq_stats(struct fbnic_dev *fbd,
+ struct fbnic_hw_q_stats *hw_q)
+{
+ int i;
+
+ for (i = 0; i < fbd->max_num_queues; i++, hw_q++) {
+ u32 base = FBNIC_QUEUE(i);
+
+ fbnic_hw_stat_rst32(fbd,
+ base + FBNIC_QUEUE_RDE_PKT_ERR_CNT,
+ &hw_q->rde_pkt_err);
+ fbnic_hw_stat_rst32(fbd,
+ base + FBNIC_QUEUE_RDE_CQ_DROP_CNT,
+ &hw_q->rde_pkt_cq_drop);
+ fbnic_hw_stat_rst32(fbd,
+ base + FBNIC_QUEUE_RDE_BDQ_DROP_CNT,
+ &hw_q->rde_pkt_bdq_drop);
+ }
+}
+
+static void fbnic_get_hw_rxq_stats32(struct fbnic_dev *fbd,
+ struct fbnic_hw_q_stats *hw_q)
+{
+ int i;
+
+ for (i = 0; i < fbd->max_num_queues; i++, hw_q++) {
+ u32 base = FBNIC_QUEUE(i);
+
+ fbnic_hw_stat_rd32(fbd,
+ base + FBNIC_QUEUE_RDE_PKT_ERR_CNT,
+ &hw_q->rde_pkt_err);
+ fbnic_hw_stat_rd32(fbd,
+ base + FBNIC_QUEUE_RDE_CQ_DROP_CNT,
+ &hw_q->rde_pkt_cq_drop);
+ fbnic_hw_stat_rd32(fbd,
+ base + FBNIC_QUEUE_RDE_BDQ_DROP_CNT,
+ &hw_q->rde_pkt_bdq_drop);
+ }
+}
+
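+/* Snapshot the per-queue RDE drop/error counters under the stats lock */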
+void fbnic_get_hw_q_stats(struct fbnic_dev *fbd,
+ struct fbnic_hw_q_stats *hw_q)
+{
+ spin_lock(&fbd->hw_stats_lock);
+ fbnic_get_hw_rxq_stats32(fbd, hw_q);
+ spin_unlock(&fbd->hw_stats_lock);
+}
+
static void fbnic_reset_pcie_stats_asic(struct fbnic_dev *fbd,
struct fbnic_pcie_stats *pcie)
{
@@ -203,18 +512,40 @@ static void fbnic_get_pcie_stats_asic64(struct fbnic_dev *fbd,
void fbnic_reset_hw_stats(struct fbnic_dev *fbd)
{
+ spin_lock(&fbd->hw_stats_lock);
+ fbnic_reset_tmi_stats(fbd, &fbd->hw_stats.tmi);
+ fbnic_reset_tti_stats(fbd, &fbd->hw_stats.tti);
fbnic_reset_rpc_stats(fbd, &fbd->hw_stats.rpc);
+ fbnic_reset_rxb_stats(fbd, &fbd->hw_stats.rxb);
+ fbnic_reset_hw_rxq_stats(fbd, fbd->hw_stats.hw_q);
fbnic_reset_pcie_stats_asic(fbd, &fbd->hw_stats.pcie);
+ spin_unlock(&fbd->hw_stats_lock);
}
-void fbnic_get_hw_stats32(struct fbnic_dev *fbd)
+static void __fbnic_get_hw_stats32(struct fbnic_dev *fbd)
{
+ fbnic_get_tmi_stats32(fbd, &fbd->hw_stats.tmi);
+ fbnic_get_tti_stats32(fbd, &fbd->hw_stats.tti);
fbnic_get_rpc_stats32(fbd, &fbd->hw_stats.rpc);
+ fbnic_get_rxb_stats32(fbd, &fbd->hw_stats.rxb);
+ fbnic_get_hw_rxq_stats32(fbd, fbd->hw_stats.hw_q);
+}
+
+void fbnic_get_hw_stats32(struct fbnic_dev *fbd)
+{
+ spin_lock(&fbd->hw_stats_lock);
+ __fbnic_get_hw_stats32(fbd);
+ spin_unlock(&fbd->hw_stats_lock);
}
void fbnic_get_hw_stats(struct fbnic_dev *fbd)
{
- fbnic_get_hw_stats32(fbd);
+ spin_lock(&fbd->hw_stats_lock);
+ __fbnic_get_hw_stats32(fbd);
+ fbnic_get_tmi_stats(fbd, &fbd->hw_stats.tmi);
+ fbnic_get_tti_stats(fbd, &fbd->hw_stats.tti);
+ fbnic_get_rxb_stats(fbd, &fbd->hw_stats.rxb);
fbnic_get_pcie_stats_asic64(fbd, &fbd->hw_stats.pcie);
+ spin_unlock(&fbd->hw_stats_lock);
}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h b/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h
index 78df56b87745..07e54bb75bf3 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h
@@ -17,6 +17,11 @@ struct fbnic_stat_counter {
bool reported;
};
+struct fbnic_hw_stat {
+ struct fbnic_stat_counter frames;
+ struct fbnic_stat_counter bytes;
+};
+
struct fbnic_eth_mac_stats {
struct fbnic_stat_counter FramesTransmittedOK;
struct fbnic_stat_counter FramesReceivedOK;
@@ -37,12 +42,49 @@ struct fbnic_mac_stats {
struct fbnic_eth_mac_stats eth_mac;
};
+struct fbnic_tmi_stats {
+ struct fbnic_hw_stat drop;
+ struct fbnic_stat_counter ptp_illegal_req, ptp_good_ts, ptp_bad_ts;
+};
+
+struct fbnic_tti_stats {
+ struct fbnic_hw_stat cm_drop, frame_drop, tbi_drop;
+};
+
struct fbnic_rpc_stats {
struct fbnic_stat_counter unkn_etype, unkn_ext_hdr;
struct fbnic_stat_counter ipv4_frag, ipv6_frag, ipv4_esp, ipv6_esp;
struct fbnic_stat_counter tcp_opt_err, out_of_hdr_err, ovr_size_err;
};
+struct fbnic_rxb_enqueue_stats {
+ struct fbnic_hw_stat drbo;
+ struct fbnic_stat_counter integrity_err, mac_err;
+ struct fbnic_stat_counter parser_err, frm_err;
+};
+
+struct fbnic_rxb_fifo_stats {
+ struct fbnic_hw_stat drop, trunc;
+ struct fbnic_stat_counter trans_drop, trans_ecn;
+ struct fbnic_stat_counter level;
+};
+
+struct fbnic_rxb_dequeue_stats {
+ struct fbnic_hw_stat intf, pbuf;
+};
+
+struct fbnic_rxb_stats {
+ struct fbnic_rxb_enqueue_stats enq[FBNIC_RXB_ENQUEUE_INDICES];
+ struct fbnic_rxb_fifo_stats fifo[FBNIC_RXB_FIFO_INDICES];
+ struct fbnic_rxb_dequeue_stats deq[FBNIC_RXB_DEQUEUE_INDICES];
+};
+
+struct fbnic_hw_q_stats {
+ struct fbnic_stat_counter rde_pkt_err;
+ struct fbnic_stat_counter rde_pkt_cq_drop;
+ struct fbnic_stat_counter rde_pkt_bdq_drop;
+};
+
struct fbnic_pcie_stats {
struct fbnic_stat_counter ob_rd_tlp, ob_rd_dword;
struct fbnic_stat_counter ob_wr_tlp, ob_wr_dword;
@@ -55,13 +97,19 @@ struct fbnic_pcie_stats {
struct fbnic_hw_stats {
struct fbnic_mac_stats mac;
+ struct fbnic_tmi_stats tmi;
+ struct fbnic_tti_stats tti;
struct fbnic_rpc_stats rpc;
+ struct fbnic_rxb_stats rxb;
+ struct fbnic_hw_q_stats hw_q[FBNIC_MAX_QUEUES];
struct fbnic_pcie_stats pcie;
};
u64 fbnic_stat_rd64(struct fbnic_dev *fbd, u32 reg, u32 offset);
void fbnic_reset_hw_stats(struct fbnic_dev *fbd);
+void fbnic_get_hw_q_stats(struct fbnic_dev *fbd,
+ struct fbnic_hw_q_stats *hw_q);
void fbnic_get_hw_stats32(struct fbnic_dev *fbd);
void fbnic_get_hw_stats(struct fbnic_dev *fbd);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_hwmon.c b/drivers/net/ethernet/meta/fbnic/fbnic_hwmon.c
index bcd1086e3768..def8598aceec 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_hwmon.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_hwmon.c
@@ -66,7 +66,7 @@ void fbnic_hwmon_register(struct fbnic_dev *fbd)
if (IS_ERR(fbd->hwmon)) {
dev_notice(fbd->dev,
"Failed to register hwmon device %pe\n",
- fbd->hwmon);
+ fbd->hwmon);
fbd->hwmon = NULL;
}
}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_irq.c b/drivers/net/ethernet/meta/fbnic/fbnic_irq.c
index 914362195920..1c88a2bf3a7a 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_irq.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_irq.c
@@ -19,69 +19,105 @@ static irqreturn_t fbnic_fw_msix_intr(int __always_unused irq, void *data)
return IRQ_HANDLED;
}
+static int __fbnic_fw_enable_mbx(struct fbnic_dev *fbd, int vector)
+{
+ int err;
+
+ /* Initialize mailbox and attempt to poll it into ready state */
+ fbnic_mbx_init(fbd);
+ err = fbnic_mbx_poll_tx_ready(fbd);
+ if (err) {
+ dev_warn(fbd->dev, "FW mailbox did not enter ready state\n");
+ return err;
+ }
+
+ /* Enable interrupt and unmask the vector */
+ enable_irq(vector);
+ fbnic_wr32(fbd, FBNIC_INTR_MASK_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY);
+
+ return 0;
+}
+
/**
- * fbnic_fw_enable_mbx - Configure and initialize Firmware Mailbox
+ * fbnic_fw_request_mbx - Configure and initialize Firmware Mailbox
* @fbd: Pointer to device to initialize
*
- * This function will initialize the firmware mailbox rings, enable the IRQ
- * and initialize the communication between the Firmware and the host. The
- * firmware is expected to respond to the initialization by sending an
- * interrupt essentially notifying the host that it has seen the
- * initialization and is now synced up.
+ * This function will allocate the IRQ and then reinitialize the mailbox
+ * starting communication between the host and firmware.
*
* Return: non-zero on failure.
**/
-int fbnic_fw_enable_mbx(struct fbnic_dev *fbd)
+int fbnic_fw_request_mbx(struct fbnic_dev *fbd)
{
- u32 vector = fbd->fw_msix_vector;
- int err;
+ struct pci_dev *pdev = to_pci_dev(fbd->dev);
+ int vector, err;
+
+ WARN_ON(fbd->fw_msix_vector);
+
+ vector = pci_irq_vector(pdev, FBNIC_FW_MSIX_ENTRY);
+ if (vector < 0)
+ return vector;
/* Request the IRQ for FW Mailbox vector. */
err = request_threaded_irq(vector, NULL, &fbnic_fw_msix_intr,
- IRQF_ONESHOT, dev_name(fbd->dev), fbd);
+ IRQF_ONESHOT | IRQF_NO_AUTOEN,
+ dev_name(fbd->dev), fbd);
if (err)
return err;
/* Initialize mailbox and attempt to poll it into ready state */
- fbnic_mbx_init(fbd);
- err = fbnic_mbx_poll_tx_ready(fbd);
- if (err) {
- dev_warn(fbd->dev, "FW mailbox did not enter ready state\n");
+ err = __fbnic_fw_enable_mbx(fbd, vector);
+ if (err)
free_irq(vector, fbd);
- return err;
- }
- /* Enable interrupts */
- fbnic_wr32(fbd, FBNIC_INTR_MASK_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY);
+ fbd->fw_msix_vector = vector;
- return 0;
+ return err;
}
/**
- * fbnic_fw_disable_mbx - Disable mailbox and place it in standby state
- * @fbd: Pointer to device to disable
+ * fbnic_fw_disable_mbx - Temporarily place mailbox in standby state
+ * @fbd: Pointer to device
*
- * This function will disable the mailbox interrupt, free any messages still
- * in the mailbox and place it into a standby state. The firmware is
- * expected to see the update and assume that the host is in the reset state.
+ * Shut down the mailbox by notifying the firmware to stop sending us logs, mask
+ * and synchronize the IRQ, and then clean up the rings.
**/
-void fbnic_fw_disable_mbx(struct fbnic_dev *fbd)
+static void fbnic_fw_disable_mbx(struct fbnic_dev *fbd)
{
- /* Disable interrupt and free vector */
- fbnic_wr32(fbd, FBNIC_INTR_MASK_SET(0), 1u << FBNIC_FW_MSIX_ENTRY);
+ /* Disable interrupt and synchronize the IRQ */
+ disable_irq(fbd->fw_msix_vector);
- /* Free the vector */
- free_irq(fbd->fw_msix_vector, fbd);
+ /* Mask the vector */
+ fbnic_wr32(fbd, FBNIC_INTR_MASK_SET(0), 1u << FBNIC_FW_MSIX_ENTRY);
/* Make sure disabling logs message is sent, must be done here to
* avoid risk of completing without a running interrupt.
*/
fbnic_mbx_flush_tx(fbd);
-
- /* Reset the mailboxes to the initialized state */
fbnic_mbx_clean(fbd);
}
+/**
+ * fbnic_fw_free_mbx - Disable mailbox and place it in standby state
+ * @fbd: Pointer to device to disable
+ *
+ * This function will disable the mailbox interrupt, free any messages still
+ * in the mailbox and place it into a disabled state. The firmware is
+ * expected to see the update and assume that the host is in the reset state.
+ **/
+void fbnic_fw_free_mbx(struct fbnic_dev *fbd)
+{
+ /* Vector has already been freed */
+ if (!fbd->fw_msix_vector)
+ return;
+
+ fbnic_fw_disable_mbx(fbd);
+
+ /* Free the vector */
+ free_irq(fbd->fw_msix_vector, fbd);
+ fbd->fw_msix_vector = 0;
+}
+
static irqreturn_t fbnic_pcs_msix_intr(int __always_unused irq, void *data)
{
struct fbnic_dev *fbd = data;
@@ -101,7 +137,7 @@ static irqreturn_t fbnic_pcs_msix_intr(int __always_unused irq, void *data)
}
/**
- * fbnic_pcs_irq_enable - Configure the MAC to enable it to advertise link
+ * fbnic_pcs_request_irq - Configure the PCS to enable it to advertise link
* @fbd: Pointer to device to initialize
*
* This function provides basic bringup for the MAC/PCS IRQ. For now the IRQ
@@ -109,41 +145,72 @@ static irqreturn_t fbnic_pcs_msix_intr(int __always_unused irq, void *data)
*
* Return: non-zero on failure.
**/
-int fbnic_pcs_irq_enable(struct fbnic_dev *fbd)
+int fbnic_pcs_request_irq(struct fbnic_dev *fbd)
{
- u32 vector = fbd->pcs_msix_vector;
- int err;
+ struct pci_dev *pdev = to_pci_dev(fbd->dev);
+ int vector, err;
+
+ WARN_ON(fbd->pcs_msix_vector);
+
+ vector = pci_irq_vector(pdev, FBNIC_PCS_MSIX_ENTRY);
+ if (vector < 0)
+ return vector;
- /* Request the IRQ for MAC link vector.
- * Map MAC cause to it, and unmask it
+ /* Request the IRQ for PCS link vector.
+ * Map PCS cause to it, and unmask it
*/
err = request_irq(vector, &fbnic_pcs_msix_intr, 0,
fbd->netdev->name, fbd);
if (err)
return err;
+ /* Map and enable interrupt, unmask vector after link is configured */
fbnic_wr32(fbd, FBNIC_INTR_MSIX_CTRL(FBNIC_INTR_MSIX_CTRL_PCS_IDX),
FBNIC_PCS_MSIX_ENTRY | FBNIC_INTR_MSIX_CTRL_ENABLE);
+ fbd->pcs_msix_vector = vector;
+
return 0;
}
/**
- * fbnic_pcs_irq_disable - Teardown the MAC IRQ to prepare for stopping
+ * fbnic_pcs_free_irq - Teardown the PCS IRQ to prepare for stopping
* @fbd: Pointer to device that is stopping
*
- * This function undoes the work done in fbnic_pcs_irq_enable and prepares
+ * This function undoes the work done in fbnic_pcs_request_irq and prepares
* the device to no longer receive traffic on the host interface.
**/
-void fbnic_pcs_irq_disable(struct fbnic_dev *fbd)
+void fbnic_pcs_free_irq(struct fbnic_dev *fbd)
{
+ /* Vector has already been freed */
+ if (!fbd->pcs_msix_vector)
+ return;
+
/* Disable interrupt */
fbnic_wr32(fbd, FBNIC_INTR_MSIX_CTRL(FBNIC_INTR_MSIX_CTRL_PCS_IDX),
FBNIC_PCS_MSIX_ENTRY);
+ fbnic_wrfl(fbd);
+
+ /* Synchronize IRQ to prevent race that would unmask vector */
+ synchronize_irq(fbd->pcs_msix_vector);
+
+ /* Mask the vector */
fbnic_wr32(fbd, FBNIC_INTR_MASK_SET(0), 1u << FBNIC_PCS_MSIX_ENTRY);
/* Free the vector */
free_irq(fbd->pcs_msix_vector, fbd);
+ fbd->pcs_msix_vector = 0;
+}
+
+void fbnic_synchronize_irq(struct fbnic_dev *fbd, int nr)
+{
+ struct pci_dev *pdev = to_pci_dev(fbd->dev);
+ int irq = pci_irq_vector(pdev, nr);
+
+ if (irq < 0)
+ return;
+
+ synchronize_irq(irq);
}
int fbnic_request_irq(struct fbnic_dev *fbd, int nr, irq_handler_t handler,
@@ -169,13 +236,52 @@ void fbnic_free_irq(struct fbnic_dev *fbd, int nr, void *data)
free_irq(irq, data);
}
+void fbnic_napi_name_irqs(struct fbnic_dev *fbd)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(fbd->napi_irq); i++)
+ snprintf(fbd->napi_irq[i].name,
+ sizeof(fbd->napi_irq[i].name),
+ "%s-TxRx-%u", fbd->netdev->name, i);
+}
+
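+/* Request the Tx/Rx IRQ for this NAPI vector on first use; later users of
+ * the same vector just take another reference on the already-mapped IRQ.
+ */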
+int fbnic_napi_request_irq(struct fbnic_dev *fbd,
+ struct fbnic_napi_vector *nv)
+{
+ struct fbnic_net *fbn = netdev_priv(fbd->netdev);
+ int i = fbnic_napi_idx(nv);
+ int err;
+
+ if (!fbd->napi_irq[i].users) {
+ err = fbnic_request_irq(fbd, nv->v_idx,
+ fbnic_msix_clean_rings, 0,
+ fbd->napi_irq[i].name,
+ &fbn->napi[i]);
+ if (err)
+ return err;
+ }
+
+ fbd->napi_irq[i].users++;
+ return 0;
+}
+
+void fbnic_napi_free_irq(struct fbnic_dev *fbd,
+ struct fbnic_napi_vector *nv)
+{
+ struct fbnic_net *fbn = netdev_priv(fbd->netdev);
+ int i = fbnic_napi_idx(nv);
+
+ if (--fbd->napi_irq[i].users)
+ return;
+
+ fbnic_free_irq(fbd, nv->v_idx, &fbn->napi[i]);
+}
+
void fbnic_free_irqs(struct fbnic_dev *fbd)
{
struct pci_dev *pdev = to_pci_dev(fbd->dev);
- fbd->pcs_msix_vector = 0;
- fbd->fw_msix_vector = 0;
-
fbd->num_irqs = 0;
pci_free_irq_vectors(pdev);
@@ -201,8 +307,5 @@ int fbnic_alloc_irqs(struct fbnic_dev *fbd)
fbd->num_irqs = num_irqs;
- fbd->pcs_msix_vector = pci_irq_vector(pdev, FBNIC_PCS_MSIX_ENTRY);
- fbd->fw_msix_vector = pci_irq_vector(pdev, FBNIC_FW_MSIX_ENTRY);
-
return 0;
}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
index 80b82ff12c4d..10e108c1fcd0 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
@@ -79,12 +79,6 @@ static void fbnic_mac_init_axi(struct fbnic_dev *fbd)
fbnic_init_readrq(fbd, FBNIC_QM_RNI_RBP_CTL, cls, readrq);
fbnic_init_mps(fbd, FBNIC_QM_RNI_RDE_CTL, cls, mps);
fbnic_init_mps(fbd, FBNIC_QM_RNI_RCM_CTL, cls, mps);
-
- /* Enable XALI AR/AW outbound */
- wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AW_CFG,
- FBNIC_PUL_OB_TLP_HDR_AW_CFG_BME);
- wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AR_CFG,
- FBNIC_PUL_OB_TLP_HDR_AR_CFG_BME);
}
static void fbnic_mac_init_qm(struct fbnic_dev *fbd)
@@ -686,25 +680,72 @@ fbnic_mac_get_eth_mac_stats(struct fbnic_dev *fbd, bool reset,
MAC_STAT_TX_BROADCAST);
}
-static int fbnic_mac_get_sensor_asic(struct fbnic_dev *fbd, int id, long *val)
+static int fbnic_mac_get_sensor_asic(struct fbnic_dev *fbd, int id,
+ long *val)
{
- struct fbnic_fw_completion fw_cmpl;
+ struct fbnic_fw_completion *fw_cmpl;
+ int err = 0, retries = 5;
s32 *sensor;
+ fw_cmpl = fbnic_fw_alloc_cmpl(FBNIC_TLV_MSG_ID_TSENE_READ_RESP);
+ if (!fw_cmpl)
+ return -ENOMEM;
+
switch (id) {
case FBNIC_SENSOR_TEMP:
- sensor = &fw_cmpl.tsene.millidegrees;
+ sensor = &fw_cmpl->u.tsene.millidegrees;
break;
case FBNIC_SENSOR_VOLTAGE:
- sensor = &fw_cmpl.tsene.millivolts;
+ sensor = &fw_cmpl->u.tsene.millivolts;
break;
default:
- return -EINVAL;
+ err = -EINVAL;
+ goto exit_free;
+ }
+
+ err = fbnic_fw_xmit_tsene_read_msg(fbd, fw_cmpl);
+ if (err) {
+ dev_err(fbd->dev,
+ "Failed to transmit TSENE read msg, err %d\n",
+ err);
+ goto exit_free;
+ }
+
+ /* Allow 2 seconds for reply, resend and try up to 5 times */
+ while (!wait_for_completion_timeout(&fw_cmpl->done, 2 * HZ)) {
+ retries--;
+
+ if (retries == 0) {
+ dev_err(fbd->dev,
+ "Timed out waiting for TSENE read\n");
+ err = -ETIMEDOUT;
+ goto exit_cleanup;
+ }
+
+ err = fbnic_fw_xmit_tsene_read_msg(fbd, NULL);
+ if (err) {
+ dev_err(fbd->dev,
+ "Failed to transmit TSENE read msg, err %d\n",
+ err);
+ goto exit_cleanup;
+ }
+ }
+
+ /* Handle error returned by firmware */
+ if (fw_cmpl->result) {
+ err = fw_cmpl->result;
+ dev_err(fbd->dev, "%s: Firmware returned error %d\n",
+ __func__, err);
+ goto exit_cleanup;
}
*val = *sensor;
+exit_cleanup:
+ fbnic_fw_clear_cmpl(fbd, fw_cmpl);
+exit_free:
+ fbnic_fw_put_cmpl(fw_cmpl);
- return 0;
+ return err;
}
static const struct fbnic_mac fbnic_mac_asic = {
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
index fc7d80db5fa6..aa812c63d5af 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
@@ -23,13 +23,7 @@ int __fbnic_open(struct fbnic_net *fbn)
if (err)
goto free_napi_vectors;
- err = netif_set_real_num_tx_queues(fbn->netdev,
- fbn->num_tx_queues);
- if (err)
- goto free_resources;
-
- err = netif_set_real_num_rx_queues(fbn->netdev,
- fbn->num_rx_queues);
+ err = fbnic_set_netif_queues(fbn);
if (err)
goto free_resources;
@@ -50,9 +44,10 @@ int __fbnic_open(struct fbnic_net *fbn)
if (err)
goto time_stop;
- err = fbnic_pcs_irq_enable(fbd);
+ err = fbnic_pcs_request_irq(fbd);
if (err)
goto time_stop;
+
/* Pull the BMC config and initialize the RPC */
fbnic_bmc_rpc_init(fbd);
fbnic_rss_reinit(fbd, fbn);
@@ -74,6 +69,8 @@ static int fbnic_open(struct net_device *netdev)
struct fbnic_net *fbn = netdev_priv(netdev);
int err;
+ fbnic_napi_name_irqs(fbn->fbd);
+
err = __fbnic_open(fbn);
if (!err)
fbnic_up(fbn);
@@ -86,11 +83,12 @@ static int fbnic_stop(struct net_device *netdev)
struct fbnic_net *fbn = netdev_priv(netdev);
fbnic_down(fbn);
- fbnic_pcs_irq_disable(fbn->fbd);
+ fbnic_pcs_free_irq(fbn->fbd);
fbnic_time_stop(fbn);
fbnic_fw_xmit_ownership_msg(fbn->fbd, false);
+ fbnic_reset_netif_queues(fbn);
fbnic_free_resources(fbn);
fbnic_free_napi_vectors(fbn);
@@ -406,12 +404,16 @@ static int fbnic_hwtstamp_set(struct net_device *netdev,
static void fbnic_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *stats64)
{
+ u64 rx_bytes, rx_packets, rx_dropped = 0, rx_errors = 0;
u64 tx_bytes, tx_packets, tx_dropped = 0;
- u64 rx_bytes, rx_packets, rx_dropped = 0;
struct fbnic_net *fbn = netdev_priv(dev);
+ struct fbnic_dev *fbd = fbn->fbd;
struct fbnic_queue_stats *stats;
+ u64 rx_over = 0, rx_missed = 0;
unsigned int start, i;
+ fbnic_get_hw_stats(fbd);
+
stats = &fbn->tx_stats;
tx_bytes = stats->bytes;
@@ -422,6 +424,12 @@ static void fbnic_get_stats64(struct net_device *dev,
stats64->tx_packets = tx_packets;
stats64->tx_dropped = tx_dropped;
+ /* Record drops from Tx HW Datapath */
+ tx_dropped += fbd->hw_stats.tmi.drop.frames.value +
+ fbd->hw_stats.tti.cm_drop.frames.value +
+ fbd->hw_stats.tti.frame_drop.frames.value +
+ fbd->hw_stats.tti.tbi_drop.frames.value;
+
for (i = 0; i < fbn->num_tx_queues; i++) {
struct fbnic_ring *txr = fbn->tx[i];
@@ -447,9 +455,34 @@ static void fbnic_get_stats64(struct net_device *dev,
rx_packets = stats->packets;
rx_dropped = stats->dropped;
+ spin_lock(&fbd->hw_stats_lock);
+ /* Record drops for the host FIFOs.
+ * 4: network to Host, 6: BMC to Host
+ * Exclude the BMC and MC FIFOs as those stats may contain drops
+ * due to unrelated items such as TCAM misses. They are still
+ * accessible through the ethtool stats.
+ */
+ i = FBNIC_RXB_FIFO_HOST;
+ rx_missed += fbd->hw_stats.rxb.fifo[i].drop.frames.value;
+ i = FBNIC_RXB_FIFO_BMC_TO_HOST;
+ rx_missed += fbd->hw_stats.rxb.fifo[i].drop.frames.value;
+
+ for (i = 0; i < fbd->max_num_queues; i++) {
+ /* Report packets dropped due to CQ/BDQ being full/empty */
+ rx_over += fbd->hw_stats.hw_q[i].rde_pkt_cq_drop.value;
+ rx_over += fbd->hw_stats.hw_q[i].rde_pkt_bdq_drop.value;
+
+ /* Report packets with errors */
+ rx_errors += fbd->hw_stats.hw_q[i].rde_pkt_err.value;
+ }
+ spin_unlock(&fbd->hw_stats_lock);
+
stats64->rx_bytes = rx_bytes;
stats64->rx_packets = rx_packets;
stats64->rx_dropped = rx_dropped;
+ stats64->rx_over_errors = rx_over;
+ stats64->rx_errors = rx_errors;
+ stats64->rx_missed_errors = rx_missed;
for (i = 0; i < fbn->num_rx_queues; i++) {
struct fbnic_ring *rxr = fbn->rx[i];
@@ -489,9 +522,11 @@ static void fbnic_get_queue_stats_rx(struct net_device *dev, int idx,
{
struct fbnic_net *fbn = netdev_priv(dev);
struct fbnic_ring *rxr = fbn->rx[idx];
+ struct fbnic_dev *fbd = fbn->fbd;
struct fbnic_queue_stats *stats;
+ u64 bytes, packets, alloc_fail;
+ u64 csum_complete, csum_none;
unsigned int start;
- u64 bytes, packets;
if (!rxr)
return;
@@ -501,10 +536,25 @@ static void fbnic_get_queue_stats_rx(struct net_device *dev, int idx,
start = u64_stats_fetch_begin(&stats->syncp);
bytes = stats->bytes;
packets = stats->packets;
+ alloc_fail = stats->rx.alloc_failed;
+ csum_complete = stats->rx.csum_complete;
+ csum_none = stats->rx.csum_none;
} while (u64_stats_fetch_retry(&stats->syncp, start));
rx->bytes = bytes;
rx->packets = packets;
+ rx->alloc_fail = alloc_fail;
+ rx->csum_complete = csum_complete;
+ rx->csum_none = csum_none;
+
+ fbnic_get_hw_q_stats(fbd, fbd->hw_stats.hw_q);
+
+ spin_lock(&fbd->hw_stats_lock);
+ rx->hw_drop_overruns = fbd->hw_stats.hw_q[idx].rde_pkt_cq_drop.value +
+ fbd->hw_stats.hw_q[idx].rde_pkt_bdq_drop.value;
+ rx->hw_drops = fbd->hw_stats.hw_q[idx].rde_pkt_err.value +
+ rx->hw_drop_overruns;
+ spin_unlock(&fbd->hw_stats_lock);
}
static void fbnic_get_queue_stats_tx(struct net_device *dev, int idx,
@@ -513,6 +563,7 @@ static void fbnic_get_queue_stats_tx(struct net_device *dev, int idx,
struct fbnic_net *fbn = netdev_priv(dev);
struct fbnic_ring *txr = fbn->tx[idx];
struct fbnic_queue_stats *stats;
+ u64 stop, wake, csum, lso;
unsigned int start;
u64 bytes, packets;
@@ -524,10 +575,18 @@ static void fbnic_get_queue_stats_tx(struct net_device *dev, int idx,
start = u64_stats_fetch_begin(&stats->syncp);
bytes = stats->bytes;
packets = stats->packets;
+ csum = stats->twq.csum_partial;
+ lso = stats->twq.lso;
+ stop = stats->twq.stop;
+ wake = stats->twq.wake;
} while (u64_stats_fetch_retry(&stats->syncp, start));
tx->bytes = bytes;
tx->packets = packets;
+ tx->needs_csum = csum + lso;
+ tx->hw_gso_wire_packets = lso;
+ tx->stop = stop;
+ tx->wake = wake;
}
static void fbnic_get_base_stats(struct net_device *dev,
@@ -538,9 +597,16 @@ static void fbnic_get_base_stats(struct net_device *dev,
tx->bytes = fbn->tx_stats.bytes;
tx->packets = fbn->tx_stats.packets;
+ tx->needs_csum = fbn->tx_stats.twq.csum_partial + fbn->tx_stats.twq.lso;
+ tx->hw_gso_wire_packets = fbn->tx_stats.twq.lso;
+ tx->stop = fbn->tx_stats.twq.stop;
+ tx->wake = fbn->tx_stats.twq.wake;
rx->bytes = fbn->rx_stats.bytes;
rx->packets = fbn->rx_stats.packets;
+ rx->alloc_fail = fbn->rx_stats.rx.alloc_failed;
+ rx->csum_complete = fbn->rx_stats.rx.csum_complete;
+ rx->csum_none = fbn->rx_stats.rx.csum_none;
}
static const struct netdev_stat_ops fbnic_stat_ops = {
@@ -591,7 +657,7 @@ void fbnic_netdev_free(struct fbnic_dev *fbd)
* Allocate and initialize the netdev and netdev private structure. Bind
* together the hardware, netdev, and pci data structures.
*
- * Return: 0 on success, negative on failure
+ * Return: Pointer to net_device on success, NULL on failure
**/
struct net_device *fbnic_netdev_alloc(struct fbnic_dev *fbd)
{
@@ -615,13 +681,16 @@ struct net_device *fbnic_netdev_alloc(struct fbnic_dev *fbd)
fbn->netdev = netdev;
fbn->fbd = fbd;
- INIT_LIST_HEAD(&fbn->napis);
fbn->txq_size = FBNIC_TXQ_SIZE_DEFAULT;
fbn->hpq_size = FBNIC_HPQ_SIZE_DEFAULT;
fbn->ppq_size = FBNIC_PPQ_SIZE_DEFAULT;
fbn->rcq_size = FBNIC_RCQ_SIZE_DEFAULT;
+ fbn->tx_usecs = FBNIC_TX_USECS_DEFAULT;
+ fbn->rx_usecs = FBNIC_RX_USECS_DEFAULT;
+ fbn->rx_max_frames = FBNIC_RX_FRAMES_DEFAULT;
+
default_queues = netif_get_num_default_rss_queues();
if (default_queues > fbd->max_num_queues)
default_queues = fbd->max_num_queues;
@@ -632,15 +701,32 @@ struct net_device *fbnic_netdev_alloc(struct fbnic_dev *fbd)
fbnic_rss_key_fill(fbn->rss_key);
fbnic_rss_init_en_mask(fbn);
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
+ netdev->gso_partial_features =
+ NETIF_F_GSO_GRE |
+ NETIF_F_GSO_GRE_CSUM |
+ NETIF_F_GSO_IPXIP4 |
+ NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM;
+
netdev->features |=
+ netdev->gso_partial_features |
+ FBNIC_TUN_GSO_FEATURES |
NETIF_F_RXHASH |
NETIF_F_SG |
NETIF_F_HW_CSUM |
- NETIF_F_RXCSUM;
+ NETIF_F_RXCSUM |
+ NETIF_F_TSO |
+ NETIF_F_TSO_ECN |
+ NETIF_F_TSO6 |
+ NETIF_F_GSO_PARTIAL |
+ NETIF_F_GSO_UDP_L4;
netdev->hw_features |= netdev->features;
netdev->vlan_features |= netdev->features;
netdev->hw_enc_features |= netdev->features;
+ netdev->features |= NETIF_F_NTUPLE;
netdev->min_mtu = IPV6_MIN_MTU;
netdev->max_mtu = FBNIC_MAX_JUMBO_FRAME_SIZE - ETH_HLEN;
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h
index b8417b300778..561837e80ec8 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h
@@ -11,10 +11,18 @@
#include "fbnic_rpc.h"
#include "fbnic_txrx.h"
+#define FBNIC_MAX_NAPI_VECTORS 128u
+#define FBNIC_MIN_RXD_PER_FRAME 2
+
+/* Natively supported tunnel GSO features (not thru GSO_PARTIAL) */
+#define FBNIC_TUN_GSO_FEATURES NETIF_F_GSO_IPXIP6
+
struct fbnic_net {
struct fbnic_ring *tx[FBNIC_MAX_TXQS];
struct fbnic_ring *rx[FBNIC_MAX_RXQS];
+ struct fbnic_napi_vector *napi[FBNIC_MAX_NAPI_VECTORS];
+
struct net_device *netdev;
struct fbnic_dev *fbd;
@@ -23,6 +31,11 @@ struct fbnic_net {
u32 ppq_size;
u32 rcq_size;
+ u16 rx_usecs;
+ u16 tx_usecs;
+
+ u32 rx_max_frames;
+
u16 num_napi;
struct phylink *phylink;
@@ -56,13 +69,12 @@ struct fbnic_net {
/* Time stamping filter config */
struct kernel_hwtstamp_config hwtstamp_config;
-
- struct list_head napis;
};
int __fbnic_open(struct fbnic_net *fbn);
void fbnic_up(struct fbnic_net *fbn);
void fbnic_down(struct fbnic_net *fbn);
+void fbnic_down_noidle(struct fbnic_net *fbn);
struct net_device *fbnic_netdev_alloc(struct fbnic_dev *fbd);
void fbnic_netdev_free(struct fbnic_dev *fbd);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_pci.c b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
index 32702dc4a066..249d3ef862d5 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
@@ -6,6 +6,7 @@
#include <linux/pci.h>
#include <linux/rtnetlink.h>
#include <linux/types.h>
+#include <net/devlink.h>
#include "fbnic.h"
#include "fbnic_drvinfo.h"
@@ -145,7 +146,7 @@ void fbnic_up(struct fbnic_net *fbn)
fbnic_service_task_start(fbn);
}
-static void fbnic_down_noidle(struct fbnic_net *fbn)
+void fbnic_down_noidle(struct fbnic_net *fbn)
{
fbnic_service_task_stop(fbn);
@@ -283,7 +284,7 @@ static int fbnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto free_irqs;
}
- err = fbnic_fw_enable_mbx(fbd);
+ err = fbnic_fw_request_mbx(fbd);
if (err) {
dev_err(&pdev->dev,
"Firmware mailbox initialization failure\n");
@@ -292,6 +293,7 @@ static int fbnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
fbnic_devlink_register(fbd);
fbnic_dbg_fbd_init(fbd);
+ spin_lock_init(&fbd->hw_stats_lock);
/* Capture snapshot of hardware stats so netdev can calculate delta */
fbnic_reset_hw_stats(fbd);
@@ -363,7 +365,7 @@ static void fbnic_remove(struct pci_dev *pdev)
fbnic_hwmon_unregister(fbd);
fbnic_dbg_fbd_exit(fbd);
fbnic_devlink_unregister(fbd);
- fbnic_fw_disable_mbx(fbd);
+ fbnic_fw_free_mbx(fbd);
fbnic_free_irqs(fbd);
fbnic_devlink_free(fbd);
@@ -387,7 +389,11 @@ static int fbnic_pm_suspend(struct device *dev)
rtnl_unlock();
null_uc_addr:
- fbnic_fw_disable_mbx(fbd);
+ devl_lock(priv_to_devlink(fbd));
+
+ fbnic_fw_free_mbx(fbd);
+
+ devl_unlock(priv_to_devlink(fbd));
/* Free the IRQs so they aren't trying to occupy sleeping CPUs */
fbnic_free_irqs(fbd);
@@ -419,11 +425,15 @@ static int __fbnic_pm_resume(struct device *dev)
fbd->mac->init_regs(fbd);
+ devl_lock(priv_to_devlink(fbd));
+
/* Re-enable mailbox */
- err = fbnic_fw_enable_mbx(fbd);
+ err = fbnic_fw_request_mbx(fbd);
if (err)
goto err_free_irqs;
+ devl_unlock(priv_to_devlink(fbd));
+
/* No netdev means there isn't a network interface to bring up */
if (fbnic_init_failure(fbd))
return 0;
@@ -438,15 +448,15 @@ static int __fbnic_pm_resume(struct device *dev)
if (netif_running(netdev)) {
err = __fbnic_open(fbn);
if (err)
- goto err_disable_mbx;
+ goto err_free_mbx;
}
rtnl_unlock();
return 0;
-err_disable_mbx:
+err_free_mbx:
rtnl_unlock();
- fbnic_fw_disable_mbx(fbd);
+ fbnic_fw_free_mbx(fbd);
err_free_irqs:
fbnic_free_irqs(fbd);
err_invalidate_uc_addr:
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c b/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c
index 1a5e1e719b30..860b02b22c15 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c
@@ -15,7 +15,7 @@ fbnic_pcs_to_net(struct phylink_pcs *pcs)
}
static void
-fbnic_phylink_pcs_get_state(struct phylink_pcs *pcs,
+fbnic_phylink_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
struct phylink_link_state *state)
{
struct fbnic_net *fbn = fbnic_pcs_to_net(pcs);
@@ -133,7 +133,6 @@ int fbnic_phylink_init(struct net_device *netdev)
struct fbnic_net *fbn = netdev_priv(netdev);
struct phylink *phylink;
- fbn->phylink_pcs.neg_mode = true;
fbn->phylink_pcs.ops = &fbnic_phylink_pcs_ops;
fbn->phylink_config.dev = &netdev->dev;
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c
index 908c098cd59e..8ff07b5562e3 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c
@@ -3,6 +3,7 @@
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
+#include <net/ipv6.h>
#include "fbnic.h"
#include "fbnic_netdev.h"
@@ -13,10 +14,11 @@ void fbnic_reset_indir_tbl(struct fbnic_net *fbn)
unsigned int num_rx = fbn->num_rx_queues;
unsigned int i;
- for (i = 0; i < FBNIC_RPC_RSS_TBL_SIZE; i++) {
+ if (netif_is_rxfh_configured(fbn->netdev))
+ return;
+
+ for (i = 0; i < FBNIC_RPC_RSS_TBL_SIZE; i++)
fbn->indir_tbl[0][i] = ethtool_rxfh_indir_default(i, num_rx);
- fbn->indir_tbl[1][i] = ethtool_rxfh_indir_default(i, num_rx);
- }
}
void fbnic_rss_key_fill(u32 *buffer)
@@ -59,7 +61,7 @@ void fbnic_rss_disable_hw(struct fbnic_dev *fbd)
#define FBNIC_FH_2_RSSEM_BIT(_fh, _rssem, _val) \
FIELD_PREP(FBNIC_RPC_ACT_TBL1_RSS_ENA_##_rssem, \
FIELD_GET(RXH_##_fh, _val))
-static u16 fbnic_flow_hash_2_rss_en_mask(struct fbnic_net *fbn, int flow_type)
+u16 fbnic_flow_hash_2_rss_en_mask(struct fbnic_net *fbn, int flow_type)
{
u32 flow_hash = fbn->rss_flow_hash[flow_type];
u32 rss_en_mask = 0;
@@ -697,6 +699,359 @@ void fbnic_write_tce_tcam(struct fbnic_dev *fbd)
__fbnic_write_tce_tcam(fbd);
}
+struct fbnic_ip_addr *__fbnic_ip4_sync(struct fbnic_dev *fbd,
+ struct fbnic_ip_addr *ip_addr,
+ const struct in_addr *addr,
+ const struct in_addr *mask)
+{
+ struct fbnic_ip_addr *avail_addr = NULL;
+ unsigned int i;
+
+ /* Scan from top of list to bottom, filling bottom up. */
+ for (i = 0; i < FBNIC_RPC_TCAM_IP_ADDR_NUM_ENTRIES; i++, ip_addr++) {
+ struct in6_addr *m = &ip_addr->mask;
+
+ if (ip_addr->state == FBNIC_TCAM_S_DISABLED) {
+ avail_addr = ip_addr;
+ continue;
+ }
+
+ if (ip_addr->version != 4)
+ continue;
+
+	/* Drop avail_addr if mask is a subset of our current mask.
+	 * This prevents us from inserting a longer prefix behind a
+	 * shorter one.
+	 *
+	 * The mask is stored as an inverted value, so as an example:
+	 * m	ffff ffff ffff ffff ffff ffff 0000 0000
+	 * mask	0000 0000 0000 0000 0000 0000 ffff ffff
+	 *
+	 * "m" and "mask" represent a typical IPv4 mask as stored in
+	 * the TCAM and as provided by the stack. The code below
+	 * should return a non-zero result if there is a 0 stored
+	 * anywhere in "m" where "mask" has a 0.
+	 */
+ if (~m->s6_addr32[3] & ~mask->s_addr) {
+ avail_addr = NULL;
+ continue;
+ }
+
+	 * Check to see if the stored mask contains fewer bits than
+	 * our new mask. The XOR below should only result in 0 if
+	 * "m" is masking exactly the bits that we are looking for in
+	 * our new "mask"; we eliminated the 0^0 case with the check
+	 * above.
+ *
+ * If it contains fewer bits we need to stop here, otherwise
+ * we might be adding an unreachable rule.
+ */
+ if (~(m->s6_addr32[3] ^ mask->s_addr))
+ break;
+
+ if (ip_addr->value.s6_addr32[3] == addr->s_addr) {
+ avail_addr = ip_addr;
+ break;
+ }
+ }
+
+ if (avail_addr && avail_addr->state == FBNIC_TCAM_S_DISABLED) {
+ ipv6_addr_set(&avail_addr->value, 0, 0, 0, addr->s_addr);
+ ipv6_addr_set(&avail_addr->mask, htonl(~0), htonl(~0),
+ htonl(~0), ~mask->s_addr);
+ avail_addr->version = 4;
+
+ avail_addr->state = FBNIC_TCAM_S_ADD;
+ }
+
+ return avail_addr;
+}
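
The two bitwise tests above are easier to follow on plain 32-bit words. Below is a minimal userspace sketch (not driver code) of how they behave for a hypothetical stored /16 entry and a new /24 filter; in the driver the first non-zero result clears the cached slot and the second stops the scan.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t m = ~UINT32_C(0xffff0000);	/* stored /16 entry, kept inverted */
	uint32_t mask = UINT32_C(0xffffff00);	/* new /24 filter from the stack   */

	/* Non-zero: the stored entry requires a bit the new filter ignores,
	 * so forget any cached free slot.
	 */
	printf("drop avail_addr: %#x\n", ~m & ~mask);

	/* Non-zero: the two masks do not describe the same prefix, so the
	 * scan has to stop here.
	 */
	printf("stop scan:       %#x\n", ~(m ^ mask));
	return 0;
}
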
+
+struct fbnic_ip_addr *__fbnic_ip6_sync(struct fbnic_dev *fbd,
+ struct fbnic_ip_addr *ip_addr,
+ const struct in6_addr *addr,
+ const struct in6_addr *mask)
+{
+ struct fbnic_ip_addr *avail_addr = NULL;
+ unsigned int i;
+
+ ip_addr = &ip_addr[FBNIC_RPC_TCAM_IP_ADDR_NUM_ENTRIES - 1];
+
+ /* Scan from bottom of list to top, filling top down. */
+ for (i = FBNIC_RPC_TCAM_IP_ADDR_NUM_ENTRIES; i--; ip_addr--) {
+ struct in6_addr *m = &ip_addr->mask;
+
+ if (ip_addr->state == FBNIC_TCAM_S_DISABLED) {
+ avail_addr = ip_addr;
+ continue;
+ }
+
+ if (ip_addr->version != 6)
+ continue;
+
+		/* Drop avail_addr if mask is a superset of our current mask.
+		 * This prevents us from inserting a longer prefix behind a
+		 * shorter one.
+		 *
+		 * The mask is stored as an inverted value, so as an example:
+		 * m	0000 0000 0000 0000 0000 0000 0000 0000
+		 * mask	ffff ffff ffff ffff ffff ffff ffff ffff
+		 *
+		 * "m" and "mask" represent a typical IPv6 mask as stored in
+		 * the TCAM and as provided by the stack. The code below
+		 * should return a non-zero result which will cause us
+		 * to drop the avail_addr value that might be cached,
+		 * to prevent us from inserting a v6 address behind it.
+		 */
+ if ((m->s6_addr32[0] & mask->s6_addr32[0]) |
+ (m->s6_addr32[1] & mask->s6_addr32[1]) |
+ (m->s6_addr32[2] & mask->s6_addr32[2]) |
+ (m->s6_addr32[3] & mask->s6_addr32[3])) {
+ avail_addr = NULL;
+ continue;
+ }
+
+		/* The previous test eliminated any overlap between the
+		 * two values, so now we need to check for gaps.
+		 *
+		 * If the mask is equal to our current mask then the result
+		 * of m ^ mask should be all ones (ffff ffff); if, however,
+		 * the value stored in m is bigger, we should see a 0 appear
+		 * somewhere in the result.
+		 */
+ if (~(m->s6_addr32[0] ^ mask->s6_addr32[0]) |
+ ~(m->s6_addr32[1] ^ mask->s6_addr32[1]) |
+ ~(m->s6_addr32[2] ^ mask->s6_addr32[2]) |
+ ~(m->s6_addr32[3] ^ mask->s6_addr32[3]))
+ break;
+
+ if (ipv6_addr_cmp(&ip_addr->value, addr))
+ continue;
+
+ avail_addr = ip_addr;
+ break;
+ }
+
+ if (avail_addr && avail_addr->state == FBNIC_TCAM_S_DISABLED) {
+ memcpy(&avail_addr->value, addr, sizeof(*addr));
+ ipv6_addr_set(&avail_addr->mask,
+ ~mask->s6_addr32[0], ~mask->s6_addr32[1],
+ ~mask->s6_addr32[2], ~mask->s6_addr32[3]);
+ avail_addr->version = 6;
+
+ avail_addr->state = FBNIC_TCAM_S_ADD;
+ }
+
+ return avail_addr;
+}
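
The IPv6 variant applies the same idea word by word across the 128-bit address. A minimal userspace sketch, assuming a hypothetical stored /64 entry and a new /128 filter; in the driver the first non-zero result short-circuits the rest.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Stored /64 entry, kept inverted: the low 64 bits are "don't care" */
	uint32_t m[4]    = { 0x00000000, 0x00000000, 0xffffffff, 0xffffffff };
	/* New /128 filter from the stack: every bit must match */
	uint32_t mask[4] = { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff };
	uint32_t overlap = 0, gap = 0;
	int i;

	for (i = 0; i < 4; i++) {
		overlap |= m[i] & mask[i];	/* non-zero: drop the cached slot */
		gap |= ~(m[i] ^ mask[i]);	/* non-zero: masks differ, stop   */
	}

	printf("overlap %#x, gap %#x\n", overlap, gap);
	return 0;
}
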
+
+int __fbnic_ip_unsync(struct fbnic_ip_addr *ip_addr, unsigned int tcam_idx)
+{
+ if (!test_and_clear_bit(tcam_idx, ip_addr->act_tcam))
+ return -ENOENT;
+
+ if (bitmap_empty(ip_addr->act_tcam, FBNIC_RPC_TCAM_ACT_NUM_ENTRIES))
+ ip_addr->state = FBNIC_TCAM_S_DELETE;
+
+ return 0;
+}
+
+static void fbnic_clear_ip_src_entry(struct fbnic_dev *fbd, unsigned int idx)
+{
+ int i;
+
+ /* Invalidate entry and clear addr state info */
+ for (i = 0; i <= FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN; i++)
+ wr32(fbd, FBNIC_RPC_TCAM_IPSRC(idx, i), 0);
+}
+
+static void fbnic_clear_ip_dst_entry(struct fbnic_dev *fbd, unsigned int idx)
+{
+ int i;
+
+ /* Invalidate entry and clear addr state info */
+ for (i = 0; i <= FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN; i++)
+ wr32(fbd, FBNIC_RPC_TCAM_IPDST(idx, i), 0);
+}
+
+static void fbnic_clear_ip_outer_src_entry(struct fbnic_dev *fbd,
+ unsigned int idx)
+{
+ int i;
+
+ /* Invalidate entry and clear addr state info */
+ for (i = 0; i <= FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN; i++)
+ wr32(fbd, FBNIC_RPC_TCAM_OUTER_IPSRC(idx, i), 0);
+}
+
+static void fbnic_clear_ip_outer_dst_entry(struct fbnic_dev *fbd,
+ unsigned int idx)
+{
+ int i;
+
+ /* Invalidate entry and clear addr state info */
+ for (i = 0; i <= FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN; i++)
+ wr32(fbd, FBNIC_RPC_TCAM_OUTER_IPDST(idx, i), 0);
+}
+
+static void fbnic_write_ip_src_entry(struct fbnic_dev *fbd, unsigned int idx,
+ struct fbnic_ip_addr *ip_addr)
+{
+ __be16 *mask, *value;
+ int i;
+
+ mask = &ip_addr->mask.s6_addr16[FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN - 1];
+ value = &ip_addr->value.s6_addr16[FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN - 1];
+
+ for (i = 0; i < FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN; i++)
+ wr32(fbd, FBNIC_RPC_TCAM_IPSRC(idx, i),
+ FIELD_PREP(FBNIC_RPC_TCAM_IP_ADDR_MASK, ntohs(*mask--)) |
+ FIELD_PREP(FBNIC_RPC_TCAM_IP_ADDR_VALUE, ntohs(*value--)));
+ wrfl(fbd);
+
+ /* Bit 129 is used to flag for v4/v6 */
+ wr32(fbd, FBNIC_RPC_TCAM_IPSRC(idx, i),
+ (ip_addr->version == 6) | FBNIC_RPC_TCAM_VALIDATE);
+}
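
The FIELD_PREP()/FIELD_GET() pattern used in these TCAM register writes packs a value into the field described by a mask. A standalone userspace illustration with a made-up 14-bit field; the mask below and the macro stand-ins are placeholders, not the real FBNIC register layout or the kernel's exact macro definitions.

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-ins that mimic the behaviour of the kernel helpers */
#define GENMASK(h, l)		((~0u << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(mask, val)	(((uint32_t)(val) << __builtin_ctz(mask)) & (mask))
#define FIELD_GET(mask, reg)	(((reg) & (mask)) >> __builtin_ctz(mask))

#define DEMO_VALUE_FIELD	GENMASK(13, 0)	/* hypothetical 14-bit field */

int main(void)
{
	uint32_t reg = FIELD_PREP(DEMO_VALUE_FIELD, 0x1234);

	printf("packed:   %#x\n", reg);
	printf("unpacked: %#x\n", (uint32_t)FIELD_GET(DEMO_VALUE_FIELD, reg));
	return 0;
}
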
+
+static void fbnic_write_ip_dst_entry(struct fbnic_dev *fbd, unsigned int idx,
+ struct fbnic_ip_addr *ip_addr)
+{
+ __be16 *mask, *value;
+ int i;
+
+ mask = &ip_addr->mask.s6_addr16[FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN - 1];
+ value = &ip_addr->value.s6_addr16[FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN - 1];
+
+ for (i = 0; i < FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN; i++)
+ wr32(fbd, FBNIC_RPC_TCAM_IPDST(idx, i),
+ FIELD_PREP(FBNIC_RPC_TCAM_IP_ADDR_MASK, ntohs(*mask--)) |
+ FIELD_PREP(FBNIC_RPC_TCAM_IP_ADDR_VALUE, ntohs(*value--)));
+ wrfl(fbd);
+
+ /* Bit 129 is used to flag for v4/v6 */
+ wr32(fbd, FBNIC_RPC_TCAM_IPDST(idx, i),
+ (ip_addr->version == 6) | FBNIC_RPC_TCAM_VALIDATE);
+}
+
+static void fbnic_write_ip_outer_src_entry(struct fbnic_dev *fbd,
+ unsigned int idx,
+ struct fbnic_ip_addr *ip_addr)
+{
+ __be16 *mask, *value;
+ int i;
+
+ mask = &ip_addr->mask.s6_addr16[FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN - 1];
+ value = &ip_addr->value.s6_addr16[FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN - 1];
+
+ for (i = 0; i < FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN; i++)
+ wr32(fbd, FBNIC_RPC_TCAM_OUTER_IPSRC(idx, i),
+ FIELD_PREP(FBNIC_RPC_TCAM_IP_ADDR_MASK, ntohs(*mask--)) |
+ FIELD_PREP(FBNIC_RPC_TCAM_IP_ADDR_VALUE, ntohs(*value--)));
+ wrfl(fbd);
+
+ wr32(fbd, FBNIC_RPC_TCAM_OUTER_IPSRC(idx, i), FBNIC_RPC_TCAM_VALIDATE);
+}
+
+static void fbnic_write_ip_outer_dst_entry(struct fbnic_dev *fbd,
+ unsigned int idx,
+ struct fbnic_ip_addr *ip_addr)
+{
+ __be16 *mask, *value;
+ int i;
+
+ mask = &ip_addr->mask.s6_addr16[FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN - 1];
+ value = &ip_addr->value.s6_addr16[FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN - 1];
+
+ for (i = 0; i < FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN; i++)
+ wr32(fbd, FBNIC_RPC_TCAM_OUTER_IPDST(idx, i),
+ FIELD_PREP(FBNIC_RPC_TCAM_IP_ADDR_MASK, ntohs(*mask--)) |
+ FIELD_PREP(FBNIC_RPC_TCAM_IP_ADDR_VALUE, ntohs(*value--)));
+ wrfl(fbd);
+
+ wr32(fbd, FBNIC_RPC_TCAM_OUTER_IPDST(idx, i), FBNIC_RPC_TCAM_VALIDATE);
+}
+
+void fbnic_write_ip_addr(struct fbnic_dev *fbd)
+{
+ int idx;
+
+ for (idx = ARRAY_SIZE(fbd->ip_src); idx--;) {
+ struct fbnic_ip_addr *ip_addr = &fbd->ip_src[idx];
+
+ /* Check if update flag is set else skip. */
+ if (!(ip_addr->state & FBNIC_TCAM_S_UPDATE))
+ continue;
+
+ /* Clear by writing 0s. */
+ if (ip_addr->state == FBNIC_TCAM_S_DELETE) {
+ /* Invalidate entry and clear addr state info */
+ fbnic_clear_ip_src_entry(fbd, idx);
+ memset(ip_addr, 0, sizeof(*ip_addr));
+
+ continue;
+ }
+
+ fbnic_write_ip_src_entry(fbd, idx, ip_addr);
+
+ ip_addr->state = FBNIC_TCAM_S_VALID;
+ }
+
+ /* Repeat process for other IP TCAMs */
+ for (idx = ARRAY_SIZE(fbd->ip_dst); idx--;) {
+ struct fbnic_ip_addr *ip_addr = &fbd->ip_dst[idx];
+
+ if (!(ip_addr->state & FBNIC_TCAM_S_UPDATE))
+ continue;
+
+ if (ip_addr->state == FBNIC_TCAM_S_DELETE) {
+ fbnic_clear_ip_dst_entry(fbd, idx);
+ memset(ip_addr, 0, sizeof(*ip_addr));
+
+ continue;
+ }
+
+ fbnic_write_ip_dst_entry(fbd, idx, ip_addr);
+
+ ip_addr->state = FBNIC_TCAM_S_VALID;
+ }
+
+ for (idx = ARRAY_SIZE(fbd->ipo_src); idx--;) {
+ struct fbnic_ip_addr *ip_addr = &fbd->ipo_src[idx];
+
+ if (!(ip_addr->state & FBNIC_TCAM_S_UPDATE))
+ continue;
+
+ if (ip_addr->state == FBNIC_TCAM_S_DELETE) {
+ fbnic_clear_ip_outer_src_entry(fbd, idx);
+ memset(ip_addr, 0, sizeof(*ip_addr));
+
+ continue;
+ }
+
+ fbnic_write_ip_outer_src_entry(fbd, idx, ip_addr);
+
+ ip_addr->state = FBNIC_TCAM_S_VALID;
+ }
+
+ for (idx = ARRAY_SIZE(fbd->ipo_dst); idx--;) {
+ struct fbnic_ip_addr *ip_addr = &fbd->ipo_dst[idx];
+
+ if (!(ip_addr->state & FBNIC_TCAM_S_UPDATE))
+ continue;
+
+ if (ip_addr->state == FBNIC_TCAM_S_DELETE) {
+ fbnic_clear_ip_outer_dst_entry(fbd, idx);
+ memset(ip_addr, 0, sizeof(*ip_addr));
+
+ continue;
+ }
+
+ fbnic_write_ip_outer_dst_entry(fbd, idx, ip_addr);
+
+ ip_addr->state = FBNIC_TCAM_S_VALID;
+ }
+}
+
void fbnic_clear_rules(struct fbnic_dev *fbd)
{
u32 dest = FIELD_PREP(FBNIC_RPC_ACT_TBL0_DEST_MASK,
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_rpc.h b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.h
index 0d8285fa5b45..6892414195c3 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_rpc.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.h
@@ -7,6 +7,8 @@
#include <uapi/linux/in6.h>
#include <linux/bitfield.h>
+struct in_addr;
+
/* The TCAM state definitions follow an expected ordering.
* They start out disabled, then move through the following states:
* Disabled 0 -> Add 2
@@ -32,6 +34,12 @@ enum {
#define FBNIC_RPC_TCAM_MACDA_WORD_LEN 3
#define FBNIC_RPC_TCAM_MACDA_NUM_ENTRIES 32
+/* 8 IPSRC and 8 IPDST TCAM entries, each spanning
+ * 8 address registers plus a validate register
+ */
+#define FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN 8
+#define FBNIC_RPC_TCAM_IP_ADDR_NUM_ENTRIES 8
+
#define FBNIC_RPC_TCAM_ACT_WORD_LEN 11
#define FBNIC_RPC_TCAM_ACT_NUM_ENTRIES 64
@@ -47,6 +55,13 @@ struct fbnic_mac_addr {
DECLARE_BITMAP(act_tcam, FBNIC_RPC_TCAM_ACT_NUM_ENTRIES);
};
+struct fbnic_ip_addr {
+ struct in6_addr mask, value;
+ unsigned char version;
+ unsigned char state;
+ DECLARE_BITMAP(act_tcam, FBNIC_RPC_TCAM_ACT_NUM_ENTRIES);
+};
+
struct fbnic_act_tcam {
struct {
u16 tcam[FBNIC_RPC_TCAM_ACT_WORD_LEN];
@@ -81,6 +96,11 @@ enum {
#define FBNIC_RPC_ACT_TBL_BMC_OFFSET 0
#define FBNIC_RPC_ACT_TBL_BMC_ALL_MULTI_OFFSET 1
+/* This should leave us with 48 total entries in the TCAM that can be used
+ * for NFC after also deducting the 14 needed for RSS table programming.
+ */
+#define FBNIC_RPC_ACT_TBL_NFC_OFFSET 2
+
/* We reserve the last 14 entries for RSS rules on the host. The BMC
* unicast rule will need to be populated above these and is expected to
* use MACDA TCAM entry 23 to store the BMC MAC address.
@@ -88,6 +108,9 @@ enum {
#define FBNIC_RPC_ACT_TBL_RSS_OFFSET \
(FBNIC_RPC_ACT_TBL_NUM_ENTRIES - FBNIC_RSS_EN_NUM_ENTRIES)
+#define FBNIC_RPC_ACT_TBL_NFC_ENTRIES \
+ (FBNIC_RPC_ACT_TBL_RSS_OFFSET - FBNIC_RPC_ACT_TBL_NFC_OFFSET)
+
/* Flags used to identify the owner for this MAC filter. Note that any
* flags set for Broadcast thru Promisc indicate that the rule belongs
* to the RSS filters for the host.
@@ -168,6 +191,7 @@ void fbnic_rss_init_en_mask(struct fbnic_net *fbn);
void fbnic_rss_disable_hw(struct fbnic_dev *fbd);
void fbnic_rss_reinit_hw(struct fbnic_dev *fbd, struct fbnic_net *fbn);
void fbnic_rss_reinit(struct fbnic_dev *fbd, struct fbnic_net *fbn);
+u16 fbnic_flow_hash_2_rss_en_mask(struct fbnic_net *fbn, int flow_type);
int __fbnic_xc_unsync(struct fbnic_mac_addr *mac_addr, unsigned int tcam_idx);
struct fbnic_mac_addr *__fbnic_uc_sync(struct fbnic_dev *fbd,
@@ -177,6 +201,17 @@ struct fbnic_mac_addr *__fbnic_mc_sync(struct fbnic_dev *fbd,
void fbnic_sift_macda(struct fbnic_dev *fbd);
void fbnic_write_macda(struct fbnic_dev *fbd);
+struct fbnic_ip_addr *__fbnic_ip4_sync(struct fbnic_dev *fbd,
+ struct fbnic_ip_addr *ip_addr,
+ const struct in_addr *addr,
+ const struct in_addr *mask);
+struct fbnic_ip_addr *__fbnic_ip6_sync(struct fbnic_dev *fbd,
+ struct fbnic_ip_addr *ip_addr,
+ const struct in6_addr *addr,
+ const struct in6_addr *mask);
+int __fbnic_ip_unsync(struct fbnic_ip_addr *ip_addr, unsigned int tcam_idx);
+void fbnic_write_ip_addr(struct fbnic_dev *fbd);
+
static inline int __fbnic_uc_unsync(struct fbnic_mac_addr *mac_addr)
{
return __fbnic_xc_unsync(mac_addr, FBNIC_MAC_ADDR_T_UNICAST);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_tlv.c b/drivers/net/ethernet/meta/fbnic/fbnic_tlv.c
index 2a174ab062a3..517ed8b2f1cb 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_tlv.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_tlv.c
@@ -196,13 +196,17 @@ int fbnic_tlv_attr_put_string(struct fbnic_tlv_msg *msg, u16 attr_id,
/**
* fbnic_tlv_attr_get_unsigned - Retrieve unsigned value from result
* @attr: Attribute to retrieve data from
+ * @def: The default value if attr is NULL
*
* Return: unsigned 64b value containing integer value
**/
-u64 fbnic_tlv_attr_get_unsigned(struct fbnic_tlv_msg *attr)
+u64 fbnic_tlv_attr_get_unsigned(struct fbnic_tlv_msg *attr, u64 def)
{
__le64 le64_value = 0;
+ if (!attr)
+ return def;
+
memcpy(&le64_value, &attr->value[0],
le16_to_cpu(attr->hdr.len) - sizeof(*attr));
@@ -212,15 +216,21 @@ u64 fbnic_tlv_attr_get_unsigned(struct fbnic_tlv_msg *attr)
/**
* fbnic_tlv_attr_get_signed - Retrieve signed value from result
* @attr: Attribute to retrieve data from
+ * @def: The default value if attr is NULL
*
* Return: signed 64b value containing integer value
**/
-s64 fbnic_tlv_attr_get_signed(struct fbnic_tlv_msg *attr)
+s64 fbnic_tlv_attr_get_signed(struct fbnic_tlv_msg *attr, s64 def)
{
- int shift = (8 + sizeof(*attr) - le16_to_cpu(attr->hdr.len)) * 8;
__le64 le64_value = 0;
+ int shift;
s64 value;
+ if (!attr)
+ return def;
+
+ shift = (8 + sizeof(*attr) - le16_to_cpu(attr->hdr.len)) * 8;
+
/* Copy the value and adjust for byte ordering */
memcpy(&le64_value, &attr->value[0],
le16_to_cpu(attr->hdr.len) - sizeof(*attr));
@@ -233,19 +243,40 @@ s64 fbnic_tlv_attr_get_signed(struct fbnic_tlv_msg *attr)
/**
* fbnic_tlv_attr_get_string - Retrieve string value from result
* @attr: Attribute to retrieve data from
- * @str: Pointer to an allocated string to store the data
- * @max_size: The maximum size which can be in str
+ * @dst: Pointer to an allocated string to store the data
+ * @dstsize: The maximum size which can be in dst
*
- * Return: the size of the string read from firmware
+ * Return: the size of the string read from firmware or negative error.
**/
-size_t fbnic_tlv_attr_get_string(struct fbnic_tlv_msg *attr, char *str,
- size_t max_size)
+ssize_t fbnic_tlv_attr_get_string(struct fbnic_tlv_msg *attr, char *dst,
+ size_t dstsize)
{
- max_size = min_t(size_t, max_size,
- (le16_to_cpu(attr->hdr.len) * 4) - sizeof(*attr));
- memcpy(str, &attr->value, max_size);
+ size_t srclen, len;
+ ssize_t ret;
+
+ if (!attr)
+ return -EINVAL;
+
+ if (dstsize == 0)
+ return -E2BIG;
+
+ srclen = le16_to_cpu(attr->hdr.len) - sizeof(*attr);
+ if (srclen > 0 && ((char *)attr->value)[srclen - 1] == '\0')
+ srclen--;
+
+ if (srclen >= dstsize) {
+ len = dstsize - 1;
+ ret = -E2BIG;
+ } else {
+ len = srclen;
+ ret = len;
+ }
+
+ memcpy(dst, &attr->value, len);
+ /* Zero pad end of dst. */
+ memset(dst + len, 0, dstsize - len);
- return max_size;
+ return ret;
}
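
The new contract essentially mirrors strscpy_pad(): copy at most dstsize - 1 bytes, always terminate and zero-pad, and return -E2BIG on truncation instead of silently clamping. A userspace sketch of the same logic on plain buffers; the buffer contents are hypothetical.

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>

static ssize_t copy_fw_string(char *dst, size_t dstsize,
			      const char *src, size_t srclen)
{
	ssize_t ret;
	size_t len;

	if (!dstsize)
		return -E2BIG;

	/* Trim a trailing NUL the firmware may have included */
	if (srclen > 0 && src[srclen - 1] == '\0')
		srclen--;

	if (srclen >= dstsize) {
		len = dstsize - 1;
		ret = -E2BIG;
	} else {
		len = srclen;
		ret = len;
	}

	memcpy(dst, src, len);
	memset(dst + len, 0, dstsize - len);	/* Zero pad end of dst */
	return ret;
}

int main(void)
{
	char buf[8];

	printf("%zd '%s'\n", copy_fw_string(buf, sizeof(buf), "fw-1.2", 7), buf);
	printf("%zd '%s'\n",
	       copy_fw_string(buf, sizeof(buf), "firmware-build-42", 17), buf);
	return 0;
}
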
/**
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_tlv.h b/drivers/net/ethernet/meta/fbnic/fbnic_tlv.h
index 67300ab44353..c34bf87eeec9 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_tlv.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_tlv.h
@@ -114,34 +114,10 @@ static inline bool fbnic_tlv_attr_get_bool(struct fbnic_tlv_msg *attr)
return !!attr;
}
-u64 fbnic_tlv_attr_get_unsigned(struct fbnic_tlv_msg *attr);
-s64 fbnic_tlv_attr_get_signed(struct fbnic_tlv_msg *attr);
-size_t fbnic_tlv_attr_get_string(struct fbnic_tlv_msg *attr, char *str,
- size_t max_size);
-
-#define get_unsigned_result(id, location) \
-do { \
- struct fbnic_tlv_msg *result = results[id]; \
- if (result) \
- location = fbnic_tlv_attr_get_unsigned(result); \
-} while (0)
-
-#define get_signed_result(id, location) \
-do { \
- struct fbnic_tlv_msg *result = results[id]; \
- if (result) \
- location = fbnic_tlv_attr_get_signed(result); \
-} while (0)
-
-#define get_string_result(id, size, str, max_size) \
-do { \
- struct fbnic_tlv_msg *result = results[id]; \
- if (result) \
- size = fbnic_tlv_attr_get_string(result, str, max_size); \
-} while (0)
-
-#define get_bool(id) (!!(results[id]))
-
+u64 fbnic_tlv_attr_get_unsigned(struct fbnic_tlv_msg *attr, u64 def);
+s64 fbnic_tlv_attr_get_signed(struct fbnic_tlv_msg *attr, s64 def);
+ssize_t fbnic_tlv_attr_get_string(struct fbnic_tlv_msg *attr, char *dst,
+ size_t dstsize);
struct fbnic_tlv_msg *fbnic_tlv_msg_alloc(u16 msg_id);
int fbnic_tlv_attr_put_flag(struct fbnic_tlv_msg *msg, const u16 attr_id);
int fbnic_tlv_attr_put_value(struct fbnic_tlv_msg *msg, const u16 attr_id,
@@ -170,6 +146,13 @@ int fbnic_tlv_msg_parse(void *opaque, struct fbnic_tlv_msg *msg,
const struct fbnic_tlv_parser *parser);
int fbnic_tlv_parser_error(void *opaque, struct fbnic_tlv_msg **results);
+#define fta_get_uint(_results, _id) \
+ fbnic_tlv_attr_get_unsigned(_results[_id], 0)
+#define fta_get_sint(_results, _id) \
+ fbnic_tlv_attr_get_signed(_results[_id], 0)
+#define fta_get_str(_results, _id, _dst, _dstsize) \
+ fbnic_tlv_attr_get_string(_results[_id], _dst, _dstsize)
+
#define FBNIC_TLV_MSG_ERROR \
FBNIC_TLV_PARSER(UNKNOWN, NULL, fbnic_tlv_parser_error)
#endif /* _FBNIC_TLV_H_ */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
index b5050fabe8fe..ac11389a764c 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
@@ -6,6 +6,7 @@
#include <linux/pci.h>
#include <net/netdev_queues.h>
#include <net/page_pool/helpers.h>
+#include <net/tcp.h>
#include "fbnic.h"
#include "fbnic_csr.h"
@@ -18,6 +19,7 @@ enum {
struct fbnic_xmit_cb {
u32 bytecount;
+ u16 gso_segs;
u8 desc_count;
u8 flags;
int hw_head;
@@ -113,6 +115,11 @@ static int fbnic_maybe_stop_tx(const struct net_device *dev,
res = netif_txq_maybe_stop(txq, fbnic_desc_unused(ring), size,
FBNIC_TX_DESC_WAKEUP);
+ if (!res) {
+ u64_stats_update_begin(&ring->stats.syncp);
+ ring->stats.twq.stop++;
+ u64_stats_update_end(&ring->stats.syncp);
+ }
return !res;
}
@@ -174,8 +181,72 @@ static bool fbnic_tx_tstamp(struct sk_buff *skb)
}
static bool
+fbnic_tx_lso(struct fbnic_ring *ring, struct sk_buff *skb,
+ struct skb_shared_info *shinfo, __le64 *meta,
+ unsigned int *l2len, unsigned int *i3len)
+{
+ unsigned int l3_type, l4_type, l4len, hdrlen;
+ unsigned char *l4hdr;
+ __be16 payload_len;
+
+ if (unlikely(skb_cow_head(skb, 0)))
+ return true;
+
+ if (shinfo->gso_type & SKB_GSO_PARTIAL) {
+ l3_type = FBNIC_TWD_L3_TYPE_OTHER;
+ } else if (!skb->encapsulation) {
+ if (ip_hdr(skb)->version == 4)
+ l3_type = FBNIC_TWD_L3_TYPE_IPV4;
+ else
+ l3_type = FBNIC_TWD_L3_TYPE_IPV6;
+ } else {
+ unsigned int o3len;
+
+ o3len = skb_inner_network_header(skb) - skb_network_header(skb);
+ *i3len -= o3len;
+ *meta |= cpu_to_le64(FIELD_PREP(FBNIC_TWD_L3_OHLEN_MASK,
+ o3len / 2));
+ l3_type = FBNIC_TWD_L3_TYPE_V6V6;
+ }
+
+ l4hdr = skb_checksum_start(skb);
+ payload_len = cpu_to_be16(skb->len - (l4hdr - skb->data));
+
+ if (shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
+ struct tcphdr *tcph = (struct tcphdr *)l4hdr;
+
+ l4_type = FBNIC_TWD_L4_TYPE_TCP;
+ l4len = __tcp_hdrlen((struct tcphdr *)l4hdr);
+ csum_replace_by_diff(&tcph->check, (__force __wsum)payload_len);
+ } else {
+ struct udphdr *udph = (struct udphdr *)l4hdr;
+
+ l4_type = FBNIC_TWD_L4_TYPE_UDP;
+ l4len = sizeof(struct udphdr);
+ csum_replace_by_diff(&udph->check, (__force __wsum)payload_len);
+ }
+
+ hdrlen = (l4hdr - skb->data) + l4len;
+ *meta |= cpu_to_le64(FIELD_PREP(FBNIC_TWD_L3_TYPE_MASK, l3_type) |
+ FIELD_PREP(FBNIC_TWD_L4_TYPE_MASK, l4_type) |
+ FIELD_PREP(FBNIC_TWD_L4_HLEN_MASK, l4len / 4) |
+ FIELD_PREP(FBNIC_TWD_MSS_MASK, shinfo->gso_size) |
+ FBNIC_TWD_FLAG_REQ_LSO);
+
+ FBNIC_XMIT_CB(skb)->bytecount += (shinfo->gso_segs - 1) * hdrlen;
+ FBNIC_XMIT_CB(skb)->gso_segs = shinfo->gso_segs;
+
+ u64_stats_update_begin(&ring->stats.syncp);
+ ring->stats.twq.lso += shinfo->gso_segs;
+ u64_stats_update_end(&ring->stats.syncp);
+
+ return false;
+}
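
The bytecount adjustment above accounts for the headers the hardware will replicate in front of every segment, so queue accounting sees the bytes that actually hit the wire rather than just skb->len. A quick back-of-the-envelope check with hypothetical TSO numbers:

#include <stdio.h>

int main(void)
{
	unsigned int hdrlen = 54;		/* 14 L2 + 20 IPv4 + 20 TCP */
	unsigned int gso_size = 1460, gso_segs = 44;
	unsigned int skb_len = hdrlen + gso_size * gso_segs;	/* 64294 */

	unsigned int bytecount = skb_len + (gso_segs - 1) * hdrlen;

	printf("wire bytes reported on completion: %u\n", bytecount);	/* 66616 */
	printf("per-segment frame size:            %u\n", gso_size + hdrlen);
	return 0;
}
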
+
+static bool
fbnic_tx_offloads(struct fbnic_ring *ring, struct sk_buff *skb, __le64 *meta)
{
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
unsigned int l2len, i3len;
if (fbnic_tx_tstamp(skb))
@@ -190,7 +261,15 @@ fbnic_tx_offloads(struct fbnic_ring *ring, struct sk_buff *skb, __le64 *meta)
*meta |= cpu_to_le64(FIELD_PREP(FBNIC_TWD_CSUM_OFFSET_MASK,
skb->csum_offset / 2));
- *meta |= cpu_to_le64(FBNIC_TWD_FLAG_REQ_CSO);
+ if (shinfo->gso_size) {
+ if (fbnic_tx_lso(ring, skb, shinfo, meta, &l2len, &i3len))
+ return true;
+ } else {
+ *meta |= cpu_to_le64(FBNIC_TWD_FLAG_REQ_CSO);
+ u64_stats_update_begin(&ring->stats.syncp);
+ ring->stats.twq.csum_partial++;
+ u64_stats_update_end(&ring->stats.syncp);
+ }
*meta |= cpu_to_le64(FIELD_PREP(FBNIC_TWD_L2_HLEN_MASK, l2len / 2) |
FIELD_PREP(FBNIC_TWD_L3_IHLEN_MASK, i3len / 2));
@@ -198,12 +277,15 @@ fbnic_tx_offloads(struct fbnic_ring *ring, struct sk_buff *skb, __le64 *meta)
}
static void
-fbnic_rx_csum(u64 rcd, struct sk_buff *skb, struct fbnic_ring *rcq)
+fbnic_rx_csum(u64 rcd, struct sk_buff *skb, struct fbnic_ring *rcq,
+ u64 *csum_cmpl, u64 *csum_none)
{
skb_checksum_none_assert(skb);
- if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM)))
+ if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM))) {
+ (*csum_none)++;
return;
+ }
if (FIELD_GET(FBNIC_RCD_META_L4_CSUM_UNNECESSARY, rcd)) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -212,6 +294,7 @@ fbnic_rx_csum(u64 rcd, struct sk_buff *skb, struct fbnic_ring *rcq)
skb->ip_summed = CHECKSUM_COMPLETE;
skb->csum = (__force __wsum)csum;
+ (*csum_cmpl)++;
}
}
@@ -329,7 +412,9 @@ fbnic_xmit_frame_ring(struct sk_buff *skb, struct fbnic_ring *ring)
/* Write all members within DWORD to condense this into 2 4B writes */
FBNIC_XMIT_CB(skb)->bytecount = skb->len;
+ FBNIC_XMIT_CB(skb)->gso_segs = 1;
FBNIC_XMIT_CB(skb)->desc_count = 0;
+ FBNIC_XMIT_CB(skb)->flags = 0;
if (fbnic_tx_offloads(ring, skb, meta))
goto err_free;
@@ -356,6 +441,59 @@ netdev_tx_t fbnic_xmit_frame(struct sk_buff *skb, struct net_device *dev)
return fbnic_xmit_frame_ring(skb, fbn->tx[q_map]);
}
+static netdev_features_t
+fbnic_features_check_encap_gso(struct sk_buff *skb, struct net_device *dev,
+ netdev_features_t features, unsigned int l3len)
+{
+ netdev_features_t skb_gso_features;
+ struct ipv6hdr *ip6_hdr;
+ unsigned char l4_hdr;
+ unsigned int start;
+ __be16 frag_off;
+
+ /* Require MANGLEID for GSO_PARTIAL of IPv4.
+	 * In theory we could support TSO with a single, innermost v4 header
+	 * by pretending everything before it is L2, but that needs to be
+	 * parsed case by case... so leaving it for when the need arises.
+ */
+ if (!(features & NETIF_F_TSO_MANGLEID))
+ features &= ~NETIF_F_TSO;
+
+ skb_gso_features = skb_shinfo(skb)->gso_type;
+ skb_gso_features <<= NETIF_F_GSO_SHIFT;
+
+ /* We'd only clear the native GSO features, so don't bother validating
+ * if the match can only be on those supported thru GSO_PARTIAL.
+ */
+ if (!(skb_gso_features & FBNIC_TUN_GSO_FEATURES))
+ return features;
+
+ /* We can only do IPv6-in-IPv6, not v4-in-v6. It'd be nice
+ * to fall back to partial for this, or any failure below.
+	 * This is just an optimization; UDPv4 will be caught later on.
+ */
+ if (skb_gso_features & NETIF_F_TSO)
+ return features & ~FBNIC_TUN_GSO_FEATURES;
+
+ /* Inner headers multiple of 2 */
+ if ((skb_inner_network_header(skb) - skb_network_header(skb)) % 2)
+ return features & ~FBNIC_TUN_GSO_FEATURES;
+
+ /* Encapsulated GSO packet, make 100% sure it's IPv6-in-IPv6. */
+ ip6_hdr = ipv6_hdr(skb);
+ if (ip6_hdr->version != 6)
+ return features & ~FBNIC_TUN_GSO_FEATURES;
+
+ l4_hdr = ip6_hdr->nexthdr;
+ start = (unsigned char *)ip6_hdr - skb->data + sizeof(struct ipv6hdr);
+ start = ipv6_skip_exthdr(skb, start, &l4_hdr, &frag_off);
+ if (frag_off || l4_hdr != IPPROTO_IPV6 ||
+ skb->data + start != skb_inner_network_header(skb))
+ return features & ~FBNIC_TUN_GSO_FEATURES;
+
+ return features;
+}
+
netdev_features_t
fbnic_features_check(struct sk_buff *skb, struct net_device *dev,
netdev_features_t features)
@@ -376,9 +514,12 @@ fbnic_features_check(struct sk_buff *skb, struct net_device *dev,
!FIELD_FIT(FBNIC_TWD_L2_HLEN_MASK, l2len / 2) ||
!FIELD_FIT(FBNIC_TWD_L3_IHLEN_MASK, l3len / 2) ||
!FIELD_FIT(FBNIC_TWD_CSUM_OFFSET_MASK, skb->csum_offset / 2))
- return features & ~NETIF_F_CSUM_MASK;
+ return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
- return features;
+ if (likely(!skb->encapsulation) || !skb_is_gso(skb))
+ return features;
+
+ return fbnic_features_check_encap_gso(skb, dev, features, l3len);
}
static void fbnic_clean_twq0(struct fbnic_napi_vector *nv, int napi_budget,
@@ -429,7 +570,7 @@ static void fbnic_clean_twq0(struct fbnic_napi_vector *nv, int napi_budget,
}
total_bytes += FBNIC_XMIT_CB(skb)->bytecount;
- total_packets += 1;
+ total_packets += FBNIC_XMIT_CB(skb)->gso_segs;
napi_consume_skb(skb, napi_budget);
}
@@ -444,7 +585,7 @@ static void fbnic_clean_twq0(struct fbnic_napi_vector *nv, int napi_budget,
if (unlikely(discard)) {
u64_stats_update_begin(&ring->stats.syncp);
ring->stats.dropped += total_packets;
- ring->stats.ts_lost += ts_lost;
+ ring->stats.twq.ts_lost += ts_lost;
u64_stats_update_end(&ring->stats.syncp);
netdev_tx_completed_queue(txq, total_packets, total_bytes);
@@ -456,9 +597,13 @@ static void fbnic_clean_twq0(struct fbnic_napi_vector *nv, int napi_budget,
ring->stats.packets += total_packets;
u64_stats_update_end(&ring->stats.syncp);
- netif_txq_completed_wake(txq, total_packets, total_bytes,
- fbnic_desc_unused(ring),
- FBNIC_TX_DESC_WAKEUP);
+ if (!netif_txq_completed_wake(txq, total_packets, total_bytes,
+ fbnic_desc_unused(ring),
+ FBNIC_TX_DESC_WAKEUP)) {
+ u64_stats_update_begin(&ring->stats.syncp);
+ ring->stats.twq.wake++;
+ u64_stats_update_end(&ring->stats.syncp);
+ }
}
static void fbnic_clean_tsq(struct fbnic_napi_vector *nv,
@@ -507,7 +652,7 @@ static void fbnic_clean_tsq(struct fbnic_napi_vector *nv,
skb_tstamp_tx(skb, &hwtstamp);
u64_stats_update_begin(&ring->stats.syncp);
- ring->stats.ts_packets++;
+ ring->stats.twq.ts_packets++;
u64_stats_update_end(&ring->stats.syncp);
}
@@ -661,8 +806,13 @@ static void fbnic_fill_bdq(struct fbnic_napi_vector *nv, struct fbnic_ring *bdq)
struct page *page;
page = page_pool_dev_alloc_pages(nv->page_pool);
- if (!page)
+ if (!page) {
+ u64_stats_update_begin(&bdq->stats.syncp);
+ bdq->stats.rx.alloc_failed++;
+ u64_stats_update_end(&bdq->stats.syncp);
+
break;
+ }
fbnic_page_pool_init(bdq, i, page);
fbnic_bd_prep(bdq, i, page);
@@ -875,12 +1025,13 @@ static void fbnic_rx_tstamp(struct fbnic_napi_vector *nv, u64 rcd,
static void fbnic_populate_skb_fields(struct fbnic_napi_vector *nv,
u64 rcd, struct sk_buff *skb,
- struct fbnic_q_triad *qt)
+ struct fbnic_q_triad *qt,
+ u64 *csum_cmpl, u64 *csum_none)
{
struct net_device *netdev = nv->napi.dev;
struct fbnic_ring *rcq = &qt->cmpl;
- fbnic_rx_csum(rcd, skb, rcq);
+ fbnic_rx_csum(rcd, skb, rcq, csum_cmpl, csum_none);
if (netdev->features & NETIF_F_RXHASH)
skb_set_hash(skb,
@@ -898,7 +1049,8 @@ static bool fbnic_rcd_metadata_err(u64 rcd)
static int fbnic_clean_rcq(struct fbnic_napi_vector *nv,
struct fbnic_q_triad *qt, int budget)
{
- unsigned int packets = 0, bytes = 0, dropped = 0;
+ unsigned int packets = 0, bytes = 0, dropped = 0, alloc_failed = 0;
+ u64 csum_complete = 0, csum_none = 0;
struct fbnic_ring *rcq = &qt->cmpl;
struct fbnic_pkt_buff *pkt;
s32 head0 = -1, head1 = -1;
@@ -947,14 +1099,22 @@ static int fbnic_clean_rcq(struct fbnic_napi_vector *nv,
/* Populate skb and invalidate XDP */
if (!IS_ERR_OR_NULL(skb)) {
- fbnic_populate_skb_fields(nv, rcd, skb, qt);
+ fbnic_populate_skb_fields(nv, rcd, skb, qt,
+ &csum_complete,
+ &csum_none);
packets++;
bytes += skb->len;
napi_gro_receive(&nv->napi, skb);
} else {
- dropped++;
+			if (!skb)
+				alloc_failed++;
+
+			dropped++;
+
fbnic_put_pkt_buff(nv, pkt, 1);
}
@@ -977,6 +1137,9 @@ static int fbnic_clean_rcq(struct fbnic_napi_vector *nv,
/* Re-add ethernet header length (removed in fbnic_build_skb) */
rcq->stats.bytes += ETH_HLEN * packets;
rcq->stats.dropped += dropped;
+ rcq->stats.rx.alloc_failed += alloc_failed;
+ rcq->stats.rx.csum_complete += csum_complete;
+ rcq->stats.rx.csum_none += csum_none;
u64_stats_update_end(&rcq->stats.syncp);
/* Unmap and free processed buffers */
@@ -1033,20 +1196,20 @@ static int fbnic_poll(struct napi_struct *napi, int budget)
if (likely(napi_complete_done(napi, work_done)))
fbnic_nv_irq_rearm(nv);
- return 0;
+ return work_done;
}
-static irqreturn_t fbnic_msix_clean_rings(int __always_unused irq, void *data)
+irqreturn_t fbnic_msix_clean_rings(int __always_unused irq, void *data)
{
- struct fbnic_napi_vector *nv = data;
+ struct fbnic_napi_vector *nv = *(void **)data;
napi_schedule_irqoff(&nv->napi);
return IRQ_HANDLED;
}
-static void fbnic_aggregate_ring_rx_counters(struct fbnic_net *fbn,
- struct fbnic_ring *rxr)
+void fbnic_aggregate_ring_rx_counters(struct fbnic_net *fbn,
+ struct fbnic_ring *rxr)
{
struct fbnic_queue_stats *stats = &rxr->stats;
@@ -1054,10 +1217,15 @@ static void fbnic_aggregate_ring_rx_counters(struct fbnic_net *fbn,
fbn->rx_stats.bytes += stats->bytes;
fbn->rx_stats.packets += stats->packets;
fbn->rx_stats.dropped += stats->dropped;
+ fbn->rx_stats.rx.alloc_failed += stats->rx.alloc_failed;
+ fbn->rx_stats.rx.csum_complete += stats->rx.csum_complete;
+ fbn->rx_stats.rx.csum_none += stats->rx.csum_none;
+ /* Remember to add new stats here */
+ BUILD_BUG_ON(sizeof(fbn->rx_stats.rx) / 8 != 3);
}
-static void fbnic_aggregate_ring_tx_counters(struct fbnic_net *fbn,
- struct fbnic_ring *txr)
+void fbnic_aggregate_ring_tx_counters(struct fbnic_net *fbn,
+ struct fbnic_ring *txr)
{
struct fbnic_queue_stats *stats = &txr->stats;
@@ -1065,8 +1233,14 @@ static void fbnic_aggregate_ring_tx_counters(struct fbnic_net *fbn,
fbn->tx_stats.bytes += stats->bytes;
fbn->tx_stats.packets += stats->packets;
fbn->tx_stats.dropped += stats->dropped;
- fbn->tx_stats.ts_lost += stats->ts_lost;
- fbn->tx_stats.ts_packets += stats->ts_packets;
+ fbn->tx_stats.twq.csum_partial += stats->twq.csum_partial;
+ fbn->tx_stats.twq.lso += stats->twq.lso;
+ fbn->tx_stats.twq.ts_lost += stats->twq.ts_lost;
+ fbn->tx_stats.twq.ts_packets += stats->twq.ts_packets;
+ fbn->tx_stats.twq.stop += stats->twq.stop;
+ fbn->tx_stats.twq.wake += stats->twq.wake;
+ /* Remember to add new stats here */
+ BUILD_BUG_ON(sizeof(fbn->tx_stats.twq) / 8 != 6);
}
static void fbnic_remove_tx_ring(struct fbnic_net *fbn,
@@ -1099,7 +1273,6 @@ static void fbnic_free_napi_vector(struct fbnic_net *fbn,
struct fbnic_napi_vector *nv)
{
struct fbnic_dev *fbd = nv->fbd;
- u32 v_idx = nv->v_idx;
int i, j;
for (i = 0; i < nv->txt_count; i++) {
@@ -1113,31 +1286,20 @@ static void fbnic_free_napi_vector(struct fbnic_net *fbn,
fbnic_remove_rx_ring(fbn, &nv->qt[i].cmpl);
}
- fbnic_free_irq(fbd, v_idx, nv);
+ fbnic_napi_free_irq(fbd, nv);
page_pool_destroy(nv->page_pool);
netif_napi_del(&nv->napi);
- list_del(&nv->napis);
+ fbn->napi[fbnic_napi_idx(nv)] = NULL;
kfree(nv);
}
void fbnic_free_napi_vectors(struct fbnic_net *fbn)
{
- struct fbnic_napi_vector *nv, *temp;
-
- list_for_each_entry_safe(nv, temp, &fbn->napis, napis)
- fbnic_free_napi_vector(fbn, nv);
-}
-
-static void fbnic_name_napi_vector(struct fbnic_napi_vector *nv)
-{
- unsigned char *dev_name = nv->napi.dev->name;
+ int i;
- if (!nv->rxt_count)
- snprintf(nv->name, sizeof(nv->name), "%s-Tx-%u", dev_name,
- nv->v_idx - FBNIC_NON_NAPI_VECTORS);
- else
- snprintf(nv->name, sizeof(nv->name), "%s-TxRx-%u", dev_name,
- nv->v_idx - FBNIC_NON_NAPI_VECTORS);
+ for (i = 0; i < fbn->num_napi; i++)
+ if (fbn->napi[i])
+ fbnic_free_napi_vector(fbn, fbn->napi[i]);
}
#define FBNIC_PAGE_POOL_FLAGS \
@@ -1154,7 +1316,9 @@ static int fbnic_alloc_nv_page_pool(struct fbnic_net *fbn,
.dev = nv->dev,
.dma_dir = DMA_BIDIRECTIONAL,
.offset = 0,
- .max_len = PAGE_SIZE
+ .max_len = PAGE_SIZE,
+ .napi = &nv->napi,
+ .netdev = fbn->netdev,
};
struct page_pool *pp;
@@ -1222,7 +1386,7 @@ static int fbnic_alloc_napi_vector(struct fbnic_dev *fbd, struct fbnic_net *fbn,
nv->v_idx = v_idx;
/* Tie napi to netdev */
- list_add(&nv->napis, &fbn->napis);
+ fbn->napi[fbnic_napi_idx(nv)] = nv;
netif_napi_add(fbn->netdev, &nv->napi, fbnic_poll);
/* Record IRQ to NAPI struct */
@@ -1239,12 +1403,8 @@ static int fbnic_alloc_napi_vector(struct fbnic_dev *fbd, struct fbnic_net *fbn,
goto napi_del;
}
- /* Initialize vector name */
- fbnic_name_napi_vector(nv);
-
/* Request the IRQ for napi vector */
- err = fbnic_request_irq(fbd, v_idx, &fbnic_msix_clean_rings,
- IRQF_SHARED, nv->name, nv);
+ err = fbnic_napi_request_irq(fbd, nv);
if (err)
goto pp_destroy;
@@ -1307,7 +1467,7 @@ pp_destroy:
page_pool_destroy(nv->page_pool);
napi_del:
netif_napi_del(&nv->napi);
- list_del(&nv->napis);
+ fbn->napi[fbnic_napi_idx(nv)] = NULL;
kfree(nv);
return err;
}
@@ -1612,19 +1772,18 @@ free_resources:
void fbnic_free_resources(struct fbnic_net *fbn)
{
- struct fbnic_napi_vector *nv;
+ int i;
- list_for_each_entry(nv, &fbn->napis, napis)
- fbnic_free_nv_resources(fbn, nv);
+ for (i = 0; i < fbn->num_napi; i++)
+ fbnic_free_nv_resources(fbn, fbn->napi[i]);
}
int fbnic_alloc_resources(struct fbnic_net *fbn)
{
- struct fbnic_napi_vector *nv;
- int err = -ENODEV;
+ int i, err = -ENODEV;
- list_for_each_entry(nv, &fbn->napis, napis) {
- err = fbnic_alloc_nv_resources(fbn, nv);
+ for (i = 0; i < fbn->num_napi; i++) {
+ err = fbnic_alloc_nv_resources(fbn, fbn->napi[i]);
if (err)
goto free_resources;
}
@@ -1632,12 +1791,77 @@ int fbnic_alloc_resources(struct fbnic_net *fbn)
return 0;
free_resources:
- list_for_each_entry_continue_reverse(nv, &fbn->napis, napis)
- fbnic_free_nv_resources(fbn, nv);
+ while (i--)
+ fbnic_free_nv_resources(fbn, fbn->napi[i]);
return err;
}
+static void fbnic_set_netif_napi(struct fbnic_napi_vector *nv)
+{
+ int i, j;
+
+ /* Associate Tx queue with NAPI */
+ for (i = 0; i < nv->txt_count; i++) {
+ struct fbnic_q_triad *qt = &nv->qt[i];
+
+ netif_queue_set_napi(nv->napi.dev, qt->sub0.q_idx,
+ NETDEV_QUEUE_TYPE_TX, &nv->napi);
+ }
+
+ /* Associate Rx queue with NAPI */
+ for (j = 0; j < nv->rxt_count; j++, i++) {
+ struct fbnic_q_triad *qt = &nv->qt[i];
+
+ netif_queue_set_napi(nv->napi.dev, qt->cmpl.q_idx,
+ NETDEV_QUEUE_TYPE_RX, &nv->napi);
+ }
+}
+
+static void fbnic_reset_netif_napi(struct fbnic_napi_vector *nv)
+{
+ int i, j;
+
+ /* Disassociate Tx queue from NAPI */
+ for (i = 0; i < nv->txt_count; i++) {
+ struct fbnic_q_triad *qt = &nv->qt[i];
+
+ netif_queue_set_napi(nv->napi.dev, qt->sub0.q_idx,
+ NETDEV_QUEUE_TYPE_TX, NULL);
+ }
+
+ /* Disassociate Rx queue from NAPI */
+ for (j = 0; j < nv->rxt_count; j++, i++) {
+ struct fbnic_q_triad *qt = &nv->qt[i];
+
+ netif_queue_set_napi(nv->napi.dev, qt->cmpl.q_idx,
+ NETDEV_QUEUE_TYPE_RX, NULL);
+ }
+}
+
+int fbnic_set_netif_queues(struct fbnic_net *fbn)
+{
+ int i, err;
+
+ err = netif_set_real_num_queues(fbn->netdev, fbn->num_tx_queues,
+ fbn->num_rx_queues);
+ if (err)
+ return err;
+
+ for (i = 0; i < fbn->num_napi; i++)
+ fbnic_set_netif_napi(fbn->napi[i]);
+
+ return 0;
+}
+
+void fbnic_reset_netif_queues(struct fbnic_net *fbn)
+{
+ int i;
+
+ for (i = 0; i < fbn->num_napi; i++)
+ fbnic_reset_netif_napi(fbn->napi[i]);
+}
+
static void fbnic_disable_twq0(struct fbnic_ring *txr)
{
u32 twq_ctl = fbnic_ring_rd32(txr, FBNIC_QUEUE_TWQ0_CTL);
@@ -1670,33 +1894,34 @@ static void fbnic_disable_rcq(struct fbnic_ring *rxr)
void fbnic_napi_disable(struct fbnic_net *fbn)
{
- struct fbnic_napi_vector *nv;
+ int i;
- list_for_each_entry(nv, &fbn->napis, napis) {
- napi_disable(&nv->napi);
+ for (i = 0; i < fbn->num_napi; i++) {
+ napi_disable(&fbn->napi[i]->napi);
- fbnic_nv_irq_disable(nv);
+ fbnic_nv_irq_disable(fbn->napi[i]);
}
}
void fbnic_disable(struct fbnic_net *fbn)
{
struct fbnic_dev *fbd = fbn->fbd;
- struct fbnic_napi_vector *nv;
- int i, j;
+ int i, j, t;
+
+ for (i = 0; i < fbn->num_napi; i++) {
+ struct fbnic_napi_vector *nv = fbn->napi[i];
- list_for_each_entry(nv, &fbn->napis, napis) {
/* Disable Tx queue triads */
- for (i = 0; i < nv->txt_count; i++) {
- struct fbnic_q_triad *qt = &nv->qt[i];
+ for (t = 0; t < nv->txt_count; t++) {
+ struct fbnic_q_triad *qt = &nv->qt[t];
fbnic_disable_twq0(&qt->sub0);
fbnic_disable_tcq(&qt->cmpl);
}
/* Disable Rx queue triads */
- for (j = 0; j < nv->rxt_count; j++, i++) {
- struct fbnic_q_triad *qt = &nv->qt[i];
+ for (j = 0; j < nv->rxt_count; j++, t++) {
+ struct fbnic_q_triad *qt = &nv->qt[t];
fbnic_disable_bdq(&qt->sub0, &qt->sub1);
fbnic_disable_rcq(&qt->cmpl);
@@ -1792,14 +2017,15 @@ int fbnic_wait_all_queues_idle(struct fbnic_dev *fbd, bool may_fail)
void fbnic_flush(struct fbnic_net *fbn)
{
- struct fbnic_napi_vector *nv;
+ int i;
- list_for_each_entry(nv, &fbn->napis, napis) {
- int i, j;
+ for (i = 0; i < fbn->num_napi; i++) {
+ struct fbnic_napi_vector *nv = fbn->napi[i];
+ int j, t;
/* Flush any processed Tx Queue Triads and drop the rest */
- for (i = 0; i < nv->txt_count; i++) {
- struct fbnic_q_triad *qt = &nv->qt[i];
+ for (t = 0; t < nv->txt_count; t++) {
+ struct fbnic_q_triad *qt = &nv->qt[t];
struct netdev_queue *tx_queue;
/* Clean the work queues of unprocessed work */
@@ -1816,15 +2042,11 @@ void fbnic_flush(struct fbnic_net *fbn)
tx_queue = netdev_get_tx_queue(nv->napi.dev,
qt->sub0.q_idx);
netdev_tx_reset_queue(tx_queue);
-
- /* Disassociate Tx queue from NAPI */
- netif_queue_set_napi(nv->napi.dev, qt->sub0.q_idx,
- NETDEV_QUEUE_TYPE_TX, NULL);
}
/* Flush any processed Rx Queue Triads and drop the rest */
- for (j = 0; j < nv->rxt_count; j++, i++) {
- struct fbnic_q_triad *qt = &nv->qt[i];
+ for (j = 0; j < nv->rxt_count; j++, t++) {
+ struct fbnic_q_triad *qt = &nv->qt[t];
/* Clean the work queues of unprocessed work */
fbnic_clean_bdq(nv, 0, &qt->sub0, qt->sub0.tail);
@@ -1835,43 +2057,23 @@ void fbnic_flush(struct fbnic_net *fbn)
fbnic_put_pkt_buff(nv, qt->cmpl.pkt, 0);
qt->cmpl.pkt->buff.data_hard_start = NULL;
-
- /* Disassociate Rx queue from NAPI */
- netif_queue_set_napi(nv->napi.dev, qt->cmpl.q_idx,
- NETDEV_QUEUE_TYPE_RX, NULL);
}
}
}
void fbnic_fill(struct fbnic_net *fbn)
{
- struct fbnic_napi_vector *nv;
-
- list_for_each_entry(nv, &fbn->napis, napis) {
- int i, j;
-
- /* Configure NAPI mapping for Tx */
- for (i = 0; i < nv->txt_count; i++) {
- struct fbnic_q_triad *qt = &nv->qt[i];
-
- /* Nothing to do if Tx queue is disabled */
- if (qt->sub0.flags & FBNIC_RING_F_DISABLED)
- continue;
+ int i;
- /* Associate Tx queue with NAPI */
- netif_queue_set_napi(nv->napi.dev, qt->sub0.q_idx,
- NETDEV_QUEUE_TYPE_TX, &nv->napi);
- }
+ for (i = 0; i < fbn->num_napi; i++) {
+ struct fbnic_napi_vector *nv = fbn->napi[i];
+ int j, t;
/* Configure NAPI mapping and populate pages
* in the BDQ rings to use for Rx
*/
- for (j = 0; j < nv->rxt_count; j++, i++) {
- struct fbnic_q_triad *qt = &nv->qt[i];
-
- /* Associate Rx queue with NAPI */
- netif_queue_set_napi(nv->napi.dev, qt->cmpl.q_idx,
- NETDEV_QUEUE_TYPE_RX, &nv->napi);
+ for (j = 0, t = nv->txt_count; j < nv->rxt_count; j++, t++) {
+ struct fbnic_q_triad *qt = &nv->qt[t];
/* Populate the header and payload BDQs */
fbnic_fill_bdq(nv, &qt->sub0);
@@ -1984,9 +2186,51 @@ static void fbnic_config_drop_mode_rcq(struct fbnic_napi_vector *nv,
fbnic_ring_wr32(rcq, FBNIC_QUEUE_RDE_CTL0, rcq_ctl);
}
+static void fbnic_config_rim_threshold(struct fbnic_ring *rcq, u16 nv_idx, u32 rx_desc)
+{
+ u32 threshold;
+
+	/* Set the threshold to half the ring size if rx_frames
+ * is not configured
+ */
+ threshold = rx_desc ? : rcq->size_mask / 2;
+
+ fbnic_ring_wr32(rcq, FBNIC_QUEUE_RIM_CTL, nv_idx);
+ fbnic_ring_wr32(rcq, FBNIC_QUEUE_RIM_THRESHOLD, threshold);
+}
+
+void fbnic_config_txrx_usecs(struct fbnic_napi_vector *nv, u32 arm)
+{
+ struct fbnic_net *fbn = netdev_priv(nv->napi.dev);
+ struct fbnic_dev *fbd = nv->fbd;
+ u32 val = arm;
+
+ val |= FIELD_PREP(FBNIC_INTR_CQ_REARM_RCQ_TIMEOUT, fbn->rx_usecs) |
+ FBNIC_INTR_CQ_REARM_RCQ_TIMEOUT_UPD_EN;
+ val |= FIELD_PREP(FBNIC_INTR_CQ_REARM_TCQ_TIMEOUT, fbn->tx_usecs) |
+ FBNIC_INTR_CQ_REARM_TCQ_TIMEOUT_UPD_EN;
+
+ fbnic_wr32(fbd, FBNIC_INTR_CQ_REARM(nv->v_idx), val);
+}
+
+void fbnic_config_rx_frames(struct fbnic_napi_vector *nv)
+{
+ struct fbnic_net *fbn = netdev_priv(nv->napi.dev);
+ int i;
+
+ for (i = nv->txt_count; i < nv->rxt_count + nv->txt_count; i++) {
+ struct fbnic_q_triad *qt = &nv->qt[i];
+
+ fbnic_config_rim_threshold(&qt->cmpl, nv->v_idx,
+ fbn->rx_max_frames *
+ FBNIC_MIN_RXD_PER_FRAME);
+ }
+}
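
Both callers above scale rx_max_frames by FBNIC_MIN_RXD_PER_FRAME before handing it to the threshold helper, so an rx-frames setting of 0 falls back to interrupting at half the completion ring. A small sketch of that selection, using a stand-in of 2 descriptors per frame (the real constant is defined elsewhere and may differ):

#include <stdio.h>

#define MIN_RXD_PER_FRAME 2	/* stand-in for FBNIC_MIN_RXD_PER_FRAME */

static unsigned int rim_threshold(unsigned int rx_frames,
				  unsigned int size_mask)
{
	unsigned int rx_desc = rx_frames * MIN_RXD_PER_FRAME;

	/* Same selection as fbnic_config_rim_threshold() above */
	return rx_desc ? rx_desc : size_mask / 2;
}

int main(void)
{
	printf("rx-frames 0,  1024-entry RCQ -> %u descriptors\n",
	       rim_threshold(0, 1023));
	printf("rx-frames 64, 1024-entry RCQ -> %u descriptors\n",
	       rim_threshold(64, 1023));
	return 0;
}
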
+
static void fbnic_enable_rcq(struct fbnic_napi_vector *nv,
struct fbnic_ring *rcq)
{
+ struct fbnic_net *fbn = netdev_priv(nv->napi.dev);
u32 log_size = fls(rcq->size_mask);
u32 rcq_ctl;
@@ -2014,8 +2258,8 @@ static void fbnic_enable_rcq(struct fbnic_napi_vector *nv,
fbnic_ring_wr32(rcq, FBNIC_QUEUE_RCQ_SIZE, log_size & 0xf);
/* Store interrupt information for the completion queue */
- fbnic_ring_wr32(rcq, FBNIC_QUEUE_RIM_CTL, nv->v_idx);
- fbnic_ring_wr32(rcq, FBNIC_QUEUE_RIM_THRESHOLD, rcq->size_mask / 2);
+ fbnic_config_rim_threshold(rcq, nv->v_idx, fbn->rx_max_frames *
+ FBNIC_MIN_RXD_PER_FRAME);
fbnic_ring_wr32(rcq, FBNIC_QUEUE_RIM_MASK, 0);
/* Enable queue */
@@ -2025,21 +2269,23 @@ static void fbnic_enable_rcq(struct fbnic_napi_vector *nv,
void fbnic_enable(struct fbnic_net *fbn)
{
struct fbnic_dev *fbd = fbn->fbd;
- struct fbnic_napi_vector *nv;
- int i, j;
+ int i;
+
+ for (i = 0; i < fbn->num_napi; i++) {
+ struct fbnic_napi_vector *nv = fbn->napi[i];
+ int j, t;
- list_for_each_entry(nv, &fbn->napis, napis) {
/* Setup Tx Queue Triads */
- for (i = 0; i < nv->txt_count; i++) {
- struct fbnic_q_triad *qt = &nv->qt[i];
+ for (t = 0; t < nv->txt_count; t++) {
+ struct fbnic_q_triad *qt = &nv->qt[t];
fbnic_enable_twq0(&qt->sub0);
fbnic_enable_tcq(nv, &qt->cmpl);
}
/* Setup Rx Queue Triads */
- for (j = 0; j < nv->rxt_count; j++, i++) {
- struct fbnic_q_triad *qt = &nv->qt[i];
+ for (j = 0; j < nv->rxt_count; j++, t++) {
+ struct fbnic_q_triad *qt = &nv->qt[t];
fbnic_enable_bdq(&qt->sub0, &qt->sub1);
fbnic_config_drop_mode_rcq(nv, &qt->cmpl);
@@ -2052,22 +2298,18 @@ void fbnic_enable(struct fbnic_net *fbn)
static void fbnic_nv_irq_enable(struct fbnic_napi_vector *nv)
{
- struct fbnic_dev *fbd = nv->fbd;
- u32 val;
-
- val = FBNIC_INTR_CQ_REARM_INTR_UNMASK;
-
- fbnic_wr32(fbd, FBNIC_INTR_CQ_REARM(nv->v_idx), val);
+ fbnic_config_txrx_usecs(nv, FBNIC_INTR_CQ_REARM_INTR_UNMASK);
}
void fbnic_napi_enable(struct fbnic_net *fbn)
{
u32 irqs[FBNIC_MAX_MSIX_VECS / 32] = {};
struct fbnic_dev *fbd = fbn->fbd;
- struct fbnic_napi_vector *nv;
int i;
- list_for_each_entry(nv, &fbn->napis, napis) {
+ for (i = 0; i < fbn->num_napi; i++) {
+ struct fbnic_napi_vector *nv = fbn->napi[i];
+
napi_enable(&nv->napi);
fbnic_nv_irq_enable(nv);
@@ -2096,17 +2338,18 @@ void fbnic_napi_depletion_check(struct net_device *netdev)
struct fbnic_net *fbn = netdev_priv(netdev);
u32 irqs[FBNIC_MAX_MSIX_VECS / 32] = {};
struct fbnic_dev *fbd = fbn->fbd;
- struct fbnic_napi_vector *nv;
- int i, j;
+ int i, j, t;
+
+ for (i = 0; i < fbn->num_napi; i++) {
+ struct fbnic_napi_vector *nv = fbn->napi[i];
- list_for_each_entry(nv, &fbn->napis, napis) {
/* Find RQs which are completely out of pages */
- for (i = nv->txt_count, j = 0; j < nv->rxt_count; j++, i++) {
+ for (t = nv->txt_count, j = 0; j < nv->rxt_count; j++, t++) {
/* Assume 4 pages is always enough to fit a packet
* and therefore generate a completion and an IRQ.
*/
- if (fbnic_desc_used(&nv->qt[i].sub0) < 4 ||
- fbnic_desc_used(&nv->qt[i].sub1) < 4)
+ if (fbnic_desc_used(&nv->qt[t].sub0) < 4 ||
+ fbnic_desc_used(&nv->qt[t].sub1) < 4)
irqs[nv->v_idx / 32] |= BIT(nv->v_idx % 32);
}
}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
index 8d626287c3f4..f46616af41ea 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
@@ -24,13 +24,29 @@ struct fbnic_net;
#define FBNIC_TX_DESC_WAKEUP (FBNIC_MAX_SKB_DESC * 2)
#define FBNIC_TX_DESC_MIN roundup_pow_of_two(FBNIC_TX_DESC_WAKEUP)
+/* To receive the worst case packet we need:
+ * 1 descriptor for primary metadata
+ * + 1 descriptor for optional metadata
+ * + 1 descriptor for headers
+ * + 4 descriptors for payload
+ */
+#define FBNIC_MAX_RX_PKT_DESC 7
+#define FBNIC_RX_DESC_MIN roundup_pow_of_two(FBNIC_MAX_RX_PKT_DESC * 2)
+
#define FBNIC_MAX_TXQS 128u
#define FBNIC_MAX_RXQS 128u
+/* These apply to TWQs, TCQ, RCQ */
+#define FBNIC_QUEUE_SIZE_MIN 16u
+#define FBNIC_QUEUE_SIZE_MAX SZ_64K
+
#define FBNIC_TXQ_SIZE_DEFAULT 1024
#define FBNIC_HPQ_SIZE_DEFAULT 256
#define FBNIC_PPQ_SIZE_DEFAULT 256
#define FBNIC_RCQ_SIZE_DEFAULT 1024
+#define FBNIC_TX_USECS_DEFAULT 35
+#define FBNIC_RX_USECS_DEFAULT 30
+#define FBNIC_RX_FRAMES_DEFAULT 0
#define FBNIC_RX_TROOM \
SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
@@ -56,9 +72,22 @@ struct fbnic_pkt_buff {
struct fbnic_queue_stats {
u64 packets;
u64 bytes;
+ union {
+ struct {
+ u64 csum_partial;
+ u64 lso;
+ u64 ts_packets;
+ u64 ts_lost;
+ u64 stop;
+ u64 wake;
+ } twq;
+ struct {
+ u64 alloc_failed;
+ u64 csum_complete;
+ u64 csum_none;
+ } rx;
+ };
u64 dropped;
- u64 ts_packets;
- u64 ts_lost;
struct u64_stats_sync syncp;
};
@@ -104,14 +133,11 @@ struct fbnic_napi_vector {
struct device *dev; /* Device for DMA unmapping */
struct page_pool *page_pool;
struct fbnic_dev *fbd;
- char name[IFNAMSIZ + 9];
u16 v_idx;
u8 txt_count;
u8 rxt_count;
- struct list_head napis;
-
struct fbnic_q_triad qt[];
};
@@ -123,10 +149,18 @@ netdev_features_t
fbnic_features_check(struct sk_buff *skb, struct net_device *dev,
netdev_features_t features);
+void fbnic_aggregate_ring_rx_counters(struct fbnic_net *fbn,
+ struct fbnic_ring *rxr);
+void fbnic_aggregate_ring_tx_counters(struct fbnic_net *fbn,
+ struct fbnic_ring *txr);
+
int fbnic_alloc_napi_vectors(struct fbnic_net *fbn);
void fbnic_free_napi_vectors(struct fbnic_net *fbn);
int fbnic_alloc_resources(struct fbnic_net *fbn);
void fbnic_free_resources(struct fbnic_net *fbn);
+int fbnic_set_netif_queues(struct fbnic_net *fbn);
+void fbnic_reset_netif_queues(struct fbnic_net *fbn);
+irqreturn_t fbnic_msix_clean_rings(int irq, void *data);
void fbnic_napi_enable(struct fbnic_net *fbn);
void fbnic_napi_disable(struct fbnic_net *fbn);
void fbnic_enable(struct fbnic_net *fbn);
@@ -137,4 +171,9 @@ void fbnic_fill(struct fbnic_net *fbn);
void fbnic_napi_depletion_check(struct net_device *netdev);
int fbnic_wait_all_queues_idle(struct fbnic_dev *fbd, bool may_fail);
+static inline int fbnic_napi_idx(const struct fbnic_napi_vector *nv)
+{
+ return nv->v_idx - FBNIC_NON_NAPI_VECTORS;
+}
+
#endif /* _FBNIC_TXRX_H_ */
diff --git a/drivers/net/ethernet/micrel/ks8851_spi.c b/drivers/net/ethernet/micrel/ks8851_spi.c
index 3062cc0f9199..c862b13b447a 100644
--- a/drivers/net/ethernet/micrel/ks8851_spi.c
+++ b/drivers/net/ethernet/micrel/ks8851_spi.c
@@ -20,8 +20,6 @@
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
#include <linux/of_net.h>
#include "ks8851.h"
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index dc1d9f774565..cdde19b8edc4 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -3951,7 +3951,7 @@ static void ksz_stop_timer(struct ksz_timer_info *info)
{
if (info->max) {
info->max = 0;
- del_timer_sync(&info->timer);
+ timer_delete_sync(&info->timer);
}
}
@@ -6304,7 +6304,8 @@ static void mib_read_work(struct work_struct *work)
static void mib_monitor(struct timer_list *t)
{
- struct dev_info *hw_priv = from_timer(hw_priv, t, mib_timer_info.timer);
+ struct dev_info *hw_priv = timer_container_of(hw_priv, t,
+ mib_timer_info.timer);
mib_read_work(&hw_priv->mib_read);
@@ -6331,7 +6332,8 @@ static void mib_monitor(struct timer_list *t)
*/
static void dev_monitor(struct timer_list *t)
{
- struct dev_priv *priv = from_timer(priv, t, monitor_timer_info.timer);
+ struct dev_priv *priv = timer_container_of(priv, t,
+ monitor_timer_info.timer);
struct net_device *dev = priv->mii_if.dev;
struct dev_info *hw_priv = priv->adapter;
struct ksz_hw *hw = &hw_priv->hw;
diff --git a/drivers/net/ethernet/microchip/Kconfig b/drivers/net/ethernet/microchip/Kconfig
index 73832fb2bc32..ee046468652c 100644
--- a/drivers/net/ethernet/microchip/Kconfig
+++ b/drivers/net/ethernet/microchip/Kconfig
@@ -59,7 +59,6 @@ config LAN743X
source "drivers/net/ethernet/microchip/lan865x/Kconfig"
source "drivers/net/ethernet/microchip/lan966x/Kconfig"
-source "drivers/net/ethernet/microchip/lan969x/Kconfig"
source "drivers/net/ethernet/microchip/sparx5/Kconfig"
source "drivers/net/ethernet/microchip/vcap/Kconfig"
source "drivers/net/ethernet/microchip/fdma/Kconfig"
diff --git a/drivers/net/ethernet/microchip/Makefile b/drivers/net/ethernet/microchip/Makefile
index 7770df82200f..3c65baed9fd8 100644
--- a/drivers/net/ethernet/microchip/Makefile
+++ b/drivers/net/ethernet/microchip/Makefile
@@ -11,7 +11,6 @@ lan743x-objs := lan743x_main.o lan743x_ethtool.o lan743x_ptp.o
obj-$(CONFIG_LAN865X) += lan865x/
obj-$(CONFIG_LAN966X_SWITCH) += lan966x/
-obj-$(CONFIG_LAN969X_SWITCH) += lan969x/
obj-$(CONFIG_SPARX5_SWITCH) += sparx5/
obj-$(CONFIG_VCAP) += vcap/
obj-$(CONFIG_FDMA) += fdma/
diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c
index 1a1cbd034eda..64a3b953cc17 100644
--- a/drivers/net/ethernet/microchip/lan743x_ethtool.c
+++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c
@@ -18,6 +18,8 @@
#define EEPROM_MAC_OFFSET (0x01)
#define MAX_EEPROM_SIZE (512)
#define MAX_OTP_SIZE (1024)
+#define MAX_HS_OTP_SIZE (8 * 1024)
+#define MAX_HS_EEPROM_SIZE (64 * 1024)
#define OTP_INDICATOR_1 (0xF3)
#define OTP_INDICATOR_2 (0xF7)
@@ -272,6 +274,9 @@ static int lan743x_hs_otp_read(struct lan743x_adapter *adapter, u32 offset,
int ret;
int i;
+ if (offset + length > MAX_HS_OTP_SIZE)
+ return -EINVAL;
+
ret = lan743x_hs_syslock_acquire(adapter, LOCK_TIMEOUT_MAX_CNT);
if (ret < 0)
return ret;
@@ -320,6 +325,9 @@ static int lan743x_hs_otp_write(struct lan743x_adapter *adapter, u32 offset,
int ret;
int i;
+ if (offset + length > MAX_HS_OTP_SIZE)
+ return -EINVAL;
+
ret = lan743x_hs_syslock_acquire(adapter, LOCK_TIMEOUT_MAX_CNT);
if (ret < 0)
return ret;
@@ -497,6 +505,9 @@ static int lan743x_hs_eeprom_read(struct lan743x_adapter *adapter,
u32 val;
int i;
+ if (offset + length > MAX_HS_EEPROM_SIZE)
+ return -EINVAL;
+
retval = lan743x_hs_syslock_acquire(adapter, LOCK_TIMEOUT_MAX_CNT);
if (retval < 0)
return retval;
@@ -539,6 +550,9 @@ static int lan743x_hs_eeprom_write(struct lan743x_adapter *adapter,
u32 val;
int i;
+ if (offset + length > MAX_HS_EEPROM_SIZE)
+ return -EINVAL;
+
retval = lan743x_hs_syslock_acquire(adapter, LOCK_TIMEOUT_MAX_CNT);
if (retval < 0)
return retval;
@@ -604,9 +618,9 @@ static int lan743x_ethtool_get_eeprom_len(struct net_device *netdev)
struct lan743x_adapter *adapter = netdev_priv(netdev);
if (adapter->flags & LAN743X_ADAPTER_FLAG_OTP)
- return MAX_OTP_SIZE;
+ return adapter->is_pci11x1x ? MAX_HS_OTP_SIZE : MAX_OTP_SIZE;
- return MAX_EEPROM_SIZE;
+ return adapter->is_pci11x1x ? MAX_HS_EEPROM_SIZE : MAX_EEPROM_SIZE;
}
static int lan743x_ethtool_get_eeprom(struct net_device *netdev,
@@ -1055,9 +1069,6 @@ static int lan743x_ethtool_get_eee(struct net_device *netdev,
{
struct lan743x_adapter *adapter = netdev_priv(netdev);
- eee->tx_lpi_timer = lan743x_csr_read(adapter,
- MAC_EEE_TX_LPI_REQ_DLY_CNT);
-
return phylink_ethtool_get_eee(adapter->phylink, eee);
}
@@ -1065,24 +1076,6 @@ static int lan743x_ethtool_set_eee(struct net_device *netdev,
struct ethtool_keee *eee)
{
struct lan743x_adapter *adapter = netdev_priv(netdev);
- u32 tx_lpi_timer;
-
- tx_lpi_timer = lan743x_csr_read(adapter, MAC_EEE_TX_LPI_REQ_DLY_CNT);
- if (tx_lpi_timer != eee->tx_lpi_timer) {
- u32 mac_cr = lan743x_csr_read(adapter, MAC_CR);
-
- /* Software should only change this field when Energy Efficient
- * Ethernet Enable (EEEEN) is cleared.
- * This function will trigger an autonegotiation restart and
- * eee will be reenabled during link up if eee was negotiated.
- */
- lan743x_mac_eee_enable(adapter, false);
- lan743x_csr_write(adapter, MAC_EEE_TX_LPI_REQ_DLY_CNT,
- eee->tx_lpi_timer);
-
- if (mac_cr & MAC_CR_EEE_EN_)
- lan743x_mac_eee_enable(adapter, true);
- }
return phylink_ethtool_set_eee(adapter->phylink, eee);
}
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index 4dc5adcda6a3..9d70b51ca91d 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -1330,7 +1330,7 @@ static int lan743x_mac_set_mtu(struct lan743x_adapter *adapter, int new_mtu)
}
/* PHY */
-static int lan743x_phy_reset(struct lan743x_adapter *adapter)
+static int lan743x_hw_reset_phy(struct lan743x_adapter *adapter)
{
u32 data;
@@ -1346,11 +1346,6 @@ static int lan743x_phy_reset(struct lan743x_adapter *adapter)
50000, 1000000);
}
-static int lan743x_phy_init(struct lan743x_adapter *adapter)
-{
- return lan743x_phy_reset(adapter);
-}
-
static void lan743x_phy_interface_select(struct lan743x_adapter *adapter)
{
u32 id_rev;
@@ -1729,6 +1724,7 @@ int lan743x_rx_set_tstamp_mode(struct lan743x_adapter *adapter,
default:
return -ERANGE;
}
+ adapter->rx_tstamp_filter = rx_filter;
return 0;
}
@@ -1815,6 +1811,7 @@ static void lan743x_tx_frame_add_lso(struct lan743x_tx *tx,
if (nr_frags <= 0) {
tx->frame_data0 |= TX_DESC_DATA0_LS_;
tx->frame_data0 |= TX_DESC_DATA0_IOC_;
+ tx->frame_last = tx->frame_first;
}
tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
tx_descriptor->data0 = cpu_to_le32(tx->frame_data0);
@@ -1884,6 +1881,7 @@ static int lan743x_tx_frame_add_fragment(struct lan743x_tx *tx,
tx->frame_first = 0;
tx->frame_data0 = 0;
tx->frame_tail = 0;
+ tx->frame_last = 0;
return -ENOMEM;
}
@@ -1924,16 +1922,18 @@ static void lan743x_tx_frame_end(struct lan743x_tx *tx,
TX_DESC_DATA0_DTYPE_DATA_) {
tx->frame_data0 |= TX_DESC_DATA0_LS_;
tx->frame_data0 |= TX_DESC_DATA0_IOC_;
+ tx->frame_last = tx->frame_tail;
}
- tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
- buffer_info = &tx->buffer_info[tx->frame_tail];
+ tx_descriptor = &tx->ring_cpu_ptr[tx->frame_last];
+ buffer_info = &tx->buffer_info[tx->frame_last];
buffer_info->skb = skb;
if (time_stamp)
buffer_info->flags |= TX_BUFFER_INFO_FLAG_TIMESTAMP_REQUESTED;
if (ignore_sync)
buffer_info->flags |= TX_BUFFER_INFO_FLAG_IGNORE_SYNC;
+ tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
tx_descriptor->data0 = cpu_to_le32(tx->frame_data0);
tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail);
tx->last_tail = tx->frame_tail;
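With the new frame_last field the TX path keeps three cursors per frame: frame_first (first descriptor of the frame), frame_last (last buffer-carrying data descriptor, where the skb and the timestamp/ignore-sync flags are now attached) and frame_tail (the slot that receives the final data0 word, which may be a descriptor that carries no buffer). Below is a loose, standalone illustration of those cursors on a small ring; it is only a sketch of the bookkeeping, not the driver's actual descriptor handling.

#include <stdio.h>

#define RING_SIZE 8	/* illustrative ring size, assumed power of two */

static unsigned int next_index(unsigned int i)
{
	return (i + 1) & (RING_SIZE - 1);
}

int main(void)
{
	unsigned int frame_first, frame_last, frame_tail;
	int fragments = 3;

	/* Frame start: the first data descriptor is claimed. */
	frame_first = frame_last = 2;
	frame_tail = next_index(frame_first);

	/* Each fragment adds one more data descriptor. */
	for (int i = 0; i < fragments; i++) {
		frame_last = frame_tail;
		frame_tail = next_index(frame_tail);
	}

	/* A descriptor that carries no buffer (e.g. an extension descriptor)
	 * can advance frame_tail past the last data descriptor, so the skb
	 * has to be attached at frame_last rather than at frame_tail.
	 */
	frame_tail = next_index(frame_tail);

	printf("first=%u last=%u tail=%u\n", frame_first, frame_last, frame_tail);
	return 0;
}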
@@ -2495,8 +2495,7 @@ static int lan743x_rx_process_buffer(struct lan743x_rx *rx)
/* save existing skb, allocate new skb and map to dma */
skb = buffer_info->skb;
- if (lan743x_rx_init_ring_element(rx, rx->last_head,
- GFP_ATOMIC | GFP_DMA)) {
+ if (lan743x_rx_init_ring_element(rx, rx->last_head, GFP_ATOMIC)) {
/* failed to allocate next skb.
* Memory is very low.
* Drop this packet and reuse buffer.
@@ -2966,7 +2965,7 @@ static int lan743x_phylink_2500basex_config(struct lan743x_adapter *adapter)
return lan743x_pcs_power_reset(adapter);
}
-void lan743x_mac_eee_enable(struct lan743x_adapter *adapter, bool enable)
+static void lan743x_mac_eee_enable(struct lan743x_adapter *adapter, bool enable)
{
u32 mac_cr;
@@ -3027,10 +3026,8 @@ static void lan743x_phylink_mac_link_down(struct phylink_config *config,
phy_interface_t interface)
{
struct net_device *netdev = to_net_dev(config->dev);
- struct lan743x_adapter *adapter = netdev_priv(netdev);
- netif_tx_stop_all_queues(to_net_dev(config->dev));
- lan743x_mac_eee_enable(adapter, false);
+ netif_tx_stop_all_queues(netdev);
}
static void lan743x_phylink_mac_link_up(struct phylink_config *config,
@@ -3072,16 +3069,40 @@ static void lan743x_phylink_mac_link_up(struct phylink_config *config,
cap & FLOW_CTRL_TX,
cap & FLOW_CTRL_RX);
- if (phydev)
- lan743x_mac_eee_enable(adapter, phydev->enable_tx_lpi);
-
netif_tx_wake_all_queues(netdev);
}
+static void lan743x_mac_disable_tx_lpi(struct phylink_config *config)
+{
+ struct net_device *netdev = to_net_dev(config->dev);
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ lan743x_mac_eee_enable(adapter, false);
+}
+
+static int lan743x_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
+ bool tx_clk_stop)
+{
+ struct net_device *netdev = to_net_dev(config->dev);
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ /* Software should only change this field when Energy Efficient
+ * Ethernet Enable (EEEEN) is cleared. We ensure that by clearing
+ * EEEEN during probe, and phylink itself guarantees that
+ * mac_disable_tx_lpi() will have been previously called.
+ */
+ lan743x_csr_write(adapter, MAC_EEE_TX_LPI_REQ_DLY_CNT, timer);
+ lan743x_mac_eee_enable(adapter, true);
+
+ return 0;
+}
+
static const struct phylink_mac_ops lan743x_phylink_mac_ops = {
.mac_config = lan743x_phylink_mac_config,
.mac_link_down = lan743x_phylink_mac_link_down,
.mac_link_up = lan743x_phylink_mac_link_up,
+ .mac_disable_tx_lpi = lan743x_mac_disable_tx_lpi,
+ .mac_enable_tx_lpi = lan743x_mac_enable_tx_lpi,
};
static int lan743x_phylink_create(struct lan743x_adapter *adapter)
@@ -3095,6 +3116,9 @@ static int lan743x_phylink_create(struct lan743x_adapter *adapter)
adapter->phylink_config.mac_capabilities = MAC_ASYM_PAUSE |
MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD;
+ adapter->phylink_config.lpi_capabilities = MAC_100FD | MAC_1000FD;
+ adapter->phylink_config.lpi_timer_default =
+ lan743x_csr_read(adapter, MAC_EEE_TX_LPI_REQ_DLY_CNT);
lan743x_phy_interface_select(adapter);
@@ -3120,6 +3144,10 @@ static int lan743x_phylink_create(struct lan743x_adapter *adapter)
phy_interface_set_rgmii(adapter->phylink_config.supported_interfaces);
}
+ memcpy(adapter->phylink_config.lpi_interfaces,
+ adapter->phylink_config.supported_interfaces,
+ sizeof(adapter->phylink_config.lpi_interfaces));
+
pl = phylink_create(&adapter->phylink_config, NULL,
adapter->phy_interface, &lan743x_phylink_mac_ops);
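The LPI timer handling that used to live in the ethtool set_eee path is now owned by phylink: lpi_timer_default seeds phylink's EEE state from the current MAC_EEE_TX_LPI_REQ_DLY_CNT value, and when user space changes the timer (or EEE becomes active on link-up) phylink calls mac_enable_tx_lpi() with the new value, having called mac_disable_tx_lpi() beforehand. A minimal user-space sketch of reading and writing that timer through the classic ethtool ioctl; error handling is kept short and "eth0" is only an example interface name.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_eee eee = { .cmd = ETHTOOL_GEEE };
	struct ifreq ifr = { 0 };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&eee;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_GEEE");
		return 1;
	}
	printf("EEE %sabled, tx-lpi timer %u us\n",
	       eee.eee_enabled ? "en" : "dis", eee.tx_lpi_timer);

	/* Change the LPI request delay; phylink hands the new value to the
	 * driver's mac_enable_tx_lpi() once EEE is active on the link.
	 */
	eee.cmd = ETHTOOL_SEEE;
	eee.tx_lpi_timer = 600;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("ETHTOOL_SEEE");

	close(fd);
	return 0;
}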
@@ -3319,8 +3347,6 @@ static int lan743x_netdev_ioctl(struct net_device *netdev,
if (!netif_running(netdev))
return -EINVAL;
- if (cmd == SIOCSHWTSTAMP)
- return lan743x_ptp_ioctl(netdev, ifr, cmd);
return phylink_mii_ioctl(adapter->phylink, ifr, cmd);
}
@@ -3415,6 +3441,8 @@ static const struct net_device_ops lan743x_netdev_ops = {
.ndo_change_mtu = lan743x_netdev_change_mtu,
.ndo_get_stats64 = lan743x_netdev_get_stats64,
.ndo_set_mac_address = lan743x_netdev_set_mac_address,
+ .ndo_hwtstamp_get = lan743x_ptp_hwtstamp_get,
+ .ndo_hwtstamp_set = lan743x_ptp_hwtstamp_set,
};
static void lan743x_hardware_cleanup(struct lan743x_adapter *adapter)
@@ -3462,6 +3490,7 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter,
struct pci_dev *pdev)
{
struct lan743x_tx *tx;
+ u32 sgmii_ctl;
int index;
int ret;
@@ -3474,6 +3503,15 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter,
spin_lock_init(&adapter->eth_syslock_spinlock);
mutex_init(&adapter->sgmii_rw_lock);
pci11x1x_set_rfe_rd_fifo_threshold(adapter);
+ sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL);
+ if (adapter->is_sgmii_en) {
+ sgmii_ctl |= SGMII_CTL_SGMII_ENABLE_;
+ sgmii_ctl &= ~SGMII_CTL_SGMII_POWER_DN_;
+ } else {
+ sgmii_ctl &= ~SGMII_CTL_SGMII_ENABLE_;
+ sgmii_ctl |= SGMII_CTL_SGMII_POWER_DN_;
+ }
+ lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl);
} else {
adapter->max_tx_channels = LAN743X_MAX_TX_CHANNELS;
adapter->used_tx_channels = LAN743X_USED_TX_CHANNELS;
@@ -3491,10 +3529,6 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter,
if (ret)
return ret;
- ret = lan743x_phy_init(adapter);
- if (ret)
- return ret;
-
ret = lan743x_ptp_init(adapter);
if (ret)
return ret;
@@ -3517,12 +3551,14 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter,
spin_lock_init(&tx->ring_lock);
}
+ /* Ensure EEEEN is clear */
+ lan743x_mac_eee_enable(adapter, false);
+
return 0;
}
static int lan743x_mdiobus_init(struct lan743x_adapter *adapter)
{
- u32 sgmii_ctl;
int ret;
adapter->mdiobus = devm_mdiobus_alloc(&adapter->pdev->dev);
@@ -3534,10 +3570,6 @@ static int lan743x_mdiobus_init(struct lan743x_adapter *adapter)
adapter->mdiobus->priv = (void *)adapter;
if (adapter->is_pci11x1x) {
if (adapter->is_sgmii_en) {
- sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL);
- sgmii_ctl |= SGMII_CTL_SGMII_ENABLE_;
- sgmii_ctl &= ~SGMII_CTL_SGMII_POWER_DN_;
- lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl);
netif_dbg(adapter, drv, adapter->netdev,
"SGMII operation\n");
adapter->mdiobus->read = lan743x_mdiobus_read_c22;
@@ -3548,10 +3580,6 @@ static int lan743x_mdiobus_init(struct lan743x_adapter *adapter)
netif_dbg(adapter, drv, adapter->netdev,
"lan743x-mdiobus-c45\n");
} else {
- sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL);
- sgmii_ctl &= ~SGMII_CTL_SGMII_ENABLE_;
- sgmii_ctl |= SGMII_CTL_SGMII_POWER_DN_;
- lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl);
netif_dbg(adapter, drv, adapter->netdev,
"RGMII operation\n");
// Only C22 support when RGMII I/F
@@ -3637,6 +3665,10 @@ static int lan743x_pcidev_probe(struct pci_dev *pdev,
if (ret)
goto cleanup_pci;
+ ret = lan743x_hw_reset_phy(adapter);
+ if (ret)
+ goto cleanup_pci;
+
ret = lan743x_hardware_init(adapter, pdev);
if (ret)
goto cleanup_pci;
diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h
index 8ef897c114d3..02a28b709163 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.h
+++ b/drivers/net/ethernet/microchip/lan743x_main.h
@@ -980,6 +980,7 @@ struct lan743x_tx {
u32 frame_first;
u32 frame_data0;
u32 frame_tail;
+ u32 frame_last;
struct lan743x_tx_buffer_info *buffer_info;
@@ -1086,6 +1087,7 @@ struct lan743x_adapter {
phy_interface_t phy_interface;
struct phylink *phylink;
struct phylink_config phylink_config;
+ int rx_tstamp_filter;
};
#define LAN743X_COMPONENT_FLAG_RX(channel) BIT(20 + (channel))
@@ -1206,6 +1208,5 @@ void lan743x_hs_syslock_release(struct lan743x_adapter *adapter);
void lan743x_mac_flow_ctrl_set_enables(struct lan743x_adapter *adapter,
bool tx_enable, bool rx_enable);
int lan743x_sgmii_read(struct lan743x_adapter *adapter, u8 mmd, u16 addr);
-void lan743x_mac_eee_enable(struct lan743x_adapter *adapter, bool enable);
#endif /* _LAN743X_H */
diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.c b/drivers/net/ethernet/microchip/lan743x_ptp.c
index 4a777b449ecd..a3b48388b3fd 100644
--- a/drivers/net/ethernet/microchip/lan743x_ptp.c
+++ b/drivers/net/ethernet/microchip/lan743x_ptp.c
@@ -463,10 +463,6 @@ static int lan743x_ptp_perout(struct lan743x_adapter *adapter, int on,
struct lan743x_ptp_perout *perout = &ptp->perout[index];
int ret = 0;
- /* Reject requests with unsupported flags */
- if (perout_request->flags & ~PTP_PEROUT_DUTY_CYCLE)
- return -EOPNOTSUPP;
-
if (on) {
perout_pin = ptp_find_pin(ptp->ptp_clock, PTP_PF_PEROUT,
perout_request->index);
@@ -1537,6 +1533,10 @@ int lan743x_ptp_open(struct lan743x_adapter *adapter)
ptp->ptp_clock_info.n_per_out = LAN743X_PTP_N_EVENT_CHAN;
ptp->ptp_clock_info.n_pins = n_pins;
ptp->ptp_clock_info.pps = LAN743X_PTP_N_PPS;
+ ptp->ptp_clock_info.supported_extts_flags = PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS;
+ ptp->ptp_clock_info.supported_perout_flags = PTP_PEROUT_DUTY_CYCLE;
ptp->ptp_clock_info.pin_config = ptp->pin_config;
ptp->ptp_clock_info.adjfine = lan743x_ptpci_adjfine;
ptp->ptp_clock_info.adjtime = lan743x_ptpci_adjtime;
@@ -1736,23 +1736,32 @@ void lan743x_ptp_tx_timestamp_skb(struct lan743x_adapter *adapter,
lan743x_ptp_tx_ts_complete(adapter);
}
-int lan743x_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+int lan743x_ptp_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
{
struct lan743x_adapter *adapter = netdev_priv(netdev);
- struct hwtstamp_config config;
- int ret = 0;
- int index;
+ struct lan743x_tx *tx = &adapter->tx[0];
- if (!ifr) {
- netif_err(adapter, drv, adapter->netdev,
- "SIOCSHWTSTAMP, ifr == NULL\n");
- return -EINVAL;
- }
+ if (tx->ts_flags & TX_TS_FLAG_ONE_STEP_SYNC)
+ config->tx_type = HWTSTAMP_TX_ONESTEP_SYNC;
+ else if (tx->ts_flags & TX_TS_FLAG_TIMESTAMPING_ENABLED)
+ config->tx_type = HWTSTAMP_TX_ON;
+ else
+ config->tx_type = HWTSTAMP_TX_OFF;
+
+ config->rx_filter = adapter->rx_tstamp_filter;
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
+ return 0;
+}
- switch (config.tx_type) {
+int lan743x_ptp_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+ int index;
+
+ switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
for (index = 0; index < adapter->used_tx_channels;
index++)
@@ -1776,19 +1785,12 @@ int lan743x_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
lan743x_ptp_set_sync_ts_insert(adapter, true);
break;
case HWTSTAMP_TX_ONESTEP_P2P:
- ret = -ERANGE;
- break;
+ return -ERANGE;
default:
netif_warn(adapter, drv, adapter->netdev,
- " tx_type = %d, UNKNOWN\n", config.tx_type);
- ret = -EINVAL;
- break;
+ " tx_type = %d, UNKNOWN\n", config->tx_type);
+ return -EINVAL;
}
- ret = lan743x_rx_set_tstamp_mode(adapter, config.rx_filter);
-
- if (!ret)
- return copy_to_user(ifr->ifr_data, &config,
- sizeof(config)) ? -EFAULT : 0;
- return ret;
+ return lan743x_rx_set_tstamp_mode(adapter, config->rx_filter);
}
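With ndo_hwtstamp_get/ndo_hwtstamp_set the ifreq plumbing, copy_from_user() and copy_to_user() are handled by the networking core, and the driver only reads or fills a kernel_hwtstamp_config. Nothing changes for user space; below is a minimal sketch of the existing SIOCSHWTSTAMP/SIOCGHWTSTAMP usage that now ends up in these callbacks. "eth0" is only an example interface name, and the chosen rx_filter is just one value the driver maps to a supported mode.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg = {
		.tx_type = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr = { 0 };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	/* Routed by the core to lan743x_ptp_hwtstamp_set(). */
	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		perror("SIOCSHWTSTAMP");

	/* Routed by the core to lan743x_ptp_hwtstamp_get(). */
	if (ioctl(fd, SIOCGHWTSTAMP, &ifr) == 0)
		printf("tx_type=%d rx_filter=%d\n", cfg.tx_type, cfg.rx_filter);

	close(fd);
	return 0;
}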
diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.h b/drivers/net/ethernet/microchip/lan743x_ptp.h
index 0d29914cd460..f33dc83c5700 100644
--- a/drivers/net/ethernet/microchip/lan743x_ptp.h
+++ b/drivers/net/ethernet/microchip/lan743x_ptp.h
@@ -18,9 +18,9 @@
*/
#define LAN743X_PTP_N_EVENT_CHAN 2
#define LAN743X_PTP_N_PEROUT LAN743X_PTP_N_EVENT_CHAN
-#define LAN743X_PTP_N_EXTTS 4
-#define LAN743X_PTP_N_PPS 0
#define PCI11X1X_PTP_IO_MAX_CHANNELS 8
+#define LAN743X_PTP_N_EXTTS PCI11X1X_PTP_IO_MAX_CHANNELS
+#define LAN743X_PTP_N_PPS 0
#define PTP_CMD_CTL_TIMEOUT_CNT 50
struct lan743x_adapter;
@@ -51,8 +51,11 @@ int lan743x_ptp_open(struct lan743x_adapter *adapter);
void lan743x_ptp_close(struct lan743x_adapter *adapter);
void lan743x_ptp_update_latency(struct lan743x_adapter *adapter,
u32 link_speed);
-
-int lan743x_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
+int lan743x_ptp_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config);
+int lan743x_ptp_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
#define LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS (4)
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
index 3234a960fcc3..7001584f1b7a 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
@@ -353,6 +353,11 @@ static void lan966x_ifh_set_rew_op(void *ifh, u64 rew_op)
lan966x_ifh_set(ifh, rew_op, IFH_POS_REW_CMD, IFH_WID_REW_CMD);
}
+static void lan966x_ifh_set_oam_type(void *ifh, u64 oam_type)
+{
+ lan966x_ifh_set(ifh, oam_type, IFH_POS_PDU_TYPE, IFH_WID_PDU_TYPE);
+}
+
static void lan966x_ifh_set_timestamp(void *ifh, u64 timestamp)
{
lan966x_ifh_set(ifh, timestamp, IFH_POS_TIMESTAMP, IFH_WID_TIMESTAMP);
@@ -380,6 +385,7 @@ static netdev_tx_t lan966x_port_xmit(struct sk_buff *skb,
return err;
lan966x_ifh_set_rew_op(ifh, LAN966X_SKB_CB(skb)->rew_op);
+ lan966x_ifh_set_oam_type(ifh, LAN966X_SKB_CB(skb)->pdu_type);
lan966x_ifh_set_timestamp(ifh, LAN966X_SKB_CB(skb)->ts_id);
}
@@ -828,7 +834,6 @@ static int lan966x_probe_port(struct lan966x *lan966x, u32 p,
port->phylink_config.type = PHYLINK_NETDEV;
port->phylink_pcs.poll = true;
port->phylink_pcs.ops = &lan966x_phylink_pcs_ops;
- port->phylink_pcs.neg_mode = true;
port->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
MAC_10 | MAC_100 | MAC_1000FD | MAC_2500FD;
@@ -874,6 +879,7 @@ static int lan966x_probe_port(struct lan966x *lan966x, u32 p,
lan966x_vlan_port_set_vlan_aware(port, 0);
lan966x_vlan_port_set_vid(port, HOST_PVID, false, false);
lan966x_vlan_port_apply(port);
+ lan966x_vlan_port_rew_host(port);
return 0;
}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
index 25cb2f61986f..4f75f0688369 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
@@ -75,6 +75,10 @@
#define IFH_REW_OP_ONE_STEP_PTP 0x3
#define IFH_REW_OP_TWO_STEP_PTP 0x4
+#define IFH_PDU_TYPE_NONE 0
+#define IFH_PDU_TYPE_IPV4 7
+#define IFH_PDU_TYPE_IPV6 8
+
#define FDMA_RX_DCB_MAX_DBS 1
#define FDMA_TX_DCB_MAX_DBS 1
@@ -254,6 +258,7 @@ struct lan966x_phc {
struct lan966x_skb_cb {
u8 rew_op;
+ u8 pdu_type;
u16 ts_id;
unsigned long jiffies;
};
@@ -443,7 +448,7 @@ int lan966x_stats_init(struct lan966x *lan966x);
void lan966x_port_config_down(struct lan966x_port *port);
void lan966x_port_config_up(struct lan966x_port *port);
-void lan966x_port_status_get(struct lan966x_port *port,
+void lan966x_port_status_get(struct lan966x_port *port, unsigned int neg_mode,
struct phylink_link_state *state);
int lan966x_port_pcs_set(struct lan966x_port *port,
struct lan966x_port_config *config);
@@ -492,6 +497,7 @@ void lan966x_vlan_port_apply(struct lan966x_port *port);
bool lan966x_vlan_cpu_member_cpu_vlan_mask(struct lan966x *lan966x, u16 vid);
void lan966x_vlan_port_set_vlan_aware(struct lan966x_port *port,
bool vlan_aware);
+void lan966x_vlan_port_rew_host(struct lan966x_port *port);
int lan966x_vlan_port_set_vid(struct lan966x_port *port,
u16 vid,
bool pvid,
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c b/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c
index 1d63903f9006..75188b99e4e7 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c
@@ -88,11 +88,12 @@ static struct lan966x_port *lan966x_pcs_to_port(struct phylink_pcs *pcs)
}
static void lan966x_pcs_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
struct lan966x_port *port = lan966x_pcs_to_port(pcs);
- lan966x_port_status_get(port, state);
+ lan966x_port_status_get(port, neg_mode, state);
}
static int lan966x_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_port.c b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
index fdfa4040d9ee..cf7de0267c32 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
@@ -284,7 +284,7 @@ void lan966x_port_config_up(struct lan966x_port *port)
lan966x_port_link_up(port);
}
-void lan966x_port_status_get(struct lan966x_port *port,
+void lan966x_port_status_get(struct lan966x_port *port, unsigned int neg_mode,
struct phylink_link_state *state)
{
struct lan966x *lan966x = port->lan966x;
@@ -314,7 +314,7 @@ void lan966x_port_status_get(struct lan966x_port *port,
bmsr |= BMSR_ANEGCOMPLETE;
lp_adv = DEV_PCS1G_ANEG_STATUS_LP_ADV_GET(val);
- phylink_mii_c22_pcs_decode_state(state, bmsr, lp_adv);
+ phylink_mii_c22_pcs_decode_state(state, neg_mode, bmsr, lp_adv);
} else {
if (!state->link)
return;
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
index 63905bb5a63a..b4377b8613c3 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
@@ -322,34 +322,55 @@ void lan966x_ptp_hwtstamp_get(struct lan966x_port *port,
*cfg = phc->hwtstamp_config;
}
-static int lan966x_ptp_classify(struct lan966x_port *port, struct sk_buff *skb)
+static void lan966x_ptp_classify(struct lan966x_port *port, struct sk_buff *skb,
+ u8 *rew_op, u8 *pdu_type)
{
struct ptp_header *header;
u8 msgtype;
int type;
- if (port->ptp_tx_cmd == IFH_REW_OP_NOOP)
- return IFH_REW_OP_NOOP;
+ if (port->ptp_tx_cmd == IFH_REW_OP_NOOP) {
+ *rew_op = IFH_REW_OP_NOOP;
+ *pdu_type = IFH_PDU_TYPE_NONE;
+ return;
+ }
type = ptp_classify_raw(skb);
- if (type == PTP_CLASS_NONE)
- return IFH_REW_OP_NOOP;
+ if (type == PTP_CLASS_NONE) {
+ *rew_op = IFH_REW_OP_NOOP;
+ *pdu_type = IFH_PDU_TYPE_NONE;
+ return;
+ }
header = ptp_parse_header(skb, type);
- if (!header)
- return IFH_REW_OP_NOOP;
+ if (!header) {
+ *rew_op = IFH_REW_OP_NOOP;
+ *pdu_type = IFH_PDU_TYPE_NONE;
+ return;
+ }
- if (port->ptp_tx_cmd == IFH_REW_OP_TWO_STEP_PTP)
- return IFH_REW_OP_TWO_STEP_PTP;
+ if (type & PTP_CLASS_L2)
+ *pdu_type = IFH_PDU_TYPE_NONE;
+ if (type & PTP_CLASS_IPV4)
+ *pdu_type = IFH_PDU_TYPE_IPV4;
+ if (type & PTP_CLASS_IPV6)
+ *pdu_type = IFH_PDU_TYPE_IPV6;
+
+ if (port->ptp_tx_cmd == IFH_REW_OP_TWO_STEP_PTP) {
+ *rew_op = IFH_REW_OP_TWO_STEP_PTP;
+ return;
+ }
/* If it is sync and run 1 step then set the correct operation,
* otherwise run as 2 step
*/
msgtype = ptp_get_msgtype(header, type);
- if ((msgtype & 0xf) == 0)
- return IFH_REW_OP_ONE_STEP_PTP;
+ if ((msgtype & 0xf) == 0) {
+ *rew_op = IFH_REW_OP_ONE_STEP_PTP;
+ return;
+ }
- return IFH_REW_OP_TWO_STEP_PTP;
+ *rew_op = IFH_REW_OP_TWO_STEP_PTP;
}
static void lan966x_ptp_txtstamp_old_release(struct lan966x_port *port)
@@ -374,10 +395,12 @@ int lan966x_ptp_txtstamp_request(struct lan966x_port *port,
{
struct lan966x *lan966x = port->lan966x;
unsigned long flags;
+ u8 pdu_type;
u8 rew_op;
- rew_op = lan966x_ptp_classify(port, skb);
+ lan966x_ptp_classify(port, skb, &rew_op, &pdu_type);
LAN966X_SKB_CB(skb)->rew_op = rew_op;
+ LAN966X_SKB_CB(skb)->pdu_type = pdu_type;
if (rew_op != IFH_REW_OP_TWO_STEP_PTP)
return 0;
@@ -815,10 +838,6 @@ static int lan966x_ptp_perout(struct ptp_clock_info *ptp,
bool pps = false;
int pin;
- if (rq->perout.flags & ~(PTP_PEROUT_DUTY_CYCLE |
- PTP_PEROUT_PHASE))
- return -EOPNOTSUPP;
-
pin = ptp_find_pin(phc->clock, PTP_PF_PEROUT, rq->perout.index);
if (pin == -1 || pin >= LAN966X_PHC_PINS_NUM)
return -EINVAL;
@@ -917,12 +936,6 @@ static int lan966x_ptp_extts(struct ptp_clock_info *ptp,
if (lan966x->ptp_ext_irq <= 0)
return -EOPNOTSUPP;
- /* Reject requests with unsupported flags */
- if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
- PTP_RISING_EDGE |
- PTP_STRICT_FLAGS))
- return -EOPNOTSUPP;
-
pin = ptp_find_pin(phc->clock, PTP_PF_EXTTS, rq->extts.index);
if (pin == -1 || pin >= LAN966X_PHC_PINS_NUM)
return -EINVAL;
@@ -978,6 +991,10 @@ static struct ptp_clock_info lan966x_ptp_clock_info = {
.n_per_out = LAN966X_PHC_PINS_NUM,
.n_ext_ts = LAN966X_PHC_PINS_NUM,
.n_pins = LAN966X_PHC_PINS_NUM,
+ .supported_extts_flags = PTP_RISING_EDGE |
+ PTP_STRICT_FLAGS,
+ .supported_perout_flags = PTP_PEROUT_DUTY_CYCLE |
+ PTP_PEROUT_PHASE,
};
static int lan966x_ptp_phc_init(struct lan966x *lan966x,
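Both the lan743x and lan966x hunks above drop their open-coded flag checks because ptp_clock_info now advertises supported_extts_flags and supported_perout_flags, and the PTP core rejects requests carrying flags outside those masks before the driver's enable callback runs. A simplified, standalone illustration of that check follows; the real validation lives in the PTP core and also covers PTP_STRICT_FLAGS semantics and the legacy ioctls.

#include <errno.h>
#include <stdio.h>
#include <linux/ptp_clock.h>

/* Any requested flag the driver did not declare as supported is rejected
 * before its ->enable() callback is ever called.
 */
static int validate_flags(unsigned int requested, unsigned int supported)
{
	return (requested & ~supported) ? -EOPNOTSUPP : 0;
}

int main(void)
{
	unsigned int supported = PTP_RISING_EDGE | PTP_STRICT_FLAGS;

	printf("%d\n", validate_flags(PTP_FALLING_EDGE, supported)); /* -EOPNOTSUPP */
	printf("%d\n", validate_flags(PTP_RISING_EDGE, supported));  /* 0 */
	return 0;
}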
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c b/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c
index 1c88120eb291..bcb4db76b75c 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c
@@ -297,6 +297,7 @@ static void lan966x_port_bridge_leave(struct lan966x_port *port,
lan966x_vlan_port_set_vlan_aware(port, false);
lan966x_vlan_port_set_vid(port, HOST_PVID, false, false);
lan966x_vlan_port_apply(port);
+ lan966x_vlan_port_rew_host(port);
}
int lan966x_port_changeupper(struct net_device *dev,
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c b/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c
index fa34a739c748..7da22520724c 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c
@@ -149,6 +149,27 @@ void lan966x_vlan_port_set_vlan_aware(struct lan966x_port *port,
port->vlan_aware = vlan_aware;
}
+/* When the interface is in host mode, it should not be VLAN aware, but it
+ * should still insert the tags it gets from the network stack. Those tags are
+ * not carried in the frame data but in the skb, and the IFH is already
+ * configured to pick them up. So the rewriter needs to be updated to insert
+ * the VLAN tag for every frame whose VLAN ID is different from 0.
+ */
+void lan966x_vlan_port_rew_host(struct lan966x_port *port)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u32 val;
+
+ /* Tag all frames except when VID = 0 */
+ val = REW_TAG_CFG_TAG_CFG_SET(2);
+
+ /* Update only some bits in the register */
+ lan_rmw(val,
+ REW_TAG_CFG_TAG_CFG,
+ lan966x, REW_TAG_CFG(port->chip_port));
+}
+
void lan966x_vlan_port_apply(struct lan966x_port *port)
{
struct lan966x *lan966x = port->lan966x;
diff --git a/drivers/net/ethernet/microchip/lan969x/Kconfig b/drivers/net/ethernet/microchip/lan969x/Kconfig
deleted file mode 100644
index c5c6122ae2ec..000000000000
--- a/drivers/net/ethernet/microchip/lan969x/Kconfig
+++ /dev/null
@@ -1,5 +0,0 @@
-config LAN969X_SWITCH
- bool "Lan969x switch driver"
- depends on SPARX5_SWITCH
- help
- This driver supports the lan969x family of network switch devices.
diff --git a/drivers/net/ethernet/microchip/lan969x/Makefile b/drivers/net/ethernet/microchip/lan969x/Makefile
deleted file mode 100644
index 316405cbbc71..000000000000
--- a/drivers/net/ethernet/microchip/lan969x/Makefile
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Makefile for the Microchip lan969x network device drivers.
-#
-
-obj-$(CONFIG_SPARX5_SWITCH) += lan969x-switch.o
-
-lan969x-switch-y := lan969x_regs.o lan969x.o lan969x_calendar.o \
- lan969x_vcap_ag_api.o lan969x_vcap_impl.o
-
-# Provide include files
-ccflags-y += -I$(srctree)/drivers/net/ethernet/microchip/fdma
-ccflags-y += -I$(srctree)/drivers/net/ethernet/microchip/vcap
diff --git a/drivers/net/ethernet/microchip/sparx5/Kconfig b/drivers/net/ethernet/microchip/sparx5/Kconfig
index 3f04992eace6..35e1c0cf345e 100644
--- a/drivers/net/ethernet/microchip/sparx5/Kconfig
+++ b/drivers/net/ethernet/microchip/sparx5/Kconfig
@@ -24,3 +24,10 @@ config SPARX5_DCB
DSCP and PCP.
If unsure, set to Y.
+
+config LAN969X_SWITCH
+ bool "Lan969x switch driver"
+ depends on SPARX5_SWITCH
+ select PAGE_POOL
+ help
+ This driver supports the lan969x family of network switch devices.
diff --git a/drivers/net/ethernet/microchip/sparx5/Makefile b/drivers/net/ethernet/microchip/sparx5/Makefile
index 3435ca86dd70..d447f9e84d92 100644
--- a/drivers/net/ethernet/microchip/sparx5/Makefile
+++ b/drivers/net/ethernet/microchip/sparx5/Makefile
@@ -16,6 +16,14 @@ sparx5-switch-y := sparx5_main.o sparx5_packet.o \
sparx5-switch-$(CONFIG_SPARX5_DCB) += sparx5_dcb.o
sparx5-switch-$(CONFIG_DEBUG_FS) += sparx5_vcap_debugfs.o
+sparx5-switch-$(CONFIG_LAN969X_SWITCH) += lan969x/lan969x_regs.o \
+ lan969x/lan969x.o \
+ lan969x/lan969x_calendar.o \
+ lan969x/lan969x_vcap_ag_api.o \
+ lan969x/lan969x_vcap_impl.o \
+ lan969x/lan969x_rgmii.o \
+ lan969x/lan969x_fdma.o
+
# Provide include files
ccflags-y += -I$(srctree)/drivers/net/ethernet/microchip/vcap
ccflags-y += -I$(srctree)/drivers/net/ethernet/microchip/fdma
diff --git a/drivers/net/ethernet/microchip/lan969x/lan969x.c b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x.c
index ac37d0f74ee3..f3a9c71bea36 100644
--- a/drivers/net/ethernet/microchip/lan969x/lan969x.c
+++ b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x.c
@@ -90,9 +90,12 @@ static const struct sparx5_main_io_resource lan969x_main_iomap[] = {
{ TARGET_DEV2G5 + 27, 0x30d8000, 1 }, /* 0xe30d8000 */
{ TARGET_DEV10G + 9, 0x30dc000, 1 }, /* 0xe30dc000 */
{ TARGET_PCS10G_BR + 9, 0x30e0000, 1 }, /* 0xe30e0000 */
+ { TARGET_DEVRGMII, 0x30e4000, 1 }, /* 0xe30e4000 */
+ { TARGET_DEVRGMII + 1, 0x30e8000, 1 }, /* 0xe30e8000 */
{ TARGET_DSM, 0x30ec000, 1 }, /* 0xe30ec000 */
{ TARGET_PORT_CONF, 0x30f0000, 1 }, /* 0xe30f0000 */
{ TARGET_ASM, 0x3200000, 1 }, /* 0xe3200000 */
+ { TARGET_HSIO_WRAP, 0x3408000, 1 }, /* 0xe3408000 */
};
static struct sparx5_sdlb_group lan969x_sdlb_groups[LAN969X_SDLB_GRP_CNT] = {
@@ -273,9 +276,9 @@ static irqreturn_t lan969x_ptp_irq_handler(int irq, void *args)
if (WARN_ON(!skb_match))
continue;
- spin_lock(&sparx5->ptp_ts_id_lock);
+ spin_lock_irqsave(&sparx5->ptp_ts_id_lock, flags);
sparx5->ptp_skbs--;
- spin_unlock(&sparx5->ptp_ts_id_lock);
+ spin_unlock_irqrestore(&sparx5->ptp_ts_id_lock, flags);
/* Get the h/w timestamp */
sparx5_get_hwtimestamp(sparx5, &ts, delay);
@@ -329,6 +332,7 @@ static const struct sparx5_ops lan969x_ops = {
.is_port_5g = &lan969x_port_is_5g,
.is_port_10g = &lan969x_port_is_10g,
.is_port_25g = &lan969x_port_is_25g,
+ .is_port_rgmii = &lan969x_port_is_rgmii,
.get_port_dev_index = &lan969x_port_dev_mapping,
.get_port_dev_bit = &lan969x_get_dev_mode_bit,
.get_hsch_max_group_rate = &lan969x_get_hsch_max_group_rate,
@@ -336,6 +340,11 @@ static const struct sparx5_ops lan969x_ops = {
.set_port_mux = &lan969x_port_mux_set,
.ptp_irq_handler = &lan969x_ptp_irq_handler,
.dsm_calendar_calc = &lan969x_dsm_calendar_calc,
+ .port_config_rgmii = &lan969x_port_config_rgmii,
+ .fdma_init = &lan969x_fdma_init,
+ .fdma_deinit = &lan969x_fdma_deinit,
+ .fdma_poll = &lan969x_fdma_napi_poll,
+ .fdma_xmit = &lan969x_fdma_xmit,
};
const struct sparx5_match_data lan969x_desc = {
@@ -346,8 +355,3 @@ const struct sparx5_match_data lan969x_desc = {
.consts = &lan969x_consts,
.ops = &lan969x_ops,
};
-EXPORT_SYMBOL_GPL(lan969x_desc);
-
-MODULE_DESCRIPTION("Microchip lan969x switch driver");
-MODULE_AUTHOR("Daniel Machon <daniel.machon@microchip.com>");
-MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/net/ethernet/microchip/lan969x/lan969x.h b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x.h
index 2489d0d32dfd..529fde3d4deb 100644
--- a/drivers/net/ethernet/microchip/lan969x/lan969x.h
+++ b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x.h
@@ -59,7 +59,24 @@ static inline bool lan969x_port_is_25g(int portno)
return false;
}
+static inline bool lan969x_port_is_rgmii(int portno)
+{
+ return portno == 28 || portno == 29;
+}
+
/* lan969x_calendar.c */
int lan969x_dsm_calendar_calc(struct sparx5 *sparx5, u32 taxi,
struct sparx5_calendar_data *data);
+
+/* lan969x_rgmii.c */
+int lan969x_port_config_rgmii(struct sparx5_port *port,
+ struct sparx5_port_config *conf);
+
+/* lan969x_fdma.c */
+int lan969x_fdma_init(struct sparx5 *sparx5);
+int lan969x_fdma_deinit(struct sparx5 *sparx5);
+int lan969x_fdma_napi_poll(struct napi_struct *napi, int weight);
+int lan969x_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb,
+ struct net_device *dev);
+
#endif
diff --git a/drivers/net/ethernet/microchip/lan969x/lan969x_calendar.c b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_calendar.c
index e857640df185..e857640df185 100644
--- a/drivers/net/ethernet/microchip/lan969x/lan969x_calendar.c
+++ b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_calendar.c
diff --git a/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_fdma.c b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_fdma.c
new file mode 100644
index 000000000000..1282f5c3ee6d
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_fdma.c
@@ -0,0 +1,406 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip lan969x Switch driver
+ *
+ * Copyright (c) 2025 Microchip Technology Inc. and its subsidiaries.
+ */
+#include <net/page_pool/helpers.h>
+
+#include "../sparx5_main.h"
+#include "../sparx5_main_regs.h"
+#include "../sparx5_port.h"
+
+#include "fdma_api.h"
+#include "lan969x.h"
+
+#define FDMA_PRIV(fdma) ((struct sparx5 *)((fdma)->priv))
+
+static int lan969x_fdma_tx_dataptr_cb(struct fdma *fdma, int dcb, int db,
+ u64 *dataptr)
+{
+ *dataptr = FDMA_PRIV(fdma)->tx.dbs[dcb].dma_addr;
+
+ return 0;
+}
+
+static int lan969x_fdma_rx_dataptr_cb(struct fdma *fdma, int dcb, int db,
+ u64 *dataptr)
+{
+ struct sparx5_rx *rx = &FDMA_PRIV(fdma)->rx;
+ struct page *page;
+
+ page = page_pool_dev_alloc_pages(rx->page_pool);
+ if (unlikely(!page))
+ return -ENOMEM;
+
+ rx->page[dcb][db] = page;
+
+ *dataptr = page_pool_get_dma_addr(page);
+
+ return 0;
+}
+
+static int lan969x_fdma_get_next_dcb(struct sparx5_tx *tx)
+{
+ struct fdma *fdma = &tx->fdma;
+
+ for (int i = 0; i < fdma->n_dcbs; ++i)
+ if (!tx->dbs[i].used && !fdma_is_last(fdma, &fdma->dcbs[i]))
+ return i;
+
+ return -ENOSPC;
+}
+
+static void lan969x_fdma_tx_clear_buf(struct sparx5 *sparx5, int weight)
+{
+ struct fdma *fdma = &sparx5->tx.fdma;
+ struct sparx5_tx_buf *db;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&sparx5->tx_lock, flags);
+
+ for (i = 0; i < fdma->n_dcbs; ++i) {
+ db = &sparx5->tx.dbs[i];
+
+ if (!db->used)
+ continue;
+
+ if (!fdma_db_is_done(fdma_db_get(fdma, i, 0)))
+ continue;
+
+ db->dev->stats.tx_bytes += db->skb->len;
+ db->dev->stats.tx_packets++;
+ sparx5->tx.packets++;
+
+ dma_unmap_single(sparx5->dev,
+ db->dma_addr,
+ db->skb->len,
+ DMA_TO_DEVICE);
+
+ if (!db->ptp)
+ napi_consume_skb(db->skb, weight);
+
+ db->used = false;
+ }
+
+ spin_unlock_irqrestore(&sparx5->tx_lock, flags);
+}
+
+static void lan969x_fdma_free_pages(struct sparx5_rx *rx)
+{
+ struct fdma *fdma = &rx->fdma;
+
+ for (int i = 0; i < fdma->n_dcbs; ++i) {
+ for (int j = 0; j < fdma->n_dbs; ++j)
+ page_pool_put_full_page(rx->page_pool,
+ rx->page[i][j], false);
+ }
+}
+
+static struct sk_buff *lan969x_fdma_rx_get_frame(struct sparx5 *sparx5,
+ struct sparx5_rx *rx)
+{
+ const struct sparx5_consts *consts = sparx5->data->consts;
+ struct fdma *fdma = &rx->fdma;
+ struct sparx5_port *port;
+ struct frame_info fi;
+ struct sk_buff *skb;
+ struct fdma_db *db;
+ struct page *page;
+
+ db = &fdma->dcbs[fdma->dcb_index].db[fdma->db_index];
+ page = rx->page[fdma->dcb_index][fdma->db_index];
+
+ sparx5_ifh_parse(sparx5, page_address(page), &fi);
+ port = fi.src_port < consts->n_ports ? sparx5->ports[fi.src_port] :
+ NULL;
+ if (WARN_ON(!port))
+ goto free_page;
+
+ skb = build_skb(page_address(page), fdma->db_size);
+ if (unlikely(!skb))
+ goto free_page;
+
+ skb_mark_for_recycle(skb);
+ skb_put(skb, fdma_db_len_get(db));
+ skb_pull(skb, IFH_LEN * sizeof(u32));
+
+ skb->dev = port->ndev;
+
+ if (likely(!(skb->dev->features & NETIF_F_RXFCS)))
+ skb_trim(skb, skb->len - ETH_FCS_LEN);
+
+ sparx5_ptp_rxtstamp(sparx5, skb, fi.timestamp);
+ skb->protocol = eth_type_trans(skb, skb->dev);
+
+ if (test_bit(port->portno, sparx5->bridge_mask))
+ skb->offload_fwd_mark = 1;
+
+ skb->dev->stats.rx_bytes += skb->len;
+ skb->dev->stats.rx_packets++;
+
+ return skb;
+
+free_page:
+ page_pool_recycle_direct(rx->page_pool, page);
+
+ return NULL;
+}
+
+static int lan969x_fdma_rx_alloc(struct sparx5 *sparx5)
+{
+ struct sparx5_rx *rx = &sparx5->rx;
+ struct fdma *fdma = &rx->fdma;
+ int err;
+
+ struct page_pool_params pp_params = {
+ .order = 0,
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+ .pool_size = fdma->n_dcbs * fdma->n_dbs,
+ .nid = NUMA_NO_NODE,
+ .dev = sparx5->dev,
+ .dma_dir = DMA_FROM_DEVICE,
+ .offset = 0,
+ .max_len = fdma->db_size -
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
+ };
+
+ rx->page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(rx->page_pool))
+ return PTR_ERR(rx->page_pool);
+
+ err = fdma_alloc_coherent(sparx5->dev, fdma);
+ if (err)
+ return err;
+
+ fdma_dcbs_init(fdma,
+ FDMA_DCB_INFO_DATAL(fdma->db_size),
+ FDMA_DCB_STATUS_INTR);
+
+ return 0;
+}
+
+static int lan969x_fdma_tx_alloc(struct sparx5 *sparx5)
+{
+ struct sparx5_tx *tx = &sparx5->tx;
+ struct fdma *fdma = &tx->fdma;
+ int err;
+
+ tx->dbs = kcalloc(fdma->n_dcbs,
+ sizeof(struct sparx5_tx_buf),
+ GFP_KERNEL);
+ if (!tx->dbs)
+ return -ENOMEM;
+
+ err = fdma_alloc_coherent(sparx5->dev, fdma);
+ if (err) {
+ kfree(tx->dbs);
+ return err;
+ }
+
+ fdma_dcbs_init(fdma,
+ FDMA_DCB_INFO_DATAL(fdma->db_size),
+ FDMA_DCB_STATUS_DONE);
+
+ return 0;
+}
+
+static void lan969x_fdma_rx_init(struct sparx5 *sparx5)
+{
+ struct fdma *fdma = &sparx5->rx.fdma;
+
+ fdma->channel_id = FDMA_XTR_CHANNEL;
+ fdma->n_dcbs = FDMA_DCB_MAX;
+ fdma->n_dbs = 1;
+ fdma->priv = sparx5;
+ fdma->size = fdma_get_size(fdma);
+ fdma->db_size = PAGE_SIZE;
+ fdma->ops.dataptr_cb = &lan969x_fdma_rx_dataptr_cb;
+ fdma->ops.nextptr_cb = &fdma_nextptr_cb;
+
+ /* Fetch a netdev for SKB and NAPI use, any will do */
+ for (int idx = 0; idx < sparx5->data->consts->n_ports; ++idx) {
+ struct sparx5_port *port = sparx5->ports[idx];
+
+ if (port && port->ndev) {
+ sparx5->rx.ndev = port->ndev;
+ break;
+ }
+ }
+}
+
+static void lan969x_fdma_tx_init(struct sparx5 *sparx5)
+{
+ struct fdma *fdma = &sparx5->tx.fdma;
+
+ fdma->channel_id = FDMA_INJ_CHANNEL;
+ fdma->n_dcbs = FDMA_DCB_MAX;
+ fdma->n_dbs = 1;
+ fdma->priv = sparx5;
+ fdma->size = fdma_get_size(fdma);
+ fdma->db_size = PAGE_SIZE;
+ fdma->ops.dataptr_cb = &lan969x_fdma_tx_dataptr_cb;
+ fdma->ops.nextptr_cb = &fdma_nextptr_cb;
+}
+
+int lan969x_fdma_napi_poll(struct napi_struct *napi, int weight)
+{
+ struct sparx5_rx *rx = container_of(napi, struct sparx5_rx, napi);
+ struct sparx5 *sparx5 = container_of(rx, struct sparx5, rx);
+ int old_dcb, dcb_reload, counter = 0;
+ struct fdma *fdma = &rx->fdma;
+ struct sk_buff *skb;
+
+ dcb_reload = fdma->dcb_index;
+
+ lan969x_fdma_tx_clear_buf(sparx5, weight);
+
+ /* Process RX data */
+ while (counter < weight) {
+ if (!fdma_has_frames(fdma))
+ break;
+
+ skb = lan969x_fdma_rx_get_frame(sparx5, rx);
+ if (!skb)
+ break;
+
+ napi_gro_receive(&rx->napi, skb);
+
+ fdma_db_advance(fdma);
+ counter++;
+ /* Check if the DCB can be reused */
+ if (fdma_dcb_is_reusable(fdma))
+ continue;
+
+ fdma_db_reset(fdma);
+ fdma_dcb_advance(fdma);
+ }
+
+ /* Allocate new pages and map them */
+ while (dcb_reload != fdma->dcb_index) {
+ old_dcb = dcb_reload;
+ dcb_reload++;
+ /* n_dcbs must be a power of 2 */
+ dcb_reload &= fdma->n_dcbs - 1;
+
+ fdma_dcb_add(fdma,
+ old_dcb,
+ FDMA_DCB_INFO_DATAL(fdma->db_size),
+ FDMA_DCB_STATUS_INTR);
+
+ sparx5_fdma_reload(sparx5, fdma);
+ }
+
+ if (counter < weight && napi_complete_done(napi, counter))
+ spx5_wr(0xff, sparx5, FDMA_INTR_DB_ENA);
+
+ return counter;
+}
+
+int lan969x_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb,
+ struct net_device *dev)
+{
+ int next_dcb, needed_headroom, needed_tailroom, err;
+ struct sparx5_tx *tx = &sparx5->tx;
+ struct fdma *fdma = &tx->fdma;
+ struct sparx5_tx_buf *db_buf;
+ u64 status;
+
+ next_dcb = lan969x_fdma_get_next_dcb(tx);
+ if (next_dcb < 0)
+ return -EBUSY;
+
+ needed_headroom = max_t(int, IFH_LEN * 4 - skb_headroom(skb), 0);
+ needed_tailroom = max_t(int, ETH_FCS_LEN - skb_tailroom(skb), 0);
+ if (needed_headroom || needed_tailroom || skb_header_cloned(skb)) {
+ err = pskb_expand_head(skb, needed_headroom, needed_tailroom,
+ GFP_ATOMIC);
+ if (unlikely(err))
+ return err;
+ }
+
+ skb_push(skb, IFH_LEN * 4);
+ memcpy(skb->data, ifh, IFH_LEN * 4);
+ skb_put(skb, ETH_FCS_LEN);
+
+ db_buf = &tx->dbs[next_dcb];
+ db_buf->dma_addr = dma_map_single(sparx5->dev,
+ skb->data,
+ skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(sparx5->dev, db_buf->dma_addr))
+ return -ENOMEM;
+
+ db_buf->dev = dev;
+ db_buf->skb = skb;
+ db_buf->ptp = false;
+ db_buf->used = true;
+
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+ SPARX5_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
+ db_buf->ptp = true;
+
+ status = FDMA_DCB_STATUS_SOF |
+ FDMA_DCB_STATUS_EOF |
+ FDMA_DCB_STATUS_BLOCKO(0) |
+ FDMA_DCB_STATUS_BLOCKL(skb->len) |
+ FDMA_DCB_STATUS_INTR;
+
+ fdma_dcb_advance(fdma);
+ fdma_dcb_add(fdma, next_dcb, 0, status);
+
+ sparx5_fdma_reload(sparx5, fdma);
+
+ return NETDEV_TX_OK;
+}
+
+int lan969x_fdma_init(struct sparx5 *sparx5)
+{
+ struct sparx5_rx *rx = &sparx5->rx;
+ int err;
+
+ lan969x_fdma_rx_init(sparx5);
+ lan969x_fdma_tx_init(sparx5);
+ sparx5_fdma_injection_mode(sparx5);
+
+ err = dma_set_mask_and_coherent(sparx5->dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(sparx5->dev, "Failed to set 64-bit FDMA mask");
+ return err;
+ }
+
+ err = lan969x_fdma_rx_alloc(sparx5);
+ if (err) {
+ dev_err(sparx5->dev, "Failed to allocate RX buffers: %d\n",
+ err);
+ return err;
+ }
+
+ err = lan969x_fdma_tx_alloc(sparx5);
+ if (err) {
+ fdma_free_coherent(sparx5->dev, &rx->fdma);
+ dev_err(sparx5->dev, "Failed to allocate TX buffers: %d\n",
+ err);
+ return err;
+ }
+
+ /* Reset FDMA state */
+ spx5_wr(FDMA_CTRL_NRESET_SET(0), sparx5, FDMA_CTRL);
+ spx5_wr(FDMA_CTRL_NRESET_SET(1), sparx5, FDMA_CTRL);
+
+ return err;
+}
+
+int lan969x_fdma_deinit(struct sparx5 *sparx5)
+{
+ struct sparx5_rx *rx = &sparx5->rx;
+ struct sparx5_tx *tx = &sparx5->tx;
+
+ sparx5_fdma_stop(sparx5);
+ fdma_free_coherent(sparx5->dev, &tx->fdma);
+ fdma_free_coherent(sparx5->dev, &rx->fdma);
+ lan969x_fdma_free_pages(rx);
+ page_pool_destroy(rx->page_pool);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/microchip/lan969x/lan969x_regs.c b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_regs.c
index ace4ba21eec4..ace4ba21eec4 100644
--- a/drivers/net/ethernet/microchip/lan969x/lan969x_regs.c
+++ b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_regs.c
diff --git a/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_rgmii.c b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_rgmii.c
new file mode 100644
index 000000000000..4e422ca50828
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_rgmii.c
@@ -0,0 +1,224 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip lan969x Switch driver
+ *
+ * Copyright (c) 2024 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include "lan969x.h"
+
+/* Tx clock selectors */
+#define LAN969X_RGMII_TX_CLK_SEL_125MHZ 1 /* 1000Mbps */
+#define LAN969X_RGMII_TX_CLK_SEL_25MHZ 2 /* 100Mbps */
+#define LAN969X_RGMII_TX_CLK_SEL_2M5MHZ 3 /* 10Mbps */
+
+/* Port speed selectors */
+#define LAN969X_RGMII_SPEED_SEL_10 0 /* Select 10Mbps speed */
+#define LAN969X_RGMII_SPEED_SEL_100 1 /* Select 100Mbps speed */
+#define LAN969X_RGMII_SPEED_SEL_1000 2 /* Select 1000Mbps speed */
+
+/* Clock delay selectors */
+#define LAN969X_RGMII_CLK_DELAY_SEL_1_0_NS 2 /* Phase shift 45deg */
+#define LAN969X_RGMII_CLK_DELAY_SEL_1_7_NS 3 /* Phase shift 77deg */
+#define LAN969X_RGMII_CLK_DELAY_SEL_2_0_NS 4 /* Phase shift 90deg */
+#define LAN969X_RGMII_CLK_DELAY_SEL_2_5_NS 5 /* Phase shift 112deg */
+#define LAN969X_RGMII_CLK_DELAY_SEL_3_0_NS 6 /* Phase shift 135deg */
+#define LAN969X_RGMII_CLK_DELAY_SEL_3_3_NS 7 /* Phase shift 147deg */
+
+#define LAN969X_RGMII_PORT_START_IDX 28 /* Index of the first RGMII port */
+#define LAN969X_RGMII_IFG_TX 4 /* TX Inter Frame Gap value */
+#define LAN969X_RGMII_IFG_RX1 5 /* RX1 Inter Frame Gap value */
+#define LAN969X_RGMII_IFG_RX2 1 /* RX2 Inter Frame Gap value */
+
+#define RGMII_PORT_IDX(port) ((port)->portno - LAN969X_RGMII_PORT_START_IDX)
+
+/* Get the tx clock selector based on the port speed. */
+static int lan969x_rgmii_get_clk_sel(int speed)
+{
+ return (speed == SPEED_10 ? LAN969X_RGMII_TX_CLK_SEL_2M5MHZ :
+ speed == SPEED_100 ? LAN969X_RGMII_TX_CLK_SEL_25MHZ :
+ LAN969X_RGMII_TX_CLK_SEL_125MHZ);
+}
+
+/* Get the port speed selector based on the port speed. */
+static int lan969x_rgmii_get_speed_sel(int speed)
+{
+ return (speed == SPEED_10 ? LAN969X_RGMII_SPEED_SEL_10 :
+ speed == SPEED_100 ? LAN969X_RGMII_SPEED_SEL_100 :
+ LAN969X_RGMII_SPEED_SEL_1000);
+}
+
+/* Get the clock delay selector based on the clock delay in picoseconds. */
+static int lan969x_rgmii_get_clk_delay_sel(struct sparx5_port *port,
+ u32 delay_ps, u32 *clk_delay_sel)
+{
+ switch (delay_ps) {
+ case 0:
+ /* Hardware default selector. */
+ *clk_delay_sel = LAN969X_RGMII_CLK_DELAY_SEL_2_5_NS;
+ break;
+ case 1000:
+ *clk_delay_sel = LAN969X_RGMII_CLK_DELAY_SEL_1_0_NS;
+ break;
+ case 1700:
+ *clk_delay_sel = LAN969X_RGMII_CLK_DELAY_SEL_1_7_NS;
+ break;
+ case 2000:
+ *clk_delay_sel = LAN969X_RGMII_CLK_DELAY_SEL_2_0_NS;
+ break;
+ case 2500:
+ *clk_delay_sel = LAN969X_RGMII_CLK_DELAY_SEL_2_5_NS;
+ break;
+ case 3000:
+ *clk_delay_sel = LAN969X_RGMII_CLK_DELAY_SEL_3_0_NS;
+ break;
+ case 3300:
+ *clk_delay_sel = LAN969X_RGMII_CLK_DELAY_SEL_3_3_NS;
+ break;
+ default:
+ dev_err(port->sparx5->dev, "Invalid RGMII delay: %u", delay_ps);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Configure the RGMII tx clock frequency. */
+static void lan969x_rgmii_tx_clk_config(struct sparx5_port *port,
+ struct sparx5_port_config *conf)
+{
+ u32 clk_sel = lan969x_rgmii_get_clk_sel(conf->speed);
+ u32 idx = RGMII_PORT_IDX(port);
+
+ /* Take the RGMII clock domain out of reset and set tx clock
+ * frequency.
+ */
+ spx5_rmw(HSIO_WRAP_RGMII_CFG_TX_CLK_CFG_SET(clk_sel) |
+ HSIO_WRAP_RGMII_CFG_RGMII_TX_RST_SET(0) |
+ HSIO_WRAP_RGMII_CFG_RGMII_RX_RST_SET(0),
+ HSIO_WRAP_RGMII_CFG_TX_CLK_CFG |
+ HSIO_WRAP_RGMII_CFG_RGMII_TX_RST |
+ HSIO_WRAP_RGMII_CFG_RGMII_RX_RST,
+ port->sparx5, HSIO_WRAP_RGMII_CFG(idx));
+}
+
+/* Configure the RGMII port device. */
+static void lan969x_rgmii_port_device_config(struct sparx5_port *port,
+ struct sparx5_port_config *conf)
+{
+ u32 dtag, dotag, etype, speed_sel, idx = RGMII_PORT_IDX(port);
+
+ speed_sel = lan969x_rgmii_get_speed_sel(conf->speed);
+
+ etype = (port->vlan_type == SPX5_VLAN_PORT_TYPE_S_CUSTOM ?
+ port->custom_etype :
+ port->vlan_type == SPX5_VLAN_PORT_TYPE_C ?
+ ETH_P_8021Q : ETH_P_8021AD);
+
+ dtag = port->max_vlan_tags == SPX5_PORT_MAX_TAGS_TWO;
+ dotag = port->max_vlan_tags != SPX5_PORT_MAX_TAGS_NONE;
+
+ /* Enable the MAC. */
+ spx5_wr(DEVRGMII_MAC_ENA_CFG_RX_ENA_SET(1) |
+ DEVRGMII_MAC_ENA_CFG_TX_ENA_SET(1),
+ port->sparx5, DEVRGMII_MAC_ENA_CFG(idx));
+
+ /* Configure the Inter Frame Gap. */
+ spx5_wr(DEVRGMII_MAC_IFG_CFG_TX_IFG_SET(LAN969X_RGMII_IFG_TX) |
+ DEVRGMII_MAC_IFG_CFG_RX_IFG1_SET(LAN969X_RGMII_IFG_RX1) |
+ DEVRGMII_MAC_IFG_CFG_RX_IFG2_SET(LAN969X_RGMII_IFG_RX2),
+ port->sparx5, DEVRGMII_MAC_IFG_CFG(idx));
+
+ /* Configure port data rate. */
+ spx5_wr(DEVRGMII_DEV_RST_CTRL_SPEED_SEL_SET(speed_sel),
+ port->sparx5, DEVRGMII_DEV_RST_CTRL(idx));
+
+ /* Configure VLAN awareness. */
+ spx5_wr(DEVRGMII_MAC_TAGS_CFG_TAG_ID_SET(etype) |
+ DEVRGMII_MAC_TAGS_CFG_PB_ENA_SET(dtag) |
+ DEVRGMII_MAC_TAGS_CFG_VLAN_AWR_ENA_SET(dotag) |
+ DEVRGMII_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA_SET(dotag),
+ port->sparx5,
+ DEVRGMII_MAC_TAGS_CFG(idx));
+}
+
+/* Configure the RGMII delay lines in the MAC.
+ *
+ * We use the "rx-internal-delay-ps" and "tx-internal-delay-ps" properties to
+ * configure the rx and tx delays for the MAC. If these properties are missing
+ * or set to zero, the MAC will not apply any delay.
+ *
+ * The PHY side delays are determined by the PHY mode
+ * (e.g. PHY_INTERFACE_MODE_RGMII_{ID, RXID, TXID}) and are ignored entirely by
+ * the MAC side.
+ */
+static int lan969x_rgmii_delay_config(struct sparx5_port *port,
+ struct sparx5_port_config *conf)
+{
+ u32 tx_clk_sel, rx_clk_sel, tx_delay_ps = 0, rx_delay_ps = 0;
+ u32 idx = RGMII_PORT_IDX(port);
+ int err;
+
+ of_property_read_u32(port->of_node, "rx-internal-delay-ps",
+ &rx_delay_ps);
+
+ of_property_read_u32(port->of_node, "tx-internal-delay-ps",
+ &tx_delay_ps);
+
+ err = lan969x_rgmii_get_clk_delay_sel(port, rx_delay_ps, &rx_clk_sel);
+ if (err)
+ return err;
+
+ err = lan969x_rgmii_get_clk_delay_sel(port, tx_delay_ps, &tx_clk_sel);
+ if (err)
+ return err;
+
+ /* Configure rx delay. */
+ spx5_rmw(HSIO_WRAP_DLL_CFG_DLL_RST_SET(0) |
+ HSIO_WRAP_DLL_CFG_DLL_ENA_SET(1) |
+ HSIO_WRAP_DLL_CFG_DLL_CLK_ENA_SET(!!rx_delay_ps) |
+ HSIO_WRAP_DLL_CFG_DLL_CLK_SEL_SET(rx_clk_sel),
+ HSIO_WRAP_DLL_CFG_DLL_RST |
+ HSIO_WRAP_DLL_CFG_DLL_ENA |
+ HSIO_WRAP_DLL_CFG_DLL_CLK_ENA |
+ HSIO_WRAP_DLL_CFG_DLL_CLK_SEL,
+ port->sparx5, HSIO_WRAP_DLL_CFG(idx, 0));
+
+ /* Configure tx delay. */
+ spx5_rmw(HSIO_WRAP_DLL_CFG_DLL_RST_SET(0) |
+ HSIO_WRAP_DLL_CFG_DLL_ENA_SET(1) |
+ HSIO_WRAP_DLL_CFG_DLL_CLK_ENA_SET(!!tx_delay_ps) |
+ HSIO_WRAP_DLL_CFG_DLL_CLK_SEL_SET(tx_clk_sel),
+ HSIO_WRAP_DLL_CFG_DLL_RST |
+ HSIO_WRAP_DLL_CFG_DLL_ENA |
+ HSIO_WRAP_DLL_CFG_DLL_CLK_ENA |
+ HSIO_WRAP_DLL_CFG_DLL_CLK_SEL,
+ port->sparx5, HSIO_WRAP_DLL_CFG(idx, 1));
+
+ return 0;
+}
+
+/* Configure the GPIOs to be used as an RGMII interface. */
+static void lan969x_rgmii_gpio_config(struct sparx5_port *port)
+{
+ u32 idx = RGMII_PORT_IDX(port);
+
+ /* Enable the RGMII on the GPIOs. */
+ spx5_wr(HSIO_WRAP_XMII_CFG_GPIO_XMII_CFG_SET(1), port->sparx5,
+ HSIO_WRAP_XMII_CFG(!idx));
+}
+
+int lan969x_port_config_rgmii(struct sparx5_port *port,
+ struct sparx5_port_config *conf)
+{
+ int err;
+
+ err = lan969x_rgmii_delay_config(port, conf);
+ if (err)
+ return err;
+
+ lan969x_rgmii_tx_clk_config(port, conf);
+ lan969x_rgmii_gpio_config(port);
+ lan969x_rgmii_port_device_config(port, conf);
+
+ return 0;
+}
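The clock delay selectors defined at the top of lan969x_rgmii.c quote a phase shift for each supported delay. A small standalone check of those figures: at 1000 Mbps the RGMII clock runs at 125 MHz (8 ns period), so a delay in picoseconds corresponds to delay / 8000 * 360 degrees. The computed values are rounded, which is assumed to account for the small differences from the comments (for example 3.3 ns works out to roughly 148 rather than 147 degrees).

#include <stdio.h>

int main(void)
{
	const unsigned int delays_ps[] = { 1000, 1700, 2000, 2500, 3000, 3300 };
	const double period_ps = 8000.0;	/* 125 MHz TX clock */

	for (unsigned int i = 0; i < sizeof(delays_ps) / sizeof(delays_ps[0]); i++)
		printf("%u ps -> %.1f deg\n", delays_ps[i],
		       delays_ps[i] / period_ps * 360.0);
	return 0;
}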
diff --git a/drivers/net/ethernet/microchip/lan969x/lan969x_vcap_ag_api.c b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_vcap_ag_api.c
index 7acc5bcf337a..7acc5bcf337a 100644
--- a/drivers/net/ethernet/microchip/lan969x/lan969x_vcap_ag_api.c
+++ b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_vcap_ag_api.c
diff --git a/drivers/net/ethernet/microchip/lan969x/lan969x_vcap_impl.c b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_vcap_impl.c
index 543a1f2bf6bd..543a1f2bf6bd 100644
--- a/drivers/net/ethernet/microchip/lan969x/lan969x_vcap_impl.c
+++ b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_vcap_impl.c
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_calendar.c b/drivers/net/ethernet/microchip/sparx5/sparx5_calendar.c
index 5fe941c66c17..5c46d81de530 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_calendar.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_calendar.c
@@ -98,7 +98,6 @@ u32 sparx5_cal_speed_to_value(enum sparx5_cal_bw speed)
default: return 0;
}
}
-EXPORT_SYMBOL_GPL(sparx5_cal_speed_to_value);
static u32 sparx5_bandwidth_to_calendar(u32 bw)
{
@@ -150,7 +149,6 @@ enum sparx5_cal_bw sparx5_get_port_cal_speed(struct sparx5 *sparx5, u32 portno)
return SPX5_CAL_SPEED_NONE;
return sparx5_bandwidth_to_calendar(port->conf.bandwidth);
}
-EXPORT_SYMBOL_GPL(sparx5_get_port_cal_speed);
/* Auto configure the QSYS calendar based on port configuration */
int sparx5_config_auto_calendar(struct sparx5 *sparx5)
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c b/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c
index 0027144a2af2..dbe86f937b21 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c
@@ -18,9 +18,6 @@
#include "sparx5_main.h"
#include "sparx5_port.h"
-#define FDMA_XTR_CHANNEL 6
-#define FDMA_INJ_CHANNEL 0
-
#define FDMA_XTR_BUFFER_SIZE 2048
#define FDMA_WEIGHT 4
@@ -133,7 +130,7 @@ static void sparx5_fdma_tx_deactivate(struct sparx5 *sparx5, struct sparx5_tx *t
sparx5, FDMA_CH_ACTIVATE);
}
-static void sparx5_fdma_reload(struct sparx5 *sparx5, struct fdma *fdma)
+void sparx5_fdma_reload(struct sparx5 *sparx5, struct fdma *fdma)
{
/* Reload the RX channel */
spx5_wr(BIT(fdma->channel_id), sparx5, FDMA_CH_RELOAD);
@@ -183,7 +180,7 @@ static bool sparx5_fdma_rx_get_frame(struct sparx5 *sparx5, struct sparx5_rx *rx
return true;
}
-static int sparx5_fdma_napi_callback(struct napi_struct *napi, int weight)
+int sparx5_fdma_napi_callback(struct napi_struct *napi, int weight)
{
struct sparx5_rx *rx = container_of(napi, struct sparx5_rx, napi);
struct sparx5 *sparx5 = container_of(rx, struct sparx5, rx);
@@ -213,11 +210,11 @@ static int sparx5_fdma_napi_callback(struct napi_struct *napi, int weight)
return counter;
}
-int sparx5_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb)
+int sparx5_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb,
+ struct net_device *dev)
{
struct sparx5_tx *tx = &sparx5->tx;
struct fdma *fdma = &tx->fdma;
- static bool first_time = true;
void *virt_addr;
fdma_dcb_advance(fdma);
@@ -238,12 +235,8 @@ int sparx5_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb)
FDMA_DCB_STATUS_BLOCKO(0) |
FDMA_DCB_STATUS_BLOCKL(skb->len + IFH_LEN * 4 + 4));
- if (first_time) {
- sparx5_fdma_tx_activate(sparx5, tx);
- first_time = false;
- } else {
- sparx5_fdma_reload(sparx5, fdma);
- }
+ sparx5_fdma_reload(sparx5, fdma);
+
return NETDEV_TX_OK;
}
@@ -260,10 +253,6 @@ static int sparx5_fdma_rx_alloc(struct sparx5 *sparx5)
fdma_dcbs_init(fdma, FDMA_DCB_INFO_DATAL(fdma->db_size),
FDMA_DCB_STATUS_INTR);
- netif_napi_add_weight(rx->ndev, &rx->napi, sparx5_fdma_napi_callback,
- FDMA_WEIGHT);
- napi_enable(&rx->napi);
- sparx5_fdma_rx_activate(sparx5, rx);
return 0;
}
@@ -348,7 +337,7 @@ irqreturn_t sparx5_fdma_handler(int irq, void *args)
return IRQ_HANDLED;
}
-static void sparx5_fdma_injection_mode(struct sparx5 *sparx5)
+void sparx5_fdma_injection_mode(struct sparx5 *sparx5)
{
const int byte_swap = 1;
int portno;
@@ -410,7 +399,7 @@ static void sparx5_fdma_injection_mode(struct sparx5 *sparx5)
}
}
-int sparx5_fdma_start(struct sparx5 *sparx5)
+int sparx5_fdma_init(struct sparx5 *sparx5)
{
int err;
@@ -443,24 +432,55 @@ int sparx5_fdma_start(struct sparx5 *sparx5)
return err;
}
+int sparx5_fdma_deinit(struct sparx5 *sparx5)
+{
+ sparx5_fdma_stop(sparx5);
+ fdma_free_phys(&sparx5->rx.fdma);
+ fdma_free_phys(&sparx5->tx.fdma);
+
+ return 0;
+}
+
static u32 sparx5_fdma_port_ctrl(struct sparx5 *sparx5)
{
return spx5_rd(sparx5, FDMA_PORT_CTRL(0));
}
+int sparx5_fdma_start(struct sparx5 *sparx5)
+{
+ const struct sparx5_ops *ops = sparx5->data->ops;
+ struct sparx5_rx *rx = &sparx5->rx;
+ struct sparx5_tx *tx = &sparx5->tx;
+
+ netif_napi_add_weight(rx->ndev,
+ &rx->napi,
+ ops->fdma_poll,
+ FDMA_WEIGHT);
+
+ napi_enable(&rx->napi);
+
+ sparx5_fdma_rx_activate(sparx5, rx);
+ sparx5_fdma_tx_activate(sparx5, tx);
+
+ return 0;
+}
+
int sparx5_fdma_stop(struct sparx5 *sparx5)
{
+ struct sparx5_rx *rx = &sparx5->rx;
+ struct sparx5_tx *tx = &sparx5->tx;
u32 val;
- napi_disable(&sparx5->rx.napi);
+ napi_disable(&rx->napi);
+
/* Stop the fdma and channel interrupts */
- sparx5_fdma_rx_deactivate(sparx5, &sparx5->rx);
- sparx5_fdma_tx_deactivate(sparx5, &sparx5->tx);
+ sparx5_fdma_rx_deactivate(sparx5, rx);
+ sparx5_fdma_tx_deactivate(sparx5, tx);
+
/* Wait for the RX channel to stop */
read_poll_timeout(sparx5_fdma_port_ctrl, val,
FDMA_PORT_CTRL_XTR_BUF_IS_EMPTY_GET(val) == 0,
500, 10000, 0, sparx5);
- fdma_free_phys(&sparx5->rx.fdma);
- fdma_free_phys(&sparx5->tx.fdma);
+
return 0;
}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
index 2f1013f870fb..74ad1d73b465 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
@@ -24,7 +24,7 @@
#include <linux/types.h>
#include <linux/reset.h>
-#include "../lan969x/lan969x.h" /* for lan969x match data */
+#include "lan969x/lan969x.h" /* for lan969x match data */
#include "sparx5_main_regs.h"
#include "sparx5_main.h"
@@ -313,10 +313,13 @@ static int sparx5_create_port(struct sparx5 *sparx5,
struct initial_port_config *config)
{
struct sparx5_port *spx5_port;
+ const struct sparx5_ops *ops;
struct net_device *ndev;
struct phylink *phylink;
int err;
+ ops = sparx5->data->ops;
+
ndev = sparx5_create_netdev(sparx5, config->portno);
if (IS_ERR(ndev)) {
dev_err(sparx5->dev, "Could not create net device: %02u\n",
@@ -335,7 +338,6 @@ static int sparx5_create_port(struct sparx5 *sparx5,
spx5_port->custom_etype = 0x8880; /* Vitesse */
spx5_port->phylink_pcs.poll = true;
spx5_port->phylink_pcs.ops = &sparx5_phylink_pcs_ops;
- spx5_port->phylink_pcs.neg_mode = true;
spx5_port->is_mrouter = false;
INIT_LIST_HEAD(&spx5_port->tc_templates);
sparx5->ports[config->portno] = spx5_port;
@@ -357,6 +359,9 @@ static int sparx5_create_port(struct sparx5 *sparx5,
MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD |
MAC_2500FD | MAC_5000FD | MAC_10000FD | MAC_25000FD;
+ if (ops->is_port_rgmii(spx5_port->portno))
+ phy_interface_set_rgmii(spx5_port->phylink_config.supported_interfaces);
+
__set_bit(PHY_INTERFACE_MODE_SGMII,
spx5_port->phylink_config.supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_QSGMII,
@@ -778,16 +783,19 @@ static int sparx5_start(struct sparx5 *sparx5)
/* Start Frame DMA with fallback to register based INJ/XTR */
err = -ENXIO;
- if (sparx5->fdma_irq >= 0 && is_sparx5(sparx5)) {
- if (GCB_CHIP_ID_REV_ID_GET(sparx5->chip_id) > 0)
- err = devm_request_threaded_irq(sparx5->dev,
- sparx5->fdma_irq,
- NULL,
- sparx5_fdma_handler,
- IRQF_ONESHOT,
- "sparx5-fdma", sparx5);
- if (!err)
- err = sparx5_fdma_start(sparx5);
+ if (sparx5->fdma_irq >= 0) {
+ if (GCB_CHIP_ID_REV_ID_GET(sparx5->chip_id) > 0 ||
+ !is_sparx5(sparx5))
+ err = devm_request_irq(sparx5->dev,
+ sparx5->fdma_irq,
+ sparx5_fdma_handler,
+ 0,
+ "sparx5-fdma", sparx5);
+ if (!err) {
+ err = ops->fdma_init(sparx5);
+ if (!err)
+ sparx5_fdma_start(sparx5);
+ }
if (err)
sparx5->fdma_irq = -ENXIO;
} else {
@@ -831,6 +839,7 @@ static int mchp_sparx5_probe(struct platform_device *pdev)
struct initial_port_config *configs, *config;
struct device_node *np = pdev->dev.of_node;
struct device_node *ports, *portnp;
+ const struct sparx5_ops *ops;
struct reset_control *reset;
struct sparx5 *sparx5;
int idx = 0, err = 0;
@@ -852,6 +861,7 @@ static int mchp_sparx5_probe(struct platform_device *pdev)
return -EINVAL;
regs = sparx5->data->regs;
+ ops = sparx5->data->ops;
/* Do switch core reset if available */
reset = devm_reset_control_get_optional_shared(&pdev->dev, "switch");
@@ -881,7 +891,7 @@ static int mchp_sparx5_probe(struct platform_device *pdev)
for_each_available_child_of_node(ports, portnp) {
struct sparx5_port_config *conf;
- struct phy *serdes;
+ struct phy *serdes = NULL;
u32 portno;
err = of_property_read_u32(portnp, "reg", &portno);
@@ -911,13 +921,17 @@ static int mchp_sparx5_probe(struct platform_device *pdev)
conf->sd_sgpio = ~0;
else
sparx5->sd_sgpio_remapping = true;
- serdes = devm_of_phy_get(sparx5->dev, portnp, NULL);
- if (IS_ERR(serdes)) {
- err = dev_err_probe(sparx5->dev, PTR_ERR(serdes),
- "port %u: missing serdes\n",
- portno);
- of_node_put(portnp);
- goto cleanup_config;
+ /* There is no SerDes node for RGMII ports. */
+ if (!ops->is_port_rgmii(portno)) {
+ serdes = devm_of_phy_get(sparx5->dev, portnp, NULL);
+ if (IS_ERR(serdes)) {
+ err = dev_err_probe(sparx5->dev,
+ PTR_ERR(serdes),
+ "port %u: missing serdes\n",
+ portno);
+ of_node_put(portnp);
+ goto cleanup_config;
+ }
}
config->portno = portno;
config->node = portnp;
@@ -1015,6 +1029,7 @@ cleanup_pnode:
static void mchp_sparx5_remove(struct platform_device *pdev)
{
struct sparx5 *sparx5 = platform_get_drvdata(pdev);
+ const struct sparx5_ops *ops = sparx5->data->ops;
debugfs_remove_recursive(sparx5->debugfs_root);
if (sparx5->xtr_irq) {
@@ -1026,7 +1041,7 @@ static void mchp_sparx5_remove(struct platform_device *pdev)
sparx5->fdma_irq = -ENXIO;
}
sparx5_ptp_deinit(sparx5);
- sparx5_fdma_stop(sparx5);
+ ops->fdma_deinit(sparx5);
sparx5_cleanup_ports(sparx5);
sparx5_vcap_destroy(sparx5);
/* Unregister netdevs */
@@ -1073,6 +1088,7 @@ static const struct sparx5_ops sparx5_ops = {
.is_port_5g = &sparx5_port_is_5g,
.is_port_10g = &sparx5_port_is_10g,
.is_port_25g = &sparx5_port_is_25g,
+ .is_port_rgmii = &sparx5_port_is_rgmii,
.get_port_dev_index = &sparx5_port_dev_mapping,
.get_port_dev_bit = &sparx5_port_dev_mapping,
.get_hsch_max_group_rate = &sparx5_get_hsch_max_group_rate,
@@ -1080,6 +1096,10 @@ static const struct sparx5_ops sparx5_ops = {
.set_port_mux = &sparx5_port_mux_set,
.ptp_irq_handler = &sparx5_ptp_irq_handler,
.dsm_calendar_calc = &sparx5_dsm_calendar_calc,
+ .fdma_init = &sparx5_fdma_init,
+ .fdma_deinit = &sparx5_fdma_deinit,
+ .fdma_poll = &sparx5_fdma_napi_callback,
+ .fdma_xmit = &sparx5_fdma_xmit,
};
static const struct sparx5_match_data sparx5_desc = {
@@ -1093,7 +1113,7 @@ static const struct sparx5_match_data sparx5_desc = {
static const struct of_device_id mchp_sparx5_match[] = {
{ .compatible = "microchip,sparx5-switch", .data = &sparx5_desc },
-#if IS_ENABLED(CONFIG_LAN969X_SWITCH)
+#ifdef CONFIG_LAN969X_SWITCH
{ .compatible = "microchip,lan9691-switch", .data = &lan969x_desc },
#endif
{ }
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
index d5dd953b0a71..fe7d8bcc0cd9 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
@@ -112,6 +112,8 @@ enum sparx5_feature {
#define XTR_QUEUE 0
#define INJ_QUEUE 0
+#define FDMA_XTR_CHANNEL 6
+#define FDMA_INJ_CHANNEL 0
#define FDMA_DCB_MAX 64
#define FDMA_RX_DCB_MAX_DBS 15
#define FDMA_TX_DCB_MAX_DBS 1
@@ -157,11 +159,25 @@ struct sparx5_calendar_data {
*/
struct sparx5_rx {
struct fdma fdma;
- struct sk_buff *skb[FDMA_DCB_MAX][FDMA_RX_DCB_MAX_DBS];
+ struct page_pool *page_pool;
+ union {
+ struct sk_buff *skb[FDMA_DCB_MAX][FDMA_RX_DCB_MAX_DBS];
+ struct page *page[FDMA_DCB_MAX][FDMA_RX_DCB_MAX_DBS];
+ };
dma_addr_t dma;
struct napi_struct napi;
struct net_device *ndev;
u64 packets;
+ u8 page_order;
+};
+
+/* Used to store information about TX buffers. */
+struct sparx5_tx_buf {
+ struct net_device *dev;
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+ bool used;
+ bool ptp;
};
/* Frame DMA transmit state:
@@ -169,6 +185,7 @@ struct sparx5_rx {
*/
struct sparx5_tx {
struct fdma fdma;
+ struct sparx5_tx_buf *dbs;
u64 packets;
u64 dropped;
};
@@ -313,6 +330,7 @@ struct sparx5_ops {
bool (*is_port_5g)(int portno);
bool (*is_port_10g)(int portno);
bool (*is_port_25g)(int portno);
+ bool (*is_port_rgmii)(int portno);
u32 (*get_port_dev_index)(struct sparx5 *sparx5, int port);
u32 (*get_port_dev_bit)(struct sparx5 *sparx5, int port);
u32 (*get_hsch_max_group_rate)(int grp);
@@ -323,6 +341,13 @@ struct sparx5_ops {
irqreturn_t (*ptp_irq_handler)(int irq, void *args);
int (*dsm_calendar_calc)(struct sparx5 *sparx5, u32 taxi,
struct sparx5_calendar_data *data);
+ int (*port_config_rgmii)(struct sparx5_port *port,
+ struct sparx5_port_config *conf);
+ int (*fdma_init)(struct sparx5 *sparx5);
+ int (*fdma_deinit)(struct sparx5 *sparx5);
+ int (*fdma_poll)(struct napi_struct *napi, int weight);
+ int (*fdma_xmit)(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb,
+ struct net_device *dev);
};
struct sparx5_main_io_resource {
@@ -433,10 +458,16 @@ int sparx5_manual_injection_mode(struct sparx5 *sparx5);
void sparx5_port_inj_timer_setup(struct sparx5_port *port);
/* sparx5_fdma.c */
+int sparx5_fdma_init(struct sparx5 *sparx5);
+int sparx5_fdma_deinit(struct sparx5 *sparx5);
int sparx5_fdma_start(struct sparx5 *sparx5);
int sparx5_fdma_stop(struct sparx5 *sparx5);
-int sparx5_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb);
+int sparx5_fdma_napi_callback(struct napi_struct *napi, int weight);
+int sparx5_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb,
+ struct net_device *dev);
irqreturn_t sparx5_fdma_handler(int irq, void *args);
+void sparx5_fdma_reload(struct sparx5 *sparx5, struct fdma *fdma);
+void sparx5_fdma_injection_mode(struct sparx5 *sparx5);
/* sparx5_mactable.c */
void sparx5_mact_pull_work(struct work_struct *work);
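The hunks above replace direct calls into sparx5_fdma_*() with indirection through the per-SoC sparx5_ops table, so lan969x can plug in its own FDMA and RGMII handling while Sparx5 keeps its existing paths. A minimal standalone C sketch of that match-data/ops dispatch pattern (all names below are illustrative, not driver symbols):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical per-SoC operations table, mirroring the sparx5_ops idea. */
struct soc_ops {
	int  (*fdma_init)(void *priv);
	bool (*is_port_rgmii)(int portno);
};

static int  gen1_fdma_init(void *priv)     { puts("gen1 FDMA init"); return 0; }
static bool gen1_is_port_rgmii(int portno) { return false; }          /* no RGMII ports */
static int  gen2_fdma_init(void *priv)     { puts("gen2 FDMA init"); return 0; }
static bool gen2_is_port_rgmii(int portno) { return portno == 28 || portno == 29; }

static const struct soc_ops gen1_ops = { gen1_fdma_init, gen1_is_port_rgmii };
static const struct soc_ops gen2_ops = { gen2_fdma_init, gen2_is_port_rgmii };

int main(void)
{
	/* At probe time the ops pointer would come from the OF match data. */
	const struct soc_ops *ops = &gen2_ops;

	ops->fdma_init(NULL);
	printf("port 28 RGMII: %d\n", ops->is_port_rgmii(28));
	return 0;
}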
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h
index 561344f19062..d9ef4ef137b8 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h
@@ -37,6 +37,7 @@ enum sparx5_target {
TARGET_FDMA = 117,
TARGET_GCB = 118,
TARGET_HSCH = 119,
+ TARGET_HSIO_WRAP = 120,
TARGET_LRN = 122,
TARGET_PCEP = 129,
TARGET_PCS10G_BR = 132,
@@ -54,6 +55,7 @@ enum sparx5_target {
TARGET_VCAP_SUPER = 326,
TARGET_VOP = 327,
TARGET_XQS = 331,
+ TARGET_DEVRGMII = 392,
NUM_TARGETS = 517
};
@@ -5367,6 +5369,69 @@ extern const struct sparx5_regs *regs;
#define HSCH_TAS_STATEMACHINE_CFG_REVISIT_DLY_GET(x)\
FIELD_GET(HSCH_TAS_STATEMACHINE_CFG_REVISIT_DLY, x)
+/* LAN969X ONLY */
+/* HSIOWRAP:XMII_CFG:XMII_CFG */
+#define HSIO_WRAP_XMII_CFG(g) \
+ __REG(TARGET_HSIO_WRAP, 0, 1, 116, g, 2, 20, 0, 0, 1, 4)
+
+#define HSIO_WRAP_XMII_CFG_GPIO_XMII_CFG GENMASK(2, 1)
+#define HSIO_WRAP_XMII_CFG_GPIO_XMII_CFG_SET(x)\
+ FIELD_PREP(HSIO_WRAP_XMII_CFG_GPIO_XMII_CFG, x)
+#define HSIO_WRAP_XMII_CFG_GPIO_XMII_CFG_GET(x)\
+ FIELD_GET(HSIO_WRAP_XMII_CFG_GPIO_XMII_CFG, x)
+
+/* LAN969X ONLY */
+/* HSIOWRAP:XMII_CFG:RGMII_CFG */
+#define HSIO_WRAP_RGMII_CFG(g) \
+ __REG(TARGET_HSIO_WRAP, 0, 1, 116, g, 2, 20, 4, 0, 1, 4)
+
+#define HSIO_WRAP_RGMII_CFG_TX_CLK_CFG GENMASK(4, 2)
+#define HSIO_WRAP_RGMII_CFG_TX_CLK_CFG_SET(x)\
+ FIELD_PREP(HSIO_WRAP_RGMII_CFG_TX_CLK_CFG, x)
+#define HSIO_WRAP_RGMII_CFG_TX_CLK_CFG_GET(x)\
+ FIELD_GET(HSIO_WRAP_RGMII_CFG_TX_CLK_CFG, x)
+
+#define HSIO_WRAP_RGMII_CFG_RGMII_TX_RST BIT(1)
+#define HSIO_WRAP_RGMII_CFG_RGMII_TX_RST_SET(x)\
+ FIELD_PREP(HSIO_WRAP_RGMII_CFG_RGMII_TX_RST, x)
+#define HSIO_WRAP_RGMII_CFG_RGMII_TX_RST_GET(x)\
+ FIELD_GET(HSIO_WRAP_RGMII_CFG_RGMII_TX_RST, x)
+
+#define HSIO_WRAP_RGMII_CFG_RGMII_RX_RST BIT(0)
+#define HSIO_WRAP_RGMII_CFG_RGMII_RX_RST_SET(x)\
+ FIELD_PREP(HSIO_WRAP_RGMII_CFG_RGMII_RX_RST, x)
+#define HSIO_WRAP_RGMII_CFG_RGMII_RX_RST_GET(x)\
+ FIELD_GET(HSIO_WRAP_RGMII_CFG_RGMII_RX_RST, x)
+
+/* LAN969X ONLY */
+/* HSIOWRAP:XMII_CFG:DLL_CFG */
+#define HSIO_WRAP_DLL_CFG(g, r) \
+ __REG(TARGET_HSIO_WRAP, 0, 1, 116, g, 2, 20, 12, r, 2, 4)
+
+#define HSIO_WRAP_DLL_CFG_DLL_ENA BIT(19)
+#define HSIO_WRAP_DLL_CFG_DLL_ENA_SET(x)\
+ FIELD_PREP(HSIO_WRAP_DLL_CFG_DLL_ENA, x)
+#define HSIO_WRAP_DLL_CFG_DLL_ENA_GET(x)\
+ FIELD_GET(HSIO_WRAP_DLL_CFG_DLL_ENA, x)
+
+#define HSIO_WRAP_DLL_CFG_DLL_CLK_ENA BIT(18)
+#define HSIO_WRAP_DLL_CFG_DLL_CLK_ENA_SET(x)\
+ FIELD_PREP(HSIO_WRAP_DLL_CFG_DLL_CLK_ENA, x)
+#define HSIO_WRAP_DLL_CFG_DLL_CLK_ENA_GET(x)\
+ FIELD_GET(HSIO_WRAP_DLL_CFG_DLL_CLK_ENA, x)
+
+#define HSIO_WRAP_DLL_CFG_DLL_CLK_SEL GENMASK(17, 15)
+#define HSIO_WRAP_DLL_CFG_DLL_CLK_SEL_SET(x)\
+ FIELD_PREP(HSIO_WRAP_DLL_CFG_DLL_CLK_SEL, x)
+#define HSIO_WRAP_DLL_CFG_DLL_CLK_SEL_GET(x)\
+ FIELD_GET(HSIO_WRAP_DLL_CFG_DLL_CLK_SEL, x)
+
+#define HSIO_WRAP_DLL_CFG_DLL_RST BIT(0)
+#define HSIO_WRAP_DLL_CFG_DLL_RST_SET(x)\
+ FIELD_PREP(HSIO_WRAP_DLL_CFG_DLL_RST, x)
+#define HSIO_WRAP_DLL_CFG_DLL_RST_GET(x)\
+ FIELD_GET(HSIO_WRAP_DLL_CFG_DLL_RST, x)
+
/* LRN:COMMON:COMMON_ACCESS_CTRL */
#define LRN_COMMON_ACCESS_CTRL \
__REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 0, 0, 1, 4)
@@ -8110,4 +8175,84 @@ extern const struct sparx5_regs *regs;
#define XQS_CNT(g) \
__REG(TARGET_XQS, 0, 1, 0, g, 1024, 4, 0, 0, 1, 4)
+/* LAN969X ONLY */
+/* DEV1G:DEV_CFG_STATUS:DEV_RST_CTRL */
+#define DEVRGMII_DEV_RST_CTRL(t) \
+ __REG(TARGET_DEVRGMII, t, 2, 0, 0, 1, 36, 0, 0, 1, 4)
+
+#define DEVRGMII_DEV_RST_CTRL_SPEED_SEL GENMASK(22, 20)
+#define DEVRGMII_DEV_RST_CTRL_SPEED_SEL_SET(x)\
+ FIELD_PREP(DEVRGMII_DEV_RST_CTRL_SPEED_SEL, x)
+#define DEVRGMII_DEV_RST_CTRL_SPEED_SEL_GET(x)\
+ FIELD_GET(DEVRGMII_DEV_RST_CTRL_SPEED_SEL, x)
+
+/* LAN969X ONLY */
+/* DEV1G:MAC_CFG_STATUS:MAC_ENA_CFG */
+#define DEVRGMII_MAC_ENA_CFG(t) \
+ __REG(TARGET_DEVRGMII, t, 2, 36, 0, 1, 36, 0, 0, 1, 4)
+
+#define DEVRGMII_MAC_ENA_CFG_RX_ENA BIT(4)
+#define DEVRGMII_MAC_ENA_CFG_RX_ENA_SET(x)\
+ FIELD_PREP(DEVRGMII_MAC_ENA_CFG_RX_ENA, x)
+#define DEVRGMII_MAC_ENA_CFG_RX_ENA_GET(x)\
+ FIELD_GET(DEVRGMII_MAC_ENA_CFG_RX_ENA, x)
+
+#define DEVRGMII_MAC_ENA_CFG_TX_ENA BIT(0)
+#define DEVRGMII_MAC_ENA_CFG_TX_ENA_SET(x)\
+ FIELD_PREP(DEVRGMII_MAC_ENA_CFG_TX_ENA, x)
+#define DEVRGMII_MAC_ENA_CFG_TX_ENA_GET(x)\
+ FIELD_GET(DEVRGMII_MAC_ENA_CFG_TX_ENA, x)
+
+/* LAN969X ONLY */
+/* DEV1G:MAC_CFG_STATUS:MAC_TAGS_CFG */
+#define DEVRGMII_MAC_TAGS_CFG(t) \
+ __REG(TARGET_DEVRGMII, t, 2, 36, 0, 1, 36, 12, 0, 1, 4)
+
+#define DEVRGMII_MAC_TAGS_CFG_TAG_ID GENMASK(31, 16)
+#define DEVRGMII_MAC_TAGS_CFG_TAG_ID_SET(x)\
+ FIELD_PREP(DEVRGMII_MAC_TAGS_CFG_TAG_ID, x)
+#define DEVRGMII_MAC_TAGS_CFG_TAG_ID_GET(x)\
+ FIELD_GET(DEVRGMII_MAC_TAGS_CFG_TAG_ID, x)
+
+#define DEVRGMII_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA BIT(3)
+#define DEVRGMII_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA_SET(x)\
+ FIELD_PREP(DEVRGMII_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA, x)
+#define DEVRGMII_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA_GET(x)\
+ FIELD_GET(DEVRGMII_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA, x)
+
+#define DEVRGMII_MAC_TAGS_CFG_PB_ENA GENMASK(2, 1)
+#define DEVRGMII_MAC_TAGS_CFG_PB_ENA_SET(x)\
+ FIELD_PREP(DEVRGMII_MAC_TAGS_CFG_PB_ENA, x)
+#define DEVRGMII_MAC_TAGS_CFG_PB_ENA_GET(x)\
+ FIELD_GET(DEVRGMII_MAC_TAGS_CFG_PB_ENA, x)
+
+#define DEVRGMII_MAC_TAGS_CFG_VLAN_AWR_ENA BIT(0)
+#define DEVRGMII_MAC_TAGS_CFG_VLAN_AWR_ENA_SET(x)\
+ FIELD_PREP(DEVRGMII_MAC_TAGS_CFG_VLAN_AWR_ENA, x)
+#define DEVRGMII_MAC_TAGS_CFG_VLAN_AWR_ENA_GET(x)\
+ FIELD_GET(DEVRGMII_MAC_TAGS_CFG_VLAN_AWR_ENA, x)
+
+/* LAN969X ONLY */
+/* DEV1G:MAC_CFG_STATUS:MAC_IFG_CFG */
+#define DEVRGMII_MAC_IFG_CFG(t) \
+ __REG(TARGET_DEVRGMII, t, 2, 36, 0, 1, 36, 24, 0, 1, 4)
+
+#define DEVRGMII_MAC_IFG_CFG_TX_IFG GENMASK(12, 8)
+#define DEVRGMII_MAC_IFG_CFG_TX_IFG_SET(x)\
+ FIELD_PREP(DEVRGMII_MAC_IFG_CFG_TX_IFG, x)
+#define DEVRGMII_MAC_IFG_CFG_TX_IFG_GET(x)\
+ FIELD_GET(DEVRGMII_MAC_IFG_CFG_TX_IFG, x)
+
+#define DEVRGMII_MAC_IFG_CFG_RX_IFG2 GENMASK(7, 4)
+#define DEVRGMII_MAC_IFG_CFG_RX_IFG2_SET(x)\
+ FIELD_PREP(DEVRGMII_MAC_IFG_CFG_RX_IFG2, x)
+#define DEVRGMII_MAC_IFG_CFG_RX_IFG2_GET(x)\
+ FIELD_GET(DEVRGMII_MAC_IFG_CFG_RX_IFG2, x)
+
+#define DEVRGMII_MAC_IFG_CFG_RX_IFG1 GENMASK(3, 0)
+#define DEVRGMII_MAC_IFG_CFG_RX_IFG1_SET(x)\
+ FIELD_PREP(DEVRGMII_MAC_IFG_CFG_RX_IFG1, x)
+#define DEVRGMII_MAC_IFG_CFG_RX_IFG1_GET(x)\
+ FIELD_GET(DEVRGMII_MAC_IFG_CFG_RX_IFG1, x)
+
#endif /* _SPARX5_MAIN_REGS_H_ */
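The new HSIO_WRAP and DEVRGMII definitions follow the driver's usual GENMASK()/FIELD_PREP()/FIELD_GET() register-field convention. A small standalone sketch of how such field helpers pack and extract values (simplified userspace stand-ins for the kernel macros, with a hypothetical field layout loosely modelled on the RGMII_CFG fields):

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for GENMASK()/FIELD_PREP()/FIELD_GET(). */
#define GENMASK32(h, l)     (((~0u) >> (31 - (h))) & ((~0u) << (l)))
#define FIELD_SHIFT(mask)   (__builtin_ctz(mask))
#define FIELD_PREP(mask, v) (((uint32_t)(v) << FIELD_SHIFT(mask)) & (mask))
#define FIELD_GET(mask, r)  (((r) & (mask)) >> FIELD_SHIFT(mask))

/* Hypothetical field layout: a 3-bit clock select and a reset bit. */
#define CFG_TX_CLK   GENMASK32(4, 2)
#define CFG_TX_RST   (1u << 1)

int main(void)
{
	uint32_t reg = 0;

	reg |= FIELD_PREP(CFG_TX_CLK, 5);   /* program clock select = 5 */
	reg |= CFG_TX_RST;                  /* keep TX in reset        */

	printf("reg=0x%08x tx_clk=%u\n", reg, FIELD_GET(CFG_TX_CLK, reg));
	return 0;
}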
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_mirror.c b/drivers/net/ethernet/microchip/sparx5/sparx5_mirror.c
index 9806729e9c62..76097761fa97 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_mirror.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_mirror.c
@@ -12,7 +12,6 @@
#define SPX5_MIRROR_DISABLED 0
#define SPX5_MIRROR_EGRESS 1
#define SPX5_MIRROR_INGRESS 2
-#define SPX5_MIRROR_MONITOR_PORT_DEFAULT 65
#define SPX5_QFWD_MP_OFFSET 9 /* Mirror port offset in the QFWD register */
/* Convert from bool ingress/egress to mirror direction */
@@ -200,7 +199,7 @@ void sparx5_mirror_del(struct sparx5_mall_entry *entry)
sparx5_mirror_monitor_set(sparx5,
mirror_idx,
- SPX5_MIRROR_MONITOR_PORT_DEFAULT);
+ sparx5->data->consts->n_ports);
}
void sparx5_mirror_stats(struct sparx5_mall_entry *entry,
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
index b6f635d85820..f713656f1fae 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
@@ -232,9 +232,12 @@ netdev_tx_t sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev)
struct net_device_stats *stats = &dev->stats;
struct sparx5_port *port = netdev_priv(dev);
struct sparx5 *sparx5 = port->sparx5;
+ const struct sparx5_ops *ops;
u32 ifh[IFH_LEN];
netdev_tx_t ret;
+ ops = sparx5->data->ops;
+
memset(ifh, 0, IFH_LEN * 4);
sparx5_set_port_ifh(sparx5, ifh, port->portno);
@@ -254,7 +257,7 @@ netdev_tx_t sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev)
skb_tx_timestamp(skb);
spin_lock(&sparx5->tx_lock);
if (sparx5->fdma_irq > 0)
- ret = sparx5_fdma_xmit(sparx5, ifh, skb);
+ ret = ops->fdma_xmit(sparx5, ifh, skb, dev);
else
ret = sparx5_inject(sparx5, ifh, skb, dev);
spin_unlock(&sparx5->tx_lock);
@@ -264,6 +267,12 @@ netdev_tx_t sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev)
if (ret < 0)
goto drop;
+ if (!is_sparx5(sparx5))
+ /* On lan969x, when TX_OK, stats and SKB consumption are handled
+ * in the TX completion loop, so don't go any further.
+ */
+ return NETDEV_TX_OK;
+
stats->tx_bytes += skb->len;
stats->tx_packets++;
sparx5->tx.packets++;
@@ -366,6 +375,6 @@ irqreturn_t sparx5_xtr_handler(int irq, void *_sparx5)
void sparx5_port_inj_timer_setup(struct sparx5_port *port)
{
- hrtimer_init(&port->inj_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- port->inj_timer.function = sparx5_injection_timeout;
+ hrtimer_setup(&port->inj_timer, sparx5_injection_timeout, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
}
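The last hunk above converts the injection timer to hrtimer_setup(), which takes the callback at initialization time instead of assigning timer.function afterwards. A short, hedged sketch of the same conversion pattern for a hypothetical driver structure (names are illustrative):

#include <linux/hrtimer.h>
#include <linux/ktime.h>

struct my_port {
	struct hrtimer inj_timer;
};

static enum hrtimer_restart my_injection_timeout(struct hrtimer *t)
{
	/* container_of(t, struct my_port, inj_timer) recovers the port */
	return HRTIMER_NORESTART;
}

static void my_port_timer_setup(struct my_port *port)
{
	hrtimer_setup(&port->inj_timer, my_injection_timeout,
		      CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer_start(&port->inj_timer, ms_to_ktime(10), HRTIMER_MODE_REL);
}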
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c b/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c
index f8562c1a894d..cfb4b2e17ace 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c
@@ -32,7 +32,19 @@ sparx5_phylink_mac_select_pcs(struct phylink_config *config,
{
struct sparx5_port *port = netdev_priv(to_net_dev(config->dev));
- return &port->phylink_pcs;
+ /* Return the PCS for all the modes that require it. */
+ switch (interface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ case PHY_INTERFACE_MODE_5GBASER:
+ case PHY_INTERFACE_MODE_10GBASER:
+ case PHY_INTERFACE_MODE_25GBASER:
+ return &port->phylink_pcs;
+ default:
+ return NULL;
+ }
}
static void sparx5_phylink_mac_config(struct phylink_config *config,
@@ -77,7 +89,7 @@ static struct sparx5_port *sparx5_pcs_to_port(struct phylink_pcs *pcs)
return container_of(pcs, struct sparx5_port, phylink_pcs);
}
-static void sparx5_pcs_get_state(struct phylink_pcs *pcs,
+static void sparx5_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
struct phylink_link_state *state)
{
struct sparx5_port *port = sparx5_pcs_to_port(pcs);
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
index 1401761c6251..04bc8fffaf96 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
@@ -257,6 +257,15 @@ static int sparx5_port_verify_speed(struct sparx5 *sparx5,
conf->speed != SPEED_25000))
return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ if (conf->speed != SPEED_1000 &&
+ conf->speed != SPEED_100 &&
+ conf->speed != SPEED_10)
+ return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
+ break;
default:
return sparx5_port_error(port, conf, SPX5_PERR_IFTYPE);
}
@@ -994,6 +1003,7 @@ int sparx5_port_config(struct sparx5 *sparx5,
struct sparx5_port *port,
struct sparx5_port_config *conf)
{
+ bool rgmii = phy_interface_mode_is_rgmii(conf->phy_mode);
bool high_speed_dev = sparx5_is_baser(conf->portmode);
const struct sparx5_ops *ops = sparx5->data->ops;
int err, urgency, stop_wm;
@@ -1002,8 +1012,14 @@ int sparx5_port_config(struct sparx5 *sparx5,
if (err)
return err;
+ if (rgmii) {
+ err = ops->port_config_rgmii(port, conf);
+ if (err)
+ return err;
+ }
+
/* high speed device is already configured */
- if (!high_speed_dev)
+ if (!rgmii && !high_speed_dev)
sparx5_port_config_low_set(sparx5, port, conf);
/* Configure flow control */
@@ -1067,24 +1083,6 @@ int sparx5_port_init(struct sparx5 *sparx5,
if (err)
return err;
- /* Configure MAC vlan awareness */
- err = sparx5_port_max_tags_set(sparx5, port);
- if (err)
- return err;
-
- /* Set Max Length */
- spx5_rmw(DEV2G5_MAC_MAXLEN_CFG_MAX_LEN_SET(ETH_MAXLEN),
- DEV2G5_MAC_MAXLEN_CFG_MAX_LEN,
- sparx5,
- DEV2G5_MAC_MAXLEN_CFG(port->portno));
-
- /* 1G/2G5: Signal Detect configuration */
- spx5_wr(DEV2G5_PCS1G_SD_CFG_SD_POL_SET(sd_pol) |
- DEV2G5_PCS1G_SD_CFG_SD_SEL_SET(sd_sel) |
- DEV2G5_PCS1G_SD_CFG_SD_ENA_SET(sd_ena),
- sparx5,
- DEV2G5_PCS1G_SD_CFG(port->portno));
-
/* Set Pause WM hysteresis */
spx5_rmw(QSYS_PAUSE_CFG_PAUSE_START_SET(pause_start) |
QSYS_PAUSE_CFG_PAUSE_STOP_SET(pause_stop) |
@@ -1108,6 +1106,27 @@ int sparx5_port_init(struct sparx5 *sparx5,
ANA_CL_FILTER_CTRL_FILTER_SMAC_MC_DIS,
sparx5, ANA_CL_FILTER_CTRL(port->portno));
+ if (ops->is_port_rgmii(port->portno))
+ return 0; /* RGMII device - nothing more to configure */
+
+ /* Configure MAC vlan awareness */
+ err = sparx5_port_max_tags_set(sparx5, port);
+ if (err)
+ return err;
+
+ /* Set Max Length */
+ spx5_rmw(DEV2G5_MAC_MAXLEN_CFG_MAX_LEN_SET(ETH_MAXLEN),
+ DEV2G5_MAC_MAXLEN_CFG_MAX_LEN,
+ sparx5,
+ DEV2G5_MAC_MAXLEN_CFG(port->portno));
+
+ /* 1G/2G5: Signal Detect configuration */
+ spx5_wr(DEV2G5_PCS1G_SD_CFG_SD_POL_SET(sd_pol) |
+ DEV2G5_PCS1G_SD_CFG_SD_SEL_SET(sd_sel) |
+ DEV2G5_PCS1G_SD_CFG_SD_ENA_SET(sd_ena),
+ sparx5,
+ DEV2G5_PCS1G_SD_CFG(port->portno));
+
if (conf->portmode == PHY_INTERFACE_MODE_QSGMII ||
conf->portmode == PHY_INTERFACE_MODE_SGMII) {
err = sparx5_serdes_set(sparx5, port, conf);
@@ -1151,7 +1170,7 @@ int sparx5_port_init(struct sparx5 *sparx5,
spx5_inst_rmw(DEV10G_MAC_MAXLEN_CFG_MAX_LEN_SET(ETH_MAXLEN),
DEV10G_MAC_MAXLEN_CFG_MAX_LEN,
devinst,
- DEV10G_MAC_ENA_CFG(0));
+ DEV10G_MAC_MAXLEN_CFG(0));
/* Handle Signal Detect in 10G PCS */
spx5_inst_wr(PCS10G_BR_PCS_SD_CFG_SD_POL_SET(sd_pol) |
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_port.h b/drivers/net/ethernet/microchip/sparx5/sparx5_port.h
index 9b9bcc6834bc..c8a37468a3d1 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_port.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_port.h
@@ -40,6 +40,11 @@ static inline bool sparx5_port_is_25g(int portno)
return portno >= 56 && portno <= 63;
}
+static inline bool sparx5_port_is_rgmii(int portno)
+{
+ return false;
+}
+
static inline u32 sparx5_to_high_dev(struct sparx5 *sparx5, int port)
{
const struct sparx5_ops *ops = sparx5->data->ops;
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c b/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c
index 1c2903700a9c..2f168700f63c 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c
@@ -303,7 +303,6 @@ void sparx5_get_hwtimestamp(struct sparx5 *sparx5,
spin_unlock_irqrestore(&sparx5->ptp_clock_lock, flags);
}
-EXPORT_SYMBOL_GPL(sparx5_get_hwtimestamp);
irqreturn_t sparx5_ptp_irq_handler(int irq, void *args)
{
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index e97af7ac2bb2..52cf7112762c 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -31,6 +31,9 @@ static void mana_gd_init_pf_regs(struct pci_dev *pdev)
gc->db_page_base = gc->bar0_va +
mana_gd_r64(gc, GDMA_PF_REG_DB_PAGE_OFF);
+ gc->phys_db_page_base = gc->bar0_pa +
+ mana_gd_r64(gc, GDMA_PF_REG_DB_PAGE_OFF);
+
sriov_base_off = mana_gd_r64(gc, GDMA_SRIOV_REG_CFG_BASE_OFF);
sriov_base_va = gc->bar0_va + sriov_base_off;
@@ -134,9 +137,10 @@ static int mana_gd_detect_devices(struct pci_dev *pdev)
struct gdma_list_devices_resp resp = {};
struct gdma_general_req req = {};
struct gdma_dev_id dev;
- u32 i, max_num_devs;
+ int found_dev = 0;
u16 dev_type;
int err;
+ u32 i;
mana_gd_init_req_hdr(&req.hdr, GDMA_LIST_DEVICES, sizeof(req),
sizeof(resp));
@@ -148,12 +152,17 @@ static int mana_gd_detect_devices(struct pci_dev *pdev)
return err ? err : -EPROTO;
}
- max_num_devs = min_t(u32, MAX_NUM_GDMA_DEVICES, resp.num_of_devs);
-
- for (i = 0; i < max_num_devs; i++) {
+ for (i = 0; i < GDMA_DEV_LIST_SIZE &&
+ found_dev < resp.num_of_devs; i++) {
dev = resp.devs[i];
dev_type = dev.type;
+ /* Skip empty devices */
+ if (dev.as_uint32 == 0)
+ continue;
+
+ found_dev++;
+
/* HWC is already detected in mana_hwc_create_channel(). */
if (dev_type == GDMA_DEVICE_HWC)
continue;
@@ -177,7 +186,7 @@ int mana_gd_send_request(struct gdma_context *gc, u32 req_len, const void *req,
return mana_hwc_send_request(hwc, req_len, req, resp_len, resp);
}
-EXPORT_SYMBOL_NS(mana_gd_send_request, NET_MANA);
+EXPORT_SYMBOL_NS(mana_gd_send_request, "NET_MANA");
int mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length,
struct gdma_mem_info *gmi)
@@ -331,6 +340,7 @@ void mana_gd_wq_ring_doorbell(struct gdma_context *gc, struct gdma_queue *queue)
mana_gd_ring_doorbell(gc, queue->gdma_dev->doorbell, queue->type,
queue->id, queue->head * GDMA_WQE_BU_SIZE, 0);
}
+EXPORT_SYMBOL_NS(mana_gd_wq_ring_doorbell, "NET_MANA");
void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit)
{
@@ -343,6 +353,7 @@ void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit)
mana_gd_ring_doorbell(gc, cq->gdma_dev->doorbell, cq->type, cq->id,
head, arm_bit);
}
+EXPORT_SYMBOL_NS(mana_gd_ring_cq, "NET_MANA");
static void mana_gd_process_eqe(struct gdma_queue *eq)
{
@@ -383,6 +394,7 @@ static void mana_gd_process_eqe(struct gdma_queue *eq)
case GDMA_EQE_HWC_INIT_EQ_ID_DB:
case GDMA_EQE_HWC_INIT_DATA:
case GDMA_EQE_HWC_INIT_DONE:
+ case GDMA_EQE_HWC_SOC_SERVICE:
case GDMA_EQE_RNIC_QP_FATAL:
if (!eq->eq.callback)
break;
@@ -666,8 +678,11 @@ int mana_gd_create_hwc_queue(struct gdma_dev *gd,
gmi = &queue->mem_info;
err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
- if (err)
+ if (err) {
+ dev_err(gc->dev, "GDMA queue type: %d, size: %u, gdma memory allocation err: %d\n",
+ spec->type, spec->queue_size, err);
goto free_q;
+ }
queue->head = 0;
queue->tail = 0;
@@ -688,6 +703,8 @@ int mana_gd_create_hwc_queue(struct gdma_dev *gd,
*queue_ptr = queue;
return 0;
out:
+ dev_err(gc->dev, "Failed to create queue type %d of size %u, err: %d\n",
+ spec->type, spec->queue_size, err);
mana_gd_free_memory(gmi);
free_q:
kfree(queue);
@@ -716,7 +733,7 @@ int mana_gd_destroy_dma_region(struct gdma_context *gc, u64 dma_region_handle)
return 0;
}
-EXPORT_SYMBOL_NS(mana_gd_destroy_dma_region, NET_MANA);
+EXPORT_SYMBOL_NS(mana_gd_destroy_dma_region, "NET_MANA");
static int mana_gd_create_dma_region(struct gdma_dev *gd,
struct gdma_mem_info *gmi)
@@ -770,7 +787,13 @@ static int mana_gd_create_dma_region(struct gdma_dev *gd,
}
gmi->dma_region_handle = resp.dma_region_handle;
+ dev_dbg(gc->dev, "Created DMA region handle 0x%llx\n",
+ gmi->dma_region_handle);
out:
+ if (err)
+ dev_dbg(gc->dev,
+ "Failed to create DMA region of length: %u, page_type: %d, status: 0x%x, err: %d\n",
+ length, req->gdma_page_type, resp.hdr.status, err);
kfree(req);
return err;
}
@@ -793,8 +816,11 @@ int mana_gd_create_mana_eq(struct gdma_dev *gd,
gmi = &queue->mem_info;
err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
- if (err)
+ if (err) {
+ dev_err(gc->dev, "GDMA queue type: %d, size: %u, gdma memory allocation err: %d\n",
+ spec->type, spec->queue_size, err);
goto free_q;
+ }
err = mana_gd_create_dma_region(gd, gmi);
if (err)
@@ -815,12 +841,14 @@ int mana_gd_create_mana_eq(struct gdma_dev *gd,
*queue_ptr = queue;
return 0;
out:
+ dev_err(gc->dev, "Failed to create queue type %d of size: %u, err: %d\n",
+ spec->type, spec->queue_size, err);
mana_gd_free_memory(gmi);
free_q:
kfree(queue);
return err;
}
-EXPORT_SYMBOL_NS(mana_gd_create_mana_eq, NET_MANA);
+EXPORT_SYMBOL_NS(mana_gd_create_mana_eq, "NET_MANA");
int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
const struct gdma_queue_spec *spec,
@@ -841,8 +869,11 @@ int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
gmi = &queue->mem_info;
err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
- if (err)
+ if (err) {
+ dev_err(gc->dev, "GDMA queue type: %d, size: %u, memory allocation err: %d\n",
+ spec->type, spec->queue_size, err);
goto free_q;
+ }
err = mana_gd_create_dma_region(gd, gmi);
if (err)
@@ -862,11 +893,14 @@ int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
*queue_ptr = queue;
return 0;
out:
+ dev_err(gc->dev, "Failed to create queue type %d of size: %u, err: %d\n",
+ spec->type, spec->queue_size, err);
mana_gd_free_memory(gmi);
free_q:
kfree(queue);
return err;
}
+EXPORT_SYMBOL_NS(mana_gd_create_mana_wq_cq, "NET_MANA");
void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue)
{
@@ -897,7 +931,7 @@ void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue)
mana_gd_free_memory(gmi);
kfree(queue);
}
-EXPORT_SYMBOL_NS(mana_gd_destroy_queue, NET_MANA);
+EXPORT_SYMBOL_NS(mana_gd_destroy_queue, "NET_MANA");
int mana_gd_verify_vf_version(struct pci_dev *pdev)
{
@@ -934,6 +968,7 @@ int mana_gd_verify_vf_version(struct pci_dev *pdev)
err, resp.hdr.status);
return err ? err : -EPROTO;
}
+ gc->pf_cap_flags1 = resp.pf_cap_flags1;
if (resp.pf_cap_flags1 & GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG) {
err = mana_gd_query_hwc_timeout(pdev, &hwc->hwc_timeout);
if (err) {
@@ -974,7 +1009,6 @@ int mana_gd_register_device(struct gdma_dev *gd)
return 0;
}
-EXPORT_SYMBOL_NS(mana_gd_register_device, NET_MANA);
int mana_gd_deregister_device(struct gdma_dev *gd)
{
@@ -1005,7 +1039,6 @@ int mana_gd_deregister_device(struct gdma_dev *gd)
return err;
}
-EXPORT_SYMBOL_NS(mana_gd_deregister_device, NET_MANA);
u32 mana_gd_wq_avail_space(struct gdma_queue *wq)
{
@@ -1041,7 +1074,7 @@ static u32 mana_gd_write_client_oob(const struct gdma_wqe_request *wqe_req,
header->inline_oob_size_div4 = client_oob_size / sizeof(u32);
if (oob_in_sgl) {
- WARN_ON_ONCE(!pad_data || wqe_req->num_sge < 2);
+ WARN_ON_ONCE(wqe_req->num_sge < 2);
header->client_oob_in_sgl = 1;
@@ -1148,6 +1181,7 @@ int mana_gd_post_work_request(struct gdma_queue *wq,
return 0;
}
+EXPORT_SYMBOL_NS(mana_gd_post_work_request, "NET_MANA");
int mana_gd_post_and_ring(struct gdma_queue *queue,
const struct gdma_wqe_request *wqe_req,
@@ -1157,8 +1191,11 @@ int mana_gd_post_and_ring(struct gdma_queue *queue,
int err;
err = mana_gd_post_work_request(queue, wqe_req, wqe_info);
- if (err)
+ if (err) {
+ dev_err(gc->dev, "Failed to post work req from queue type %d of size %u (err=%d)\n",
+ queue->type, queue->queue_size, err);
return err;
+ }
mana_gd_wq_ring_doorbell(gc, queue);
@@ -1218,6 +1255,7 @@ int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe)
return cqe_idx;
}
+EXPORT_SYMBOL_NS(mana_gd_poll_cq, "NET_MANA");
static irqreturn_t mana_gd_intr(int irq, void *arg)
{
@@ -1318,7 +1356,7 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
GFP_KERNEL);
if (!gc->irq_contexts) {
err = -ENOMEM;
- goto free_irq_vector;
+ goto free_irq_array;
}
for (i = 0; i < nvec; i++) {
@@ -1375,6 +1413,7 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
gc->max_num_msix = nvec;
gc->num_msix_usable = nvec;
cpus_read_unlock();
+ kfree(irqs);
return 0;
free_irq:
@@ -1387,8 +1426,9 @@ free_irq:
}
kfree(gc->irq_contexts);
- kfree(irqs);
gc->irq_contexts = NULL;
+free_irq_array:
+ kfree(irqs);
free_irq_vector:
cpus_read_unlock();
pci_free_irq_vectors(pdev);
@@ -1432,9 +1472,15 @@ static int mana_gd_setup(struct pci_dev *pdev)
mana_gd_init_registers(pdev);
mana_smc_init(&gc->shm_channel, gc->dev, gc->shm_base);
+ gc->service_wq = alloc_ordered_workqueue("gdma_service_wq", 0);
+ if (!gc->service_wq)
+ return -ENOMEM;
+
err = mana_gd_setup_irqs(pdev);
- if (err)
- return err;
+ if (err) {
+ dev_err(gc->dev, "Failed to setup IRQs: %d\n", err);
+ goto free_workqueue;
+ }
err = mana_hwc_create_channel(gc);
if (err)
@@ -1452,12 +1498,16 @@ static int mana_gd_setup(struct pci_dev *pdev)
if (err)
goto destroy_hwc;
+ dev_dbg(&pdev->dev, "mana gdma setup successful\n");
return 0;
destroy_hwc:
mana_hwc_destroy_channel(gc);
remove_irq:
mana_gd_remove_irqs(pdev);
+free_workqueue:
+ destroy_workqueue(gc->service_wq);
+ dev_err(&pdev->dev, "%s failed (error %d)\n", __func__, err);
return err;
}
@@ -1468,6 +1518,9 @@ static void mana_gd_cleanup(struct pci_dev *pdev)
mana_hwc_destroy_channel(gc);
mana_gd_remove_irqs(pdev);
+
+ destroy_workqueue(gc->service_wq);
+ dev_dbg(&pdev->dev, "mana gdma cleanup successful\n");
}
static bool mana_is_pf(unsigned short dev_id)
@@ -1486,8 +1539,10 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
BUILD_BUG_ON(2 * MAX_PORTS_IN_MANA_DEV * GDMA_EQE_SIZE > EQ_SIZE);
err = pci_enable_device(pdev);
- if (err)
+ if (err) {
+ dev_err(&pdev->dev, "Failed to enable pci device (err=%d)\n", err);
return -ENXIO;
+ }
pci_set_master(pdev);
@@ -1496,9 +1551,10 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto disable_dev;
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (err)
+ if (err) {
+ dev_err(&pdev->dev, "DMA set mask failed: %d\n", err);
goto release_region;
-
+ }
dma_set_max_seg_size(&pdev->dev, UINT_MAX);
err = -ENOMEM;
@@ -1533,8 +1589,14 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto cleanup_gd;
+ err = mana_rdma_probe(&gc->mana_ib);
+ if (err)
+ goto cleanup_mana;
+
return 0;
+cleanup_mana:
+ mana_remove(&gc->mana, false);
cleanup_gd:
mana_gd_cleanup(pdev);
unmap_bar:
@@ -1545,6 +1607,7 @@ unmap_bar:
* adapter-MTU file and apc->mana_pci_debugfs folder.
*/
debugfs_remove_recursive(gc->mana_pci_debugfs);
+ gc->mana_pci_debugfs = NULL;
pci_iounmap(pdev, bar0_va);
free_gc:
pci_set_drvdata(pdev, NULL);
@@ -1561,18 +1624,23 @@ static void mana_gd_remove(struct pci_dev *pdev)
{
struct gdma_context *gc = pci_get_drvdata(pdev);
+ mana_rdma_remove(&gc->mana_ib);
mana_remove(&gc->mana, false);
mana_gd_cleanup(pdev);
debugfs_remove_recursive(gc->mana_pci_debugfs);
+ gc->mana_pci_debugfs = NULL;
+
pci_iounmap(pdev, gc->bar0_va);
vfree(gc);
pci_release_regions(pdev);
pci_disable_device(pdev);
+
+ dev_dbg(&pdev->dev, "mana gdma remove successful\n");
}
/* The 'state' parameter is not used. */
@@ -1580,6 +1648,7 @@ static int mana_gd_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct gdma_context *gc = pci_get_drvdata(pdev);
+ mana_rdma_remove(&gc->mana_ib);
mana_remove(&gc->mana, true);
mana_gd_cleanup(pdev);
@@ -1604,6 +1673,10 @@ static int mana_gd_resume(struct pci_dev *pdev)
if (err)
return err;
+ err = mana_rdma_probe(&gc->mana_ib);
+ if (err)
+ return err;
+
return 0;
}
@@ -1614,12 +1687,15 @@ static void mana_gd_shutdown(struct pci_dev *pdev)
dev_info(&pdev->dev, "Shutdown was called\n");
+ mana_rdma_remove(&gc->mana_ib);
mana_remove(&gc->mana, true);
mana_gd_cleanup(pdev);
debugfs_remove_recursive(gc->mana_pci_debugfs);
+ gc->mana_pci_debugfs = NULL;
+
pci_disable_device(pdev);
}
@@ -1646,17 +1722,21 @@ static int __init mana_driver_init(void)
mana_debugfs_root = debugfs_create_dir("mana", NULL);
err = pci_register_driver(&mana_driver);
- if (err)
+ if (err) {
debugfs_remove(mana_debugfs_root);
+ mana_debugfs_root = NULL;
+ }
return err;
}
static void __exit mana_driver_exit(void)
{
+ pci_unregister_driver(&mana_driver);
+
debugfs_remove(mana_debugfs_root);
- pci_unregister_driver(&mana_driver);
+ mana_debugfs_root = NULL;
}
module_init(mana_driver_init);
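Several hunks above switch EXPORT_SYMBOL_NS() from the NET_MANA token to the "NET_MANA" string literal, matching the string-based symbol-namespace syntax used by recent kernels. A hedged sketch of the matching consumer side, assuming a module that calls these exports (example module only, not part of the patch):

// SPDX-License-Identifier: GPL-2.0
#include <linux/module.h>

/* The namespace import must use the same string-literal spelling. */
MODULE_IMPORT_NS("NET_MANA");

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Example consumer of NET_MANA-namespaced symbols");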
diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c
index a00f915c5188..a8c4d8db75a5 100644
--- a/drivers/net/ethernet/microsoft/mana/hw_channel.c
+++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c
@@ -112,11 +112,13 @@ out:
static void mana_hwc_init_event_handler(void *ctx, struct gdma_queue *q_self,
struct gdma_event *event)
{
+ union hwc_init_soc_service_type service_data;
struct hw_channel_context *hwc = ctx;
struct gdma_dev *gd = hwc->gdma_dev;
union hwc_init_type_data type_data;
union hwc_init_eq_id_db eq_db;
u32 type, val;
+ int ret;
switch (event->type) {
case GDMA_EQE_HWC_INIT_EQ_ID_DB:
@@ -199,7 +201,24 @@ static void mana_hwc_init_event_handler(void *ctx, struct gdma_queue *q_self,
}
break;
+ case GDMA_EQE_HWC_SOC_SERVICE:
+ service_data.as_uint32 = event->details[0];
+ type = service_data.type;
+ switch (type) {
+ case GDMA_SERVICE_TYPE_RDMA_SUSPEND:
+ case GDMA_SERVICE_TYPE_RDMA_RESUME:
+ ret = mana_rdma_service_event(gd->gdma_context, type);
+ if (ret)
+ dev_err(hwc->dev, "Failed to schedule adev service event: %d\n",
+ ret);
+ break;
+ default:
+ dev_warn(hwc->dev, "Received unknown SOC service type %u\n", type);
+ break;
+ }
+
+ break;
default:
dev_warn(hwc->dev, "Received unknown gdma event %u\n", event->type);
/* Ignore unknown events, which should never happen. */
@@ -440,7 +459,8 @@ static int mana_hwc_alloc_dma_buf(struct hw_channel_context *hwc, u16 q_depth,
gmi = &dma_buf->mem_info;
err = mana_gd_alloc_memory(gc, buf_size, gmi);
if (err) {
- dev_err(hwc->dev, "Failed to allocate DMA buffer: %d\n", err);
+ dev_err(hwc->dev, "Failed to allocate DMA buffer size: %u, err %d\n",
+ buf_size, err);
goto out;
}
@@ -529,6 +549,9 @@ static int mana_hwc_create_wq(struct hw_channel_context *hwc,
out:
if (err)
mana_hwc_destroy_wq(hwc, hwc_wq);
+
+ dev_err(hwc->dev, "Failed to create HWC queue size= %u type= %d err= %d\n",
+ queue_size, q_type, err);
return err;
}
diff --git a/drivers/net/ethernet/microsoft/mana/mana_bpf.c b/drivers/net/ethernet/microsoft/mana/mana_bpf.c
index 23b1521c0df9..d30721d4516f 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_bpf.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_bpf.c
@@ -91,7 +91,7 @@ u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq,
goto out;
xdp_init_buff(xdp, PAGE_SIZE, &rxq->xdp_rxq);
- xdp_prepare_buff(xdp, buf_va, XDP_PACKET_HEADROOM, pkt_len, false);
+ xdp_prepare_buff(xdp, buf_va, XDP_PACKET_HEADROOM, pkt_len, true);
act = bpf_prog_run_xdp(prog, xdp);
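Passing true as the final xdp_prepare_buff() argument marks the metadata area as valid, and a later mana_en.c hunk propagates it to the skb with skb_metadata_set(). A hedged sketch of an XDP program that would populate that metadata (illustrative only; not part of the patch):

// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_mark_meta(struct xdp_md *ctx)
{
	__u32 *meta;
	void *data;

	/* Reserve 4 bytes of metadata in front of the packet. */
	if (bpf_xdp_adjust_meta(ctx, -(int)sizeof(*meta)))
		return XDP_PASS;

	meta = (void *)(long)ctx->data_meta;
	data = (void *)(long)ctx->data;
	if ((void *)(meta + 1) > data)		/* verifier bounds check */
		return XDP_PASS;

	*meta = 0xdeadbeef;			/* e.g. a flow mark for the stack */
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";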
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index 57ac732e7707..ccd2885c939e 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -13,6 +13,7 @@
#include <net/checksum.h>
#include <net/ip6_checksum.h>
+#include <net/netdev_lock.h>
#include <net/page_pool/helpers.h>
#include <net/xdp.h>
@@ -52,10 +53,12 @@ static int mana_open(struct net_device *ndev)
{
struct mana_port_context *apc = netdev_priv(ndev);
int err;
-
err = mana_alloc_queues(ndev);
- if (err)
+
+ if (err) {
+ netdev_err(ndev, "%s failed to allocate queues: %d\n", __func__, err);
return err;
+ }
apc->port_is_up = true;
@@ -64,7 +67,7 @@ static int mana_open(struct net_device *ndev)
netif_carrier_on(ndev);
netif_tx_wake_all_queues(ndev);
-
+ netdev_dbg(ndev, "%s successful\n", __func__);
return 0;
}
@@ -176,6 +179,9 @@ static int mana_map_skb(struct sk_buff *skb, struct mana_port_context *apc,
return 0;
frag_err:
+ if (net_ratelimit())
+ netdev_err(apc->ndev, "Failed to map skb of size %u to DMA\n",
+ skb->len);
for (i = sg_i - 1; i >= hsg; i--)
dma_unmap_page(dev, ash->dma_handle[i], ash->size[i],
DMA_TO_DEVICE);
@@ -256,6 +262,9 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
if (skb_cow_head(skb, MANA_HEADROOM))
goto tx_drop_count;
+ if (unlikely(ipv6_hopopt_jumbo_remove(skb)))
+ goto tx_drop_count;
+
txq = &apc->tx_qp[txq_idx].txq;
gdma_sq = txq->gdma_sq;
cq = &apc->tx_qp[txq_idx].tx_cq;
@@ -652,30 +661,16 @@ int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu, int num_qu
mpc->rxbpre_total = 0;
for (i = 0; i < num_rxb; i++) {
- if (mpc->rxbpre_alloc_size > PAGE_SIZE) {
- va = netdev_alloc_frag(mpc->rxbpre_alloc_size);
- if (!va)
- goto error;
-
- page = virt_to_head_page(va);
- /* Check if the frag falls back to single page */
- if (compound_order(page) <
- get_order(mpc->rxbpre_alloc_size)) {
- put_page(page);
- goto error;
- }
- } else {
- page = dev_alloc_page();
- if (!page)
- goto error;
+ page = dev_alloc_pages(get_order(mpc->rxbpre_alloc_size));
+ if (!page)
+ goto error;
- va = page_to_virt(page);
- }
+ va = page_to_virt(page);
da = dma_map_single(dev, va + mpc->rxbpre_headroom,
mpc->rxbpre_datasize, DMA_FROM_DEVICE);
if (dma_mapping_error(dev, da)) {
- put_page(virt_to_head_page(va));
+ put_page(page);
goto error;
}
@@ -687,6 +682,7 @@ int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu, int num_qu
return 0;
error:
+ netdev_err(mpc->ndev, "Failed to pre-allocate RX buffers for %d queues\n", num_queues);
mana_pre_dealloc_rxbufs(mpc);
return -ENOMEM;
}
@@ -738,12 +734,11 @@ static const struct net_device_ops mana_devops = {
static void mana_cleanup_port_context(struct mana_port_context *apc)
{
/*
- * at this point all dir/files under the vport directory
- * are already cleaned up.
- * We are sure the apc->mana_port_debugfs remove will not
- * cause any freed memory access issues
+ * Clear the dentry pointer so a subsequent cleanup attempt does not
+ * operate on an already-removed dentry.
*/
debugfs_remove(apc->mana_port_debugfs);
+ apc->mana_port_debugfs = NULL;
kfree(apc->rxqs);
apc->rxqs = NULL;
}
@@ -926,7 +921,7 @@ static void mana_pf_deregister_filter(struct mana_port_context *apc)
static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver,
u32 proto_minor_ver, u32 proto_micro_ver,
- u16 *max_num_vports)
+ u16 *max_num_vports, u8 *bm_hostmode)
{
struct gdma_context *gc = ac->gdma_dev->gdma_context;
struct mana_query_device_cfg_resp resp = {};
@@ -937,7 +932,7 @@ static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver,
mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_DEV_CONFIG,
sizeof(req), sizeof(resp));
- req.hdr.resp.msg_version = GDMA_MESSAGE_V2;
+ req.hdr.resp.msg_version = GDMA_MESSAGE_V3;
req.proto_major_ver = proto_major_ver;
req.proto_minor_ver = proto_minor_ver;
@@ -961,11 +956,16 @@ static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver,
*max_num_vports = resp.max_num_vports;
- if (resp.hdr.response.msg_version == GDMA_MESSAGE_V2)
+ if (resp.hdr.response.msg_version >= GDMA_MESSAGE_V2)
gc->adapter_mtu = resp.adapter_mtu;
else
gc->adapter_mtu = ETH_FRAME_LEN;
+ if (resp.hdr.response.msg_version >= GDMA_MESSAGE_V3)
+ *bm_hostmode = resp.bm_hostmode;
+ else
+ *bm_hostmode = 0;
+
debugfs_create_u16("adapter-MTU", 0400, gc->mana_pci_debugfs, &gc->adapter_mtu);
return 0;
@@ -1022,7 +1022,7 @@ void mana_uncfg_vport(struct mana_port_context *apc)
WARN_ON(apc->vport_use_count < 0);
mutex_unlock(&apc->vport_mutex);
}
-EXPORT_SYMBOL_NS(mana_uncfg_vport, NET_MANA);
+EXPORT_SYMBOL_NS(mana_uncfg_vport, "NET_MANA");
int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id,
u32 doorbell_pg_id)
@@ -1092,7 +1092,7 @@ out:
return err;
}
-EXPORT_SYMBOL_NS(mana_cfg_vport, NET_MANA);
+EXPORT_SYMBOL_NS(mana_cfg_vport, "NET_MANA");
static int mana_cfg_vport_steering(struct mana_port_context *apc,
enum TRI_STATE rx,
@@ -1214,7 +1214,7 @@ int mana_create_wq_obj(struct mana_port_context *apc,
out:
return err;
}
-EXPORT_SYMBOL_NS(mana_create_wq_obj, NET_MANA);
+EXPORT_SYMBOL_NS(mana_create_wq_obj, "NET_MANA");
void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
mana_handle_t wq_obj)
@@ -1242,7 +1242,7 @@ void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
netdev_err(ndev, "Failed to destroy WQ object: %d, 0x%x\n", err,
resp.hdr.status);
}
-EXPORT_SYMBOL_NS(mana_destroy_wq_obj, NET_MANA);
+EXPORT_SYMBOL_NS(mana_destroy_wq_obj, "NET_MANA");
static void mana_destroy_eq(struct mana_context *ac)
{
@@ -1254,6 +1254,7 @@ static void mana_destroy_eq(struct mana_context *ac)
return;
debugfs_remove_recursive(ac->mana_eqs_debugfs);
+ ac->mana_eqs_debugfs = NULL;
for (i = 0; i < gc->max_num_queues; i++) {
eq = ac->eqs[i].eq;
@@ -1304,8 +1305,10 @@ static int mana_create_eq(struct mana_context *ac)
for (i = 0; i < gc->max_num_queues; i++) {
spec.eq.msix_index = (i + 1) % gc->num_msix_usable;
err = mana_gd_create_mana_eq(gd, &spec, &ac->eqs[i].eq);
- if (err)
+ if (err) {
+ dev_err(gc->dev, "Failed to create EQ %d : %d\n", i, err);
goto out;
+ }
mana_create_eq_debugfs(ac, i);
}
@@ -1547,8 +1550,12 @@ static struct sk_buff *mana_build_skb(struct mana_rxq *rxq, void *buf_va,
return NULL;
if (xdp->data_hard_start) {
+ u32 metasize = xdp->data - xdp->data_meta;
+
skb_reserve(skb, xdp->data - xdp->data_hard_start);
skb_put(skb, xdp->data_end - xdp->data);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
return skb;
}
@@ -1660,7 +1667,7 @@ drop:
}
static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
- dma_addr_t *da, bool *from_pool, bool is_napi)
+ dma_addr_t *da, bool *from_pool)
{
struct page *page;
void *va;
@@ -1671,21 +1678,6 @@ static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
if (rxq->xdp_save_va) {
va = rxq->xdp_save_va;
rxq->xdp_save_va = NULL;
- } else if (rxq->alloc_size > PAGE_SIZE) {
- if (is_napi)
- va = napi_alloc_frag(rxq->alloc_size);
- else
- va = netdev_alloc_frag(rxq->alloc_size);
-
- if (!va)
- return NULL;
-
- page = virt_to_head_page(va);
- /* Check if the frag falls back to single page */
- if (compound_order(page) < get_order(rxq->alloc_size)) {
- put_page(page);
- return NULL;
- }
} else {
page = page_pool_dev_alloc_pages(rxq->page_pool);
if (!page)
@@ -1718,7 +1710,7 @@ static void mana_refill_rx_oob(struct device *dev, struct mana_rxq *rxq,
dma_addr_t da;
void *va;
- va = mana_get_rxfrag(rxq, dev, &da, &from_pool, true);
+ va = mana_get_rxfrag(rxq, dev, &da, &from_pool);
if (!va)
return;
@@ -1914,6 +1906,7 @@ static void mana_destroy_txq(struct mana_port_context *apc)
for (i = 0; i < apc->num_queues; i++) {
debugfs_remove_recursive(apc->tx_qp[i].mana_tx_debugfs);
+ apc->tx_qp[i].mana_tx_debugfs = NULL;
napi = &apc->tx_qp[i].tx_cq.napi;
if (apc->tx_qp[i].txq.napi_initialized) {
@@ -2080,6 +2073,8 @@ static int mana_create_txq(struct mana_port_context *apc,
return 0;
out:
+ netdev_err(net, "Failed to create %d TX queues, %d\n",
+ apc->num_queues, err);
mana_destroy_txq(apc);
return err;
}
@@ -2099,6 +2094,7 @@ static void mana_destroy_rxq(struct mana_port_context *apc,
return;
debugfs_remove_recursive(rxq->mana_rx_debugfs);
+ rxq->mana_rx_debugfs = NULL;
napi = &rxq->rx_cq.napi;
@@ -2156,7 +2152,7 @@ static int mana_fill_rx_oob(struct mana_recv_buf_oob *rx_oob, u32 mem_key,
if (mpc->rxbufs_pre)
va = mana_get_rxbuf_pre(rxq, &da);
else
- va = mana_get_rxfrag(rxq, dev, &da, &from_pool, false);
+ va = mana_get_rxfrag(rxq, dev, &da, &from_pool);
if (!va)
return -ENOMEM;
@@ -2242,6 +2238,7 @@ static int mana_create_page_pool(struct mana_rxq *rxq, struct gdma_context *gc)
pprm.nid = gc->numa_node;
pprm.napi = &rxq->rx_cq.napi;
pprm.netdev = rxq->ndev;
+ pprm.order = get_order(rxq->alloc_size);
rxq->page_pool = page_pool_create(&pprm);
@@ -2415,6 +2412,7 @@ static int mana_add_rx_queues(struct mana_port_context *apc,
rxq = mana_create_rxq(apc, i, &ac->eqs[i], ndev);
if (!rxq) {
err = -ENOMEM;
+ netdev_err(ndev, "Failed to create rxq %d : %d\n", i, err);
goto out;
}
@@ -2448,7 +2446,7 @@ static void mana_destroy_vport(struct mana_port_context *apc)
mana_destroy_txq(apc);
mana_uncfg_vport(apc);
- if (gd->gdma_context->is_pf)
+ if (gd->gdma_context->is_pf && !apc->ac->bm_hostmode)
mana_pf_deregister_hw_vport(apc);
}
@@ -2460,7 +2458,7 @@ static int mana_create_vport(struct mana_port_context *apc,
apc->default_rxobj = INVALID_MANA_HANDLE;
- if (gd->gdma_context->is_pf) {
+ if (gd->gdma_context->is_pf && !apc->ac->bm_hostmode) {
err = mana_pf_register_hw_vport(apc);
if (err)
return err;
@@ -2536,6 +2534,7 @@ void mana_query_gf_stats(struct mana_port_context *apc)
mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_GF_STAT,
sizeof(req), sizeof(resp));
+ req.hdr.resp.msg_version = GDMA_MESSAGE_V2;
req.req_stats = STATISTICS_FLAGS_RX_DISCARDS_NO_WQE |
STATISTICS_FLAGS_RX_ERRORS_VPORT_DISABLED |
STATISTICS_FLAGS_HC_RX_BYTES |
@@ -2660,12 +2659,18 @@ int mana_alloc_queues(struct net_device *ndev)
int err;
err = mana_create_vport(apc, ndev);
- if (err)
+ if (err) {
+ netdev_err(ndev, "Failed to create vPort %u : %d\n", apc->port_idx, err);
return err;
+ }
err = netif_set_real_num_tx_queues(ndev, apc->num_queues);
- if (err)
+ if (err) {
+ netdev_err(ndev,
+ "netif_set_real_num_tx_queues () failed for ndev with num_queues %u : %d\n",
+ apc->num_queues, err);
goto destroy_vport;
+ }
err = mana_add_rx_queues(apc, ndev);
if (err)
@@ -2674,16 +2679,22 @@ int mana_alloc_queues(struct net_device *ndev)
apc->rss_state = apc->num_queues > 1 ? TRI_STATE_TRUE : TRI_STATE_FALSE;
err = netif_set_real_num_rx_queues(ndev, apc->num_queues);
- if (err)
+ if (err) {
+ netdev_err(ndev,
+ "netif_set_real_num_rx_queues () failed for ndev with num_queues %u : %d\n",
+ apc->num_queues, err);
goto destroy_vport;
+ }
mana_rss_table_init(apc);
err = mana_config_rss(apc, TRI_STATE_TRUE, true, true);
- if (err)
+ if (err) {
+ netdev_err(ndev, "Failed to configure RSS table: %d\n", err);
goto destroy_vport;
+ }
- if (gd->gdma_context->is_pf) {
+ if (gd->gdma_context->is_pf && !apc->ac->bm_hostmode) {
err = mana_pf_register_filter(apc);
if (err)
goto destroy_vport;
@@ -2745,7 +2756,7 @@ static int mana_dealloc_queues(struct net_device *ndev)
mana_chn_setxdp(apc, NULL);
- if (gd->gdma_context->is_pf)
+ if (gd->gdma_context->is_pf && !apc->ac->bm_hostmode)
mana_pf_deregister_filter(apc);
/* No packet can be transmitted now since apc->port_is_up is false.
@@ -2822,8 +2833,10 @@ int mana_detach(struct net_device *ndev, bool from_close)
if (apc->port_st_save) {
err = mana_dealloc_queues(ndev);
- if (err)
+ if (err) {
+ netdev_err(ndev, "%s failed to deallocate queues: %d\n", __func__, err);
return err;
+ }
}
if (!from_close) {
@@ -2872,6 +2885,8 @@ static int mana_probe_port(struct mana_context *ac, int port_idx,
ndev->dev_port = port_idx;
SET_NETDEV_DEV(ndev, gc->dev);
+ netif_set_tso_max_size(ndev, GSO_MAX_SIZE);
+
netif_carrier_off(ndev);
netdev_rss_key_fill(apc->hashkey, MANA_HASH_KEY_SIZE);
@@ -2935,7 +2950,7 @@ static void remove_adev(struct gdma_dev *gd)
gd->adev = NULL;
}
-static int add_adev(struct gdma_dev *gd)
+static int add_adev(struct gdma_dev *gd, const char *name)
{
struct auxiliary_device *adev;
struct mana_adev *madev;
@@ -2951,7 +2966,7 @@ static int add_adev(struct gdma_dev *gd)
goto idx_fail;
adev->id = ret;
- adev->name = "rdma";
+ adev->name = name;
adev->dev.parent = gd->gdma_context->dev;
adev->dev.release = adev_release;
madev->mdev = gd;
@@ -2967,6 +2982,8 @@ static int add_adev(struct gdma_dev *gd)
goto add_fail;
gd->adev = adev;
+ dev_dbg(gd->gdma_context->dev,
+ "Auxiliary device added successfully\n");
return 0;
add_fail:
@@ -2981,11 +2998,76 @@ idx_fail:
return ret;
}
+static void mana_rdma_service_handle(struct work_struct *work)
+{
+ struct mana_service_work *serv_work =
+ container_of(work, struct mana_service_work, work);
+ struct gdma_dev *gd = serv_work->gdma_dev;
+ struct device *dev = gd->gdma_context->dev;
+ int ret;
+
+ if (READ_ONCE(gd->rdma_teardown))
+ goto out;
+
+ switch (serv_work->event) {
+ case GDMA_SERVICE_TYPE_RDMA_SUSPEND:
+ if (!gd->adev || gd->is_suspended)
+ break;
+
+ remove_adev(gd);
+ gd->is_suspended = true;
+ break;
+
+ case GDMA_SERVICE_TYPE_RDMA_RESUME:
+ if (!gd->is_suspended)
+ break;
+
+ ret = add_adev(gd, "rdma");
+ if (ret)
+ dev_err(dev, "Failed to add adev on resume: %d\n", ret);
+ else
+ gd->is_suspended = false;
+ break;
+
+ default:
+ dev_warn(dev, "unknown adev service event %u\n",
+ serv_work->event);
+ break;
+ }
+
+out:
+ kfree(serv_work);
+}
+
+int mana_rdma_service_event(struct gdma_context *gc, enum gdma_service_type event)
+{
+ struct gdma_dev *gd = &gc->mana_ib;
+ struct mana_service_work *serv_work;
+
+ if (gd->dev_id.type != GDMA_DEVICE_MANA_IB) {
+ /* RDMA device is not detected on pci */
+ return 0;
+ }
+
+ serv_work = kzalloc(sizeof(*serv_work), GFP_ATOMIC);
+ if (!serv_work)
+ return -ENOMEM;
+
+ serv_work->event = event;
+ serv_work->gdma_dev = gd;
+
+ INIT_WORK(&serv_work->work, mana_rdma_service_handle);
+ queue_work(gc->service_wq, &serv_work->work);
+
+ return 0;
+}
+
int mana_probe(struct gdma_dev *gd, bool resuming)
{
struct gdma_context *gc = gd->gdma_context;
struct mana_context *ac = gd->driver_data;
struct device *dev = gc->dev;
+ u8 bm_hostmode = 0;
u16 num_ports = 0;
int err;
int i;
@@ -3008,14 +3090,18 @@ int mana_probe(struct gdma_dev *gd, bool resuming)
}
err = mana_create_eq(ac);
- if (err)
+ if (err) {
+ dev_err(dev, "Failed to create EQs: %d\n", err);
goto out;
+ }
err = mana_query_device_cfg(ac, MANA_MAJOR_VERSION, MANA_MINOR_VERSION,
- MANA_MICRO_VERSION, &num_ports);
+ MANA_MICRO_VERSION, &num_ports, &bm_hostmode);
if (err)
goto out;
+ ac->bm_hostmode = bm_hostmode;
+
if (!resuming) {
ac->num_ports = num_ports;
} else {
@@ -3063,10 +3149,16 @@ int mana_probe(struct gdma_dev *gd, bool resuming)
}
}
- err = add_adev(gd);
+ err = add_adev(gd, "eth");
out:
- if (err)
+ if (err) {
mana_remove(gd, false);
+ } else {
+ dev_dbg(dev, "gd=%p, id=%u, num_ports=%d, type=%u, instance=%u\n",
+ gd, gd->dev_id.as_uint32, ac->num_ports,
+ gd->dev_id.type, gd->dev_id.instance);
+ dev_dbg(dev, "%s succeeded\n", __func__);
+ }
return err;
}
@@ -3128,23 +3220,68 @@ out:
gd->driver_data = NULL;
gd->gdma_context = NULL;
kfree(ac);
+ dev_dbg(dev, "%s succeeded\n", __func__);
}
-struct net_device *mana_get_primary_netdev_rcu(struct mana_context *ac, u32 port_index)
+int mana_rdma_probe(struct gdma_dev *gd)
+{
+ int err = 0;
+
+ if (gd->dev_id.type != GDMA_DEVICE_MANA_IB) {
+ /* RDMA device is not detected on pci */
+ return err;
+ }
+
+ err = mana_gd_register_device(gd);
+ if (err)
+ return err;
+
+ err = add_adev(gd, "rdma");
+ if (err)
+ mana_gd_deregister_device(gd);
+
+ return err;
+}
+
+void mana_rdma_remove(struct gdma_dev *gd)
+{
+ struct gdma_context *gc = gd->gdma_context;
+
+ if (gd->dev_id.type != GDMA_DEVICE_MANA_IB) {
+ /* RDMA device is not detected on pci */
+ return;
+ }
+
+ WRITE_ONCE(gd->rdma_teardown, true);
+ flush_workqueue(gc->service_wq);
+
+ if (gd->adev)
+ remove_adev(gd);
+
+ mana_gd_deregister_device(gd);
+}
+
+struct net_device *mana_get_primary_netdev(struct mana_context *ac,
+ u32 port_index,
+ netdevice_tracker *tracker)
{
struct net_device *ndev;
- RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
- "Taking primary netdev without holding the RCU read lock");
if (port_index >= ac->num_ports)
return NULL;
- /* When mana is used in netvsc, the upper netdevice should be returned. */
- if (ac->ports[port_index]->flags & IFF_SLAVE)
- ndev = netdev_master_upper_dev_get_rcu(ac->ports[port_index]);
- else
+ rcu_read_lock();
+
+ /* If mana is used in netvsc, the upper netdevice should be returned. */
+ ndev = netdev_master_upper_dev_get_rcu(ac->ports[port_index]);
+
+ /* If there is no upper device, use the parent Ethernet device */
+ if (!ndev)
ndev = ac->ports[port_index];
+ netdev_hold(ndev, tracker, GFP_ATOMIC);
+ rcu_read_unlock();
+
return ndev;
}
-EXPORT_SYMBOL_NS(mana_get_primary_netdev_rcu, NET_MANA);
+EXPORT_SYMBOL_NS(mana_get_primary_netdev, "NET_MANA");
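mana_get_primary_netdev() now hands back a reference tracked with netdev_hold(), so a consumer is expected to drop it with netdev_put() when finished. A hedged caller-side sketch (the consumer function and its use are assumptions, not code from the patch):

#include <linux/netdevice.h>
#include <net/mana/mana.h>

/* Illustrative consumer; "example_use_primary_netdev" is not a real symbol. */
static int example_use_primary_netdev(struct mana_context *ac, u32 port_index)
{
	netdevice_tracker tracker;
	struct net_device *ndev;

	ndev = mana_get_primary_netdev(ac, port_index, &tracker);
	if (!ndev)
		return -ENODEV;

	netdev_info(ndev, "primary netdev MTU is %u\n", ndev->mtu);

	netdev_put(ndev, &tracker);	/* drop the tracked reference */
	return 0;
}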
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 3d72aa7b1305..08bee56aea35 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -830,6 +830,7 @@ EXPORT_SYMBOL(ocelot_vlan_prepare);
int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid,
bool untagged)
{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
int err;
/* Ignore VID 0 added to our RX filter by the 8021q module, since
@@ -849,6 +850,11 @@ int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid,
ocelot_bridge_vlan_find(ocelot, vid));
if (err)
return err;
+ } else if (ocelot_port->pvid_vlan &&
+ ocelot_bridge_vlan_find(ocelot, vid) == ocelot_port->pvid_vlan) {
+ err = ocelot_port_set_pvid(ocelot, port, NULL);
+ if (err)
+ return err;
}
/* Untagged egress vlan classification */
@@ -1432,7 +1438,7 @@ void ocelot_ifh_set_basic(void *ifh, struct ocelot *ocelot, int port,
memset(ifh, 0, OCELOT_TAG_LEN);
ocelot_ifh_set_bypass(ifh, 1);
- ocelot_ifh_set_src(ifh, BIT_ULL(ocelot->num_phys_ports));
+ ocelot_ifh_set_src(ifh, ocelot->num_phys_ports);
ocelot_ifh_set_dest(ifh, BIT_ULL(port));
ocelot_ifh_set_qos_class(ifh, qos_class);
ocelot_ifh_set_tag_type(ifh, tag_type);
diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c
index 558e03301aa8..469784d3a1a6 100644
--- a/drivers/net/ethernet/mscc/ocelot_net.c
+++ b/drivers/net/ethernet/mscc/ocelot_net.c
@@ -758,12 +758,13 @@ static int ocelot_port_fdb_do_dump(const unsigned char *addr, u16 vid,
bool is_static, void *data)
{
struct ocelot_dump_ctx *dump = data;
+ struct ndo_fdb_dump_context *ctx = (void *)dump->cb->ctx;
u32 portid = NETLINK_CB(dump->cb->skb).portid;
u32 seq = dump->cb->nlh->nlmsg_seq;
struct nlmsghdr *nlh;
struct ndmsg *ndm;
- if (dump->idx < dump->cb->args[2])
+ if (dump->idx < ctx->fdb_idx)
goto skip;
nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
@@ -869,23 +870,30 @@ static int ocelot_set_features(struct net_device *dev,
static int ocelot_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
+ return phy_mii_ioctl(dev->phydev, ifr, cmd);
+}
+
+static int ocelot_port_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg)
+{
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot *ocelot = priv->port.ocelot;
int port = priv->port.index;
- /* If the attached PHY device isn't capable of timestamping operations,
- * use our own (when possible).
- */
- if (!phy_has_hwtstamp(dev->phydev) && ocelot->ptp) {
- switch (cmd) {
- case SIOCSHWTSTAMP:
- return ocelot_hwstamp_set(ocelot, port, ifr);
- case SIOCGHWTSTAMP:
- return ocelot_hwstamp_get(ocelot, port, ifr);
- }
- }
+ ocelot_hwstamp_get(ocelot, port, cfg);
- return phy_mii_ioctl(dev->phydev, ifr, cmd);
+ return 0;
+}
+
+static int ocelot_port_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
+{
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->port.index;
+
+ return ocelot_hwstamp_set(ocelot, port, cfg, extack);
}
static int ocelot_change_mtu(struct net_device *dev, int new_mtu)
@@ -916,6 +924,8 @@ static const struct net_device_ops ocelot_port_netdev_ops = {
.ndo_set_features = ocelot_set_features,
.ndo_setup_tc = ocelot_setup_tc,
.ndo_eth_ioctl = ocelot_ioctl,
+ .ndo_hwtstamp_get = ocelot_port_hwtstamp_get,
+ .ndo_hwtstamp_set = ocelot_port_hwtstamp_set,
};
struct net_device *ocelot_port_to_netdev(struct ocelot *ocelot, int port)
@@ -992,6 +1002,16 @@ static int ocelot_port_get_ts_info(struct net_device *dev,
return ocelot_get_ts_info(ocelot, port, info);
}
+static void ocelot_port_ts_stats(struct net_device *dev,
+ struct ethtool_ts_stats *ts_stats)
+{
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->port.index;
+
+ ocelot_port_get_ts_stats(ocelot, port, ts_stats);
+}
+
static const struct ethtool_ops ocelot_ethtool_ops = {
.get_strings = ocelot_port_get_strings,
.get_ethtool_stats = ocelot_port_get_ethtool_stats,
@@ -999,6 +1019,7 @@ static const struct ethtool_ops ocelot_ethtool_ops = {
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
.get_ts_info = ocelot_port_get_ts_info,
+ .get_ts_stats = ocelot_port_ts_stats,
};
static void ocelot_port_attr_stp_state_set(struct ocelot *ocelot, int port,
diff --git a/drivers/net/ethernet/mscc/ocelot_ptp.c b/drivers/net/ethernet/mscc/ocelot_ptp.c
index e172638b0601..88b5422cc2a0 100644
--- a/drivers/net/ethernet/mscc/ocelot_ptp.c
+++ b/drivers/net/ethernet/mscc/ocelot_ptp.c
@@ -14,6 +14,8 @@
#include <soc/mscc/ocelot.h>
#include "ocelot.h"
+#define OCELOT_PTP_TX_TSTAMP_TIMEOUT (5 * HZ)
+
int ocelot_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
struct ocelot *ocelot = container_of(ptp, struct ocelot, ptp_info);
@@ -209,11 +211,6 @@ int ocelot_ptp_enable(struct ptp_clock_info *ptp,
switch (rq->type) {
case PTP_CLK_REQ_PEROUT:
- /* Reject requests with unsupported flags */
- if (rq->perout.flags & ~(PTP_PEROUT_DUTY_CYCLE |
- PTP_PEROUT_PHASE))
- return -EOPNOTSUPP;
-
pin = ptp_find_pin(ocelot->ptp_clock, PTP_PF_PEROUT,
rq->perout.index);
if (pin == 0)
@@ -495,58 +492,64 @@ static int ocelot_traps_to_ptp_rx_filter(unsigned int proto)
return HWTSTAMP_FILTER_NONE;
}
-int ocelot_hwstamp_get(struct ocelot *ocelot, int port, struct ifreq *ifr)
+static int ocelot_ptp_tx_type_to_cmd(int tx_type, int *ptp_cmd)
+{
+ switch (tx_type) {
+ case HWTSTAMP_TX_ON:
+ *ptp_cmd = IFH_REW_OP_TWO_STEP_PTP;
+ break;
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ /* IFH_REW_OP_ONE_STEP_PTP updates the correctionField,
+	 * whereas what we need to update is the originTimestamp.
+ */
+ *ptp_cmd = IFH_REW_OP_ORIGIN_PTP;
+ break;
+ case HWTSTAMP_TX_OFF:
+ *ptp_cmd = 0;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ return 0;
+}
+
+void ocelot_hwstamp_get(struct ocelot *ocelot, int port,
+ struct kernel_hwtstamp_config *cfg)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
- struct hwtstamp_config cfg = {};
switch (ocelot_port->ptp_cmd) {
case IFH_REW_OP_TWO_STEP_PTP:
- cfg.tx_type = HWTSTAMP_TX_ON;
+ cfg->tx_type = HWTSTAMP_TX_ON;
break;
case IFH_REW_OP_ORIGIN_PTP:
- cfg.tx_type = HWTSTAMP_TX_ONESTEP_SYNC;
+ cfg->tx_type = HWTSTAMP_TX_ONESTEP_SYNC;
break;
default:
- cfg.tx_type = HWTSTAMP_TX_OFF;
+ cfg->tx_type = HWTSTAMP_TX_OFF;
break;
}
- cfg.rx_filter = ocelot_traps_to_ptp_rx_filter(ocelot_port->trap_proto);
-
- return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+ cfg->rx_filter = ocelot_traps_to_ptp_rx_filter(ocelot_port->trap_proto);
}
EXPORT_SYMBOL(ocelot_hwstamp_get);
-int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr)
+int ocelot_hwstamp_set(struct ocelot *ocelot, int port,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
bool l2 = false, l4 = false;
- struct hwtstamp_config cfg;
+ int ptp_cmd;
int err;
- if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
- return -EFAULT;
-
/* Tx type sanity check */
- switch (cfg.tx_type) {
- case HWTSTAMP_TX_ON:
- ocelot_port->ptp_cmd = IFH_REW_OP_TWO_STEP_PTP;
- break;
- case HWTSTAMP_TX_ONESTEP_SYNC:
- /* IFH_REW_OP_ONE_STEP_PTP updates the correctional field, we
- * need to update the origin time.
- */
- ocelot_port->ptp_cmd = IFH_REW_OP_ORIGIN_PTP;
- break;
- case HWTSTAMP_TX_OFF:
- ocelot_port->ptp_cmd = 0;
- break;
- default:
- return -ERANGE;
- }
+ err = ocelot_ptp_tx_type_to_cmd(cfg->tx_type, &ptp_cmd);
+ if (err)
+ return err;
- switch (cfg.rx_filter) {
+ switch (cfg->rx_filter) {
case HWTSTAMP_FILTER_NONE:
break;
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
@@ -573,9 +576,11 @@ int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr)
if (err)
return err;
- cfg.rx_filter = ocelot_traps_to_ptp_rx_filter(ocelot_port->trap_proto);
+ ocelot_port->ptp_cmd = ptp_cmd;
- return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+ cfg->rx_filter = ocelot_traps_to_ptp_rx_filter(ocelot_port->trap_proto);
+
+ return 0;
}
EXPORT_SYMBOL(ocelot_hwstamp_set);
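The hwtstamp handlers above now take a struct kernel_hwtstamp_config instead of copying a struct hwtstamp_config to and from user memory themselves; the ioctl core performs the copy and calls the driver's ndo_hwtstamp_get/set hooks, so the userspace interface is unchanged. A minimal, generic sketch of how a tool still requests timestamping through SIOCSHWTSTAMP (the interface name and rx_filter choice are illustrative assumptions, not taken from this patch):

/* Minimal sketch: enable hardware TX/RX timestamping on an interface via
 * the standard SIOCSHWTSTAMP ioctl. "eth0" and the chosen rx_filter are
 * illustrative assumptions, not part of this patch.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,              /* two-step TX timestamps */
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
	};
	struct ifreq ifr = {};
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	/* The kernel copies cfg in, calls the driver's ndo_hwtstamp_set()
	 * (here ocelot_port_hwtstamp_set()), then copies the possibly
	 * adjusted config back out.
	 */
	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		perror("SIOCSHWTSTAMP");
	else
		printf("tx_type=%d rx_filter=%d\n", cfg.tx_type, cfg.rx_filter);

	close(fd);
	return 0;
}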
@@ -603,34 +608,92 @@ int ocelot_get_ts_info(struct ocelot *ocelot, int port,
}
EXPORT_SYMBOL(ocelot_get_ts_info);
-static int ocelot_port_add_txtstamp_skb(struct ocelot *ocelot, int port,
- struct sk_buff *clone)
+static struct sk_buff *ocelot_port_dequeue_ptp_tx_skb(struct ocelot *ocelot,
+ int port, u8 ts_id,
+ u32 seqid)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
- unsigned long flags;
+ struct sk_buff *skb, *skb_tmp, *skb_match = NULL;
+ struct ptp_header *hdr;
- spin_lock_irqsave(&ocelot->ts_id_lock, flags);
+ spin_lock(&ocelot->ts_id_lock);
- if (ocelot_port->ptp_skbs_in_flight == OCELOT_MAX_PTP_ID ||
- ocelot->ptp_skbs_in_flight == OCELOT_PTP_FIFO_SIZE) {
- spin_unlock_irqrestore(&ocelot->ts_id_lock, flags);
- return -EBUSY;
+ skb_queue_walk_safe(&ocelot_port->tx_skbs, skb, skb_tmp) {
+ if (OCELOT_SKB_CB(skb)->ts_id != ts_id)
+ continue;
+
+ /* Check that the timestamp ID is for the expected PTP
+ * sequenceId. We don't have to test ptp_parse_header() against
+ * NULL, because we've pre-validated the packet's ptp_class.
+ */
+ hdr = ptp_parse_header(skb, OCELOT_SKB_CB(skb)->ptp_class);
+ if (seqid != ntohs(hdr->sequence_id))
+ continue;
+
+ __skb_unlink(skb, &ocelot_port->tx_skbs);
+ ocelot->ptp_skbs_in_flight--;
+ skb_match = skb;
+ break;
}
- skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS;
- /* Store timestamp ID in OCELOT_SKB_CB(clone)->ts_id */
- OCELOT_SKB_CB(clone)->ts_id = ocelot_port->ts_id;
+ spin_unlock(&ocelot->ts_id_lock);
- ocelot_port->ts_id++;
- if (ocelot_port->ts_id == OCELOT_MAX_PTP_ID)
- ocelot_port->ts_id = 0;
+ return skb_match;
+}
- ocelot_port->ptp_skbs_in_flight++;
+static int ocelot_port_queue_ptp_tx_skb(struct ocelot *ocelot, int port,
+ struct sk_buff *clone)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ DECLARE_BITMAP(ts_id_in_flight, OCELOT_MAX_PTP_ID);
+ struct sk_buff *skb, *skb_tmp;
+ unsigned long n;
+
+ spin_lock(&ocelot->ts_id_lock);
+
+ /* To get a better chance of acquiring a timestamp ID, first flush the
+ * stale packets still waiting in the TX timestamping queue. They are
+ * probably lost.
+ */
+ skb_queue_walk_safe(&ocelot_port->tx_skbs, skb, skb_tmp) {
+ if (time_before(OCELOT_SKB_CB(skb)->ptp_tx_time +
+ OCELOT_PTP_TX_TSTAMP_TIMEOUT, jiffies)) {
+ u64_stats_update_begin(&ocelot_port->ts_stats->syncp);
+ ocelot_port->ts_stats->lost++;
+ u64_stats_update_end(&ocelot_port->ts_stats->syncp);
+
+ dev_dbg_ratelimited(ocelot->dev,
+ "port %d invalidating stale timestamp ID %u which seems lost\n",
+ port, OCELOT_SKB_CB(skb)->ts_id);
+
+ __skb_unlink(skb, &ocelot_port->tx_skbs);
+ kfree_skb(skb);
+ ocelot->ptp_skbs_in_flight--;
+ } else {
+ __set_bit(OCELOT_SKB_CB(skb)->ts_id, ts_id_in_flight);
+ }
+ }
+
+ if (ocelot->ptp_skbs_in_flight == OCELOT_PTP_FIFO_SIZE) {
+ spin_unlock(&ocelot->ts_id_lock);
+ return -EBUSY;
+ }
+
+ n = find_first_zero_bit(ts_id_in_flight, OCELOT_MAX_PTP_ID);
+ if (n == OCELOT_MAX_PTP_ID) {
+ spin_unlock(&ocelot->ts_id_lock);
+ return -EBUSY;
+ }
+
+ /* Found an available timestamp ID, use it */
+ OCELOT_SKB_CB(clone)->ts_id = n;
+ OCELOT_SKB_CB(clone)->ptp_tx_time = jiffies;
ocelot->ptp_skbs_in_flight++;
+ __skb_queue_tail(&ocelot_port->tx_skbs, clone);
- skb_queue_tail(&ocelot_port->tx_skbs, clone);
+ spin_unlock(&ocelot->ts_id_lock);
- spin_unlock_irqrestore(&ocelot->ts_id_lock, flags);
+ dev_dbg_ratelimited(ocelot->dev, "port %d timestamp id %lu\n", port, n);
return 0;
}
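The new allocator first drops queued clones older than OCELOT_PTP_TX_TSTAMP_TIMEOUT, marks the timestamp IDs still in flight in a bitmap, and then takes the first free one. A rough standalone analogue of that collect-then-find-first-zero step, with plain C stand-ins for the kernel bitmap helpers (sizes and structures simplified for illustration):

/* Sketch of the timestamp-ID allocation idea: mark every ID still in
 * flight, then take the first unused one. MAX_PTP_ID and the queue
 * representation are simplified stand-ins for the driver's structures.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_PTP_ID 63

struct pending { uint8_t ts_id; bool in_use; };

static int alloc_ts_id(const struct pending *q, int qlen)
{
	bool in_flight[MAX_PTP_ID] = { false };
	int i;

	for (i = 0; i < qlen; i++)
		if (q[i].in_use)
			in_flight[q[i].ts_id] = true;   /* like __set_bit() */

	for (i = 0; i < MAX_PTP_ID; i++)                /* like find_first_zero_bit() */
		if (!in_flight[i])
			return i;

	return -1; /* all IDs busy; the driver returns -EBUSY here */
}

int main(void)
{
	struct pending q[] = { { 0, true }, { 1, true }, { 3, true } };

	printf("next free ts_id: %d\n", alloc_ts_id(q, 3)); /* prints 2 */
	return 0;
}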
@@ -668,13 +731,20 @@ int ocelot_port_txtstamp_request(struct ocelot *ocelot, int port,
return 0;
ptp_class = ptp_classify_raw(skb);
- if (ptp_class == PTP_CLASS_NONE)
- return -EINVAL;
+ if (ptp_class == PTP_CLASS_NONE) {
+ err = -EINVAL;
+ goto error;
+ }
/* Store ptp_cmd in OCELOT_SKB_CB(skb)->ptp_cmd */
if (ptp_cmd == IFH_REW_OP_ORIGIN_PTP) {
if (ocelot_ptp_is_onestep_sync(skb, ptp_class)) {
OCELOT_SKB_CB(skb)->ptp_cmd = ptp_cmd;
+
+ u64_stats_update_begin(&ocelot_port->ts_stats->syncp);
+ ocelot_port->ts_stats->onestep_pkts_unconfirmed++;
+ u64_stats_update_end(&ocelot_port->ts_stats->syncp);
+
return 0;
}
@@ -684,18 +754,30 @@ int ocelot_port_txtstamp_request(struct ocelot *ocelot, int port,
if (ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) {
*clone = skb_clone_sk(skb);
- if (!(*clone))
- return -ENOMEM;
+ if (!(*clone)) {
+ err = -ENOMEM;
+ goto error;
+ }
- err = ocelot_port_add_txtstamp_skb(ocelot, port, *clone);
- if (err)
- return err;
+ /* Store timestamp ID in OCELOT_SKB_CB(clone)->ts_id */
+ err = ocelot_port_queue_ptp_tx_skb(ocelot, port, *clone);
+ if (err) {
+ kfree_skb(*clone);
+ goto error;
+ }
+ skb_shinfo(*clone)->tx_flags |= SKBTX_IN_PROGRESS;
OCELOT_SKB_CB(skb)->ptp_cmd = ptp_cmd;
OCELOT_SKB_CB(*clone)->ptp_class = ptp_class;
}
return 0;
+
+error:
+ u64_stats_update_begin(&ocelot_port->ts_stats->syncp);
+ ocelot_port->ts_stats->err++;
+ u64_stats_update_end(&ocelot_port->ts_stats->syncp);
+ return err;
}
EXPORT_SYMBOL(ocelot_port_txtstamp_request);
@@ -726,28 +808,16 @@ static void ocelot_get_hwtimestamp(struct ocelot *ocelot,
spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
}
-static bool ocelot_validate_ptp_skb(struct sk_buff *clone, u16 seqid)
-{
- struct ptp_header *hdr;
-
- hdr = ptp_parse_header(clone, OCELOT_SKB_CB(clone)->ptp_class);
- if (WARN_ON(!hdr))
- return false;
-
- return seqid == ntohs(hdr->sequence_id);
-}
-
void ocelot_get_txtstamp(struct ocelot *ocelot)
{
int budget = OCELOT_PTP_QUEUE_SZ;
while (budget--) {
- struct sk_buff *skb, *skb_tmp, *skb_match = NULL;
struct skb_shared_hwtstamps shhwtstamps;
+ struct ocelot_port *ocelot_port;
u32 val, id, seqid, txport;
- struct ocelot_port *port;
+ struct sk_buff *skb_match;
struct timespec64 ts;
- unsigned long flags;
val = ocelot_read(ocelot, SYS_PTP_STATUS);
@@ -761,38 +831,26 @@ void ocelot_get_txtstamp(struct ocelot *ocelot)
id = SYS_PTP_STATUS_PTP_MESS_ID_X(val);
txport = SYS_PTP_STATUS_PTP_MESS_TXPORT_X(val);
seqid = SYS_PTP_STATUS_PTP_MESS_SEQ_ID(val);
-
- port = ocelot->ports[txport];
-
- spin_lock(&ocelot->ts_id_lock);
- port->ptp_skbs_in_flight--;
- ocelot->ptp_skbs_in_flight--;
- spin_unlock(&ocelot->ts_id_lock);
+ ocelot_port = ocelot->ports[txport];
/* Retrieve its associated skb */
-try_again:
- spin_lock_irqsave(&port->tx_skbs.lock, flags);
-
- skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) {
- if (OCELOT_SKB_CB(skb)->ts_id != id)
- continue;
- __skb_unlink(skb, &port->tx_skbs);
- skb_match = skb;
- break;
+ skb_match = ocelot_port_dequeue_ptp_tx_skb(ocelot, txport, id,
+ seqid);
+ if (!skb_match) {
+ u64_stats_update_begin(&ocelot_port->ts_stats->syncp);
+ ocelot_port->ts_stats->err++;
+ u64_stats_update_end(&ocelot_port->ts_stats->syncp);
+
+ dev_dbg_ratelimited(ocelot->dev,
+ "port %d received TX timestamp (seqid %d, ts id %u) for packet previously declared stale\n",
+ txport, seqid, id);
+
+ goto next_ts;
}
- spin_unlock_irqrestore(&port->tx_skbs.lock, flags);
-
- if (WARN_ON(!skb_match))
- continue;
-
- if (!ocelot_validate_ptp_skb(skb_match, seqid)) {
- dev_err_ratelimited(ocelot->dev,
- "port %d received stale TX timestamp for seqid %d, discarding\n",
- txport, seqid);
- dev_kfree_skb_any(skb);
- goto try_again;
- }
+ u64_stats_update_begin(&ocelot_port->ts_stats->syncp);
+ ocelot_port->ts_stats->pkts++;
+ u64_stats_update_end(&ocelot_port->ts_stats->syncp);
/* Get the h/w timestamp */
ocelot_get_hwtimestamp(ocelot, &ts);
@@ -802,7 +860,7 @@ try_again:
shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
skb_complete_tx_timestamp(skb_match, &shhwtstamps);
- /* Next ts */
+next_ts:
ocelot_write(ocelot, SYS_PTP_NXT_PTP_NXT, SYS_PTP_NXT);
}
}
diff --git a/drivers/net/ethernet/mscc/ocelot_stats.c b/drivers/net/ethernet/mscc/ocelot_stats.c
index c018783757fb..545710dadcf5 100644
--- a/drivers/net/ethernet/mscc/ocelot_stats.c
+++ b/drivers/net/ethernet/mscc/ocelot_stats.c
@@ -821,6 +821,26 @@ void ocelot_port_get_eth_phy_stats(struct ocelot *ocelot, int port,
}
EXPORT_SYMBOL_GPL(ocelot_port_get_eth_phy_stats);
+void ocelot_port_get_ts_stats(struct ocelot *ocelot, int port,
+ struct ethtool_ts_stats *ts_stats)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ struct ocelot_ts_stats *stats = ocelot_port->ts_stats;
+ unsigned int start;
+
+ if (!ocelot->ptp)
+ return;
+
+ do {
+ start = u64_stats_fetch_begin(&stats->syncp);
+ ts_stats->pkts = stats->pkts;
+ ts_stats->onestep_pkts_unconfirmed = stats->onestep_pkts_unconfirmed;
+ ts_stats->lost = stats->lost;
+ ts_stats->err = stats->err;
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
+}
+EXPORT_SYMBOL_GPL(ocelot_port_get_ts_stats);
+
void ocelot_port_get_stats64(struct ocelot *ocelot, int port,
struct rtnl_link_stats64 *stats)
{
@@ -960,6 +980,23 @@ int ocelot_stats_init(struct ocelot *ocelot)
if (!ocelot->stats)
return -ENOMEM;
+ if (ocelot->ptp) {
+ for (int port = 0; port < ocelot->num_phys_ports; port++) {
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+
+ if (!ocelot_port)
+ continue;
+
+ ocelot_port->ts_stats = devm_kzalloc(ocelot->dev,
+ sizeof(*ocelot_port->ts_stats),
+ GFP_KERNEL);
+ if (!ocelot_port->ts_stats)
+ return -ENOMEM;
+
+ u64_stats_init(&ocelot_port->ts_stats->syncp);
+ }
+ }
+
snprintf(queue_name, sizeof(queue_name), "%s-stats",
dev_name(ocelot->dev));
ocelot->stats_queue = create_singlethread_workqueue(queue_name);
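ocelot_port_get_ts_stats() uses the usual u64_stats seqcount pattern: copy the counters, then retry if the writer's sequence changed mid-copy. A plain-C sketch of that reader loop, single-threaded here just to show the control flow, with illustrative names:

/* Sketch of the u64_stats_fetch_begin()/retry() reader pattern: keep
 * copying until the writer's sequence count is stable and even.
 */
#include <stdint.h>
#include <stdio.h>

struct ts_stats {
	uint64_t pkts, lost, err;
	unsigned int seq;        /* bumped before and after each writer update */
};

static void snapshot(const struct ts_stats *s, struct ts_stats *out)
{
	unsigned int start;

	do {
		start = s->seq;                  /* u64_stats_fetch_begin() */
		out->pkts = s->pkts;
		out->lost = s->lost;
		out->err  = s->err;
	} while (s->seq != start || (start & 1)); /* u64_stats_fetch_retry() */
}

int main(void)
{
	struct ts_stats s = { .pkts = 10, .lost = 1, .err = 0, .seq = 2 }, out;

	snapshot(&s, &out);
	printf("pkts=%llu lost=%llu err=%llu\n",
	       (unsigned long long)out.pkts,
	       (unsigned long long)out.lost,
	       (unsigned long long)out.err);
	return 0;
}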
diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
index 055b55651a49..498eec8ae61d 100644
--- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c
+++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
@@ -108,6 +108,8 @@ static struct ptp_clock_info ocelot_ptp_clock_info = {
.n_ext_ts = 0,
.n_per_out = OCELOT_PTP_PINS_NUM,
.n_pins = OCELOT_PTP_PINS_NUM,
+ .supported_perout_flags = PTP_PEROUT_DUTY_CYCLE |
+ PTP_PEROUT_PHASE,
.pps = 0,
.gettime64 = ocelot_ptp_gettime64,
.settime64 = ocelot_ptp_settime64,
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index b7d9657a7af3..e611ff7fa3fa 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -2482,7 +2482,7 @@ static int myri10ge_close(struct net_device *dev)
if (mgp->ss[0].tx.req_bytes == NULL)
return 0;
- del_timer_sync(&mgp->watchdog_timer);
+ timer_delete_sync(&mgp->watchdog_timer);
mgp->running = MYRI10GE_ETH_STOPPING;
for (i = 0; i < mgp->num_slices; i++)
napi_disable(&mgp->ss[i].napi);
@@ -3478,7 +3478,7 @@ static void myri10ge_watchdog_timer(struct timer_list *t)
u32 rx_pause_cnt;
u16 cmd;
- mgp = from_timer(mgp, t, watchdog_timer);
+ mgp = timer_container_of(mgp, t, watchdog_timer);
rx_pause_cnt = ntohl(mgp->ss[0].fw_stats->dropped_pause);
busy_slice_cnt = 0;
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index ad0c14849115..b253734dbc80 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -846,7 +846,7 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
return -ENOMEM;
SET_NETDEV_DEV(dev, &pdev->dev);
- i = pci_request_regions(pdev, DRV_NAME);
+ i = pcim_request_all_regions(pdev, DRV_NAME);
if (i)
goto err_pci_request_regions;
@@ -1786,7 +1786,7 @@ static void init_registers(struct net_device *dev)
*/
static void netdev_timer(struct timer_list *t)
{
- struct netdev_private *np = from_timer(np, t, timer);
+ struct netdev_private *np = timer_container_of(np, t, timer);
struct net_device *dev = np->dev;
void __iomem * ioaddr = ns_ioaddr(dev);
int next_tick = NATSEMI_TIMER_FREQ;
@@ -3179,7 +3179,7 @@ static int netdev_close(struct net_device *dev)
* the final WOL settings?
*/
- del_timer_sync(&np->timer);
+ timer_delete_sync(&np->timer);
disable_irq(irq);
spin_lock_irq(&np->lock);
natsemi_irq_disable(dev);
@@ -3278,7 +3278,7 @@ static int __maybe_unused natsemi_suspend(struct device *dev_d)
if (netif_running (dev)) {
const int irq = np->pci_dev->irq;
- del_timer_sync(&np->timer);
+ timer_delete_sync(&np->timer);
disable_irq(irq);
spin_lock_irq(&np->lock);
diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c
index bea969dfa536..56d5464222d9 100644
--- a/drivers/net/ethernet/natsemi/ns83820.c
+++ b/drivers/net/ethernet/natsemi/ns83820.c
@@ -1527,7 +1527,7 @@ static int ns83820_stop(struct net_device *ndev)
struct ns83820 *dev = PRIV(ndev);
/* FIXME: protect against interrupt handler? */
- del_timer_sync(&dev->tx_watchdog);
+ timer_delete_sync(&dev->tx_watchdog);
ns83820_disable_interrupts(dev);
@@ -1587,7 +1587,7 @@ static void ns83820_tx_timeout(struct net_device *ndev, unsigned int txqueue)
static void ns83820_tx_watch(struct timer_list *t)
{
- struct ns83820 *dev = from_timer(dev, t, tx_watchdog);
+ struct ns83820 *dev = timer_container_of(dev, t, tx_watchdog);
struct net_device *ndev = dev->ndev;
#if defined(DEBUG)
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index f8016dc25e0a..27443e346f9f 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -4195,7 +4195,7 @@ pci_map_failed:
static void
s2io_alarm_handle(struct timer_list *t)
{
- struct s2io_nic *sp = from_timer(sp, t, alarm_timer);
+ struct s2io_nic *sp = timer_container_of(sp, t, alarm_timer);
struct net_device *dev = sp->dev;
s2io_handle_errors(dev);
@@ -7019,7 +7019,7 @@ static void do_s2io_card_down(struct s2io_nic *sp, int do_io)
if (!is_s2io_card_up(sp))
return;
- del_timer_sync(&sp->alarm_timer);
+ timer_delete_sync(&sp->alarm_timer);
/* If s2io_set_link task is executing, wait till it completes. */
while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state)))
msleep(50);
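The from_timer() to timer_container_of() conversions in these drivers are a rename of the same container_of()-based lookup: recover the driver's private structure from the embedded timer_list pointer the callback receives. A standalone sketch of the underlying offsetof arithmetic, using generic types rather than anything driver-specific:

/* Sketch of the container_of() arithmetic that timer_container_of()
 * wraps: given a pointer to an embedded member, subtract the member's
 * offset to get back to the enclosing structure.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct timer { unsigned long expires; };

struct my_priv {
	int id;
	struct timer watchdog;   /* embedded, like struct timer_list */
};

static void timer_cb(struct timer *t)
{
	struct my_priv *priv = container_of(t, struct my_priv, watchdog);

	printf("callback for device %d\n", priv->id);
}

int main(void)
{
	struct my_priv priv = { .id = 42 };

	timer_cb(&priv.watchdog);  /* prints "callback for device 42" */
	return 0;
}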
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
index 2ec62c8d86e1..59486fe2ad18 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
@@ -20,6 +20,8 @@ nfp_bpf_cmsg_alloc(struct nfp_app_bpf *bpf, unsigned int size)
struct sk_buff *skb;
skb = nfp_app_ctrl_msg_alloc(bpf->app, size, GFP_KERNEL);
+ if (!skb)
+ return NULL;
skb_put(skb, size);
return skb;
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
index 9d97cd281f18..c03558adda91 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
@@ -458,7 +458,8 @@ int nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data,
map_id_full = be64_to_cpu(cbe->map_ptr);
map_id = map_id_full;
- if (len < sizeof(struct cmsg_bpf_event) + pkt_size + data_size)
+ if (size_add(pkt_size, data_size) > INT_MAX ||
+ len < sizeof(struct cmsg_bpf_event) + pkt_size + data_size)
return -EINVAL;
if (cbe->hdr.ver != NFP_CCM_ABI_VERSION)
return -EINVAL;
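The added size_add() guard protects the length comparison from integer overflow: if pkt_size + data_size wraps, the plain "len < header + pkt_size + data_size" test could pass for a buffer that is far too small. A minimal sketch of the same overflow-aware validation in plain C (the header length and limits are illustrative, not the nfp values):

/* Sketch: validate that a buffer of length len can hold a fixed header
 * plus two variable-length parts, without the sum overflowing first.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define HDR_LEN 16u  /* stand-in for sizeof(struct cmsg_bpf_event) */

static bool event_len_ok(size_t len, size_t pkt_size, size_t data_size)
{
	size_t sum;

	/* like size_add(): detect wrap-around before trusting the sum */
	if (__builtin_add_overflow(pkt_size, data_size, &sum))
		return false;
	if (sum > INT32_MAX)      /* mirrors the "> INT_MAX" cap */
		return false;

	return len >= HDR_LEN + sum;
}

int main(void)
{
	printf("%d\n", event_len_ok(1024, 100, 200));      /* 1: fits  */
	printf("%d\n", event_len_ok(1024, SIZE_MAX, 200)); /* 0: wraps */
	return 0;
}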
diff --git a/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c b/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c
index 515069d5637b..9e7c285eaa6b 100644
--- a/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c
+++ b/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c
@@ -266,17 +266,17 @@ static void set_sha2_512hmac(struct nfp_ipsec_cfg_add_sa *cfg, int *trunc_len)
}
}
-static int nfp_net_xfrm_add_state(struct xfrm_state *x,
+static int nfp_net_xfrm_add_state(struct net_device *dev,
+ struct xfrm_state *x,
struct netlink_ext_ack *extack)
{
- struct net_device *netdev = x->xso.real_dev;
struct nfp_ipsec_cfg_mssg msg = {};
int i, key_len, trunc_len, err = 0;
struct nfp_ipsec_cfg_add_sa *cfg;
struct nfp_net *nn;
unsigned int saidx;
- nn = netdev_priv(netdev);
+ nn = netdev_priv(dev);
cfg = &msg.cfg_add_sa;
/* General */
@@ -546,17 +546,16 @@ static int nfp_net_xfrm_add_state(struct xfrm_state *x,
return 0;
}
-static void nfp_net_xfrm_del_state(struct xfrm_state *x)
+static void nfp_net_xfrm_del_state(struct net_device *dev, struct xfrm_state *x)
{
struct nfp_ipsec_cfg_mssg msg = {
.cmd = NFP_IPSEC_CFG_MSSG_INV_SA,
.sa_idx = x->xso.offload_handle - 1,
};
- struct net_device *netdev = x->xso.real_dev;
struct nfp_net *nn;
int err;
- nn = netdev_priv(netdev);
+ nn = netdev_priv(dev);
err = nfp_net_sched_mbox_amsg_work(nn, NFP_NET_CFG_MBOX_CMD_IPSEC, &msg,
sizeof(msg), nfp_net_ipsec_cfg);
if (err)
@@ -565,20 +564,9 @@ static void nfp_net_xfrm_del_state(struct xfrm_state *x)
xa_erase(&nn->xa_ipsec, x->xso.offload_handle - 1);
}
-static bool nfp_net_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
-{
- if (x->props.family == AF_INET)
- /* Offload with IPv4 options is not supported yet */
- return ip_hdr(skb)->ihl == 5;
-
- /* Offload with IPv6 extension headers is not support yet */
- return !(ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr));
-}
-
static const struct xfrmdev_ops nfp_net_ipsec_xfrmdev_ops = {
.xdo_dev_state_add = nfp_net_xfrm_add_state,
.xdo_dev_state_delete = nfp_net_xfrm_del_state,
- .xdo_dev_offload_ok = nfp_net_ipsec_offload_ok,
};
void nfp_net_ipsec_init(struct nfp_net *nn)
diff --git a/drivers/net/ethernet/netronome/nfp/nfd3/dp.c b/drivers/net/ethernet/netronome/nfp/nfd3/dp.c
index f1c6c47564b1..08086eb76996 100644
--- a/drivers/net/ethernet/netronome/nfp/nfd3/dp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfd3/dp.c
@@ -779,7 +779,7 @@ nfp_nfd3_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
case NFP_NET_META_CSUM:
meta->csum_type = CHECKSUM_COMPLETE;
meta->csum =
- (__force __wsum)__get_unaligned_cpu32(data);
+ (__force __wsum)get_unaligned((u32 *)data);
data += 4;
break;
case NFP_NET_META_RESYNC_INFO:
diff --git a/drivers/net/ethernet/netronome/nfp/nfdk/dp.c b/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
index ebeb6ab4465c..ab3cd06ed63e 100644
--- a/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
@@ -779,7 +779,7 @@ nfp_nfdk_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
case NFP_NET_META_CSUM:
meta->csum_type = CHECKSUM_COMPLETE;
meta->csum =
- (__force __wsum)__get_unaligned_cpu32(data);
+ (__force __wsum)get_unaligned((u32 *)data);
data += 4;
break;
case NFP_NET_META_RESYNC_INFO:
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_hwmon.c b/drivers/net/ethernet/netronome/nfp/nfp_hwmon.c
index 0d6c59d6d4ae..ea6a288c0d5e 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_hwmon.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_hwmon.c
@@ -83,42 +83,12 @@ nfp_hwmon_is_visible(const void *data, enum hwmon_sensor_types type, u32 attr,
return 0;
}
-static u32 nfp_chip_config[] = {
- HWMON_C_REGISTER_TZ,
- 0
-};
-
-static const struct hwmon_channel_info nfp_chip = {
- .type = hwmon_chip,
- .config = nfp_chip_config,
-};
-
-static u32 nfp_temp_config[] = {
- HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT,
- 0
-};
-
-static const struct hwmon_channel_info nfp_temp = {
- .type = hwmon_temp,
- .config = nfp_temp_config,
-};
-
-static u32 nfp_power_config[] = {
- HWMON_P_INPUT | HWMON_P_MAX,
- HWMON_P_INPUT,
- HWMON_P_INPUT,
- 0
-};
-
-static const struct hwmon_channel_info nfp_power = {
- .type = hwmon_power,
- .config = nfp_power_config,
-};
-
static const struct hwmon_channel_info * const nfp_hwmon_info[] = {
- &nfp_chip,
- &nfp_temp,
- &nfp_power,
+ HWMON_CHANNEL_INFO(chip, HWMON_C_REGISTER_TZ),
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT),
+ HWMON_CHANNEL_INFO(power, HWMON_P_INPUT | HWMON_P_MAX,
+ HWMON_P_INPUT,
+ HWMON_P_INPUT),
NULL
};
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 98e098c09c03..932f59d70f41 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -159,7 +159,7 @@ static int nfp_net_reconfig_wait(struct nfp_net *nn, unsigned long deadline)
static void nfp_net_reconfig_timer(struct timer_list *t)
{
- struct nfp_net *nn = from_timer(nn, t, reconfig_timer);
+ struct nfp_net *nn = timer_container_of(nn, t, reconfig_timer);
spin_lock_bh(&nn->reconfig_lock);
@@ -227,7 +227,7 @@ static void nfp_net_reconfig_sync_enter(struct nfp_net *nn)
spin_unlock_bh(&nn->reconfig_lock);
if (cancelled_timer) {
- del_timer_sync(&nn->reconfig_timer);
+ timer_delete_sync(&nn->reconfig_timer);
nfp_net_reconfig_wait(nn, nn->reconfig_timer.expires);
}
@@ -2538,7 +2538,7 @@ nfp_net_alloc(struct pci_dev *pdev, const struct nfp_dev_info *dev_info,
nn->dp.num_r_vecs, num_online_cpus());
nn->max_r_vecs = nn->dp.num_r_vecs;
- nn->dp.xsk_pools = kcalloc(nn->max_r_vecs, sizeof(nn->dp.xsk_pools),
+ nn->dp.xsk_pools = kcalloc(nn->max_r_vecs, sizeof(*nn->dp.xsk_pools),
GFP_KERNEL);
if (!nn->dp.xsk_pools) {
err = -ENOMEM;
@@ -2779,7 +2779,7 @@ static void nfp_net_netdev_init(struct nfp_net *nn)
break;
}
- netdev->watchdog_timeo = msecs_to_jiffies(5 * 1000);
+ netdev->watchdog_timeo = secs_to_jiffies(5);
/* MTU range: 68 - hw-specific max */
netdev->min_mtu = ETH_MIN_MTU;
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 720f577929db..19aa1f1538aa 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -1120,20 +1120,6 @@ static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
}
}
-static void nv_napi_enable(struct net_device *dev)
-{
- struct fe_priv *np = get_nvpriv(dev);
-
- napi_enable(&np->napi);
-}
-
-static void nv_napi_disable(struct net_device *dev)
-{
- struct fe_priv *np = get_nvpriv(dev);
-
- napi_disable(&np->napi);
-}
-
#define MII_READ (-1)
/* mii_rw: read/write a register on the PHY.
*
@@ -1905,7 +1891,7 @@ packet_dropped:
/* If rx bufs are exhausted called after 50ms to attempt to refresh */
static void nv_do_rx_refill(struct timer_list *t)
{
- struct fe_priv *np = from_timer(np, t, oom_kick);
+ struct fe_priv *np = timer_container_of(np, t, oom_kick);
/* Just reschedule NAPI rx processing */
napi_schedule(&np->napi);
@@ -3114,7 +3100,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
* Changing the MTU is a rare event, it shouldn't matter.
*/
nv_disable_irq(dev);
- nv_napi_disable(dev);
+ napi_disable(&np->napi);
netif_tx_lock_bh(dev);
netif_addr_lock(dev);
spin_lock(&np->lock);
@@ -3143,7 +3129,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
spin_unlock(&np->lock);
netif_addr_unlock(dev);
netif_tx_unlock_bh(dev);
- nv_napi_enable(dev);
+ napi_enable(&np->napi);
nv_enable_irq(dev);
}
return 0;
@@ -4154,7 +4140,7 @@ static void nv_free_irq(struct net_device *dev)
static void nv_do_nic_poll(struct timer_list *t)
{
- struct fe_priv *np = from_timer(np, t, nic_poll);
+ struct fe_priv *np = timer_container_of(np, t, nic_poll);
struct net_device *dev = np->dev;
u8 __iomem *base = get_hwbase(dev);
u32 mask = 0;
@@ -4273,7 +4259,7 @@ static void nv_do_stats_poll(struct timer_list *t)
__acquires(&netdev_priv(dev)->hwstats_lock)
__releases(&netdev_priv(dev)->hwstats_lock)
{
- struct fe_priv *np = from_timer(np, t, stats_poll);
+ struct fe_priv *np = timer_container_of(np, t, stats_poll);
struct net_device *dev = np->dev;
/* If lock is currently taken, the stats are being refreshed
@@ -4731,7 +4717,7 @@ static int nv_set_ringparam(struct net_device *dev,
if (netif_running(dev)) {
nv_disable_irq(dev);
- nv_napi_disable(dev);
+ napi_disable(&np->napi);
netif_tx_lock_bh(dev);
netif_addr_lock(dev);
spin_lock(&np->lock);
@@ -4784,7 +4770,7 @@ static int nv_set_ringparam(struct net_device *dev,
spin_unlock(&np->lock);
netif_addr_unlock(dev);
netif_tx_unlock_bh(dev);
- nv_napi_enable(dev);
+ napi_enable(&np->napi);
nv_enable_irq(dev);
}
return 0;
@@ -5277,7 +5263,7 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64
if (test->flags & ETH_TEST_FL_OFFLINE) {
if (netif_running(dev)) {
netif_stop_queue(dev);
- nv_napi_disable(dev);
+ napi_disable(&np->napi);
netif_tx_lock_bh(dev);
netif_addr_lock(dev);
spin_lock_irq(&np->lock);
@@ -5334,7 +5320,7 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64
/* restart rx engine */
nv_start_rxtx(dev);
netif_start_queue(dev);
- nv_napi_enable(dev);
+ napi_enable(&np->napi);
nv_enable_hw_interrupts(dev, np->irqmask);
}
}
@@ -5576,6 +5562,7 @@ static int nv_open(struct net_device *dev)
/* ask for interrupts */
nv_enable_hw_interrupts(dev, np->irqmask);
+ netdev_lock(dev);
spin_lock_irq(&np->lock);
writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
writel(0, base + NvRegMulticastAddrB);
@@ -5594,7 +5581,7 @@ static int nv_open(struct net_device *dev)
ret = nv_update_linkspeed(dev);
nv_start_rxtx(dev);
netif_start_queue(dev);
- nv_napi_enable(dev);
+ napi_enable_locked(&np->napi);
if (ret) {
netif_carrier_on(dev);
@@ -5611,6 +5598,7 @@ static int nv_open(struct net_device *dev)
round_jiffies(jiffies + STATS_INTERVAL));
spin_unlock_irq(&np->lock);
+ netdev_unlock(dev);
/* If the loopback feature was set while the device was down, make sure
* that it's set correctly now.
@@ -5632,12 +5620,12 @@ static int nv_close(struct net_device *dev)
spin_lock_irq(&np->lock);
np->in_shutdown = 1;
spin_unlock_irq(&np->lock);
- nv_napi_disable(dev);
+ napi_disable(&np->napi);
synchronize_irq(np->pci_dev->irq);
- del_timer_sync(&np->oom_kick);
- del_timer_sync(&np->nic_poll);
- del_timer_sync(&np->stats_poll);
+ timer_delete_sync(&np->oom_kick);
+ timer_delete_sync(&np->nic_poll);
+ timer_delete_sync(&np->stats_poll);
netif_stop_queue(dev);
spin_lock_irq(&np->lock);
diff --git a/drivers/net/ethernet/oa_tc6.c b/drivers/net/ethernet/oa_tc6.c
index f9c0dcd965c2..db200e4ec284 100644
--- a/drivers/net/ethernet/oa_tc6.c
+++ b/drivers/net/ethernet/oa_tc6.c
@@ -113,6 +113,7 @@ struct oa_tc6 {
struct mii_bus *mdiobus;
struct spi_device *spi;
struct mutex spi_ctrl_lock; /* Protects spi control transfer */
+ spinlock_t tx_skb_lock; /* Protects tx skb handling */
void *spi_ctrl_tx_buf;
void *spi_ctrl_rx_buf;
void *spi_data_tx_buf;
@@ -1004,8 +1005,10 @@ static u16 oa_tc6_prepare_spi_tx_buf_for_tx_skbs(struct oa_tc6 *tc6)
for (used_tx_credits = 0; used_tx_credits < tc6->tx_credits;
used_tx_credits++) {
if (!tc6->ongoing_tx_skb) {
+ spin_lock_bh(&tc6->tx_skb_lock);
tc6->ongoing_tx_skb = tc6->waiting_tx_skb;
tc6->waiting_tx_skb = NULL;
+ spin_unlock_bh(&tc6->tx_skb_lock);
}
if (!tc6->ongoing_tx_skb)
break;
@@ -1111,8 +1114,9 @@ static int oa_tc6_spi_thread_handler(void *data)
/* This kthread will be woken up if there is a tx skb or mac-phy
* interrupt to perform spi transfer with tx chunks.
*/
- wait_event_interruptible(tc6->spi_wq, tc6->waiting_tx_skb ||
- tc6->int_flag ||
+ wait_event_interruptible(tc6->spi_wq, tc6->int_flag ||
+ (tc6->waiting_tx_skb &&
+ tc6->tx_credits) ||
kthread_should_stop());
if (kthread_should_stop())
@@ -1209,7 +1213,9 @@ netdev_tx_t oa_tc6_start_xmit(struct oa_tc6 *tc6, struct sk_buff *skb)
return NETDEV_TX_OK;
}
+ spin_lock_bh(&tc6->tx_skb_lock);
tc6->waiting_tx_skb = skb;
+ spin_unlock_bh(&tc6->tx_skb_lock);
/* Wake spi kthread to perform spi transfer */
wake_up_interruptible(&tc6->spi_wq);
@@ -1239,6 +1245,7 @@ struct oa_tc6 *oa_tc6_init(struct spi_device *spi, struct net_device *netdev)
tc6->netdev = netdev;
SET_NETDEV_DEV(netdev, &spi->dev);
mutex_init(&tc6->spi_ctrl_lock);
+ spin_lock_init(&tc6->tx_skb_lock);
/* Set the SPI controller to pump at realtime priority */
tc6->spi->rt = true;
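The new tx_skb_lock closes a race on the waiting_tx_skb pointer: the xmit path publishes it while the SPI thread consumes it, so both sides now take the same lock around the hand-off. A small pthread-based sketch of that publish/consume pattern, with generic names rather than the driver's API:

/* Sketch: hand one work item from a producer to a consumer through a
 * shared pointer, with both sides holding the same lock around the
 * pointer update so the swap-and-clear cannot be torn.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static const char *waiting;   /* like tc6->waiting_tx_skb */
static const char *ongoing;   /* like tc6->ongoing_tx_skb */

static void producer_submit(const char *item)
{
	pthread_mutex_lock(&lock);
	waiting = item;           /* oa_tc6_start_xmit() side */
	pthread_mutex_unlock(&lock);
}

static void consumer_take(void)
{
	pthread_mutex_lock(&lock);
	if (!ongoing) {           /* SPI thread side */
		ongoing = waiting;
		waiting = NULL;
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	producer_submit("frame-1");
	consumer_take();
	printf("ongoing=%s waiting=%s\n", ongoing, waiting ? waiting : "(null)");
	return 0;
}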
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 4ac29cd59f2b..e5a6f59af0b6 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -1014,8 +1014,8 @@ static void pch_gbe_set_mode(struct pch_gbe_adapter *adapter, u16 speed,
*/
static void pch_gbe_watchdog(struct timer_list *t)
{
- struct pch_gbe_adapter *adapter = from_timer(adapter, t,
- watchdog_timer);
+ struct pch_gbe_adapter *adapter = timer_container_of(adapter, t,
+ watchdog_timer);
struct net_device *netdev = adapter->netdev;
struct pch_gbe_hw *hw = &adapter->hw;
@@ -1916,7 +1916,7 @@ void pch_gbe_down(struct pch_gbe_adapter *adapter)
pch_gbe_irq_disable(adapter);
pch_gbe_free_irq(adapter);
- del_timer_sync(&adapter->watchdog_timer);
+ timer_delete_sync(&adapter->watchdog_timer);
netdev->tx_queue_len = adapter->tx_queue_len;
netif_carrier_off(netdev);
diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c
index a36d422b5173..b0de7e9f12a5 100644
--- a/drivers/net/ethernet/packetengines/hamachi.c
+++ b/drivers/net/ethernet/packetengines/hamachi.c
@@ -1025,7 +1025,7 @@ static inline int hamachi_tx(struct net_device *dev)
static void hamachi_timer(struct timer_list *t)
{
- struct hamachi_private *hmp = from_timer(hmp, t, timer);
+ struct hamachi_private *hmp = timer_container_of(hmp, t, timer);
struct net_device *dev = hmp->mii_if.dev;
void __iomem *ioaddr = hmp->base;
int next_tick = 10*HZ;
@@ -1712,7 +1712,7 @@ static int hamachi_close(struct net_device *dev)
free_irq(hmp->pci_dev->irq, dev);
- del_timer_sync(&hmp->timer);
+ timer_delete_sync(&hmp->timer);
/* Free all the skbuffs in the Rx queue. */
for (i = 0; i < RX_RING_SIZE; i++) {
diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c
index c0515dc63246..1e25ac13a7d8 100644
--- a/drivers/net/ethernet/packetengines/yellowfin.c
+++ b/drivers/net/ethernet/packetengines/yellowfin.c
@@ -652,7 +652,7 @@ err_free_irq:
static void yellowfin_timer(struct timer_list *t)
{
- struct yellowfin_private *yp = from_timer(yp, t, timer);
+ struct yellowfin_private *yp = timer_container_of(yp, t, timer);
struct net_device *dev = pci_get_drvdata(yp->pci_dev);
void __iomem *ioaddr = yp->base;
int next_tick = 60*HZ;
@@ -1222,7 +1222,7 @@ static int yellowfin_close(struct net_device *dev)
iowrite32(0x80000000, ioaddr + RxCtrl);
iowrite32(0x80000000, ioaddr + TxCtrl);
- del_timer(&yp->timer);
+ timer_delete(&yp->timer);
#if defined(__i386__)
if (yellowfin_debug > 2) {
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index cb4e12df7719..fe58024b5901 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -934,7 +934,8 @@ static irqreturn_t pasemi_mac_rx_intr(int irq, void *data)
static void pasemi_mac_tx_timer(struct timer_list *t)
{
- struct pasemi_mac_txring *txring = from_timer(txring, t, clean_timer);
+ struct pasemi_mac_txring *txring = timer_container_of(txring, t,
+ clean_timer);
struct pasemi_mac *mac = txring->mac;
pasemi_mac_clean_tx(txring);
@@ -1288,7 +1289,7 @@ static int pasemi_mac_close(struct net_device *dev)
phy_disconnect(dev->phydev);
}
- del_timer_sync(&mac->tx->clean_timer);
+ timer_delete_sync(&mac->tx->clean_timer);
netif_stop_queue(dev);
napi_disable(&mac->napi);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic.h b/drivers/net/ethernet/pensando/ionic/ionic.h
index 1c61390677f7..04f00ea94230 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic.h
@@ -18,8 +18,6 @@ struct ionic_lif;
#define PCI_DEVICE_ID_PENSANDO_IONIC_ETH_PF 0x1002
#define PCI_DEVICE_ID_PENSANDO_IONIC_ETH_VF 0x1003
-#define IONIC_ASIC_TYPE_ELBA 2
-
#define DEVCMD_TIMEOUT 5
#define IONIC_ADMINQ_TIME_SLICE msecs_to_jiffies(100)
@@ -59,7 +57,6 @@ struct ionic {
DECLARE_BITMAP(intrs, IONIC_INTR_CTRL_REGS_MAX);
cpumask_var_t *affinity_masks;
struct delayed_work doorbell_check_dwork;
- struct work_struct nb_work;
struct notifier_block nb;
struct rw_semaphore vf_op_lock; /* lock for VF operations */
struct ionic_vf *vfs;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
index f5dc876eb500..4c377bdc62c8 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
@@ -441,7 +441,7 @@ static void ionic_reset_prepare(struct pci_dev *pdev)
set_bit(IONIC_LIF_F_FW_RESET, lif->state);
- del_timer_sync(&ionic->watchdog_timer);
+ timer_delete_sync(&ionic->watchdog_timer);
cancel_work_sync(&lif->deferred.work);
mutex_lock(&lif->queue_lock);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
index 9e42d599840d..18b9c8a810ae 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
@@ -13,7 +13,7 @@
static void ionic_watchdog_cb(struct timer_list *t)
{
- struct ionic *ionic = from_timer(ionic, t, watchdog_timer);
+ struct ionic *ionic = timer_container_of(ionic, t, watchdog_timer);
struct ionic_lif *lif = ionic->lif;
struct ionic_deferred_work *work;
int hb;
@@ -277,7 +277,10 @@ void ionic_dev_teardown(struct ionic *ionic)
idev->phy_cmb_pages = 0;
idev->cmb_npages = 0;
- destroy_workqueue(ionic->wq);
+ if (ionic->wq) {
+ destroy_workqueue(ionic->wq);
+ ionic->wq = NULL;
+ }
mutex_destroy(&idev->cmb_inuse_lock);
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
index dda22fa4448c..92f30ff2d631 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
@@ -158,6 +158,20 @@ static int ionic_get_link_ksettings(struct net_device *netdev,
25000baseCR_Full);
copper_seen++;
break;
+ case IONIC_XCVR_PID_QSFP_50G_CR2_FC:
+ case IONIC_XCVR_PID_QSFP_50G_CR2:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 50000baseCR2_Full);
+ copper_seen++;
+ break;
+ case IONIC_XCVR_PID_QSFP_200G_CR4:
+ ethtool_link_ksettings_add_link_mode(ks, supported, 200000baseCR4_Full);
+ copper_seen++;
+ break;
+ case IONIC_XCVR_PID_QSFP_400G_CR4:
+ ethtool_link_ksettings_add_link_mode(ks, supported, 400000baseCR4_Full);
+ copper_seen++;
+ break;
case IONIC_XCVR_PID_SFP_10GBASE_AOC:
case IONIC_XCVR_PID_SFP_10GBASE_CU:
ethtool_link_ksettings_add_link_mode(ks, supported,
@@ -196,6 +210,31 @@ static int ionic_get_link_ksettings(struct net_device *netdev,
ethtool_link_ksettings_add_link_mode(ks, supported,
25000baseSR_Full);
break;
+ case IONIC_XCVR_PID_QSFP_200G_AOC:
+ case IONIC_XCVR_PID_QSFP_200G_SR4:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 200000baseSR4_Full);
+ break;
+ case IONIC_XCVR_PID_QSFP_200G_FR4:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 200000baseLR4_ER4_FR4_Full);
+ break;
+ case IONIC_XCVR_PID_QSFP_200G_DR4:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 200000baseDR4_Full);
+ break;
+ case IONIC_XCVR_PID_QSFP_400G_FR4:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 400000baseLR4_ER4_FR4_Full);
+ break;
+ case IONIC_XCVR_PID_QSFP_400G_DR4:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 400000baseDR4_Full);
+ break;
+ case IONIC_XCVR_PID_QSFP_400G_SR4:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 400000baseSR4_Full);
+ break;
case IONIC_XCVR_PID_SFP_10GBASE_SR:
ethtool_link_ksettings_add_link_mode(ks, supported,
10000baseSR_Full);
@@ -909,63 +948,20 @@ static int ionic_get_tunable(struct net_device *netdev,
return 0;
}
-static int ionic_get_module_info(struct net_device *netdev,
- struct ethtool_modinfo *modinfo)
-
+static int ionic_do_module_copy(u8 *dst, u8 *src, u32 len)
{
- struct ionic_lif *lif = netdev_priv(netdev);
- struct ionic_dev *idev = &lif->ionic->idev;
- struct ionic_xcvr_status *xcvr;
- struct sfp_eeprom_base *sfp;
-
- xcvr = &idev->port_info->status.xcvr;
- sfp = (struct sfp_eeprom_base *) xcvr->sprom;
-
- /* report the module data type and length */
- switch (sfp->phys_id) {
- case SFF8024_ID_SFP:
- modinfo->type = ETH_MODULE_SFF_8079;
- modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
- break;
- case SFF8024_ID_QSFP_8436_8636:
- case SFF8024_ID_QSFP28_8636:
- modinfo->type = ETH_MODULE_SFF_8436;
- modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
- break;
- default:
- netdev_info(netdev, "unknown xcvr type 0x%02x\n",
- xcvr->sprom[0]);
- modinfo->type = 0;
- modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
- break;
- }
-
- return 0;
-}
-
-static int ionic_get_module_eeprom(struct net_device *netdev,
- struct ethtool_eeprom *ee,
- u8 *data)
-{
- struct ionic_lif *lif = netdev_priv(netdev);
- struct ionic_dev *idev = &lif->ionic->idev;
- struct ionic_xcvr_status *xcvr;
- char tbuf[sizeof(xcvr->sprom)];
+ char tbuf[sizeof_field(struct ionic_xcvr_status, sprom)];
int count = 10;
- u32 len;
/* The NIC keeps the module prom up-to-date in the DMA space
* so we can simply copy the module bytes into the data buffer.
*/
- xcvr = &idev->port_info->status.xcvr;
- len = min_t(u32, sizeof(xcvr->sprom), ee->len);
-
do {
- memcpy(data, xcvr->sprom, len);
- memcpy(tbuf, xcvr->sprom, len);
+ memcpy(dst, src, len);
+ memcpy(tbuf, src, len);
/* Let's make sure we got a consistent copy */
- if (!memcmp(data, tbuf, len))
+ if (!memcmp(dst, tbuf, len))
break;
} while (--count);
@@ -976,6 +972,48 @@ static int ionic_get_module_eeprom(struct net_device *netdev,
return 0;
}
+static int ionic_get_module_eeprom_by_page(struct net_device *netdev,
+ const struct ethtool_module_eeprom *page_data,
+ struct netlink_ext_ack *extack)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+ struct ionic_dev *idev = &lif->ionic->idev;
+ u32 err = -EINVAL;
+ u8 *src;
+
+ if (!page_data->length)
+ return -EINVAL;
+
+ if (page_data->bank != 0) {
+ NL_SET_ERR_MSG_MOD(extack, "Only bank 0 is supported");
+ return -EINVAL;
+ }
+
+ switch (page_data->page) {
+ case 0:
+ src = &idev->port_info->status.xcvr.sprom[page_data->offset];
+ break;
+ case 1:
+ src = &idev->port_info->sprom_page1[page_data->offset - 128];
+ break;
+ case 2:
+ src = &idev->port_info->sprom_page2[page_data->offset - 128];
+ break;
+ case 17:
+ src = &idev->port_info->sprom_page17[page_data->offset - 128];
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ memset(page_data->data, 0, page_data->length);
+ err = ionic_do_module_copy(page_data->data, src, page_data->length);
+ if (err)
+ return err;
+
+ return page_data->length;
+}
+
static int ionic_get_ts_info(struct net_device *netdev,
struct kernel_ethtool_ts_info *info)
{
@@ -1121,8 +1159,7 @@ static const struct ethtool_ops ionic_ethtool_ops = {
.set_rxfh = ionic_set_rxfh,
.get_tunable = ionic_get_tunable,
.set_tunable = ionic_set_tunable,
- .get_module_info = ionic_get_module_info,
- .get_module_eeprom = ionic_get_module_eeprom,
+ .get_module_eeprom_by_page = ionic_get_module_eeprom_by_page,
.get_pauseparam = ionic_get_pauseparam,
.set_pauseparam = ionic_set_pauseparam,
.get_fecparam = ionic_get_fecparam,
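ionic_get_module_eeprom_by_page() maps the requested (bank, page, offset) onto the firmware-maintained sprom copies: page 0 indexes the base sprom directly, while pages 1, 2 and 17 are held as separate 128-byte blocks, so their SFF-style 128..255 offsets are rebased by 128. A small sketch of that translation, with buffer names mirroring the patch but illustrative sizes and no bounds checking:

/* Sketch of the page/offset translation used above: page 0 reads the
 * base sprom at the raw offset, pages 1/2/17 read their own 128-byte
 * copies with the offset rebased to 0..127.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint8_t sprom[256];        /* base page, like status.xcvr.sprom */
static uint8_t page1[128], page2[128], page17[128];

static int eeprom_read(int page, unsigned int offset, uint8_t *dst,
		       unsigned int len)
{
	const uint8_t *src;

	switch (page) {
	case 0:  src = &sprom[offset];        break;
	case 1:  src = &page1[offset - 128];  break;
	case 2:  src = &page2[offset - 128];  break;
	case 17: src = &page17[offset - 128]; break;
	default: return -1;                   /* -EOPNOTSUPP in the driver */
	}

	memcpy(dst, src, len);
	return len;
}

int main(void)
{
	uint8_t buf[8];

	page1[0] = 0xab;
	printf("read %d bytes, first=0x%02x\n",
	       eeprom_read(1, 128, buf, sizeof(buf)), buf[0]);
	return 0;
}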
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_if.h b/drivers/net/ethernet/pensando/ionic/ionic_if.h
index 9c85c0706c6e..f1ddbe9994a3 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_if.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_if.h
@@ -1277,7 +1277,10 @@ enum ionic_xcvr_pid {
IONIC_XCVR_PID_SFP_25GBASE_CR_S = 3,
IONIC_XCVR_PID_SFP_25GBASE_CR_L = 4,
IONIC_XCVR_PID_SFP_25GBASE_CR_N = 5,
-
+ IONIC_XCVR_PID_QSFP_50G_CR2_FC = 6,
+ IONIC_XCVR_PID_QSFP_50G_CR2 = 7,
+ IONIC_XCVR_PID_QSFP_200G_CR4 = 8,
+ IONIC_XCVR_PID_QSFP_400G_CR4 = 9,
/* Fiber */
IONIC_XCVR_PID_QSFP_100G_AOC = 50,
IONIC_XCVR_PID_QSFP_100G_ACC = 51,
@@ -1303,6 +1306,15 @@ enum ionic_xcvr_pid {
IONIC_XCVR_PID_SFP_25GBASE_ACC = 71,
IONIC_XCVR_PID_SFP_10GBASE_T = 72,
IONIC_XCVR_PID_SFP_1000BASE_T = 73,
+ IONIC_XCVR_PID_QSFP_200G_AOC = 74,
+ IONIC_XCVR_PID_QSFP_200G_FR4 = 75,
+ IONIC_XCVR_PID_QSFP_200G_DR4 = 76,
+ IONIC_XCVR_PID_QSFP_200G_SR4 = 77,
+ IONIC_XCVR_PID_QSFP_200G_ACC = 78,
+ IONIC_XCVR_PID_QSFP_400G_FR4 = 79,
+ IONIC_XCVR_PID_QSFP_400G_DR4 = 80,
+ IONIC_XCVR_PID_QSFP_400G_SR4 = 81,
+ IONIC_XCVR_PID_QSFP_400G_VR4 = 82,
};
/**
@@ -1404,6 +1416,8 @@ struct ionic_xcvr_status {
*/
union ionic_port_config {
struct {
+#define IONIC_SPEED_400G 400000 /* 400G in Mbps */
+#define IONIC_SPEED_200G 200000 /* 200G in Mbps */
#define IONIC_SPEED_100G 100000 /* 100G in Mbps */
#define IONIC_SPEED_50G 50000 /* 50G in Mbps */
#define IONIC_SPEED_40G 40000 /* 40G in Mbps */
@@ -2825,6 +2839,10 @@ union ionic_port_identity {
* @status: Port status data
* @stats: Port statistics data
* @mgmt_stats: Port management statistics data
+ * @sprom_epage: Extended Transceiver sprom
+ * @sprom_page1: Extended Transceiver sprom, page 1
+ * @sprom_page2: Extended Transceiver sprom, page 2
+ * @sprom_page17: Extended Transceiver sprom, page 17
* @rsvd: reserved byte(s)
* @pb_stats: uplink pb drop stats
*/
@@ -2835,8 +2853,17 @@ struct ionic_port_info {
struct ionic_port_stats stats;
struct ionic_mgmt_port_stats mgmt_stats;
};
- /* room for pb_stats to start at 2k offset */
- u8 rsvd[760];
+ union {
+ u8 sprom_epage[384];
+ struct {
+ u8 sprom_page1[128];
+ u8 sprom_page2[128];
+ u8 sprom_page17[128];
+ };
+ };
+ u8 rsvd[376];
+
+ /* pb_stats must start at 2k offset */
struct ionic_port_pb_stats pb_stats;
};
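The reserved area shrinks from 760 to 376 bytes because the new 3 x 128-byte sprom pages (a 384-byte union) are carved out of it, which keeps pb_stats at its 2 KiB offset. A compile-time sketch of that arithmetic on a simplified tail of the structure (field sizes taken from the hunk above; the rest of struct ionic_port_info is deliberately left out):

/* Compile-time sketch: the new sprom union plus the smaller reserved
 * area occupy exactly the bytes the old 760-byte pad did, so pb_stats
 * keeps its 2 KiB offset.
 */
#include <assert.h>
#include <stdint.h>

union sprom_pages {
	uint8_t sprom_epage[384];
	struct {
		uint8_t sprom_page1[128];
		uint8_t sprom_page2[128];
		uint8_t sprom_page17[128];
	};
};

struct port_info_tail {
	union sprom_pages pages;
	uint8_t rsvd[376];
};

static_assert(sizeof(union sprom_pages) == 384, "pages must cover 3 x 128 bytes");
static_assert(sizeof(struct port_info_tail) == 760, "same footprint as old rsvd[760]");

int main(void) { return 0; }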
@@ -3209,7 +3236,11 @@ union ionic_adminq_comp {
#define IONIC_BAR0_INTR_CTRL_OFFSET 0x2000
#define IONIC_DEV_CMD_DONE 0x00000001
-#define IONIC_ASIC_TYPE_CAPRI 0
+#define IONIC_ASIC_TYPE_NONE 0
+#define IONIC_ASIC_TYPE_CAPRI 1
+#define IONIC_ASIC_TYPE_ELBA 2
+#define IONIC_ASIC_TYPE_GIGLIO 3
+#define IONIC_ASIC_TYPE_SALINA 4
/**
* struct ionic_doorbell - Doorbell register layout
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 40496587b2b3..7707a9e53c43 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -3265,7 +3265,7 @@ int ionic_lif_alloc(struct ionic *ionic)
lif->netdev->min_mtu = max_t(unsigned int, ETH_MIN_MTU,
le32_to_cpu(lif->identity->eth.min_frame_size));
lif->netdev->max_mtu =
- le32_to_cpu(lif->identity->eth.max_frame_size) - ETH_HLEN - VLAN_HLEN;
+ le32_to_cpu(lif->identity->eth.max_frame_size) - VLAN_ETH_HLEN;
lif->neqs = ionic->neqs_per_lif;
lif->nxqs = ionic->ntxqs_per_lif;
@@ -3804,10 +3804,6 @@ err_out_adminq_deinit:
return err;
}
-static void ionic_lif_notify_work(struct work_struct *ws)
-{
-}
-
static void ionic_lif_set_netdev_info(struct ionic_lif *lif)
{
struct ionic_admin_ctx ctx = {
@@ -3858,8 +3854,6 @@ int ionic_lif_register(struct ionic_lif *lif)
ionic_lif_register_phc(lif);
- INIT_WORK(&lif->ionic->nb_work, ionic_lif_notify_work);
-
lif->ionic->nb.notifier_call = ionic_lif_notify;
err = register_netdevice_notifier(&lif->ionic->nb);
@@ -3869,8 +3863,8 @@ int ionic_lif_register(struct ionic_lif *lif)
/* only register LIF0 for now */
err = register_netdev(lif->netdev);
if (err) {
- dev_err(lif->ionic->dev, "Cannot register net device, aborting\n");
- ionic_lif_unregister_phc(lif);
+ dev_err(lif->ionic->dev, "Cannot register net device: %d, aborting\n", err);
+ ionic_lif_unregister(lif);
return err;
}
@@ -3885,7 +3879,6 @@ void ionic_lif_unregister(struct ionic_lif *lif)
{
if (lif->ionic->nb.notifier_call) {
unregister_netdevice_notifier(&lif->ionic->nb);
- cancel_work_sync(&lif->ionic->nb_work);
lif->ionic->nb.notifier_call = NULL;
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
index 0f817c3f92d8..0e60a6bef99a 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
@@ -81,8 +81,9 @@ static int ionic_error_to_errno(enum ionic_status_code code)
case IONIC_RC_EQTYPE:
case IONIC_RC_EQID:
case IONIC_RC_EINVAL:
- case IONIC_RC_ENOSUPP:
return -EINVAL;
+ case IONIC_RC_ENOSUPP:
+ return -EOPNOTSUPP;
case IONIC_RC_EPERM:
return -EPERM;
case IONIC_RC_ENOENT:
@@ -515,9 +516,9 @@ static int __ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_seconds,
unsigned long start_time;
unsigned long max_wait;
unsigned long duration;
- int done = 0;
bool fw_up;
int opcode;
+ bool done;
int err;
/* Wait for dev cmd to complete, retrying if we get EAGAIN,
@@ -525,6 +526,7 @@ static int __ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_seconds,
*/
max_wait = jiffies + (max_seconds * HZ);
try_again:
+ done = false;
opcode = idev->opcode;
start_time = jiffies;
for (fw_up = ionic_is_fw_running(idev);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index 2ac59564ded1..d10b58ebf603 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -321,7 +321,7 @@ static int ionic_xdp_post_frame(struct ionic_queue *q, struct xdp_frame *frame,
len, DMA_TO_DEVICE);
} else /* XDP_REDIRECT */ {
dma_addr = ionic_tx_map_single(q, frame->data, len);
- if (!dma_addr)
+ if (dma_addr == DMA_MAPPING_ERROR)
return -EIO;
}
@@ -357,7 +357,7 @@ static int ionic_xdp_post_frame(struct ionic_queue *q, struct xdp_frame *frame,
} else {
dma_addr = ionic_tx_map_frag(q, frag, 0,
skb_frag_size(frag));
- if (dma_mapping_error(q->dev, dma_addr)) {
+ if (dma_addr == DMA_MAPPING_ERROR) {
ionic_tx_desc_unmap_bufs(q, desc_info);
return -EIO;
}
@@ -1083,7 +1083,7 @@ static dma_addr_t ionic_tx_map_single(struct ionic_queue *q,
net_warn_ratelimited("%s: DMA single map failed on %s!\n",
dev_name(dev), q->name);
q_to_tx_stats(q)->dma_map_err++;
- return 0;
+ return DMA_MAPPING_ERROR;
}
return dma_addr;
}
@@ -1100,7 +1100,7 @@ static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q,
net_warn_ratelimited("%s: DMA frag map failed on %s!\n",
dev_name(dev), q->name);
q_to_tx_stats(q)->dma_map_err++;
- return 0;
+ return DMA_MAPPING_ERROR;
}
return dma_addr;
}
@@ -1116,7 +1116,7 @@ static int ionic_tx_map_skb(struct ionic_queue *q, struct sk_buff *skb,
int frag_idx;
dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb));
- if (!dma_addr)
+ if (dma_addr == DMA_MAPPING_ERROR)
return -EIO;
buf_info->dma_addr = dma_addr;
buf_info->len = skb_headlen(skb);
@@ -1126,7 +1126,7 @@ static int ionic_tx_map_skb(struct ionic_queue *q, struct sk_buff *skb,
nfrags = skb_shinfo(skb)->nr_frags;
for (frag_idx = 0; frag_idx < nfrags; frag_idx++, frag++) {
dma_addr = ionic_tx_map_frag(q, frag, 0, skb_frag_size(frag));
- if (!dma_addr)
+ if (dma_addr == DMA_MAPPING_ERROR)
goto dma_fail;
buf_info->dma_addr = dma_addr;
buf_info->len = skb_frag_size(frag);
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 9cff0a8ffb2c..3383ee1dad14 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -2832,7 +2832,7 @@ netxen_sysfs_validate_crb(struct netxen_adapter *adapter,
static ssize_t
netxen_sysfs_read_crb(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset, size_t size)
{
struct device *dev = kobj_to_dev(kobj);
@@ -2860,7 +2860,7 @@ netxen_sysfs_read_crb(struct file *filp, struct kobject *kobj,
static ssize_t
netxen_sysfs_write_crb(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset, size_t size)
{
struct device *dev = kobj_to_dev(kobj);
@@ -2901,7 +2901,7 @@ netxen_sysfs_validate_mem(struct netxen_adapter *adapter,
static ssize_t
netxen_sysfs_read_mem(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset, size_t size)
{
struct device *dev = kobj_to_dev(kobj);
@@ -2922,7 +2922,7 @@ netxen_sysfs_read_mem(struct file *filp, struct kobject *kobj,
}
static ssize_t netxen_sysfs_write_mem(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t offset, size_t size)
{
struct device *dev = kobj_to_dev(kobj);
@@ -2946,20 +2946,20 @@ static ssize_t netxen_sysfs_write_mem(struct file *filp, struct kobject *kobj,
static const struct bin_attribute bin_attr_crb = {
.attr = { .name = "crb", .mode = 0644 },
.size = 0,
- .read = netxen_sysfs_read_crb,
- .write = netxen_sysfs_write_crb,
+ .read_new = netxen_sysfs_read_crb,
+ .write_new = netxen_sysfs_write_crb,
};
static const struct bin_attribute bin_attr_mem = {
.attr = { .name = "mem", .mode = 0644 },
.size = 0,
- .read = netxen_sysfs_read_mem,
- .write = netxen_sysfs_write_mem,
+ .read_new = netxen_sysfs_read_mem,
+ .write_new = netxen_sysfs_write_mem,
};
static ssize_t
netxen_sysfs_read_dimm(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset, size_t size)
{
struct device *dev = kobj_to_dev(kobj);
@@ -3082,7 +3082,7 @@ out:
static const struct bin_attribute bin_attr_dimm = {
.attr = { .name = "dimm", .mode = 0644 },
.size = sizeof(struct netxen_dimm_cfg),
- .read = netxen_sysfs_read_dimm,
+ .read_new = netxen_sysfs_read_dimm,
};
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index b7def3b54937..016b575861b9 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -939,7 +939,6 @@ u16 qed_get_cm_pq_idx_ofld_mtc(struct qed_hwfn *p_hwfn, u8 tc);
u16 qed_get_cm_pq_idx_llt_mtc(struct qed_hwfn *p_hwfn, u8 tc);
/* doorbell recovery mechanism */
-void qed_db_recovery_dp(struct qed_hwfn *p_hwfn);
void qed_db_recovery_execute(struct qed_hwfn *p_hwfn);
bool qed_edpm_enabled(struct qed_hwfn *p_hwfn);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h
index f6cd1b3efdfd..27e91d0d39f8 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h
@@ -1305,37 +1305,6 @@ enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
char *results_buf);
/**
- * qed_print_mcp_trace_results_cont(): Prints MCP Trace results, and
- * keeps the MCP trace meta data allocated, to support continuous MCP Trace
- * parsing. After the continuous parsing ends, mcp_trace_free_meta_data should
- * be called to free the meta data.
- *
- * @p_hwfn: HW device data.
- * @dump_buf: MVP trace dump buffer, starting from the header.
- * @results_buf: Buffer for printing the mcp trace results.
- *
- * Return: Error if the parsing fails, ok otherwise.
- */
-enum dbg_status qed_print_mcp_trace_results_cont(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- char *results_buf);
-
-/**
- * qed_print_mcp_trace_line(): Prints MCP Trace results for a single line
- *
- * @p_hwfn: HW device data.
- * @dump_buf: MCP trace dump buffer, starting from the header.
- * @num_dumped_bytes: Number of bytes that were dumped.
- * @results_buf: Buffer for printing the mcp trace results.
- *
- * Return: Error if the parsing fails, ok otherwise.
- */
-enum dbg_status qed_print_mcp_trace_line(struct qed_hwfn *p_hwfn,
- u8 *dump_buf,
- u32 num_dumped_bytes,
- char *results_buf);
-
-/**
* qed_mcp_trace_free_meta_data(): Frees the MCP Trace meta data.
* Should be called after continuous MCP Trace parsing.
*
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index 464a72afb758..9c3d3dd2f847 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -7614,31 +7614,6 @@ enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
results_buf, &parsed_buf_size, true);
}
-enum dbg_status qed_print_mcp_trace_results_cont(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- char *results_buf)
-{
- u32 parsed_buf_size;
-
- return qed_parse_mcp_trace_dump(p_hwfn, dump_buf, results_buf,
- &parsed_buf_size, false);
-}
-
-enum dbg_status qed_print_mcp_trace_line(struct qed_hwfn *p_hwfn,
- u8 *dump_buf,
- u32 num_dumped_bytes,
- char *results_buf)
-{
- u32 parsed_results_bytes;
-
- return qed_parse_mcp_trace_buf(p_hwfn,
- dump_buf,
- num_dumped_bytes,
- 0,
- num_dumped_bytes,
- results_buf, &parsed_results_bytes);
-}
-
/* Frees the specified MCP Trace meta data */
void qed_mcp_trace_free_meta_data(struct qed_hwfn *p_hwfn)
{
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 86a93cac2647..9659ce5b0712 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -255,25 +255,6 @@ static void qed_db_recovery_teardown(struct qed_hwfn *p_hwfn)
p_hwfn->db_recovery_info.db_recovery_counter = 0;
}
-/* Print the content of the doorbell recovery mechanism */
-void qed_db_recovery_dp(struct qed_hwfn *p_hwfn)
-{
- struct qed_db_recovery_entry *db_entry = NULL;
-
- DP_NOTICE(p_hwfn,
- "Displaying doorbell recovery database. Counter was %d\n",
- p_hwfn->db_recovery_info.db_recovery_counter);
-
- /* Protect the list */
- spin_lock_bh(&p_hwfn->db_recovery_info.lock);
- list_for_each_entry(db_entry,
- &p_hwfn->db_recovery_info.list, list_entry) {
- qed_db_recovery_dp_entry(p_hwfn, db_entry, "Printing");
- }
-
- spin_unlock_bh(&p_hwfn->db_recovery_info.lock);
-}
-
/* Ring the doorbell of a single doorbell recovery entry */
static void qed_db_recovery_ring(struct qed_hwfn *p_hwfn,
struct qed_db_recovery_entry *db_entry)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index ed1a84542ad2..10e355397cee 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -2666,58 +2666,6 @@ void qed_gft_config(struct qed_hwfn *p_hwfn,
void qed_enable_context_validation(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
-/**
- * qed_calc_session_ctx_validation(): Calcualte validation byte for
- * session context.
- *
- * @p_ctx_mem: Pointer to context memory.
- * @ctx_size: Context size.
- * @ctx_type: Context type.
- * @cid: Context cid.
- *
- * Return: Void.
- */
-void qed_calc_session_ctx_validation(void *p_ctx_mem,
- u16 ctx_size, u8 ctx_type, u32 cid);
-
-/**
- * qed_calc_task_ctx_validation(): Calcualte validation byte for task
- * context.
- *
- * @p_ctx_mem: Pointer to context memory.
- * @ctx_size: Context size.
- * @ctx_type: Context type.
- * @tid: Context tid.
- *
- * Return: Void.
- */
-void qed_calc_task_ctx_validation(void *p_ctx_mem,
- u16 ctx_size, u8 ctx_type, u32 tid);
-
-/**
- * qed_memset_session_ctx(): Memset session context to 0 while
- * preserving validation bytes.
- *
- * @p_ctx_mem: Pointer to context memory.
- * @ctx_size: Size to initialzie.
- * @ctx_type: Context type.
- *
- * Return: Void.
- */
-void qed_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type);
-
-/**
- * qed_memset_task_ctx(): Memset task context to 0 while preserving
- * validation bytes.
- *
- * @p_ctx_mem: Pointer to context memory.
- * @ctx_size: size to initialzie.
- * @ctx_type: context type.
- *
- * Return: Void.
- */
-void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type);
-
#define NUM_STORMS 6
/**
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c
index 9e5f0dbc8a07..9907973399dc 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c
@@ -69,17 +69,6 @@ int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn)
return 0;
}
-void qed_ptt_invalidate(struct qed_hwfn *p_hwfn)
-{
- struct qed_ptt *p_ptt;
- int i;
-
- for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
- p_ptt = &p_hwfn->p_ptt_pool->ptts[i];
- p_ptt->pxp.offset = QED_BAR_INVALID_OFFSET;
- }
-}
-
void qed_ptt_pool_free(struct qed_hwfn *p_hwfn)
{
kfree(p_hwfn->p_ptt_pool);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.h b/drivers/net/ethernet/qlogic/qed/qed_hw.h
index e535983ce21b..3c98f58a184f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.h
@@ -62,15 +62,6 @@ enum _dmae_cmd_crc_mask {
void qed_gtt_init(struct qed_hwfn *p_hwfn);
/**
- * qed_ptt_invalidate(): Forces all ptt entries to be re-configured
- *
- * @p_hwfn: HW device data.
- *
- * Return: Void.
- */
-void qed_ptt_invalidate(struct qed_hwfn *p_hwfn);
-
-/**
* qed_ptt_pool_alloc(): Allocate and initialize PTT pool.
*
* @p_hwfn: HW device data.
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
index 407029a36fa1..aa20bb8caa9a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
@@ -18,16 +18,6 @@
#define CDU_VALIDATION_DEFAULT_CFG CDU_CONTEXT_VALIDATION_DEFAULT_CFG
-static u16 con_region_offsets[3][NUM_OF_CONNECTION_TYPES] = {
- {400, 336, 352, 368, 304, 384, 416, 352}, /* region 3 offsets */
- {528, 496, 416, 512, 448, 512, 544, 480}, /* region 4 offsets */
- {608, 544, 496, 576, 576, 592, 624, 560} /* region 5 offsets */
-};
-
-static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES] = {
- {240, 240, 112, 0, 0, 0, 0, 96} /* region 1 offsets */
-};
-
/* General constants */
#define QM_PQ_MEM_4KB(pq_size) (pq_size ? DIV_ROUND_UP((pq_size + 1) * \
QM_PQ_ELEMENT_SIZE, \
@@ -1576,134 +1566,6 @@ void qed_gft_config(struct qed_hwfn *p_hwfn,
qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
}
-DECLARE_CRC8_TABLE(cdu_crc8_table);
-
-/* Calculate and return CDU validation byte per connection type/region/cid */
-static u8 qed_calc_cdu_validation_byte(u8 conn_type, u8 region, u32 cid)
-{
- const u8 validation_cfg = CDU_VALIDATION_DEFAULT_CFG;
- u8 crc, validation_byte = 0;
- static u8 crc8_table_valid; /* automatically initialized to 0 */
- u32 validation_string = 0;
- __be32 data_to_crc;
-
- if (!crc8_table_valid) {
- crc8_populate_msb(cdu_crc8_table, 0x07);
- crc8_table_valid = 1;
- }
-
- /* The CRC is calculated on the String-to-compress:
- * [31:8] = {CID[31:20],CID[11:0]}
- * [7:4] = Region
- * [3:0] = Type
- */
- if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_CID) & 1)
- validation_string |= (cid & 0xFFF00000) | ((cid & 0xFFF) << 8);
-
- if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_REGION) & 1)
- validation_string |= ((region & 0xF) << 4);
-
- if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_TYPE) & 1)
- validation_string |= (conn_type & 0xF);
-
- /* Convert to big-endian and calculate CRC8 */
- data_to_crc = cpu_to_be32(validation_string);
- crc = crc8(cdu_crc8_table, (u8 *)&data_to_crc, sizeof(data_to_crc),
- CRC8_INIT_VALUE);
-
- /* The validation byte [7:0] is composed:
- * for type A validation
- * [7] = active configuration bit
- * [6:0] = crc[6:0]
- *
- * for type B validation
- * [7] = active configuration bit
- * [6:3] = connection_type[3:0]
- * [2:0] = crc[2:0]
- */
- validation_byte |=
- ((validation_cfg >>
- CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE) & 1) << 7;
-
- if ((validation_cfg >>
- CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT) & 1)
- validation_byte |= ((conn_type & 0xF) << 3) | (crc & 0x7);
- else
- validation_byte |= crc & 0x7F;
-
- return validation_byte;
-}
-
-/* Calcualte and set validation bytes for session context */
-void qed_calc_session_ctx_validation(void *p_ctx_mem,
- u16 ctx_size, u8 ctx_type, u32 cid)
-{
- u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
-
- p_ctx = (u8 * const)p_ctx_mem;
- x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
- t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
- u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];
-
- memset(p_ctx, 0, ctx_size);
-
- *x_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 3, cid);
- *t_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 4, cid);
- *u_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 5, cid);
-}
-
-/* Calcualte and set validation bytes for task context */
-void qed_calc_task_ctx_validation(void *p_ctx_mem,
- u16 ctx_size, u8 ctx_type, u32 tid)
-{
- u8 *p_ctx, *region1_val_ptr;
-
- p_ctx = (u8 * const)p_ctx_mem;
- region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];
-
- memset(p_ctx, 0, ctx_size);
-
- *region1_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 1, tid);
-}
-
-/* Memset session context to 0 while preserving validation bytes */
-void qed_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
-{
- u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
- u8 x_val, t_val, u_val;
-
- p_ctx = (u8 * const)p_ctx_mem;
- x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
- t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
- u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];
-
- x_val = *x_val_ptr;
- t_val = *t_val_ptr;
- u_val = *u_val_ptr;
-
- memset(p_ctx, 0, ctx_size);
-
- *x_val_ptr = x_val;
- *t_val_ptr = t_val;
- *u_val_ptr = u_val;
-}
-
-/* Memset task context to 0 while preserving validation bytes */
-void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
-{
- u8 *p_ctx, *region1_val_ptr;
- u8 region1_val;
-
- p_ctx = (u8 * const)p_ctx_mem;
- region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];
-
- region1_val = *region1_val_ptr;
-
- memset(p_ctx, 0, ctx_size);
-
- *region1_val_ptr = region1_val;
-}
-
/* Enable and configure context validation */
void qed_enable_context_validation(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index f915c423fe70..886061d7351a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -454,7 +454,7 @@ int qed_fill_dev_info(struct qed_dev *cdev,
static void qed_free_cdev(struct qed_dev *cdev)
{
- kfree((void *)cdev);
+ kfree(cdev);
}
static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 26a714bfad4e..c7f497c36f66 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -3301,7 +3301,9 @@ int qed_mcp_bist_nvm_get_num_images(struct qed_hwfn *p_hwfn,
if (rc)
return rc;
- if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK))
+ if (((rsp & FW_MSG_CODE_MASK) == FW_MSG_CODE_UNSUPPORTED))
+ rc = -EOPNOTSUPP;
+ else if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK))
rc = -EINVAL;
return rc;
@@ -3356,6 +3358,7 @@ int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn)
p_ptt, &nvm_info.num_images);
if (rc == -EOPNOTSUPP) {
DP_INFO(p_hwfn, "DRV_MSG_CODE_BIST_TEST is not supported\n");
+ nvm_info.num_images = 0;
goto out;
} else if (rc || !nvm_info.num_images) {
DP_ERR(p_hwfn, "Failed getting number of images\n");
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c b/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c
index f55eed092f25..7d78f072b0a1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c
@@ -242,7 +242,7 @@ static int qed_mfw_get_tlv_group(u8 tlv_type, u8 *tlv_group)
}
/* Returns size of the data buffer or, -1 in case TLV data is not available. */
-static int
+static noinline_for_stack int
qed_mfw_get_gen_tlv_value(struct qed_drv_tlv_hdr *p_tlv,
struct qed_mfw_tlv_generic *p_drv_buf,
struct qed_tlv_parsed_buf *p_buf)
@@ -304,7 +304,7 @@ qed_mfw_get_gen_tlv_value(struct qed_drv_tlv_hdr *p_tlv,
return -1;
}
-static int
+static noinline_for_stack int
qed_mfw_get_eth_tlv_value(struct qed_drv_tlv_hdr *p_tlv,
struct qed_mfw_tlv_eth *p_drv_buf,
struct qed_tlv_parsed_buf *p_buf)
@@ -438,7 +438,7 @@ qed_mfw_get_tlv_time_value(struct qed_mfw_tlv_time *p_time,
return QED_MFW_TLV_TIME_SIZE;
}
-static int
+static noinline_for_stack int
qed_mfw_get_fcoe_tlv_value(struct qed_drv_tlv_hdr *p_tlv,
struct qed_mfw_tlv_fcoe *p_drv_buf,
struct qed_tlv_parsed_buf *p_buf)
@@ -1073,7 +1073,7 @@ qed_mfw_get_fcoe_tlv_value(struct qed_drv_tlv_hdr *p_tlv,
return -1;
}
-static int
+static noinline_for_stack int
qed_mfw_get_iscsi_tlv_value(struct qed_drv_tlv_hdr *p_tlv,
struct qed_mfw_tlv_iscsi *p_drv_buf,
struct qed_tlv_parsed_buf *p_buf)
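Note: each of these TLV fill helpers keeps sizeable scratch data on the stack and was previously eligible for inlining into the single request-processing caller, which adds their frames together. noinline_for_stack (which, to the best of my knowledge, simply expands to noinline in include/linux/compiler_types.h, the name documenting the intent) keeps each frame confined to its own call. A small illustrative sketch with invented names:

#include <linux/compiler_types.h>
#include <linux/minmax.h>
#include <linux/string.h>
#include <linux/types.h>

/* Without the attribute the compiler may inline this into its caller and
 * add the 256-byte scratch buffer to the caller's stack frame. */
static noinline_for_stack int checksum_block(const u8 *src, size_t len)
{
	u8 scratch[256];
	size_t i, n = min_t(size_t, len, sizeof(scratch));
	int sum = 0;

	memcpy(scratch, src, n);
	for (i = 0; i < n; i++)
		sum += scratch[i];
	return sum;
}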
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index fa167b1aa019..5222a035fd19 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -3033,7 +3033,7 @@ static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn,
u16 length;
int rc;
- /* Valiate PF can send such a request */
+ /* Validate PF can send such a request */
if (!vf->vport_instance) {
DP_VERBOSE(p_hwfn,
QED_MSG_IOV,
@@ -3312,7 +3312,7 @@ static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn,
goto out;
}
- /* Determine if the unicast filtering is acceptible by PF */
+ /* Determine if the unicast filtering is acceptable by PF */
if ((p_bulletin->valid_bitmap & BIT(VLAN_ADDR_FORCED)) &&
(params.type == QED_FILTER_VLAN ||
params.type == QED_FILTER_MAC_VLAN)) {
@@ -3729,7 +3729,7 @@ qed_iov_execute_vf_flr_cleanup(struct qed_hwfn *p_hwfn,
rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, p_vf);
if (rc) {
- DP_ERR(p_hwfn, "Failed to re-enable VF[%d] acces\n",
+ DP_ERR(p_hwfn, "Failed to re-enable VF[%d] access\n",
vfid);
return rc;
}
@@ -4480,7 +4480,7 @@ int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
/* Failure to acquire the ptt in 100g creates an odd error
- * where the first engine has already relased IOV.
+ * where the first engine has already released IOV.
*/
if (!ptt) {
DP_ERR(hwfn, "Failed to acquire ptt\n");
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 99df00c30b8c..b5d744d2586f 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -203,7 +203,7 @@ static struct pci_driver qede_pci_driver = {
};
static struct qed_eth_cb_ops qede_ll_ops = {
- {
+ .common = {
#ifdef CONFIG_RFS_ACCEL
.arfs_filter_op = qede_arfs_filter_op,
#endif
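Note: the qede change replaces a positional initializer of the embedded struct with a designated one (.common = { ... }), so the callback table no longer depends on 'common' being the first member of struct qed_eth_cb_ops. A minimal sketch of the difference, with invented types:

struct common_ops  { void (*link_update)(void *dev); };
struct eth_cb_ops  { struct common_ops common; void (*force_mac)(void *dev); };

static void my_link_update(void *dev) { }

/* positional: silently relies on 'common' being the first member */
static struct eth_cb_ops ops_positional = { { .link_update = my_link_update } };

/* designated: robust against member reordering and self-documenting */
static struct eth_cb_ops ops_designated = {
	.common = { .link_update = my_link_update },
};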
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index fc78bc959ded..aee4e63b4b82 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -3420,7 +3420,7 @@ static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset)
pci_disable_msi(qdev->pdev);
}
- del_timer_sync(&qdev->adapter_timer);
+ timer_delete_sync(&qdev->adapter_timer);
napi_disable(&qdev->napi);
@@ -3735,7 +3735,7 @@ static void ql_get_board_info(struct ql3_adapter *qdev)
static void ql3xxx_timer(struct timer_list *t)
{
- struct ql3_adapter *qdev = from_timer(qdev, t, adapter_timer);
+ struct ql3_adapter *qdev = timer_container_of(qdev, t, adapter_timer);
queue_delayed_work(qdev->workqueue, &qdev->link_state_work, 0);
}
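Note: qla3xxx is converted to the renamed timer API: timer_delete_sync() replaces del_timer_sync() and timer_container_of() replaces from_timer(); the semantics are unchanged. A minimal sketch of the pattern, with invented names:

#include <linux/timer.h>

struct my_adapter {
	struct timer_list adapter_timer;
};

static void my_timer_fn(struct timer_list *t)
{
	/* timer_container_of() is the renamed from_timer(): a container_of()
	 * from the timer_list member back to the enclosing structure. */
	struct my_adapter *adapter = timer_container_of(adapter, t, adapter_timer);

	(void)adapter;		/* schedule deferred work, etc. */
}

static void my_adapter_down(struct my_adapter *adapter)
{
	/* timer_delete_sync() is the renamed del_timer_sync(): deactivate the
	 * timer and wait for a concurrently running callback to finish. */
	timer_delete_sync(&adapter->adapter_timer);
}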
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index f9dd50152b1e..d57b976b9040 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -454,8 +454,10 @@ static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter,
num_vlans = sriov->num_allowed_vlans;
sriov->allowed_vlans = kcalloc(num_vlans, sizeof(u16), GFP_KERNEL);
- if (!sriov->allowed_vlans)
+ if (!sriov->allowed_vlans) {
+ qlcnic_sriov_free_vlans(adapter);
return -ENOMEM;
+ }
vlans = (u16 *)&cmd->rsp.arg[3];
for (i = 0; i < num_vlans; i++)
@@ -1482,8 +1484,11 @@ static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *adapter, u8 cmd_o
}
cmd_op = (cmd.rsp.arg[0] & 0xff);
- if (cmd.rsp.arg[0] >> 25 == 2)
- return 2;
+ if (cmd.rsp.arg[0] >> 25 == 2) {
+ ret = 2;
+ goto out;
+ }
+
if (cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT)
set_bit(QLC_BC_VF_STATE, &vf->state);
else
@@ -2167,8 +2172,10 @@ int qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *adapter)
vf = &sriov->vf_info[i];
vf->sriov_vlans = kcalloc(sriov->num_allowed_vlans,
sizeof(*vf->sriov_vlans), GFP_KERNEL);
- if (!vf->sriov_vlans)
+ if (!vf->sriov_vlans) {
+ qlcnic_sriov_free_vlans(adapter);
return -ENOMEM;
+ }
}
return 0;
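Note: both qlcnic_sriov_common.c hunks fix resource leaks on error paths. If a kcalloc() fails partway through the per-VF loop, the arrays already allocated for earlier VFs are now released via qlcnic_sriov_free_vlans() before returning -ENOMEM, and the early "return 2" in the channel-config path now goes through the common out: label so the mailbox command resources are freed. A minimal sketch of the allocate-all-or-unwind idiom (the driver delegates the unwinding to its existing free helper rather than an inline loop):

#include <linux/slab.h>
#include <linux/types.h>

struct vf_info {
	u16 *sriov_vlans;
};

static int alloc_all_vf_vlans(struct vf_info *vfs, int num_vfs, int num_vlans)
{
	int i;

	for (i = 0; i < num_vfs; i++) {
		vfs[i].sriov_vlans = kcalloc(num_vlans, sizeof(u16), GFP_KERNEL);
		if (!vfs[i].sriov_vlans)
			goto unwind;
	}
	return 0;

unwind:
	while (--i >= 0) {
		kfree(vfs[i].sriov_vlans);
		vfs[i].sriov_vlans = NULL;
	}
	return -ENOMEM;
}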
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index 74125188beb8..c0f20464fd1e 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -264,7 +264,7 @@ static int qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter,
}
static ssize_t qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t offset, size_t size)
{
struct device *dev = kobj_to_dev(kobj);
@@ -281,7 +281,7 @@ static ssize_t qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj,
}
static ssize_t qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t offset, size_t size)
{
struct device *dev = kobj_to_dev(kobj);
@@ -310,7 +310,7 @@ static int qlcnic_sysfs_validate_mem(struct qlcnic_adapter *adapter,
}
static ssize_t qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t offset, size_t size)
{
struct device *dev = kobj_to_dev(kobj);
@@ -332,7 +332,7 @@ static ssize_t qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj,
}
static ssize_t qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t offset, size_t size)
{
struct device *dev = kobj_to_dev(kobj);
@@ -396,7 +396,7 @@ static int validate_pm_config(struct qlcnic_adapter *adapter,
static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -446,7 +446,7 @@ static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp,
static ssize_t qlcnic_sysfs_read_pm_config(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -539,7 +539,7 @@ static int validate_esw_config(struct qlcnic_adapter *adapter,
static ssize_t qlcnic_sysfs_write_esw_config(struct file *file,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -623,7 +623,7 @@ out:
static ssize_t qlcnic_sysfs_read_esw_config(struct file *file,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -675,7 +675,7 @@ static int validate_npar_config(struct qlcnic_adapter *adapter,
static ssize_t qlcnic_sysfs_write_npar_config(struct file *file,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -722,7 +722,7 @@ static ssize_t qlcnic_sysfs_write_npar_config(struct file *file,
static ssize_t qlcnic_sysfs_read_npar_config(struct file *file,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -769,7 +769,7 @@ static ssize_t qlcnic_sysfs_read_npar_config(struct file *file,
static ssize_t qlcnic_sysfs_get_port_stats(struct file *file,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -804,7 +804,7 @@ static ssize_t qlcnic_sysfs_get_port_stats(struct file *file,
static ssize_t qlcnic_sysfs_get_esw_stats(struct file *file,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -839,7 +839,7 @@ static ssize_t qlcnic_sysfs_get_esw_stats(struct file *file,
static ssize_t qlcnic_sysfs_clear_esw_stats(struct file *file,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -868,7 +868,7 @@ static ssize_t qlcnic_sysfs_clear_esw_stats(struct file *file,
static ssize_t qlcnic_sysfs_clear_port_stats(struct file *file,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -898,7 +898,7 @@ static ssize_t qlcnic_sysfs_clear_port_stats(struct file *file,
static ssize_t qlcnic_sysfs_read_pci_config(struct file *file,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -938,7 +938,7 @@ static ssize_t qlcnic_sysfs_read_pci_config(struct file *file,
static ssize_t qlcnic_83xx_sysfs_flash_read_handler(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -1115,7 +1115,7 @@ static int qlcnic_83xx_sysfs_flash_write(struct qlcnic_adapter *adapter,
static ssize_t qlcnic_83xx_sysfs_flash_write_handler(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -1195,64 +1195,63 @@ static const struct device_attribute dev_attr_beacon = {
static const struct bin_attribute bin_attr_crb = {
.attr = { .name = "crb", .mode = 0644 },
.size = 0,
- .read = qlcnic_sysfs_read_crb,
- .write = qlcnic_sysfs_write_crb,
+ .read_new = qlcnic_sysfs_read_crb,
+ .write_new = qlcnic_sysfs_write_crb,
};
static const struct bin_attribute bin_attr_mem = {
.attr = { .name = "mem", .mode = 0644 },
.size = 0,
- .read = qlcnic_sysfs_read_mem,
- .write = qlcnic_sysfs_write_mem,
+ .read_new = qlcnic_sysfs_read_mem,
+ .write_new = qlcnic_sysfs_write_mem,
};
static const struct bin_attribute bin_attr_npar_config = {
.attr = { .name = "npar_config", .mode = 0644 },
.size = 0,
- .read = qlcnic_sysfs_read_npar_config,
- .write = qlcnic_sysfs_write_npar_config,
+ .read_new = qlcnic_sysfs_read_npar_config,
+ .write_new = qlcnic_sysfs_write_npar_config,
};
static const struct bin_attribute bin_attr_pci_config = {
.attr = { .name = "pci_config", .mode = 0644 },
.size = 0,
- .read = qlcnic_sysfs_read_pci_config,
- .write = NULL,
+ .read_new = qlcnic_sysfs_read_pci_config,
};
static const struct bin_attribute bin_attr_port_stats = {
.attr = { .name = "port_stats", .mode = 0644 },
.size = 0,
- .read = qlcnic_sysfs_get_port_stats,
- .write = qlcnic_sysfs_clear_port_stats,
+ .read_new = qlcnic_sysfs_get_port_stats,
+ .write_new = qlcnic_sysfs_clear_port_stats,
};
static const struct bin_attribute bin_attr_esw_stats = {
.attr = { .name = "esw_stats", .mode = 0644 },
.size = 0,
- .read = qlcnic_sysfs_get_esw_stats,
- .write = qlcnic_sysfs_clear_esw_stats,
+ .read_new = qlcnic_sysfs_get_esw_stats,
+ .write_new = qlcnic_sysfs_clear_esw_stats,
};
static const struct bin_attribute bin_attr_esw_config = {
.attr = { .name = "esw_config", .mode = 0644 },
.size = 0,
- .read = qlcnic_sysfs_read_esw_config,
- .write = qlcnic_sysfs_write_esw_config,
+ .read_new = qlcnic_sysfs_read_esw_config,
+ .write_new = qlcnic_sysfs_write_esw_config,
};
static const struct bin_attribute bin_attr_pm_config = {
.attr = { .name = "pm_config", .mode = 0644 },
.size = 0,
- .read = qlcnic_sysfs_read_pm_config,
- .write = qlcnic_sysfs_write_pm_config,
+ .read_new = qlcnic_sysfs_read_pm_config,
+ .write_new = qlcnic_sysfs_write_pm_config,
};
static const struct bin_attribute bin_attr_flash = {
.attr = { .name = "flash", .mode = 0644 },
.size = 0,
- .read = qlcnic_83xx_sysfs_flash_read_handler,
- .write = qlcnic_83xx_sysfs_flash_write_handler,
+ .read_new = qlcnic_83xx_sysfs_flash_read_handler,
+ .write_new = qlcnic_83xx_sysfs_flash_write_handler,
};
#ifdef CONFIG_QLCNIC_HWMON
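Note: the qlcnic_sysfs.c changes are part of the sysfs binary-attribute constification: the callbacks now take a const struct bin_attribute *, and during the transition struct bin_attribute exposes them through the read_new/write_new members so drivers can convert one attribute at a time (pci_config also drops its explicit NULL .write). A hedged sketch of a converted attribute, with invented names:

static ssize_t blob_read(struct file *filp, struct kobject *kobj,
			 const struct bin_attribute *attr, char *buf,
			 loff_t off, size_t count)
{
	/* fill at most 'count' bytes of device state into 'buf' */
	return 0;
}

static const struct bin_attribute bin_attr_blob = {
	.attr     = { .name = "blob", .mode = 0444 },
	.size     = 0,			/* size not known in advance */
	.read_new = blob_read,		/* const-qualified callback slot */
};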
diff --git a/drivers/net/ethernet/qualcomm/Kconfig b/drivers/net/ethernet/qualcomm/Kconfig
index 9210ff360fdc..a4434eb38950 100644
--- a/drivers/net/ethernet/qualcomm/Kconfig
+++ b/drivers/net/ethernet/qualcomm/Kconfig
@@ -52,7 +52,6 @@ config QCOM_EMAC
depends on HAS_DMA && HAS_IOMEM
select CRC32
select PHYLIB
- select MDIO_DEVRES
help
This driver supports the Qualcomm Technologies, Inc. Gigabit
Ethernet Media Access Controller (EMAC). The controller
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index ef9c02b000e4..38a779f4b866 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -54,7 +54,7 @@ MODULE_PARM_DESC(qcaspi_burst_len, "Number of data bytes per burst. Use 1-5000."
#define QCASPI_PLUGGABLE_MIN 0
#define QCASPI_PLUGGABLE_MAX 1
-static int qcaspi_pluggable = QCASPI_PLUGGABLE_MIN;
+static int qcaspi_pluggable = QCASPI_PLUGGABLE_MAX;
module_param(qcaspi_pluggable, int, 0);
MODULE_PARM_DESC(qcaspi_pluggable, "Pluggable SPI connection (yes/no).");
@@ -818,7 +818,6 @@ qcaspi_netdev_init(struct net_device *dev)
dev->mtu = QCAFRM_MAX_MTU;
dev->type = ARPHRD_ETHER;
- qca->clkspeed = qcaspi_clkspeed;
qca->burst_len = qcaspi_burst_len;
qca->spi_thread = NULL;
qca->buffer_size = (QCAFRM_MAX_MTU + VLAN_ETH_HLEN + QCAFRM_HEADER_LEN +
@@ -909,17 +908,15 @@ qca_spi_probe(struct spi_device *spi)
legacy_mode = of_property_read_bool(spi->dev.of_node,
"qca,legacy-mode");
- if (qcaspi_clkspeed == 0) {
- if (spi->max_speed_hz)
- qcaspi_clkspeed = spi->max_speed_hz;
- else
- qcaspi_clkspeed = QCASPI_CLK_SPEED;
- }
+ if (qcaspi_clkspeed)
+ spi->max_speed_hz = qcaspi_clkspeed;
+ else if (!spi->max_speed_hz)
+ spi->max_speed_hz = QCASPI_CLK_SPEED;
- if ((qcaspi_clkspeed < QCASPI_CLK_SPEED_MIN) ||
- (qcaspi_clkspeed > QCASPI_CLK_SPEED_MAX)) {
- dev_err(&spi->dev, "Invalid clkspeed: %d\n",
- qcaspi_clkspeed);
+ if (spi->max_speed_hz < QCASPI_CLK_SPEED_MIN ||
+ spi->max_speed_hz > QCASPI_CLK_SPEED_MAX) {
+ dev_err(&spi->dev, "Invalid clkspeed: %u\n",
+ spi->max_speed_hz);
return -EINVAL;
}
@@ -944,14 +941,13 @@ qca_spi_probe(struct spi_device *spi)
return -EINVAL;
}
- dev_info(&spi->dev, "ver=%s, clkspeed=%d, burst_len=%d, pluggable=%d\n",
+ dev_info(&spi->dev, "ver=%s, clkspeed=%u, burst_len=%d, pluggable=%d\n",
QCASPI_DRV_VERSION,
- qcaspi_clkspeed,
+ spi->max_speed_hz,
qcaspi_burst_len,
qcaspi_pluggable);
spi->mode = SPI_MODE_3;
- spi->max_speed_hz = qcaspi_clkspeed;
if (spi_setup(spi) < 0) {
dev_err(&spi->dev, "Unable to setup SPI device\n");
return -EFAULT;
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.h b/drivers/net/ethernet/qualcomm/qca_spi.h
index 7ba5c9e2f61c..90b290f94c27 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.h
+++ b/drivers/net/ethernet/qualcomm/qca_spi.h
@@ -89,7 +89,6 @@ struct qcaspi {
#endif
/* user configurable options */
- u32 clkspeed;
u8 legacy_mode;
u16 burst_len;
};
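Note: the qca_spi changes drop the driver's private clkspeed copy. probe() now folds the qcaspi_clkspeed module parameter (or, failing that, the device-tree/board value and finally QCASPI_CLK_SPEED) directly into spi->max_speed_hz before range checking and spi_setup(), so the SPI core, the validation, and the probe log all see the same effective clock; separately, the qcaspi_pluggable default flips to enabled. Condensed sketch of the selection order, mirroring the hunk above:

if (qcaspi_clkspeed)				/* module parameter wins */
	spi->max_speed_hz = qcaspi_clkspeed;
else if (!spi->max_speed_hz)			/* otherwise keep the DT/board value */
	spi->max_speed_hz = QCASPI_CLK_SPEED;	/* driver default as last resort */

if (spi->max_speed_hz < QCASPI_CLK_SPEED_MIN ||
    spi->max_speed_hz > QCASPI_CLK_SPEED_MAX)
	return -EINVAL;				/* reject out-of-range clocks */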
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
index f3bea196a8f9..ba8763cac9d9 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
@@ -117,11 +117,14 @@ static void rmnet_unregister_bridge(struct rmnet_port *port)
rmnet_unregister_real_device(bridge_dev);
}
-static int rmnet_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int rmnet_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
+ struct net *link_net = rtnl_newlink_link_net(params);
u32 data_format = RMNET_FLAGS_INGRESS_DEAGGREGATION;
+ struct nlattr **data = params->data;
+ struct nlattr **tb = params->tb;
struct net_device *real_dev;
int mode = RMNET_EPMODE_VND;
struct rmnet_endpoint *ep;
@@ -134,7 +137,7 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
return -EINVAL;
}
- real_dev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
+ real_dev = __dev_get_by_index(link_net, nla_get_u32(tb[IFLA_LINK]));
if (!real_dev) {
NL_SET_ERR_MSG_MOD(extack, "link does not exist");
return -ENODEV;
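Note: rmnet is ported to the params-based rtnl newlink signature: the callback receives struct rtnl_newlink_params, reaches tb/data through it, and resolves the link namespace with rtnl_newlink_link_net() instead of taking src_net directly. A hedged sketch of a handler under the new signature (not standalone; the helper and field names follow the hunk above):

static int my_newlink(struct net_device *dev,
		      struct rtnl_newlink_params *params,
		      struct netlink_ext_ack *extack)
{
	struct net *link_net = rtnl_newlink_link_net(params);
	struct nlattr **data = params->data;
	struct nlattr **tb = params->tb;
	struct net_device *real_dev;

	if (!tb[IFLA_LINK])
		return -EINVAL;

	real_dev = __dev_get_by_index(link_net, nla_get_u32(tb[IFLA_LINK]));
	if (!real_dev)
		return -ENODEV;

	/* bind dev to real_dev using the attributes in 'data' */
	return 0;
}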
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
index a5e3d1a88305..8b4640c5d61e 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
@@ -686,8 +686,8 @@ void rmnet_map_update_ul_agg_config(struct rmnet_port *port, u32 size,
void rmnet_map_tx_aggregate_init(struct rmnet_port *port)
{
- hrtimer_init(&port->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- port->hrtimer.function = rmnet_map_flush_tx_packet_queue;
+ hrtimer_setup(&port->hrtimer, rmnet_map_flush_tx_packet_queue, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
spin_lock_init(&port->agg_lock);
rmnet_map_update_ul_agg_config(port, 4096, 1, 800);
INIT_WORK(&port->agg_wq, rmnet_map_flush_tx_packet_work);
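Note: hrtimer_setup() merges hrtimer_init() and the callback assignment into a single call, so the timer can never be observed initialized but without a function pointer. A minimal sketch of the conversion, with invented names:

#include <linux/hrtimer.h>

/* Before:  hrtimer_init(&t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *          t.function = my_cb;
 * After:   hrtimer_setup(&t, my_cb, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 */
static enum hrtimer_restart my_cb(struct hrtimer *t)
{
	return HRTIMER_NORESTART;
}

static void my_timer_init(struct hrtimer *t)
{
	hrtimer_setup(t, my_cb, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
}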
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index 9ce0e8a64ba8..a73dcaffa8c5 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -1684,6 +1684,7 @@ static void rtl8139_tx_timeout_task (struct work_struct *work)
if (tmp8 & CmdTxEnb)
RTL_W8 (ChipCmd, CmdRxEnb);
+ netdev_lock(dev);
spin_lock_bh(&tp->rx_lock);
/* Disable interrupts by clearing the interrupt mask. */
RTL_W16 (IntrMask, 0x0000);
@@ -1694,11 +1695,12 @@ static void rtl8139_tx_timeout_task (struct work_struct *work)
spin_unlock_irq(&tp->lock);
/* ...and finally, reset everything */
- napi_enable(&tp->napi);
+ napi_enable_locked(&tp->napi);
rtl8139_hw_start(dev);
netif_wake_queue(dev);
spin_unlock_bh(&tp->rx_lock);
+ netdev_unlock(dev);
}
static void rtl8139_tx_timeout(struct net_device *dev, unsigned int txqueue)
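Note: the 8139too tx-timeout worker now brackets the reset sequence with the netdev instance lock because napi_enable_locked() is the variant that expects that lock to already be held (plain napi_enable() takes it internally). Condensed sketch of the resulting lock nesting, mirroring the hunk above (not standalone):

netdev_lock(dev);
spin_lock_bh(&tp->rx_lock);

/* mask interrupts, stop and re-start the chip ... */

napi_enable_locked(&tp->napi);
rtl8139_hw_start(dev);
netif_wake_queue(dev);

spin_unlock_bh(&tp->rx_lock);
netdev_unlock(dev);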
diff --git a/drivers/net/ethernet/realtek/Kconfig b/drivers/net/ethernet/realtek/Kconfig
index 8a8ea51c639e..fe136f61586f 100644
--- a/drivers/net/ethernet/realtek/Kconfig
+++ b/drivers/net/ethernet/realtek/Kconfig
@@ -114,7 +114,8 @@ config R8169
will be called r8169. This is recommended.
config R8169_LEDS
- def_bool R8169 && LEDS_TRIGGER_NETDEV
+ bool "Support for controlling the NIC LEDs"
+ depends on R8169 && LEDS_TRIGGER_NETDEV
depends on !(R8169=y && LEDS_CLASS=m)
help
Optional support for controlling the NIC LED's with the netdev
diff --git a/drivers/net/ethernet/realtek/atp.c b/drivers/net/ethernet/realtek/atp.c
index 6cbcb3164367..0d65434982a2 100644
--- a/drivers/net/ethernet/realtek/atp.c
+++ b/drivers/net/ethernet/realtek/atp.c
@@ -717,7 +717,7 @@ static irqreturn_t atp_interrupt(int irq, void *dev_instance)
problem where the adapter forgets its ethernet address. */
static void atp_timed_checker(struct timer_list *t)
{
- struct net_local *lp = from_timer(lp, t, timer);
+ struct net_local *lp = timer_container_of(lp, t, timer);
struct net_device *dev = lp->dev;
long ioaddr = dev->base_addr;
int tickssofar = jiffies - lp->last_rx_time;
@@ -832,7 +832,7 @@ net_close(struct net_device *dev)
netif_stop_queue(dev);
- del_timer_sync(&lp->timer);
+ timer_delete_sync(&lp->timer);
/* Flush the Tx and disable Rx here. */
lp->addr_mode = CMR2h_OFF;
diff --git a/drivers/net/ethernet/realtek/r8169.h b/drivers/net/ethernet/realtek/r8169.h
index be4c9622618d..2c1a0c21af8d 100644
--- a/drivers/net/ethernet/realtek/r8169.h
+++ b/drivers/net/ethernet/realtek/r8169.h
@@ -23,7 +23,7 @@ enum mac_version {
RTL_GIGA_MAC_VER_08,
RTL_GIGA_MAC_VER_09,
RTL_GIGA_MAC_VER_10,
- RTL_GIGA_MAC_VER_11,
+ /* support for RTL_GIGA_MAC_VER_11 has been removed */
/* RTL_GIGA_MAC_VER_12 was handled the same as VER_17 */
/* RTL_GIGA_MAC_VER_13 was merged with VER_10 */
RTL_GIGA_MAC_VER_14,
@@ -64,14 +64,15 @@ enum mac_version {
/* support for RTL_GIGA_MAC_VER_50 has been removed */
RTL_GIGA_MAC_VER_51,
RTL_GIGA_MAC_VER_52,
- RTL_GIGA_MAC_VER_53,
/* support for RTL_GIGA_MAC_VER_60 has been removed */
RTL_GIGA_MAC_VER_61,
RTL_GIGA_MAC_VER_63,
RTL_GIGA_MAC_VER_64,
- RTL_GIGA_MAC_VER_65,
RTL_GIGA_MAC_VER_66,
- RTL_GIGA_MAC_NONE
+ RTL_GIGA_MAC_VER_70,
+ RTL_GIGA_MAC_VER_80,
+ RTL_GIGA_MAC_NONE,
+ RTL_GIGA_MAC_VER_LAST = RTL_GIGA_MAC_NONE - 1
};
struct rtl8169_private;
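Note: the mac_version enum is reshaped in several ways: RTL_GIGA_MAC_VER_11 (early RTL8168b) and VER_53 (folded into the VER_52 handling) are gone, the RTL8126A entry becomes VER_70, VER_80 is added for the RTL8127A, and RTL_GIGA_MAC_VER_LAST aliases the highest valid version so the many open-ended switch ranges in r8169_main.c no longer need editing whenever a new chip is appended. Illustrative use of the new sentinel, as seen throughout the main-driver hunks below:

switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_LAST:
	/* covers the 8125/8126/8127 generations, including ones added later */
	break;
default:
	break;
}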
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 739707a7b40f..43170500d566 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -16,7 +16,6 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
-#include <linux/hwmon.h>
#include <linux/phy.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
@@ -57,8 +56,11 @@
#define FIRMWARE_8125A_3 "rtl_nic/rtl8125a-3.fw"
#define FIRMWARE_8125B_2 "rtl_nic/rtl8125b-2.fw"
#define FIRMWARE_8125D_1 "rtl_nic/rtl8125d-1.fw"
+#define FIRMWARE_8125D_2 "rtl_nic/rtl8125d-2.fw"
+#define FIRMWARE_8125BP_2 "rtl_nic/rtl8125bp-2.fw"
#define FIRMWARE_8126A_2 "rtl_nic/rtl8126a-2.fw"
#define FIRMWARE_8126A_3 "rtl_nic/rtl8126a-3.fw"
+#define FIRMWARE_8127A_1 "rtl_nic/rtl8127a-1.fw"
#define TX_DMA_BURST 7 /* Maximum PCI burst, '7' is unlimited */
#define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */
@@ -88,61 +90,119 @@
#define JUMBO_6K (6 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN)
#define JUMBO_7K (7 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN)
#define JUMBO_9K (9 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN)
+#define JUMBO_16K (SZ_16K - VLAN_ETH_HLEN - ETH_FCS_LEN)
-static const struct {
+static const struct rtl_chip_info {
+ u16 mask;
+ u16 val;
+ enum mac_version mac_version;
const char *name;
const char *fw_name;
} rtl_chip_infos[] = {
- /* PCI devices. */
- [RTL_GIGA_MAC_VER_02] = {"RTL8169s" },
- [RTL_GIGA_MAC_VER_03] = {"RTL8110s" },
- [RTL_GIGA_MAC_VER_04] = {"RTL8169sb/8110sb" },
- [RTL_GIGA_MAC_VER_05] = {"RTL8169sc/8110sc" },
- [RTL_GIGA_MAC_VER_06] = {"RTL8169sc/8110sc" },
- /* PCI-E devices. */
- [RTL_GIGA_MAC_VER_07] = {"RTL8102e" },
- [RTL_GIGA_MAC_VER_08] = {"RTL8102e" },
- [RTL_GIGA_MAC_VER_09] = {"RTL8102e/RTL8103e" },
- [RTL_GIGA_MAC_VER_10] = {"RTL8101e/RTL8100e" },
- [RTL_GIGA_MAC_VER_11] = {"RTL8168b/8111b" },
- [RTL_GIGA_MAC_VER_14] = {"RTL8401" },
- [RTL_GIGA_MAC_VER_17] = {"RTL8168b/8111b" },
- [RTL_GIGA_MAC_VER_18] = {"RTL8168cp/8111cp" },
- [RTL_GIGA_MAC_VER_19] = {"RTL8168c/8111c" },
- [RTL_GIGA_MAC_VER_20] = {"RTL8168c/8111c" },
- [RTL_GIGA_MAC_VER_21] = {"RTL8168c/8111c" },
- [RTL_GIGA_MAC_VER_22] = {"RTL8168c/8111c" },
- [RTL_GIGA_MAC_VER_23] = {"RTL8168cp/8111cp" },
- [RTL_GIGA_MAC_VER_24] = {"RTL8168cp/8111cp" },
- [RTL_GIGA_MAC_VER_25] = {"RTL8168d/8111d", FIRMWARE_8168D_1},
- [RTL_GIGA_MAC_VER_26] = {"RTL8168d/8111d", FIRMWARE_8168D_2},
- [RTL_GIGA_MAC_VER_28] = {"RTL8168dp/8111dp" },
- [RTL_GIGA_MAC_VER_29] = {"RTL8105e", FIRMWARE_8105E_1},
- [RTL_GIGA_MAC_VER_30] = {"RTL8105e", FIRMWARE_8105E_1},
- [RTL_GIGA_MAC_VER_31] = {"RTL8168dp/8111dp" },
- [RTL_GIGA_MAC_VER_32] = {"RTL8168e/8111e", FIRMWARE_8168E_1},
- [RTL_GIGA_MAC_VER_33] = {"RTL8168e/8111e", FIRMWARE_8168E_2},
- [RTL_GIGA_MAC_VER_34] = {"RTL8168evl/8111evl", FIRMWARE_8168E_3},
- [RTL_GIGA_MAC_VER_35] = {"RTL8168f/8111f", FIRMWARE_8168F_1},
- [RTL_GIGA_MAC_VER_36] = {"RTL8168f/8111f", FIRMWARE_8168F_2},
- [RTL_GIGA_MAC_VER_37] = {"RTL8402", FIRMWARE_8402_1 },
- [RTL_GIGA_MAC_VER_38] = {"RTL8411", FIRMWARE_8411_1 },
- [RTL_GIGA_MAC_VER_39] = {"RTL8106e", FIRMWARE_8106E_1},
- [RTL_GIGA_MAC_VER_40] = {"RTL8168g/8111g", FIRMWARE_8168G_2},
- [RTL_GIGA_MAC_VER_42] = {"RTL8168gu/8111gu", FIRMWARE_8168G_3},
- [RTL_GIGA_MAC_VER_43] = {"RTL8106eus", FIRMWARE_8106E_2},
- [RTL_GIGA_MAC_VER_44] = {"RTL8411b", FIRMWARE_8411_2 },
- [RTL_GIGA_MAC_VER_46] = {"RTL8168h/8111h", FIRMWARE_8168H_2},
- [RTL_GIGA_MAC_VER_48] = {"RTL8107e", FIRMWARE_8107E_2},
- [RTL_GIGA_MAC_VER_51] = {"RTL8168ep/8111ep" },
- [RTL_GIGA_MAC_VER_52] = {"RTL8168fp/RTL8117", FIRMWARE_8168FP_3},
- [RTL_GIGA_MAC_VER_53] = {"RTL8168fp/RTL8117", },
- [RTL_GIGA_MAC_VER_61] = {"RTL8125A", FIRMWARE_8125A_3},
- /* reserve 62 for CFG_METHOD_4 in the vendor driver */
- [RTL_GIGA_MAC_VER_63] = {"RTL8125B", FIRMWARE_8125B_2},
- [RTL_GIGA_MAC_VER_64] = {"RTL8125D", FIRMWARE_8125D_1},
- [RTL_GIGA_MAC_VER_65] = {"RTL8126A", FIRMWARE_8126A_2},
- [RTL_GIGA_MAC_VER_66] = {"RTL8126A", FIRMWARE_8126A_3},
+ /* 8127A family. */
+ { 0x7cf, 0x6c9, RTL_GIGA_MAC_VER_80, "RTL8127A", FIRMWARE_8127A_1 },
+
+ /* 8126A family. */
+ { 0x7cf, 0x64a, RTL_GIGA_MAC_VER_70, "RTL8126A", FIRMWARE_8126A_3 },
+ { 0x7cf, 0x649, RTL_GIGA_MAC_VER_70, "RTL8126A", FIRMWARE_8126A_2 },
+
+ /* 8125BP family. */
+ { 0x7cf, 0x681, RTL_GIGA_MAC_VER_66, "RTL8125BP", FIRMWARE_8125BP_2 },
+
+ /* 8125D family. */
+ { 0x7cf, 0x689, RTL_GIGA_MAC_VER_64, "RTL8125D", FIRMWARE_8125D_2 },
+ { 0x7cf, 0x688, RTL_GIGA_MAC_VER_64, "RTL8125D", FIRMWARE_8125D_1 },
+
+ /* 8125B family. */
+ { 0x7cf, 0x641, RTL_GIGA_MAC_VER_63, "RTL8125B", FIRMWARE_8125B_2 },
+
+ /* 8125A family. */
+ { 0x7cf, 0x609, RTL_GIGA_MAC_VER_61, "RTL8125A", FIRMWARE_8125A_3 },
+
+ /* RTL8117 */
+ { 0x7cf, 0x54b, RTL_GIGA_MAC_VER_52, "RTL8168fp/RTL8117" },
+ { 0x7cf, 0x54a, RTL_GIGA_MAC_VER_52, "RTL8168fp/RTL8117",
+ FIRMWARE_8168FP_3 },
+
+ /* 8168EP family. */
+ { 0x7cf, 0x502, RTL_GIGA_MAC_VER_51, "RTL8168ep/8111ep" },
+
+ /* 8168H family. */
+ { 0x7cf, 0x541, RTL_GIGA_MAC_VER_46, "RTL8168h/8111h",
+ FIRMWARE_8168H_2 },
+ /* Realtek calls it RTL8168M, but it's handled like RTL8168H */
+ { 0x7cf, 0x6c0, RTL_GIGA_MAC_VER_46, "RTL8168M", FIRMWARE_8168H_2 },
+
+ /* 8168G family. */
+ { 0x7cf, 0x5c8, RTL_GIGA_MAC_VER_44, "RTL8411b", FIRMWARE_8411_2 },
+ { 0x7cf, 0x509, RTL_GIGA_MAC_VER_42, "RTL8168gu/8111gu",
+ FIRMWARE_8168G_3 },
+ { 0x7cf, 0x4c0, RTL_GIGA_MAC_VER_40, "RTL8168g/8111g",
+ FIRMWARE_8168G_2 },
+
+ /* 8168F family. */
+ { 0x7c8, 0x488, RTL_GIGA_MAC_VER_38, "RTL8411", FIRMWARE_8411_1 },
+ { 0x7cf, 0x481, RTL_GIGA_MAC_VER_36, "RTL8168f/8111f",
+ FIRMWARE_8168F_2 },
+ { 0x7cf, 0x480, RTL_GIGA_MAC_VER_35, "RTL8168f/8111f",
+ FIRMWARE_8168F_1 },
+
+ /* 8168E family. */
+ { 0x7c8, 0x2c8, RTL_GIGA_MAC_VER_34, "RTL8168evl/8111evl",
+ FIRMWARE_8168E_3 },
+ { 0x7cf, 0x2c1, RTL_GIGA_MAC_VER_32, "RTL8168e/8111e",
+ FIRMWARE_8168E_1 },
+ { 0x7c8, 0x2c0, RTL_GIGA_MAC_VER_33, "RTL8168e/8111e",
+ FIRMWARE_8168E_2 },
+
+ /* 8168D family. */
+ { 0x7cf, 0x281, RTL_GIGA_MAC_VER_25, "RTL8168d/8111d",
+ FIRMWARE_8168D_1 },
+ { 0x7c8, 0x280, RTL_GIGA_MAC_VER_26, "RTL8168d/8111d",
+ FIRMWARE_8168D_2 },
+
+ /* 8168DP family. */
+ { 0x7cf, 0x28a, RTL_GIGA_MAC_VER_28, "RTL8168dp/8111dp" },
+ { 0x7cf, 0x28b, RTL_GIGA_MAC_VER_31, "RTL8168dp/8111dp" },
+
+ /* 8168C family. */
+ { 0x7cf, 0x3c9, RTL_GIGA_MAC_VER_23, "RTL8168cp/8111cp" },
+ { 0x7cf, 0x3c8, RTL_GIGA_MAC_VER_18, "RTL8168cp/8111cp" },
+ { 0x7c8, 0x3c8, RTL_GIGA_MAC_VER_24, "RTL8168cp/8111cp" },
+ { 0x7cf, 0x3c0, RTL_GIGA_MAC_VER_19, "RTL8168c/8111c" },
+ { 0x7cf, 0x3c2, RTL_GIGA_MAC_VER_20, "RTL8168c/8111c" },
+ { 0x7cf, 0x3c3, RTL_GIGA_MAC_VER_21, "RTL8168c/8111c" },
+ { 0x7c8, 0x3c0, RTL_GIGA_MAC_VER_22, "RTL8168c/8111c" },
+
+ /* 8168B family. */
+ { 0x7c8, 0x380, RTL_GIGA_MAC_VER_17, "RTL8168b/8111b" },
+ /* This one is very old and rare, support has been removed.
+ * { 0x7c8, 0x300, RTL_GIGA_MAC_VER_11, "RTL8168b/8111b" },
+ */
+
+ /* 8101 family. */
+ { 0x7c8, 0x448, RTL_GIGA_MAC_VER_39, "RTL8106e", FIRMWARE_8106E_1 },
+ { 0x7c8, 0x440, RTL_GIGA_MAC_VER_37, "RTL8402", FIRMWARE_8402_1 },
+ { 0x7cf, 0x409, RTL_GIGA_MAC_VER_29, "RTL8105e", FIRMWARE_8105E_1 },
+ { 0x7c8, 0x408, RTL_GIGA_MAC_VER_30, "RTL8105e", FIRMWARE_8105E_1 },
+ { 0x7cf, 0x349, RTL_GIGA_MAC_VER_08, "RTL8102e" },
+ { 0x7cf, 0x249, RTL_GIGA_MAC_VER_08, "RTL8102e" },
+ { 0x7cf, 0x348, RTL_GIGA_MAC_VER_07, "RTL8102e" },
+ { 0x7cf, 0x248, RTL_GIGA_MAC_VER_07, "RTL8102e" },
+ { 0x7cf, 0x240, RTL_GIGA_MAC_VER_14, "RTL8401" },
+ { 0x7c8, 0x348, RTL_GIGA_MAC_VER_09, "RTL8102e/RTL8103e" },
+ { 0x7c8, 0x248, RTL_GIGA_MAC_VER_09, "RTL8102e/RTL8103e" },
+ { 0x7c8, 0x340, RTL_GIGA_MAC_VER_10, "RTL8101e/RTL8100e" },
+
+ /* 8110 family. */
+ { 0xfc8, 0x980, RTL_GIGA_MAC_VER_06, "RTL8169sc/8110sc" },
+ { 0xfc8, 0x180, RTL_GIGA_MAC_VER_05, "RTL8169sc/8110sc" },
+ { 0xfc8, 0x100, RTL_GIGA_MAC_VER_04, "RTL8169sb/8110sb" },
+ { 0xfc8, 0x040, RTL_GIGA_MAC_VER_03, "RTL8110s" },
+ { 0xfc8, 0x008, RTL_GIGA_MAC_VER_02, "RTL8169s" },
+
+ /* Catch-all */
+ { 0x000, 0x000, RTL_GIGA_MAC_NONE }
};
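Note: with this rework the XID-matching table that used to live inside rtl8169_get_mac_version() and the separate name/firmware table are merged into the single rtl_chip_infos[] above. Each entry carries the (mask, val) pair matched against the chip's XID plus the human-readable name and optional firmware file, and the all-zero catch-all entry guarantees the lookup terminates. The detection walk, shown further down in this patch, reduces to:

const struct rtl_chip_info *p = rtl_chip_infos;

while ((xid & p->mask) != p->val)
	p++;			/* zeroed catch-all entry always matches */

/* p->mac_version == RTL_GIGA_MAC_NONE means the XID is unknown; chips
 * paired with a Fast Ethernet PHY (!gmii) are remapped to the dedicated
 * RTL8106eus / RTL8107e entries. */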
static const struct pci_device_id rtl8169_pci_tbl[] = {
@@ -166,7 +226,10 @@ static const struct pci_device_id rtl8169_pci_tbl[] = {
{ 0x0001, 0x8168, PCI_ANY_ID, 0x2410 },
{ PCI_VDEVICE(REALTEK, 0x8125) },
{ PCI_VDEVICE(REALTEK, 0x8126) },
+ { PCI_VDEVICE(REALTEK, 0x8127) },
{ PCI_VDEVICE(REALTEK, 0x3000) },
+ { PCI_VDEVICE(REALTEK, 0x5000) },
+ { PCI_VDEVICE(REALTEK, 0x0e10) },
{}
};
@@ -623,7 +686,6 @@ struct rtl8169_tc_offsets {
enum rtl_flag {
RTL_FLAG_TASK_RESET_PENDING,
- RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE,
RTL_FLAG_TASK_TX_TIMEOUT,
RTL_FLAG_MAX
};
@@ -632,6 +694,7 @@ enum rtl_dash_type {
RTL_DASH_NONE,
RTL_DASH_DP,
RTL_DASH_EP,
+ RTL_DASH_25_BP,
};
struct rtl8169_private {
@@ -708,8 +771,11 @@ MODULE_FIRMWARE(FIRMWARE_8107E_2);
MODULE_FIRMWARE(FIRMWARE_8125A_3);
MODULE_FIRMWARE(FIRMWARE_8125B_2);
MODULE_FIRMWARE(FIRMWARE_8125D_1);
+MODULE_FIRMWARE(FIRMWARE_8125D_2);
+MODULE_FIRMWARE(FIRMWARE_8125BP_2);
MODULE_FIRMWARE(FIRMWARE_8126A_2);
MODULE_FIRMWARE(FIRMWARE_8126A_3);
+MODULE_FIRMWARE(FIRMWARE_8127A_1);
static inline struct device *tp_to_dev(struct rtl8169_private *tp)
{
@@ -771,7 +837,7 @@ static bool rtl_is_8168evl_up(struct rtl8169_private *tp)
{
return tp->mac_version >= RTL_GIGA_MAC_VER_34 &&
tp->mac_version != RTL_GIGA_MAC_VER_39 &&
- tp->mac_version <= RTL_GIGA_MAC_VER_53;
+ tp->mac_version <= RTL_GIGA_MAC_VER_52;
}
static bool rtl_supports_eee(struct rtl8169_private *tp)
@@ -939,9 +1005,7 @@ void r8169_get_led_name(struct rtl8169_private *tp, int idx,
static void r8168fp_adjust_ocp_cmd(struct rtl8169_private *tp, u32 *cmd, int type)
{
/* based on RTL8168FP_OOBMAC_BASE in vendor driver */
- if (type == ERIAR_OOB &&
- (tp->mac_version == RTL_GIGA_MAC_VER_52 ||
- tp->mac_version == RTL_GIGA_MAC_VER_53))
+ if (type == ERIAR_OOB && tp->mac_version == RTL_GIGA_MAC_VER_52)
*cmd |= 0xf70 << 18;
}
@@ -1230,7 +1294,7 @@ static void rtl_writephy(struct rtl8169_private *tp, int location, int val)
case RTL_GIGA_MAC_VER_31:
r8168dp_2_mdio_write(tp, location, val);
break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_LAST:
r8168g_mdio_write(tp, location, val);
break;
default:
@@ -1245,7 +1309,7 @@ static int rtl_readphy(struct rtl8169_private *tp, int location)
case RTL_GIGA_MAC_VER_28:
case RTL_GIGA_MAC_VER_31:
return r8168dp_2_mdio_read(tp, location);
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_LAST:
return r8168g_mdio_read(tp, location);
default:
return r8169_mdio_read(tp, location);
@@ -1360,10 +1424,19 @@ static void rtl8168ep_driver_start(struct rtl8169_private *tp)
rtl_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10000, 30);
}
+static void rtl8125bp_driver_start(struct rtl8169_private *tp)
+{
+ r8168ep_ocp_write(tp, 0x01, 0x14, OOB_CMD_DRIVER_START);
+ r8168ep_ocp_write(tp, 0x01, 0x18, 0x00);
+ r8168ep_ocp_write(tp, 0x01, 0x10, 0x01);
+}
+
static void rtl8168_driver_start(struct rtl8169_private *tp)
{
if (tp->dash_type == RTL_DASH_DP)
rtl8168dp_driver_start(tp);
+ else if (tp->dash_type == RTL_DASH_25_BP)
+ rtl8125bp_driver_start(tp);
else
rtl8168ep_driver_start(tp);
}
@@ -1384,10 +1457,19 @@ static void rtl8168ep_driver_stop(struct rtl8169_private *tp)
rtl_loop_wait_low(tp, &rtl_ep_ocp_read_cond, 10000, 10);
}
+static void rtl8125bp_driver_stop(struct rtl8169_private *tp)
+{
+ r8168ep_ocp_write(tp, 0x01, 0x14, OOB_CMD_DRIVER_STOP);
+ r8168ep_ocp_write(tp, 0x01, 0x18, 0x00);
+ r8168ep_ocp_write(tp, 0x01, 0x10, 0x01);
+}
+
static void rtl8168_driver_stop(struct rtl8169_private *tp)
{
if (tp->dash_type == RTL_DASH_DP)
rtl8168dp_driver_stop(tp);
+ else if (tp->dash_type == RTL_DASH_25_BP)
+ rtl8125bp_driver_stop(tp);
else
rtl8168ep_driver_stop(tp);
}
@@ -1410,6 +1492,7 @@ static bool rtl_dash_is_enabled(struct rtl8169_private *tp)
case RTL_DASH_DP:
return r8168dp_check_dash(tp);
case RTL_DASH_EP:
+ case RTL_DASH_25_BP:
return r8168ep_check_dash(tp);
default:
return false;
@@ -1422,8 +1505,10 @@ static enum rtl_dash_type rtl_get_dash_type(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_28:
case RTL_GIGA_MAC_VER_31:
return RTL_DASH_DP;
- case RTL_GIGA_MAC_VER_51 ... RTL_GIGA_MAC_VER_53:
+ case RTL_GIGA_MAC_VER_51 ... RTL_GIGA_MAC_VER_52:
return RTL_DASH_EP;
+ case RTL_GIGA_MAC_VER_66:
+ return RTL_DASH_25_BP;
default:
return RTL_DASH_NONE;
}
@@ -1576,7 +1661,7 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
break;
case RTL_GIGA_MAC_VER_34:
case RTL_GIGA_MAC_VER_37:
- case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_LAST:
r8169_mod_reg8_cond(tp, Config2, PME_SIGNAL, wolopts);
break;
default:
@@ -2049,7 +2134,7 @@ static void rtl_set_eee_txidle_timer(struct rtl8169_private *tp)
tp->tx_lpi_timer = timer_val;
r8168_mac_ocp_write(tp, 0xe048, timer_val);
break;
- case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_LAST:
tp->tx_lpi_timer = timer_val;
RTL_W16(tp, EEE_TXIDLE_TIMER_8125, timer_val);
break;
@@ -2238,147 +2323,30 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
.get_eth_ctrl_stats = rtl8169_get_eth_ctrl_stats,
};
-static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii)
+static const struct rtl_chip_info *rtl8169_get_chip_version(u16 xid, bool gmii)
{
- /*
- * The driver currently handles the 8168Bf and the 8168Be identically
- * but they can be identified more specifically through the test below
- * if needed:
- *
- * (RTL_R32(tp, TxConfig) & 0x700000) == 0x500000 ? 8168Bf : 8168Be
- *
- * Same thing for the 8101Eb and the 8101Ec:
- *
- * (RTL_R32(tp, TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec
- */
- static const struct rtl_mac_info {
- u16 mask;
- u16 val;
- enum mac_version ver;
- } mac_info[] = {
- /* 8126A family. */
- { 0x7cf, 0x64a, RTL_GIGA_MAC_VER_66 },
- { 0x7cf, 0x649, RTL_GIGA_MAC_VER_65 },
-
- /* 8125D family. */
- { 0x7cf, 0x688, RTL_GIGA_MAC_VER_64 },
-
- /* 8125B family. */
- { 0x7cf, 0x641, RTL_GIGA_MAC_VER_63 },
-
- /* 8125A family. */
- { 0x7cf, 0x609, RTL_GIGA_MAC_VER_61 },
- /* It seems only XID 609 made it to the mass market.
- * { 0x7cf, 0x608, RTL_GIGA_MAC_VER_60 },
- * { 0x7c8, 0x608, RTL_GIGA_MAC_VER_61 },
- */
-
- /* RTL8117 */
- { 0x7cf, 0x54b, RTL_GIGA_MAC_VER_53 },
- { 0x7cf, 0x54a, RTL_GIGA_MAC_VER_52 },
-
- /* 8168EP family. */
- { 0x7cf, 0x502, RTL_GIGA_MAC_VER_51 },
- /* It seems this chip version never made it to
- * the wild. Let's disable detection.
- * { 0x7cf, 0x501, RTL_GIGA_MAC_VER_50 },
- * { 0x7cf, 0x500, RTL_GIGA_MAC_VER_49 },
- */
-
- /* 8168H family. */
- { 0x7cf, 0x541, RTL_GIGA_MAC_VER_46 },
- /* It seems this chip version never made it to
- * the wild. Let's disable detection.
- * { 0x7cf, 0x540, RTL_GIGA_MAC_VER_45 },
- */
- /* Realtek calls it RTL8168M, but it's handled like RTL8168H */
- { 0x7cf, 0x6c0, RTL_GIGA_MAC_VER_46 },
-
- /* 8168G family. */
- { 0x7cf, 0x5c8, RTL_GIGA_MAC_VER_44 },
- { 0x7cf, 0x509, RTL_GIGA_MAC_VER_42 },
- /* It seems this chip version never made it to
- * the wild. Let's disable detection.
- * { 0x7cf, 0x4c1, RTL_GIGA_MAC_VER_41 },
- */
- { 0x7cf, 0x4c0, RTL_GIGA_MAC_VER_40 },
-
- /* 8168F family. */
- { 0x7c8, 0x488, RTL_GIGA_MAC_VER_38 },
- { 0x7cf, 0x481, RTL_GIGA_MAC_VER_36 },
- { 0x7cf, 0x480, RTL_GIGA_MAC_VER_35 },
-
- /* 8168E family. */
- { 0x7c8, 0x2c8, RTL_GIGA_MAC_VER_34 },
- { 0x7cf, 0x2c1, RTL_GIGA_MAC_VER_32 },
- { 0x7c8, 0x2c0, RTL_GIGA_MAC_VER_33 },
-
- /* 8168D family. */
- { 0x7cf, 0x281, RTL_GIGA_MAC_VER_25 },
- { 0x7c8, 0x280, RTL_GIGA_MAC_VER_26 },
-
- /* 8168DP family. */
- /* It seems this early RTL8168dp version never made it to
- * the wild. Support has been removed.
- * { 0x7cf, 0x288, RTL_GIGA_MAC_VER_27 },
- */
- { 0x7cf, 0x28a, RTL_GIGA_MAC_VER_28 },
- { 0x7cf, 0x28b, RTL_GIGA_MAC_VER_31 },
-
- /* 8168C family. */
- { 0x7cf, 0x3c9, RTL_GIGA_MAC_VER_23 },
- { 0x7cf, 0x3c8, RTL_GIGA_MAC_VER_18 },
- { 0x7c8, 0x3c8, RTL_GIGA_MAC_VER_24 },
- { 0x7cf, 0x3c0, RTL_GIGA_MAC_VER_19 },
- { 0x7cf, 0x3c2, RTL_GIGA_MAC_VER_20 },
- { 0x7cf, 0x3c3, RTL_GIGA_MAC_VER_21 },
- { 0x7c8, 0x3c0, RTL_GIGA_MAC_VER_22 },
-
- /* 8168B family. */
- { 0x7c8, 0x380, RTL_GIGA_MAC_VER_17 },
- /* This one is very old and rare, let's see if anybody complains.
- * { 0x7c8, 0x300, RTL_GIGA_MAC_VER_11 },
- */
-
- /* 8101 family. */
- { 0x7c8, 0x448, RTL_GIGA_MAC_VER_39 },
- { 0x7c8, 0x440, RTL_GIGA_MAC_VER_37 },
- { 0x7cf, 0x409, RTL_GIGA_MAC_VER_29 },
- { 0x7c8, 0x408, RTL_GIGA_MAC_VER_30 },
- { 0x7cf, 0x349, RTL_GIGA_MAC_VER_08 },
- { 0x7cf, 0x249, RTL_GIGA_MAC_VER_08 },
- { 0x7cf, 0x348, RTL_GIGA_MAC_VER_07 },
- { 0x7cf, 0x248, RTL_GIGA_MAC_VER_07 },
- { 0x7cf, 0x240, RTL_GIGA_MAC_VER_14 },
- { 0x7c8, 0x348, RTL_GIGA_MAC_VER_09 },
- { 0x7c8, 0x248, RTL_GIGA_MAC_VER_09 },
- { 0x7c8, 0x340, RTL_GIGA_MAC_VER_10 },
-
- /* 8110 family. */
- { 0xfc8, 0x980, RTL_GIGA_MAC_VER_06 },
- { 0xfc8, 0x180, RTL_GIGA_MAC_VER_05 },
- { 0xfc8, 0x100, RTL_GIGA_MAC_VER_04 },
- { 0xfc8, 0x040, RTL_GIGA_MAC_VER_03 },
- { 0xfc8, 0x008, RTL_GIGA_MAC_VER_02 },
-
- /* Catch-all */
- { 0x000, 0x000, RTL_GIGA_MAC_NONE }
+ /* Chips combining a 1Gbps MAC with a 100Mbps PHY */
+ static const struct rtl_chip_info rtl8106eus_info = {
+ .mac_version = RTL_GIGA_MAC_VER_43,
+ .name = "RTL8106eus",
+ .fw_name = FIRMWARE_8106E_2,
+ };
+ static const struct rtl_chip_info rtl8107e_info = {
+ .mac_version = RTL_GIGA_MAC_VER_48,
+ .name = "RTL8107e",
+ .fw_name = FIRMWARE_8107E_2,
};
- const struct rtl_mac_info *p = mac_info;
- enum mac_version ver;
+ const struct rtl_chip_info *p = rtl_chip_infos;
while ((xid & p->mask) != p->val)
p++;
- ver = p->ver;
- if (ver != RTL_GIGA_MAC_NONE && !gmii) {
- if (ver == RTL_GIGA_MAC_VER_42)
- ver = RTL_GIGA_MAC_VER_43;
- else if (ver == RTL_GIGA_MAC_VER_46)
- ver = RTL_GIGA_MAC_VER_48;
- }
+ if (p->mac_version == RTL_GIGA_MAC_VER_42 && !gmii)
+ return &rtl8106eus_info;
+ if (p->mac_version == RTL_GIGA_MAC_VER_46 && !gmii)
+ return &rtl8107e_info;
- return ver;
+ return p;
}
static void rtl_release_firmware(struct rtl8169_private *tp)
@@ -2522,13 +2490,13 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_38:
RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_53:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_52:
RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
break;
case RTL_GIGA_MAC_VER_61:
RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST);
break;
- case RTL_GIGA_MAC_VER_63 ... RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_63 ... RTL_GIGA_MAC_VER_LAST:
RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST |
RX_PAUSE_SLOT_ON);
break;
@@ -2653,14 +2621,14 @@ DECLARE_RTL_COND(rtl_rxtx_empty_cond_2)
static void rtl_wait_txrx_fifo_empty(struct rtl8169_private *tp)
{
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_53:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_52:
rtl_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 42);
rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42);
break;
case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_61:
rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42);
break;
- case RTL_GIGA_MAC_VER_63 ... RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_63 ... RTL_GIGA_MAC_VER_LAST:
RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq);
rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42);
rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond_2, 100, 42);
@@ -2821,10 +2789,45 @@ static u32 rtl_csi_read(struct rtl8169_private *tp, int addr)
RTL_R32(tp, CSIDR) : ~0;
}
+static void rtl_csi_mod(struct rtl8169_private *tp, int addr,
+ u32 mask, u32 set)
+{
+ u32 val;
+
+ WARN(addr % 4, "Invalid CSI address %#x\n", addr);
+
+ netdev_notice_once(tp->dev,
+ "No native access to PCI extended config space, falling back to CSI\n");
+
+ val = rtl_csi_read(tp, addr);
+ rtl_csi_write(tp, addr, (val & ~mask) | set);
+}
+
+static void rtl_disable_zrxdc_timeout(struct rtl8169_private *tp)
+{
+ struct pci_dev *pdev = tp->pci_dev;
+ int rc;
+ u8 val;
+
+#define RTL_GEN3_RELATED_OFF 0x0890
+#define RTL_GEN3_ZRXDC_NONCOMPL 0x1
+ if (pdev->cfg_size > RTL_GEN3_RELATED_OFF) {
+ rc = pci_read_config_byte(pdev, RTL_GEN3_RELATED_OFF, &val);
+ if (rc == PCIBIOS_SUCCESSFUL) {
+ val &= ~RTL_GEN3_ZRXDC_NONCOMPL;
+ rc = pci_write_config_byte(pdev, RTL_GEN3_RELATED_OFF,
+ val);
+ if (rc == PCIBIOS_SUCCESSFUL)
+ return;
+ }
+ }
+
+ rtl_csi_mod(tp, RTL_GEN3_RELATED_OFF, RTL_GEN3_ZRXDC_NONCOMPL, 0);
+}
+
static void rtl_set_aspm_entry_latency(struct rtl8169_private *tp, u8 val)
{
struct pci_dev *pdev = tp->pci_dev;
- u32 csi;
/* According to Realtek the value at config space address 0x070f
* controls the L0s/L1 entrance latency. We try standard ECAM access
@@ -2836,10 +2839,7 @@ static void rtl_set_aspm_entry_latency(struct rtl8169_private *tp, u8 val)
pci_write_config_byte(pdev, 0x070f, val) == PCIBIOS_SUCCESSFUL)
return;
- netdev_notice_once(tp->dev,
- "No native access to PCI extended config space, falling back to CSI\n");
- csi = rtl_csi_read(tp, 0x070c) & 0x00ffffff;
- rtl_csi_write(tp, 0x070c, csi | val << 24);
+ rtl_csi_mod(tp, 0x070c, 0xff000000, val << 24);
}
static void rtl_set_def_aspm_entry_latency(struct rtl8169_private *tp)
@@ -2903,7 +2903,7 @@ static void rtl_enable_exit_l1(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_37 ... RTL_GIGA_MAC_VER_38:
rtl_eri_set_bits(tp, 0xd4, 0x0c00);
break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_LAST:
r8168_mac_ocp_modify(tp, 0xc0ac, 0, 0x1f80);
break;
default:
@@ -2917,7 +2917,7 @@ static void rtl_disable_exit_l1(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38:
rtl_eri_clear_bits(tp, 0xd4, 0x1f00);
break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_LAST:
r8168_mac_ocp_modify(tp, 0xc0ac, 0x1f80, 0);
break;
default:
@@ -2943,8 +2943,8 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
rtl_mod_config5(tp, 0, ASPM_en);
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_65:
- case RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_70:
+ case RTL_GIGA_MAC_VER_80:
val8 = RTL_R8(tp, INT_CFG0_8125) | INT_CFG0_CLKREQEN;
RTL_W8(tp, INT_CFG0_8125, val8);
break;
@@ -2955,7 +2955,7 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_46 ... RTL_GIGA_MAC_VER_48:
- case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_LAST:
/* reset ephy tx/rx disable timer */
r8168_mac_ocp_modify(tp, 0xe094, 0xff00, 0);
/* chip can trigger L1.2 */
@@ -2967,7 +2967,7 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
} else {
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_46 ... RTL_GIGA_MAC_VER_48:
- case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_LAST:
r8168_mac_ocp_modify(tp, 0xe092, 0x00ff, 0);
break;
default:
@@ -2975,8 +2975,8 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
}
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_65:
- case RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_70:
+ case RTL_GIGA_MAC_VER_80:
val8 = RTL_R8(tp, INT_CFG0_8125) & ~INT_CFG0_CLKREQEN;
RTL_W8(tp, INT_CFG0_8125, val8);
break;
@@ -3696,12 +3696,13 @@ static void rtl_hw_start_8125_common(struct rtl8169_private *tp)
/* disable new tx descriptor format */
r8168_mac_ocp_modify(tp, 0xeb58, 0x0001, 0x0000);
- if (tp->mac_version == RTL_GIGA_MAC_VER_65 ||
- tp->mac_version == RTL_GIGA_MAC_VER_66)
+ if (tp->mac_version == RTL_GIGA_MAC_VER_70 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_80)
RTL_W8(tp, 0xD8, RTL_R8(tp, 0xD8) & ~0x02);
- if (tp->mac_version == RTL_GIGA_MAC_VER_65 ||
- tp->mac_version == RTL_GIGA_MAC_VER_66)
+ if (tp->mac_version == RTL_GIGA_MAC_VER_80)
+ r8168_mac_ocp_modify(tp, 0xe614, 0x0f00, 0x0f00);
+ else if (tp->mac_version == RTL_GIGA_MAC_VER_70)
r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0400);
else if (tp->mac_version == RTL_GIGA_MAC_VER_63)
r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0200);
@@ -3719,8 +3720,8 @@ static void rtl_hw_start_8125_common(struct rtl8169_private *tp)
r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0030);
r8168_mac_ocp_modify(tp, 0xe040, 0x1000, 0x0000);
r8168_mac_ocp_modify(tp, 0xea1c, 0x0003, 0x0001);
- if (tp->mac_version == RTL_GIGA_MAC_VER_65 ||
- tp->mac_version == RTL_GIGA_MAC_VER_66)
+ if (tp->mac_version == RTL_GIGA_MAC_VER_70 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_80)
r8168_mac_ocp_modify(tp, 0xea1c, 0x0300, 0x0000);
else
r8168_mac_ocp_modify(tp, 0xea1c, 0x0004, 0x0000);
@@ -3793,6 +3794,13 @@ static void rtl_hw_start_8125d(struct rtl8169_private *tp)
static void rtl_hw_start_8126a(struct rtl8169_private *tp)
{
+ rtl_disable_zrxdc_timeout(tp);
+ rtl_set_def_aspm_entry_latency(tp);
+ rtl_hw_start_8125_common(tp);
+}
+
+static void rtl_hw_start_8127a(struct rtl8169_private *tp)
+{
rtl_set_def_aspm_entry_latency(tp);
rtl_hw_start_8125_common(tp);
}
@@ -3804,7 +3812,6 @@ static void rtl_hw_config(struct rtl8169_private *tp)
[RTL_GIGA_MAC_VER_08] = rtl_hw_start_8102e_3,
[RTL_GIGA_MAC_VER_09] = rtl_hw_start_8102e_2,
[RTL_GIGA_MAC_VER_10] = NULL,
- [RTL_GIGA_MAC_VER_11] = rtl_hw_start_8168b,
[RTL_GIGA_MAC_VER_14] = rtl_hw_start_8401,
[RTL_GIGA_MAC_VER_17] = rtl_hw_start_8168b,
[RTL_GIGA_MAC_VER_18] = rtl_hw_start_8168cp_1,
@@ -3836,12 +3843,12 @@ static void rtl_hw_config(struct rtl8169_private *tp)
[RTL_GIGA_MAC_VER_48] = rtl_hw_start_8168h_1,
[RTL_GIGA_MAC_VER_51] = rtl_hw_start_8168ep_3,
[RTL_GIGA_MAC_VER_52] = rtl_hw_start_8117,
- [RTL_GIGA_MAC_VER_53] = rtl_hw_start_8117,
[RTL_GIGA_MAC_VER_61] = rtl_hw_start_8125a_2,
[RTL_GIGA_MAC_VER_63] = rtl_hw_start_8125b,
[RTL_GIGA_MAC_VER_64] = rtl_hw_start_8125d,
- [RTL_GIGA_MAC_VER_65] = rtl_hw_start_8126a,
- [RTL_GIGA_MAC_VER_66] = rtl_hw_start_8126a,
+ [RTL_GIGA_MAC_VER_66] = rtl_hw_start_8125d,
+ [RTL_GIGA_MAC_VER_70] = rtl_hw_start_8126a,
+ [RTL_GIGA_MAC_VER_80] = rtl_hw_start_8127a,
};
if (hw_configs[tp->mac_version])
@@ -3858,12 +3865,15 @@ static void rtl_hw_start_8125(struct rtl8169_private *tp)
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_61:
case RTL_GIGA_MAC_VER_64:
+ case RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_80:
for (i = 0xa00; i < 0xb00; i += 4)
RTL_W32(tp, i, 0);
+ if (tp->mac_version == RTL_GIGA_MAC_VER_80)
+ RTL_W16(tp, INT_CFG1_8125, 0x0000);
break;
case RTL_GIGA_MAC_VER_63:
- case RTL_GIGA_MAC_VER_65:
- case RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_70:
for (i = 0xa00; i < 0xa80; i += 4)
RTL_W32(tp, i, 0);
RTL_W16(tp, INT_CFG1_8125, 0x0000);
@@ -4095,7 +4105,7 @@ static void rtl8169_cleanup(struct rtl8169_private *tp)
RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq);
rtl_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666);
break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_LAST:
rtl_enable_rxdvgate(tp);
fsleep(2000);
break;
@@ -4252,7 +4262,7 @@ static unsigned int rtl_quirk_packet_padto(struct rtl8169_private *tp,
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_34:
- case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_LAST:
padto = max_t(unsigned int, padto, ETH_ZLEN);
break;
default:
@@ -4680,12 +4690,6 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
if (status & LinkChg)
phy_mac_interrupt(tp->phydev);
- if (unlikely(status & RxFIFOOver &&
- tp->mac_version == RTL_GIGA_MAC_VER_11)) {
- netif_stop_queue(tp->dev);
- rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
- }
-
rtl_irq_disable(tp);
napi_schedule(&tp->napi);
out:
@@ -4723,8 +4727,6 @@ static void rtl_task(struct work_struct *work)
reset:
rtl_reset_work(tp);
netif_wake_queue(tp->dev);
- } else if (test_and_clear_bit(RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE, tp->wk.flags)) {
- rtl_reset_work(tp);
}
}
@@ -5048,10 +5050,8 @@ static void rtl_shutdown(struct pci_dev *pdev)
/* Restore original MAC address */
rtl_rar_set(tp, tp->dev->perm_addr);
- if (system_state == SYSTEM_POWER_OFF && !tp->dash_enabled) {
- pci_wake_from_d3(pdev, tp->saved_wolopts);
- pci_set_power_state(pdev, PCI_D3hot);
- }
+ if (system_state == SYSTEM_POWER_OFF && !tp->dash_enabled)
+ pci_prepare_to_sleep(pdev);
}
static void rtl_remove_one(struct pci_dev *pdev)
@@ -5103,9 +5103,6 @@ static void rtl_set_irq_mask(struct rtl8169_private *tp)
if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
tp->irq_mask |= SYSErr | RxFIFOOver;
- else if (tp->mac_version == RTL_GIGA_MAC_VER_11)
- /* special workaround needed */
- tp->irq_mask |= RxFIFOOver;
}
static int rtl_alloc_irq(struct rtl8169_private *tp)
@@ -5178,6 +5175,33 @@ static int r8169_mdio_write_reg(struct mii_bus *mii_bus, int phyaddr,
return 0;
}
+static int r8169_mdio_read_reg_c45(struct mii_bus *mii_bus, int addr,
+ int devnum, int regnum)
+{
+ struct rtl8169_private *tp = mii_bus->priv;
+
+ if (addr > 0)
+ return -ENODEV;
+
+ if (devnum == MDIO_MMD_VEND2 && regnum > MDIO_STAT2)
+ return r8168_phy_ocp_read(tp, regnum);
+
+ return 0;
+}
+
+static int r8169_mdio_write_reg_c45(struct mii_bus *mii_bus, int addr,
+ int devnum, int regnum, u16 val)
+{
+ struct rtl8169_private *tp = mii_bus->priv;
+
+ if (addr > 0 || devnum != MDIO_MMD_VEND2 || regnum <= MDIO_STAT2)
+ return -ENODEV;
+
+ r8168_phy_ocp_write(tp, regnum, val);
+
+ return 0;
+}
+
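The two callbacks above expose the vendor OCP PHY register space through the standard Clause-45 MMD interface: only PHY address 0 is accepted, and only MDIO_MMD_VEND2 registers above MDIO_STAT2 are forwarded to r8168_phy_ocp_read()/r8168_phy_ocp_write(). A minimal sketch of reaching such a register through the generic mdiobus Clause-45 accessors; the register number 0xa5d4 is purely illustrative and not taken from this patch.

/* Hedged sketch, not part of the patch: a C45 access on this bus is
 * dispatched to the read_c45/write_c45 callbacks added above.  The
 * OCP register 0xa5d4 is a made-up example address.
 */
#include <linux/mdio.h>
#include <linux/phy.h>

static int example_vend2_access(struct mii_bus *bus)
{
	int val;

	/* PHY address 0 is the only one this bus exposes (see phy_mask below) */
	val = mdiobus_c45_read(bus, 0, MDIO_MMD_VEND2, 0xa5d4);
	if (val < 0)
		return val;

	return mdiobus_c45_write(bus, 0, MDIO_MMD_VEND2, 0xa5d4, val);
}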
static int r8169_mdio_register(struct rtl8169_private *tp)
{
struct pci_dev *pdev = tp->pci_dev;
@@ -5201,12 +5225,18 @@ static int r8169_mdio_register(struct rtl8169_private *tp)
new_bus->priv = tp;
new_bus->parent = &pdev->dev;
new_bus->irq[0] = PHY_MAC_INTERRUPT;
+ new_bus->phy_mask = GENMASK(31, 1);
snprintf(new_bus->id, MII_BUS_ID_SIZE, "r8169-%x-%x",
pci_domain_nr(pdev->bus), pci_dev_id(pdev));
new_bus->read = r8169_mdio_read_reg;
new_bus->write = r8169_mdio_write_reg;
+ if (tp->mac_version >= RTL_GIGA_MAC_VER_40) {
+ new_bus->read_c45 = r8169_mdio_read_reg_c45;
+ new_bus->write_c45 = r8169_mdio_write_reg_c45;
+ }
+
ret = devm_mdiobus_register(&pdev->dev, new_bus);
if (ret)
return ret;
@@ -5230,9 +5260,9 @@ static int r8169_mdio_register(struct rtl8169_private *tp)
/* mimic behavior of r8125/r8126 vendor drivers */
if (tp->mac_version == RTL_GIGA_MAC_VER_61)
- phy_set_eee_broken(tp->phydev,
- ETHTOOL_LINK_MODE_2500baseT_Full_BIT);
- phy_set_eee_broken(tp->phydev, ETHTOOL_LINK_MODE_5000baseT_Full_BIT);
+ phy_disable_eee_mode(tp->phydev,
+ ETHTOOL_LINK_MODE_2500baseT_Full_BIT);
+ phy_disable_eee_mode(tp->phydev, ETHTOOL_LINK_MODE_5000baseT_Full_BIT);
/* PHY will be woken up in rtl_open() */
phy_suspend(tp->phydev);
@@ -5275,13 +5305,13 @@ static void rtl_hw_init_8125(struct rtl8169_private *tp)
static void rtl_hw_initialize(struct rtl8169_private *tp)
{
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_51 ... RTL_GIGA_MAC_VER_53:
+ case RTL_GIGA_MAC_VER_51 ... RTL_GIGA_MAC_VER_52:
rtl8168ep_stop_cmac(tp);
fallthrough;
case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_48:
rtl_hw_init_8168g(tp);
break;
- case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_LAST:
rtl_hw_init_8125(tp);
break;
default:
@@ -5300,12 +5330,14 @@ static int rtl_jumbo_max(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06:
return JUMBO_7K;
/* RTL8168b */
- case RTL_GIGA_MAC_VER_11:
case RTL_GIGA_MAC_VER_17:
return JUMBO_4K;
/* RTL8168c */
case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_24:
return JUMBO_6K;
+ /* RTL8125/8126 */
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_LAST:
+ return JUMBO_16K;
default:
return JUMBO_9K;
}
@@ -5340,55 +5372,18 @@ done:
/* register is set if system vendor successfully tested ASPM 1.2 */
static bool rtl_aspm_is_safe(struct rtl8169_private *tp)
{
- if (tp->mac_version >= RTL_GIGA_MAC_VER_61 &&
+ if (tp->mac_version >= RTL_GIGA_MAC_VER_46 &&
r8168_mac_ocp_read(tp, 0xc0b2) & 0xf)
return true;
return false;
}
-static umode_t r8169_hwmon_is_visible(const void *drvdata,
- enum hwmon_sensor_types type,
- u32 attr, int channel)
-{
- return 0444;
-}
-
-static int r8169_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
- u32 attr, int channel, long *val)
-{
- struct rtl8169_private *tp = dev_get_drvdata(dev);
- int val_raw;
-
- val_raw = phy_read_paged(tp->phydev, 0xbd8, 0x12) & 0x3ff;
- if (val_raw >= 512)
- val_raw -= 1024;
-
- *val = 1000 * val_raw / 2;
-
- return 0;
-}
-
-static const struct hwmon_ops r8169_hwmon_ops = {
- .is_visible = r8169_hwmon_is_visible,
- .read = r8169_hwmon_read,
-};
-
-static const struct hwmon_channel_info * const r8169_hwmon_info[] = {
- HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT),
- NULL
-};
-
-static const struct hwmon_chip_info r8169_hwmon_chip_info = {
- .ops = &r8169_hwmon_ops,
- .info = r8169_hwmon_info,
-};
-
static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
+ const struct rtl_chip_info *chip;
struct rtl8169_private *tp;
int jumbo_max, region, rc;
- enum mac_version chipset;
struct net_device *dev;
u32 txconfig;
u16 xid;
@@ -5426,11 +5421,10 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (region < 0)
return dev_err_probe(&pdev->dev, -ENODEV, "no MMIO resource found\n");
- rc = pcim_iomap_regions(pdev, BIT(region), KBUILD_MODNAME);
- if (rc < 0)
- return dev_err_probe(&pdev->dev, rc, "cannot remap MMIO, aborting\n");
-
- tp->mmio_addr = pcim_iomap_table(pdev)[region];
+ tp->mmio_addr = pcim_iomap_region(pdev, region, KBUILD_MODNAME);
+ if (IS_ERR(tp->mmio_addr))
+ return dev_err_probe(&pdev->dev, PTR_ERR(tp->mmio_addr),
+ "cannot remap MMIO, aborting\n");
txconfig = RTL_R32(tp, TxConfig);
if (txconfig == ~0U)
@@ -5439,12 +5433,13 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
xid = (txconfig >> 20) & 0xfcf;
/* Identify chip attached to board */
- chipset = rtl8169_get_mac_version(xid, tp->supports_gmii);
- if (chipset == RTL_GIGA_MAC_NONE)
+ chip = rtl8169_get_chip_version(xid, tp->supports_gmii);
+ if (chip->mac_version == RTL_GIGA_MAC_NONE)
return dev_err_probe(&pdev->dev, -ENODEV,
"unknown chip XID %03x, contact r8169 maintainers (see MAINTAINERS file)\n",
xid);
- tp->mac_version = chipset;
+ tp->mac_version = chip->mac_version;
+ tp->fw_name = chip->fw_name;
/* Disable ASPM L1 as that cause random device stop working
* problems as well as full system hangs for some PCIe devices users.
@@ -5549,8 +5544,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rtl_set_irq_mask(tp);
- tp->fw_name = rtl_chip_infos[chipset].fw_name;
-
tp->counters = dmam_alloc_coherent (&pdev->dev, sizeof(*tp->counters),
&tp->counters_phys_addr,
GFP_KERNEL);
@@ -5563,12 +5556,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
return rc;
- /* The temperature sensor is available from RTl8125B */
- if (IS_REACHABLE(CONFIG_HWMON) && tp->mac_version >= RTL_GIGA_MAC_VER_63)
- /* ignore errors */
- devm_hwmon_device_register_with_info(&pdev->dev, "nic_temp", tp,
- &r8169_hwmon_chip_info,
- NULL);
rc = register_netdev(dev);
if (rc)
return rc;
@@ -5581,7 +5568,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
netdev_info(dev, "%s, %pM, XID %03x, IRQ %d\n",
- rtl_chip_infos[chipset].name, dev->dev_addr, xid, tp->irq);
+ chip->name, dev->dev_addr, xid, tp->irq);
if (jumbo_max)
netdev_info(dev, "jumbo features [frames: %d bytes, tx checksumming: %s]\n",
diff --git a/drivers/net/ethernet/realtek/r8169_phy_config.c b/drivers/net/ethernet/realtek/r8169_phy_config.c
index 5307c6ff4e25..032d9d2cfa2a 100644
--- a/drivers/net/ethernet/realtek/r8169_phy_config.c
+++ b/drivers/net/ethernet/realtek/r8169_phy_config.c
@@ -50,6 +50,15 @@ static void r8168g_phy_param(struct phy_device *phydev, u16 parm,
phy_restore_page(phydev, oldpage, 0);
}
+static void rtl8125_phy_param(struct phy_device *phydev, u16 parm,
+ u16 mask, u16 val)
+{
+ phy_lock_mdio_bus(phydev);
+ __phy_write_mmd(phydev, MDIO_MMD_VEND2, 0xb87c, parm);
+ __phy_modify_mmd(phydev, MDIO_MMD_VEND2, 0xb87e, mask, val);
+ phy_unlock_mdio_bus(phydev);
+}
+
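The new rtl8125_phy_param() helper wraps the vendor "PHY parameter" indirection used on the 8125 family: MMD VEND2 register 0xb87c selects a parameter address and 0xb87e carries its 16-bit value, so a masked modify of 0xb87e updates only part of the selected parameter. A short usage sketch, with values taken from the conversions later in this patch (phydev is the attached 8125-family PHY):

static void example_8125_params(struct phy_device *phydev)
{
	/* full 16-bit write of parameter 0x80a2 */
	rtl8125_phy_param(phydev, 0x80a2, 0xffff, 0x0153);

	/* touch only bits 15:8 of parameter 0x8551, keep the low byte */
	rtl8125_phy_param(phydev, 0x8551, 0xff00, 0x0800);
}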
struct phy_reg {
u16 reg;
u16 val;
@@ -276,15 +285,6 @@ static void rtl8169sce_hw_phy_config(struct rtl8169_private *tp,
rtl_writephy_batch(phydev, phy_reg_init);
}
-static void rtl8168bb_hw_phy_config(struct rtl8169_private *tp,
- struct phy_device *phydev)
-{
- phy_write(phydev, 0x1f, 0x0001);
- phy_set_bits(phydev, 0x16, BIT(0));
- phy_write(phydev, 0x10, 0xf41b);
- phy_write(phydev, 0x1f, 0x0000);
-}
-
static void rtl8168bef_hw_phy_config(struct rtl8169_private *tp,
struct phy_device *phydev)
{
@@ -1013,12 +1013,8 @@ static void rtl8125a_2_hw_phy_config(struct rtl8169_private *tp,
phy_write_paged(phydev, 0xac5, 0x16, 0x01ff);
phy_modify_paged(phydev, 0xac8, 0x15, 0x00f0, 0x0030);
- phy_write(phydev, 0x1f, 0x0b87);
- phy_write(phydev, 0x16, 0x80a2);
- phy_write(phydev, 0x17, 0x0153);
- phy_write(phydev, 0x16, 0x809c);
- phy_write(phydev, 0x17, 0x0153);
- phy_write(phydev, 0x1f, 0x0000);
+ rtl8125_phy_param(phydev, 0x80a2, 0xffff, 0x0153);
+ rtl8125_phy_param(phydev, 0x809c, 0xffff, 0x0153);
phy_write(phydev, 0x1f, 0x0a43);
phy_write(phydev, 0x13, 0x81B3);
@@ -1070,14 +1066,9 @@ static void rtl8125b_hw_phy_config(struct rtl8169_private *tp,
phy_modify_paged(phydev, 0xac4, 0x13, 0x00f0, 0x0090);
phy_modify_paged(phydev, 0xad3, 0x10, 0x0003, 0x0001);
- phy_write(phydev, 0x1f, 0x0b87);
- phy_write(phydev, 0x16, 0x80f5);
- phy_write(phydev, 0x17, 0x760e);
- phy_write(phydev, 0x16, 0x8107);
- phy_write(phydev, 0x17, 0x360e);
- phy_write(phydev, 0x16, 0x8551);
- phy_modify(phydev, 0x17, 0xff00, 0x0800);
- phy_write(phydev, 0x1f, 0x0000);
+ rtl8125_phy_param(phydev, 0x80f5, 0xffff, 0x760e);
+ rtl8125_phy_param(phydev, 0x8107, 0xffff, 0x360e);
+ rtl8125_phy_param(phydev, 0x8551, 0xff00, 0x0800);
phy_modify_paged(phydev, 0xbf0, 0x10, 0xe000, 0xa000);
phy_modify_paged(phydev, 0xbf4, 0x13, 0x0f00, 0x0300);
@@ -1111,6 +1102,24 @@ static void rtl8125d_hw_phy_config(struct rtl8169_private *tp,
rtl8125_config_eee_phy(phydev);
}
+static void rtl8125bp_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ r8169_apply_firmware(tp);
+ rtl8168g_enable_gphy_10m(phydev);
+
+ r8168g_phy_param(phydev, 0x8010, 0x0800, 0x0000);
+
+ rtl8125_phy_param(phydev, 0x8088, 0xff00, 0x9000);
+ rtl8125_phy_param(phydev, 0x808f, 0xff00, 0x9000);
+
+ r8168g_phy_param(phydev, 0x8174, 0x2000, 0x1800);
+
+ rtl8125_legacy_force_mode(phydev);
+ rtl8168g_disable_aldps(phydev);
+ rtl8125_config_eee_phy(phydev);
+}
+
static void rtl8126a_hw_phy_config(struct rtl8169_private *tp,
struct phy_device *phydev)
{
@@ -1121,6 +1130,171 @@ static void rtl8126a_hw_phy_config(struct rtl8169_private *tp,
rtl8125_common_config_eee_phy(phydev);
}
+static void rtl8127a_1_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ r8169_apply_firmware(tp);
+ rtl8168g_enable_gphy_10m(phydev);
+
+ r8168g_phy_param(phydev, 0x8415, 0xff00, 0x9300);
+ r8168g_phy_param(phydev, 0x81a3, 0xff00, 0x0f00);
+ r8168g_phy_param(phydev, 0x81ae, 0xff00, 0x0f00);
+ r8168g_phy_param(phydev, 0x81b9, 0xff00, 0xb900);
+ rtl8125_phy_param(phydev, 0x83b0, 0x0e00, 0x0000);
+ rtl8125_phy_param(phydev, 0x83C5, 0x0e00, 0x0000);
+ rtl8125_phy_param(phydev, 0x83da, 0x0e00, 0x0000);
+ rtl8125_phy_param(phydev, 0x83ef, 0x0e00, 0x0000);
+ phy_modify_paged(phydev, 0x0bf3, 0x14, 0x01f0, 0x0160);
+ phy_modify_paged(phydev, 0x0bf3, 0x15, 0x001f, 0x0014);
+ phy_modify_paged(phydev, 0x0bf2, 0x14, 0x6000, 0x0000);
+ phy_modify_paged(phydev, 0x0bf2, 0x16, 0xc000, 0x0000);
+ phy_modify_paged(phydev, 0x0bf2, 0x14, 0x1fff, 0x0187);
+ phy_modify_paged(phydev, 0x0bf2, 0x15, 0x003f, 0x0003);
+
+ r8168g_phy_param(phydev, 0x8173, 0xffff, 0x8620);
+ r8168g_phy_param(phydev, 0x8175, 0xffff, 0x8671);
+ r8168g_phy_param(phydev, 0x817c, 0x0000, 0x2000);
+ r8168g_phy_param(phydev, 0x8187, 0x0000, 0x2000);
+ r8168g_phy_param(phydev, 0x8192, 0x0000, 0x2000);
+ r8168g_phy_param(phydev, 0x819d, 0x0000, 0x2000);
+ r8168g_phy_param(phydev, 0x81a8, 0x2000, 0x0000);
+ r8168g_phy_param(phydev, 0x81b3, 0x2000, 0x0000);
+ r8168g_phy_param(phydev, 0x81be, 0x0000, 0x2000);
+ r8168g_phy_param(phydev, 0x817d, 0xff00, 0xa600);
+ r8168g_phy_param(phydev, 0x8188, 0xff00, 0xa600);
+ r8168g_phy_param(phydev, 0x8193, 0xff00, 0xa600);
+ r8168g_phy_param(phydev, 0x819e, 0xff00, 0xa600);
+ r8168g_phy_param(phydev, 0x81a9, 0xff00, 0x1400);
+ r8168g_phy_param(phydev, 0x81b4, 0xff00, 0x1400);
+ r8168g_phy_param(phydev, 0x81bf, 0xff00, 0xa600);
+
+ phy_modify_paged(phydev, 0x0aea, 0x15, 0x0028, 0x0000);
+
+ rtl8125_phy_param(phydev, 0x84f0, 0xffff, 0x201c);
+ rtl8125_phy_param(phydev, 0x84f2, 0xffff, 0x3117);
+
+ phy_write_paged(phydev, 0x0aec, 0x13, 0x0000);
+ phy_write_paged(phydev, 0x0ae2, 0x10, 0xffff);
+ phy_write_paged(phydev, 0x0aec, 0x17, 0xffff);
+ phy_write_paged(phydev, 0x0aed, 0x11, 0xffff);
+ phy_write_paged(phydev, 0x0aec, 0x14, 0x0000);
+ phy_modify_paged(phydev, 0x0aed, 0x10, 0x0001, 0x0000);
+ phy_write_paged(phydev, 0x0adb, 0x14, 0x0150);
+ rtl8125_phy_param(phydev, 0x8197, 0xff00, 0x5000);
+ rtl8125_phy_param(phydev, 0x8231, 0xff00, 0x5000);
+ rtl8125_phy_param(phydev, 0x82cb, 0xff00, 0x5000);
+ rtl8125_phy_param(phydev, 0x82cd, 0xff00, 0x5700);
+ rtl8125_phy_param(phydev, 0x8233, 0xff00, 0x5700);
+ rtl8125_phy_param(phydev, 0x8199, 0xff00, 0x5700);
+
+ rtl8125_phy_param(phydev, 0x815a, 0xffff, 0x0150);
+ rtl8125_phy_param(phydev, 0x81f4, 0xffff, 0x0150);
+ rtl8125_phy_param(phydev, 0x828e, 0xffff, 0x0150);
+ rtl8125_phy_param(phydev, 0x81b1, 0xffff, 0x0000);
+ rtl8125_phy_param(phydev, 0x824b, 0xffff, 0x0000);
+ rtl8125_phy_param(phydev, 0x82e5, 0xffff, 0x0000);
+
+ rtl8125_phy_param(phydev, 0x84f7, 0xff00, 0x2800);
+ phy_modify_paged(phydev, 0x0aec, 0x11, 0x0000, 0x1000);
+ rtl8125_phy_param(phydev, 0x81b3, 0xff00, 0xad00);
+ rtl8125_phy_param(phydev, 0x824d, 0xff00, 0xad00);
+ rtl8125_phy_param(phydev, 0x82e7, 0xff00, 0xad00);
+ phy_modify_paged(phydev, 0x0ae4, 0x17, 0x000f, 0x0001);
+ rtl8125_phy_param(phydev, 0x82ce, 0xf000, 0x4000);
+
+ rtl8125_phy_param(phydev, 0x84ac, 0xffff, 0x0000);
+ rtl8125_phy_param(phydev, 0x84ae, 0xffff, 0x0000);
+ rtl8125_phy_param(phydev, 0x84b0, 0xffff, 0xf818);
+ rtl8125_phy_param(phydev, 0x84b2, 0xff00, 0x6000);
+
+ rtl8125_phy_param(phydev, 0x8ffc, 0xffff, 0x6008);
+ rtl8125_phy_param(phydev, 0x8ffe, 0xffff, 0xf450);
+
+ rtl8125_phy_param(phydev, 0x8015, 0x0000, 0x0200);
+ rtl8125_phy_param(phydev, 0x8016, 0x0800, 0x0000);
+ rtl8125_phy_param(phydev, 0x8fe6, 0xff00, 0x0800);
+ rtl8125_phy_param(phydev, 0x8fe4, 0xffff, 0x2114);
+
+ rtl8125_phy_param(phydev, 0x8647, 0xffff, 0xa7b1);
+ rtl8125_phy_param(phydev, 0x8649, 0xffff, 0xbbca);
+ rtl8125_phy_param(phydev, 0x864b, 0xff00, 0xdc00);
+
+ rtl8125_phy_param(phydev, 0x8154, 0xc000, 0x4000);
+ rtl8125_phy_param(phydev, 0x8158, 0xc000, 0x0000);
+
+ rtl8125_phy_param(phydev, 0x826c, 0xffff, 0xffff);
+ rtl8125_phy_param(phydev, 0x826e, 0xffff, 0xffff);
+
+ rtl8125_phy_param(phydev, 0x8872, 0xff00, 0x0e00);
+ r8168g_phy_param(phydev, 0x8012, 0x0000, 0x0800);
+ r8168g_phy_param(phydev, 0x8012, 0x0000, 0x4000);
+ phy_modify_paged(phydev, 0x0b57, 0x13, 0x0000, 0x0001);
+ r8168g_phy_param(phydev, 0x834a, 0xff00, 0x0700);
+ rtl8125_phy_param(phydev, 0x8217, 0x3f00, 0x2a00);
+ r8168g_phy_param(phydev, 0x81b1, 0xff00, 0x0b00);
+ rtl8125_phy_param(phydev, 0x8fed, 0xff00, 0x4e00);
+
+ rtl8125_phy_param(phydev, 0x88ac, 0xff00, 0x2300);
+ phy_modify_paged(phydev, 0x0bf0, 0x16, 0x0000, 0x3800);
+ rtl8125_phy_param(phydev, 0x88de, 0xff00, 0x0000);
+ rtl8125_phy_param(phydev, 0x80b4, 0xffff, 0x5195);
+
+ r8168g_phy_param(phydev, 0x8370, 0xffff, 0x8671);
+ r8168g_phy_param(phydev, 0x8372, 0xffff, 0x86c8);
+
+ r8168g_phy_param(phydev, 0x8401, 0xffff, 0x86c8);
+ r8168g_phy_param(phydev, 0x8403, 0xffff, 0x86da);
+ r8168g_phy_param(phydev, 0x8406, 0x1800, 0x1000);
+ r8168g_phy_param(phydev, 0x8408, 0x1800, 0x1000);
+ r8168g_phy_param(phydev, 0x840a, 0x1800, 0x1000);
+ r8168g_phy_param(phydev, 0x840c, 0x1800, 0x1000);
+ r8168g_phy_param(phydev, 0x840e, 0x1800, 0x1000);
+ r8168g_phy_param(phydev, 0x8410, 0x1800, 0x1000);
+ r8168g_phy_param(phydev, 0x8412, 0x1800, 0x1000);
+ r8168g_phy_param(phydev, 0x8414, 0x1800, 0x1000);
+ r8168g_phy_param(phydev, 0x8416, 0x1800, 0x1000);
+
+ r8168g_phy_param(phydev, 0x82bd, 0xffff, 0x1f40);
+
+ phy_modify_paged(phydev, 0x0bfb, 0x12, 0x07ff, 0x0328);
+ phy_write_paged(phydev, 0x0bfb, 0x13, 0x3e14);
+
+ r8168g_phy_param(phydev, 0x81c4, 0xffff, 0x003b);
+ r8168g_phy_param(phydev, 0x81c6, 0xffff, 0x0086);
+ r8168g_phy_param(phydev, 0x81c8, 0xffff, 0x00b7);
+ r8168g_phy_param(phydev, 0x81ca, 0xffff, 0x00db);
+ r8168g_phy_param(phydev, 0x81cc, 0xffff, 0x00fe);
+ r8168g_phy_param(phydev, 0x81ce, 0xffff, 0x00fe);
+ r8168g_phy_param(phydev, 0x81d0, 0xffff, 0x00fe);
+ r8168g_phy_param(phydev, 0x81d2, 0xffff, 0x00fe);
+ r8168g_phy_param(phydev, 0x81d4, 0xffff, 0x00c3);
+ r8168g_phy_param(phydev, 0x81d6, 0xffff, 0x0078);
+ r8168g_phy_param(phydev, 0x81d8, 0xffff, 0x0047);
+ r8168g_phy_param(phydev, 0x81da, 0xffff, 0x0023);
+
+ rtl8125_phy_param(phydev, 0x88d7, 0xffff, 0x01a0);
+ rtl8125_phy_param(phydev, 0x88d9, 0xffff, 0x01a0);
+ rtl8125_phy_param(phydev, 0x8ffa, 0xffff, 0x002a);
+
+ rtl8125_phy_param(phydev, 0x8fee, 0xffff, 0xffdf);
+ rtl8125_phy_param(phydev, 0x8ff0, 0xffff, 0xffff);
+ rtl8125_phy_param(phydev, 0x8ff2, 0xffff, 0x0a4a);
+ rtl8125_phy_param(phydev, 0x8ff4, 0xffff, 0xaa5a);
+ rtl8125_phy_param(phydev, 0x8ff6, 0xffff, 0x0a4a);
+
+ rtl8125_phy_param(phydev, 0x8ff8, 0xffff, 0xaa5a);
+ rtl8125_phy_param(phydev, 0x88d5, 0xff00, 0x0200);
+
+ r8168g_phy_param(phydev, 0x84bb, 0xff00, 0x0a00);
+ r8168g_phy_param(phydev, 0x84c0, 0xff00, 0x1600);
+
+ phy_modify_paged(phydev, 0x0a43, 0x10, 0x0000, 0x0003);
+
+ rtl8125_legacy_force_mode(phydev);
+ rtl8168g_disable_aldps(phydev);
+ rtl8125_common_config_eee_phy(phydev);
+}
+
void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev,
enum mac_version ver)
{
@@ -1136,7 +1310,6 @@ void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev,
[RTL_GIGA_MAC_VER_08] = rtl8102e_hw_phy_config,
[RTL_GIGA_MAC_VER_09] = rtl8102e_hw_phy_config,
[RTL_GIGA_MAC_VER_10] = NULL,
- [RTL_GIGA_MAC_VER_11] = rtl8168bb_hw_phy_config,
[RTL_GIGA_MAC_VER_14] = rtl8401_hw_phy_config,
[RTL_GIGA_MAC_VER_17] = rtl8168bef_hw_phy_config,
[RTL_GIGA_MAC_VER_18] = rtl8168cp_1_hw_phy_config,
@@ -1168,12 +1341,12 @@ void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev,
[RTL_GIGA_MAC_VER_48] = rtl8168h_2_hw_phy_config,
[RTL_GIGA_MAC_VER_51] = rtl8168ep_2_hw_phy_config,
[RTL_GIGA_MAC_VER_52] = rtl8117_hw_phy_config,
- [RTL_GIGA_MAC_VER_53] = rtl8117_hw_phy_config,
[RTL_GIGA_MAC_VER_61] = rtl8125a_2_hw_phy_config,
[RTL_GIGA_MAC_VER_63] = rtl8125b_hw_phy_config,
[RTL_GIGA_MAC_VER_64] = rtl8125d_hw_phy_config,
- [RTL_GIGA_MAC_VER_65] = rtl8126a_hw_phy_config,
- [RTL_GIGA_MAC_VER_66] = rtl8126a_hw_phy_config,
+ [RTL_GIGA_MAC_VER_66] = rtl8125bp_hw_phy_config,
+ [RTL_GIGA_MAC_VER_70] = rtl8126a_hw_phy_config,
+ [RTL_GIGA_MAC_VER_80] = rtl8127a_1_hw_phy_config,
};
if (phy_configs[ver])
diff --git a/drivers/net/ethernet/realtek/rtase/rtase.h b/drivers/net/ethernet/realtek/rtase/rtase.h
index dbc3f92eebc4..498cfe4d0cac 100644
--- a/drivers/net/ethernet/realtek/rtase/rtase.h
+++ b/drivers/net/ethernet/realtek/rtase/rtase.h
@@ -13,6 +13,7 @@
#define RTASE_HW_VER_906X_7XA 0x00800000
#define RTASE_HW_VER_906X_7XC 0x04000000
#define RTASE_HW_VER_907XD_V1 0x04800000
+#define RTASE_HW_VER_907XD_VA 0x08000000
#define RTASE_RX_DMA_BURST_256 4
#define RTASE_TX_DMA_BURST_UNLIMITED 7
@@ -169,6 +170,7 @@ enum rtase_registers {
#define RTASE_TC_MODE_MASK GENMASK(11, 10)
RTASE_TOKSEL = 0x2046,
+ RTASE_TXQCRDT_0 = 0x2500,
RTASE_RFIFONFULL = 0x4406,
RTASE_INT_MITI_TX = 0x0A00,
RTASE_INT_MITI_RX = 0x0A80,
@@ -258,6 +260,12 @@ union rtase_rx_desc {
#define RTASE_VLAN_TAG_MASK GENMASK(15, 0)
#define RTASE_RX_PKT_SIZE_MASK GENMASK(13, 0)
+/* txqos hardware definitions */
+#define RTASE_1T_CLOCK 64
+#define RTASE_1T_POWER 10000000
+#define RTASE_IDLESLOPE_INT_SHIFT 25
+#define RTASE_IDLESLOPE_INT_MASK GENMASK(31, 25)
+
#define RTASE_IVEC_NAME_SIZE (IFNAMSIZ + 10)
struct rtase_int_vector {
@@ -293,6 +301,13 @@ struct rtase_ring {
u64 alloc_fail;
};
+struct rtase_txqos {
+ int hicredit;
+ int locredit;
+ int idleslope;
+ int sendslope;
+};
+
struct rtase_stats {
u64 tx_dropped;
u64 rx_dropped;
@@ -312,6 +327,7 @@ struct rtase_private {
struct page_pool *page_pool;
struct rtase_ring tx_ring[RTASE_NUM_TX_QUEUE];
+ struct rtase_txqos tx_qos[RTASE_NUM_TX_QUEUE];
struct rtase_ring rx_ring[RTASE_NUM_RX_QUEUE];
struct rtase_counters *tally_vaddr;
dma_addr_t tally_paddr;
diff --git a/drivers/net/ethernet/realtek/rtase/rtase_main.c b/drivers/net/ethernet/realtek/rtase/rtase_main.c
index de7f11232593..4d37217e9a14 100644
--- a/drivers/net/ethernet/realtek/rtase/rtase_main.c
+++ b/drivers/net/ethernet/realtek/rtase/rtase_main.c
@@ -1114,7 +1114,7 @@ static int rtase_open(struct net_device *dev)
/* request other interrupts to handle multiqueue */
for (i = 1; i < tp->int_nums; i++) {
ivec = &tp->int_vector[i];
- snprintf(ivec->name, sizeof(ivec->name), "%s_int%i",
+ snprintf(ivec->name, sizeof(ivec->name), "%s_int%u",
tp->dev->name, i);
ret = request_irq(ivec->irq, rtase_q_interrupt, 0,
ivec->name, ivec);
@@ -1501,7 +1501,10 @@ static void rtase_wait_for_quiescence(const struct net_device *dev)
static void rtase_sw_reset(struct net_device *dev)
{
struct rtase_private *tp = netdev_priv(dev);
+ struct rtase_ring *ring, *tmp;
+ struct rtase_int_vector *ivec;
int ret;
+ u32 i;
netif_stop_queue(dev);
netif_carrier_off(dev);
@@ -1512,6 +1515,13 @@ static void rtase_sw_reset(struct net_device *dev)
rtase_tx_clear(tp);
rtase_rx_clear(tp);
+ for (i = 0; i < tp->int_nums; i++) {
+ ivec = &tp->int_vector[i];
+ list_for_each_entry_safe(ring, tmp, &ivec->ring_list,
+ ring_entry)
+ list_del(&ring->ring_entry);
+ }
+
ret = rtase_init_ring(dev);
if (ret) {
netdev_err(dev, "unable to init ring\n");
@@ -1651,6 +1661,65 @@ static void rtase_get_stats64(struct net_device *dev,
stats->rx_length_errors = tp->stats.rx_length_errors;
}
+static void rtase_set_hw_cbs(const struct rtase_private *tp, u32 queue)
+{
+ u32 idle = tp->tx_qos[queue].idleslope * RTASE_1T_CLOCK;
+ u32 val, i;
+
+ val = u32_encode_bits(idle / RTASE_1T_POWER, RTASE_IDLESLOPE_INT_MASK);
+ idle %= RTASE_1T_POWER;
+
+ for (i = 1; i <= RTASE_IDLESLOPE_INT_SHIFT; i++) {
+ idle *= 2;
+ if ((idle / RTASE_1T_POWER) == 1)
+ val |= BIT(RTASE_IDLESLOPE_INT_SHIFT - i);
+
+ idle %= RTASE_1T_POWER;
+ }
+
+ rtase_w32(tp, RTASE_TXQCRDT_0 + queue * 4, val);
+}
+
+static int rtase_setup_tc_cbs(struct rtase_private *tp,
+ const struct tc_cbs_qopt_offload *qopt)
+{
+ int queue = qopt->queue;
+
+ if (queue < 0 || queue >= tp->func_tx_queue_num)
+ return -EINVAL;
+
+ if (!qopt->enable) {
+ tp->tx_qos[queue].hicredit = 0;
+ tp->tx_qos[queue].locredit = 0;
+ tp->tx_qos[queue].idleslope = 0;
+ tp->tx_qos[queue].sendslope = 0;
+
+ rtase_w32(tp, RTASE_TXQCRDT_0 + queue * 4, 0);
+ } else {
+ tp->tx_qos[queue].hicredit = qopt->hicredit;
+ tp->tx_qos[queue].locredit = qopt->locredit;
+ tp->tx_qos[queue].idleslope = qopt->idleslope;
+ tp->tx_qos[queue].sendslope = qopt->sendslope;
+
+ rtase_set_hw_cbs(tp, queue);
+ }
+
+ return 0;
+}
+
+static int rtase_setup_tc(struct net_device *dev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct rtase_private *tp = netdev_priv(dev);
+
+ switch (type) {
+ case TC_SETUP_QDISC_CBS:
+ return rtase_setup_tc_cbs(tp, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
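rtase_set_hw_cbs() converts the credit-based shaper idleslope into the hardware credit format: the quotient idleslope * RTASE_1T_CLOCK / RTASE_1T_POWER is stored as a 7.25 fixed-point number, integer part in bits 31:25 and the fraction built bit by bit in the loop. A minimal sketch of an equivalent encoding using one 64-bit division, assuming the idleslope unit handed in by the cbs offload and ignoring overflow of the 7-bit integer part:

#include <linux/math64.h>

/* Same value rtase_set_hw_cbs() computes, expressed as one division;
 * RTASE_1T_CLOCK and RTASE_1T_POWER come from rtase.h above.
 */
static u32 example_cbs_credit(u32 idleslope)
{
	u64 idle = (u64)idleslope * RTASE_1T_CLOCK;

	/* integer part lands in bits 31:25, the 25 low bits are the fraction */
	return (u32)div64_u64(idle << 25, RTASE_1T_POWER);
}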
static netdev_features_t rtase_fix_features(struct net_device *dev,
netdev_features_t features)
{
@@ -1686,6 +1755,7 @@ static const struct net_device_ops rtase_netdev_ops = {
.ndo_change_mtu = rtase_change_mtu,
.ndo_tx_timeout = rtase_tx_timeout,
.ndo_get_stats64 = rtase_get_stats64,
+ .ndo_setup_tc = rtase_setup_tc,
.ndo_fix_features = rtase_fix_features,
.ndo_set_features = rtase_set_features,
};
@@ -1725,6 +1795,7 @@ static int rtase_get_settings(struct net_device *dev,
cmd->base.speed = SPEED_5000;
break;
case RTASE_HW_VER_907XD_V1:
+ case RTASE_HW_VER_907XD_VA:
cmd->base.speed = SPEED_10000;
break;
}
@@ -1827,7 +1898,7 @@ static int rtase_alloc_msix(struct pci_dev *pdev, struct rtase_private *tp)
for (i = 0; i < tp->int_nums; i++) {
irq = pci_irq_vector(pdev, i);
- if (!irq) {
+ if (irq < 0) {
pci_disable_msix(pdev);
return irq;
}
@@ -1912,10 +1983,10 @@ static u16 rtase_calc_time_mitigation(u32 time_us)
u8 msb, time_count, time_unit;
u16 int_miti;
- time_us = min_t(int, time_us, RTASE_MITI_MAX_TIME);
+ time_us = min(time_us, RTASE_MITI_MAX_TIME);
- msb = fls(time_us);
- if (msb >= RTASE_MITI_COUNT_BIT_NUM) {
+ if (time_us > RTASE_MITI_TIME_COUNT_MASK) {
+ msb = fls(time_us);
time_unit = msb - RTASE_MITI_COUNT_BIT_NUM;
time_count = time_us >> (msb - RTASE_MITI_COUNT_BIT_NUM);
} else {
@@ -1934,7 +2005,7 @@ static u16 rtase_calc_packet_num_mitigation(u16 pkt_num)
u8 msb, pkt_num_count, pkt_num_unit;
u16 int_miti;
- pkt_num = min_t(int, pkt_num, RTASE_MITI_MAX_PKT_NUM);
+ pkt_num = min(pkt_num, RTASE_MITI_MAX_PKT_NUM);
if (pkt_num > 60) {
pkt_num_unit = RTASE_MITI_MAX_PKT_NUM_IDX;
@@ -1993,6 +2064,7 @@ static int rtase_check_mac_version_valid(struct rtase_private *tp)
case RTASE_HW_VER_906X_7XA:
case RTASE_HW_VER_906X_7XC:
case RTASE_HW_VER_907XD_V1:
+ case RTASE_HW_VER_907XD_VA:
ret = 0;
break;
}
@@ -2016,7 +2088,7 @@ static int rtase_init_board(struct pci_dev *pdev, struct net_device **dev_out,
SET_NETDEV_DEV(dev, &pdev->dev);
ret = pci_enable_device(pdev);
- if (ret < 0)
+ if (ret)
goto err_out_free_dev;
/* make sure PCI base addr 1 is MMIO */
@@ -2032,7 +2104,7 @@ static int rtase_init_board(struct pci_dev *pdev, struct net_device **dev_out,
}
ret = pci_request_regions(pdev, KBUILD_MODNAME);
- if (ret < 0)
+ if (ret)
goto err_out_disable;
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
@@ -2108,7 +2180,7 @@ static int rtase_init_one(struct pci_dev *pdev,
dev_dbg(&pdev->dev, "Automotive Switch Ethernet driver loaded\n");
ret = rtase_init_board(pdev, &dev, &ioaddr);
- if (ret != 0)
+ if (ret)
return ret;
tp = netdev_priv(dev);
@@ -2118,7 +2190,7 @@ static int rtase_init_one(struct pci_dev *pdev,
/* identify chip attached to board */
ret = rtase_check_mac_version_valid(tp);
- if (ret != 0) {
+ if (ret) {
dev_err(&pdev->dev,
"unknown chip version: 0x%08x, contact rtase maintainers (see MAINTAINERS file)\n",
tp->hw_ver);
@@ -2129,7 +2201,7 @@ static int rtase_init_one(struct pci_dev *pdev,
rtase_init_hardware(tp);
ret = rtase_alloc_interrupt(pdev, tp);
- if (ret < 0) {
+ if (ret) {
dev_err(&pdev->dev, "unable to alloc MSIX/MSI\n");
goto err_out_del_napi;
}
@@ -2174,7 +2246,7 @@ static int rtase_init_one(struct pci_dev *pdev,
netif_carrier_off(dev);
ret = register_netdev(dev);
- if (ret != 0)
+ if (ret)
goto err_out_free_dma;
netdev_dbg(dev, "%pM, IRQ %d\n", dev->dev_addr, dev->irq);
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index ac0f093f647a..c9f4976a3527 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -2763,6 +2763,7 @@ static const struct ravb_hw_info ravb_rzv2m_hw_info = {
.net_features = NETIF_F_RXCSUM,
.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
+ .tx_max_frame_size = SZ_2K,
.rx_max_frame_size = SZ_2K,
.rx_buffer_size = SZ_2K +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
@@ -3216,10 +3217,15 @@ static int ravb_suspend(struct device *dev)
netif_device_detach(ndev);
- if (priv->wol_enabled)
- return ravb_wol_setup(ndev);
+ rtnl_lock();
+ if (priv->wol_enabled) {
+ ret = ravb_wol_setup(ndev);
+ rtnl_unlock();
+ return ret;
+ }
ret = ravb_close(ndev);
+ rtnl_unlock();
if (ret)
return ret;
@@ -3244,19 +3250,20 @@ static int ravb_resume(struct device *dev)
if (!netif_running(ndev))
return 0;
+ rtnl_lock();
/* If WoL is enabled restore the interface. */
- if (priv->wol_enabled) {
+ if (priv->wol_enabled)
ret = ravb_wol_restore(ndev);
- if (ret)
- return ret;
- } else {
+ else
ret = pm_runtime_force_resume(dev);
- if (ret)
- return ret;
+ if (ret) {
+ rtnl_unlock();
+ return ret;
}
/* Reopening the interface will restore the device to the working state. */
ret = ravb_open(ndev);
+ rtnl_unlock();
if (ret < 0)
goto out_rpm_put;
diff --git a/drivers/net/ethernet/renesas/ravb_ptp.c b/drivers/net/ethernet/renesas/ravb_ptp.c
index 6e4ef7af27bf..226c6c0ab945 100644
--- a/drivers/net/ethernet/renesas/ravb_ptp.c
+++ b/drivers/net/ethernet/renesas/ravb_ptp.c
@@ -176,13 +176,6 @@ static int ravb_ptp_extts(struct ptp_clock_info *ptp,
struct net_device *ndev = priv->ndev;
unsigned long flags;
- /* Reject requests with unsupported flags */
- if (req->flags & ~(PTP_ENABLE_FEATURE |
- PTP_RISING_EDGE |
- PTP_FALLING_EDGE |
- PTP_STRICT_FLAGS))
- return -EOPNOTSUPP;
-
if (req->index)
return -EINVAL;
@@ -213,10 +206,6 @@ static int ravb_ptp_perout(struct ptp_clock_info *ptp,
unsigned long flags;
int error = 0;
- /* Reject requests with unsupported flags */
- if (req->flags)
- return -EOPNOTSUPP;
-
if (req->index)
return -EINVAL;
@@ -288,6 +277,7 @@ static const struct ptp_clock_info ravb_ptp_info = {
.max_adj = 50000000,
.n_ext_ts = N_EXT_TS,
.n_per_out = N_PER_OUT,
+ .supported_extts_flags = PTP_RISING_EDGE | PTP_FALLING_EDGE,
.adjfine = ravb_ptp_adjfine,
.adjtime = ravb_ptp_adjtime,
.gettime64 = ravb_ptp_gettime64,
diff --git a/drivers/net/ethernet/renesas/rcar_gen4_ptp.c b/drivers/net/ethernet/renesas/rcar_gen4_ptp.c
index 72e7fcc56693..4c3e8cc5046f 100644
--- a/drivers/net/ethernet/renesas/rcar_gen4_ptp.c
+++ b/drivers/net/ethernet/renesas/rcar_gen4_ptp.c
@@ -29,8 +29,8 @@ static const struct rcar_gen4_ptp_reg_offset gen4_offs = {
static int rcar_gen4_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct rcar_gen4_ptp_private *ptp_priv = ptp_to_priv(ptp);
- bool neg_adj = scaled_ppm < 0 ? true : false;
s64 addend = ptp_priv->default_addend;
+ bool neg_adj = scaled_ppm < 0;
s64 diff;
if (neg_adj)
diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c
index 8d18dae4d8fb..aba772e14555 100644
--- a/drivers/net/ethernet/renesas/rswitch.c
+++ b/drivers/net/ethernet/renesas/rswitch.c
@@ -111,25 +111,35 @@ static void rswitch_top_init(struct rswitch_private *priv)
/* Forwarding engine block (MFWD) */
static void rswitch_fwd_init(struct rswitch_private *priv)
{
+ u32 all_ports_mask = GENMASK(RSWITCH_NUM_AGENTS - 1, 0);
unsigned int i;
- /* For ETHA */
- for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
- iowrite32(FWPC0_DEFAULT, priv->addr + FWPC0(i));
+ /* Start with empty configuration */
+ for (i = 0; i < RSWITCH_NUM_AGENTS; i++) {
+ /* Disable all port features */
+ iowrite32(0, priv->addr + FWPC0(i));
+ /* Disallow L3 forwarding and direct descriptor forwarding */
+ iowrite32(FIELD_PREP(FWCP1_LTHFW, all_ports_mask),
+ priv->addr + FWPC1(i));
+ /* Disallow L2 forwarding */
+ iowrite32(FIELD_PREP(FWCP2_LTWFW, all_ports_mask),
+ priv->addr + FWPC2(i));
+ /* Disallow port based forwarding */
iowrite32(0, priv->addr + FWPBFC(i));
}
- for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
+ /* For enabled ETHA ports, set up port-based forwarding */

+ rswitch_for_each_enabled_port(priv, i) {
+ /* Port based forwarding from port i to GWCA port */
+ rswitch_modify(priv->addr, FWPBFC(i), FWPBFC_PBDV,
+ FIELD_PREP(FWPBFC_PBDV, BIT(priv->gwca.index)));
+ /* Within GWCA port, forward to Rx queue for port i */
iowrite32(priv->rdev[i]->rx_queue->index,
priv->addr + FWPBFCSDC(GWCA_INDEX, i));
- iowrite32(BIT(priv->gwca.index), priv->addr + FWPBFC(i));
}
- /* For GWCA */
- iowrite32(FWPC0_DEFAULT, priv->addr + FWPC0(priv->gwca.index));
- iowrite32(FWPC1_DDE, priv->addr + FWPC1(priv->gwca.index));
- iowrite32(0, priv->addr + FWPBFC(priv->gwca.index));
- iowrite32(GENMASK(RSWITCH_NUM_PORTS - 1, 0), priv->addr + FWPBFC(priv->gwca.index));
+ /* For GWCA port, allow direct descriptor forwarding */
+ rswitch_modify(priv->addr, FWPC1(priv->gwca.index), FWPC1_DDE, FWPC1_DDE);
}
/* Gateway CPU agent block (GWCA) */
@@ -547,7 +557,6 @@ static int rswitch_gwca_ts_queue_alloc(struct rswitch_private *priv)
desc = &gq->ts_ring[gq->ring_size];
desc->desc.die_dt = DT_LINKFIX;
rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
- INIT_LIST_HEAD(&priv->gwca.ts_info_list);
return 0;
}
@@ -862,13 +871,10 @@ static void rswitch_tx_free(struct net_device *ndev)
struct rswitch_ext_desc *desc;
struct sk_buff *skb;
- for (; rswitch_get_num_cur_queues(gq) > 0;
- gq->dirty = rswitch_next_queue_index(gq, false, 1)) {
- desc = &gq->tx_ring[gq->dirty];
- if ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY)
- break;
-
+ desc = &gq->tx_ring[gq->dirty];
+ while ((desc->desc.die_dt & DT_MASK) == DT_FEMPTY) {
dma_rmb();
+
skb = gq->skbs[gq->dirty];
if (skb) {
rdev->ndev->stats.tx_packets++;
@@ -879,7 +885,10 @@ static void rswitch_tx_free(struct net_device *ndev)
dev_kfree_skb_any(gq->skbs[gq->dirty]);
gq->skbs[gq->dirty] = NULL;
}
+
desc->desc.die_dt = DT_EEMPTY;
+ gq->dirty = rswitch_next_queue_index(gq, false, 1);
+ desc = &gq->tx_ring[gq->dirty];
}
}
@@ -908,8 +917,10 @@ retry:
if (napi_complete_done(napi, budget - quota)) {
spin_lock_irqsave(&priv->lock, flags);
- rswitch_enadis_data_irq(priv, rdev->tx_queue->index, true);
- rswitch_enadis_data_irq(priv, rdev->rx_queue->index, true);
+ if (test_bit(rdev->port, priv->opened_ports)) {
+ rswitch_enadis_data_irq(priv, rdev->tx_queue->index, true);
+ rswitch_enadis_data_irq(priv, rdev->rx_queue->index, true);
+ }
spin_unlock_irqrestore(&priv->lock, flags);
}
@@ -1001,9 +1012,10 @@ static int rswitch_gwca_request_irqs(struct rswitch_private *priv)
static void rswitch_ts(struct rswitch_private *priv)
{
struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
- struct rswitch_gwca_ts_info *ts_info, *ts_info2;
struct skb_shared_hwtstamps shhwtstamps;
struct rswitch_ts_desc *desc;
+ struct rswitch_device *rdev;
+ struct sk_buff *ts_skb;
struct timespec64 ts;
unsigned int num;
u32 tag, port;
@@ -1013,23 +1025,28 @@ static void rswitch_ts(struct rswitch_private *priv)
dma_rmb();
port = TS_DESC_DPN(__le32_to_cpu(desc->desc.dptrl));
- tag = TS_DESC_TSUN(__le32_to_cpu(desc->desc.dptrl));
-
- list_for_each_entry_safe(ts_info, ts_info2, &priv->gwca.ts_info_list, list) {
- if (!(ts_info->port == port && ts_info->tag == tag))
- continue;
-
- memset(&shhwtstamps, 0, sizeof(shhwtstamps));
- ts.tv_sec = __le32_to_cpu(desc->ts_sec);
- ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff));
- shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
- skb_tstamp_tx(ts_info->skb, &shhwtstamps);
- dev_consume_skb_irq(ts_info->skb);
- list_del(&ts_info->list);
- kfree(ts_info);
- break;
- }
+ if (unlikely(port >= RSWITCH_NUM_PORTS))
+ goto next;
+ rdev = priv->rdev[port];
+ tag = TS_DESC_TSUN(__le32_to_cpu(desc->desc.dptrl));
+ if (unlikely(tag >= TS_TAGS_PER_PORT))
+ goto next;
+ ts_skb = xchg(&rdev->ts_skb[tag], NULL);
+ smp_mb(); /* order rdev->ts_skb[] read before bitmap update */
+ clear_bit(tag, rdev->ts_skb_used);
+
+ if (unlikely(!ts_skb))
+ goto next;
+
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ ts.tv_sec = __le32_to_cpu(desc->ts_sec);
+ ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff));
+ shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
+ skb_tstamp_tx(ts_skb, &shhwtstamps);
+ dev_consume_skb_irq(ts_skb);
+
+next:
gq->cur = rswitch_next_queue_index(gq, true, 1);
desc = &gq->ts_ring[gq->cur];
}
@@ -1114,32 +1131,47 @@ static int rswitch_etha_wait_link_verification(struct rswitch_etha *etha)
static void rswitch_rmac_setting(struct rswitch_etha *etha, const u8 *mac)
{
- u32 val;
+ u32 pis, lsc;
rswitch_etha_write_mac_address(etha, mac);
+ switch (etha->phy_interface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ pis = MPIC_PIS_GMII;
+ break;
+ case PHY_INTERFACE_MODE_USXGMII:
+ case PHY_INTERFACE_MODE_5GBASER:
+ pis = MPIC_PIS_XGMII;
+ break;
+ default:
+ pis = FIELD_GET(MPIC_PIS, ioread32(etha->addr + MPIC));
+ break;
+ }
+
switch (etha->speed) {
case 100:
- val = MPIC_LSC_100M;
+ lsc = MPIC_LSC_100M;
break;
case 1000:
- val = MPIC_LSC_1G;
+ lsc = MPIC_LSC_1G;
break;
case 2500:
- val = MPIC_LSC_2_5G;
+ lsc = MPIC_LSC_2_5G;
break;
default:
- return;
+ lsc = FIELD_GET(MPIC_LSC, ioread32(etha->addr + MPIC));
+ break;
}
- iowrite32(MPIC_PIS_GMII | val, etha->addr + MPIC);
+ rswitch_modify(etha->addr, MPIC, MPIC_PIS | MPIC_LSC,
+ FIELD_PREP(MPIC_PIS, pis) | FIELD_PREP(MPIC_LSC, lsc));
}
static void rswitch_etha_enable_mii(struct rswitch_etha *etha)
{
- rswitch_modify(etha->addr, MPIC, MPIC_PSMCS_MASK | MPIC_PSMHT_MASK,
- MPIC_PSMCS(etha->psmcs) | MPIC_PSMHT(0x06));
- rswitch_modify(etha->addr, MPSM, 0, MPSM_MFF_C45);
+ rswitch_modify(etha->addr, MPIC, MPIC_PSMCS | MPIC_PSMHT,
+ FIELD_PREP(MPIC_PSMCS, etha->psmcs) |
+ FIELD_PREP(MPIC_PSMHT, 0x06));
}
static int rswitch_etha_hw_init(struct rswitch_etha *etha, const u8 *mac)
@@ -1168,42 +1200,29 @@ static int rswitch_etha_hw_init(struct rswitch_etha *etha, const u8 *mac)
return rswitch_etha_change_mode(etha, EAMC_OPC_OPERATION);
}
-static int rswitch_etha_set_access(struct rswitch_etha *etha, bool read,
- int phyad, int devad, int regad, int data)
+static int rswitch_etha_mpsm_op(struct rswitch_etha *etha, bool read,
+ unsigned int mmf, unsigned int pda,
+ unsigned int pra, unsigned int pop,
+ unsigned int prd)
{
- int pop = read ? MDIO_READ_C45 : MDIO_WRITE_C45;
u32 val;
int ret;
- if (devad == 0xffffffff)
- return -ENODEV;
-
- writel(MMIS1_CLEAR_FLAGS, etha->addr + MMIS1);
-
- val = MPSM_PSME | MPSM_MFF_C45;
- iowrite32((regad << 16) | (devad << 8) | (phyad << 3) | val, etha->addr + MPSM);
+ val = MPSM_PSME |
+ FIELD_PREP(MPSM_MFF, mmf) |
+ FIELD_PREP(MPSM_PDA, pda) |
+ FIELD_PREP(MPSM_PRA, pra) |
+ FIELD_PREP(MPSM_POP, pop) |
+ FIELD_PREP(MPSM_PRD, prd);
+ iowrite32(val, etha->addr + MPSM);
- ret = rswitch_reg_wait(etha->addr, MMIS1, MMIS1_PAACS, MMIS1_PAACS);
+ ret = rswitch_reg_wait(etha->addr, MPSM, MPSM_PSME, 0);
if (ret)
return ret;
- rswitch_modify(etha->addr, MMIS1, MMIS1_PAACS, MMIS1_PAACS);
-
if (read) {
- writel((pop << 13) | (devad << 8) | (phyad << 3) | val, etha->addr + MPSM);
-
- ret = rswitch_reg_wait(etha->addr, MMIS1, MMIS1_PRACS, MMIS1_PRACS);
- if (ret)
- return ret;
-
- ret = (ioread32(etha->addr + MPSM) & MPSM_PRD_MASK) >> 16;
-
- rswitch_modify(etha->addr, MMIS1, MMIS1_PRACS, MMIS1_PRACS);
- } else {
- iowrite32((data << 16) | (pop << 13) | (devad << 8) | (phyad << 3) | val,
- etha->addr + MPSM);
-
- ret = rswitch_reg_wait(etha->addr, MMIS1, MMIS1_PWACS, MMIS1_PWACS);
+ val = ioread32(etha->addr + MPSM);
+ ret = FIELD_GET(MPSM_PRD, val);
}
return ret;
@@ -1213,16 +1232,47 @@ static int rswitch_etha_mii_read_c45(struct mii_bus *bus, int addr, int devad,
int regad)
{
struct rswitch_etha *etha = bus->priv;
+ int ret;
- return rswitch_etha_set_access(etha, true, addr, devad, regad, 0);
+ ret = rswitch_etha_mpsm_op(etha, false, MPSM_MMF_C45, addr, devad,
+ MPSM_POP_ADDRESS, regad);
+ if (ret)
+ return ret;
+
+ return rswitch_etha_mpsm_op(etha, true, MPSM_MMF_C45, addr, devad,
+ MPSM_POP_READ_C45, 0);
}
static int rswitch_etha_mii_write_c45(struct mii_bus *bus, int addr, int devad,
int regad, u16 val)
{
struct rswitch_etha *etha = bus->priv;
+ int ret;
+
+ ret = rswitch_etha_mpsm_op(etha, false, MPSM_MMF_C45, addr, devad,
+ MPSM_POP_ADDRESS, regad);
+ if (ret)
+ return ret;
+
+ return rswitch_etha_mpsm_op(etha, false, MPSM_MMF_C45, addr, devad,
+ MPSM_POP_WRITE, val);
+}
+
+static int rswitch_etha_mii_read_c22(struct mii_bus *bus, int phyad, int regad)
+{
+ struct rswitch_etha *etha = bus->priv;
+
+ return rswitch_etha_mpsm_op(etha, true, MPSM_MMF_C22, phyad, regad,
+ MPSM_POP_READ_C22, 0);
+}
+
+static int rswitch_etha_mii_write_c22(struct mii_bus *bus, int phyad,
+ int regad, u16 val)
+{
+ struct rswitch_etha *etha = bus->priv;
- return rswitch_etha_set_access(etha, false, addr, devad, regad, val);
+ return rswitch_etha_mpsm_op(etha, false, MPSM_MMF_C22, phyad, regad,
+ MPSM_POP_WRITE, val);
}
/* Call of_node_put(port) after done */
@@ -1237,17 +1287,14 @@ static struct device_node *rswitch_get_port_node(struct rswitch_device *rdev)
if (!ports)
return NULL;
- for_each_child_of_node(ports, port) {
+ for_each_available_child_of_node(ports, port) {
err = of_property_read_u32(port, "reg", &index);
if (err < 0) {
port = NULL;
goto out;
}
- if (index == rdev->etha->index) {
- if (!of_device_is_available(port))
- port = NULL;
+ if (index == rdev->etha->index)
break;
- }
}
out:
@@ -1307,6 +1354,8 @@ static int rswitch_mii_register(struct rswitch_device *rdev)
mii_bus->priv = rdev->etha;
mii_bus->read_c45 = rswitch_etha_mii_read_c45;
mii_bus->write_c45 = rswitch_etha_mii_write_c45;
+ mii_bus->read = rswitch_etha_mii_read_c22;
+ mii_bus->write = rswitch_etha_mii_write_c22;
mii_bus->parent = &rdev->priv->pdev->dev;
mdio_np = of_get_child_by_name(rdev->np_port, "mdio");
@@ -1527,7 +1576,7 @@ static void rswitch_ether_port_deinit_all(struct rswitch_private *priv)
{
unsigned int i;
- for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
+ rswitch_for_each_enabled_port(priv, i) {
phy_exit(priv->rdev[i]->serdes);
rswitch_ether_port_deinit_one(priv->rdev[i]);
}
@@ -1538,20 +1587,20 @@ static int rswitch_open(struct net_device *ndev)
struct rswitch_device *rdev = netdev_priv(ndev);
unsigned long flags;
- phy_start(ndev->phydev);
+ if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS))
+ iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDIE);
napi_enable(&rdev->napi);
- netif_start_queue(ndev);
spin_lock_irqsave(&rdev->priv->lock, flags);
+ bitmap_set(rdev->priv->opened_ports, rdev->port, 1);
rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, true);
rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, true);
spin_unlock_irqrestore(&rdev->priv->lock, flags);
- if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS))
- iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDIE);
+ phy_start(ndev->phydev);
- bitmap_set(rdev->priv->opened_ports, rdev->port, 1);
+ netif_start_queue(ndev);
return 0;
};
@@ -1559,31 +1608,34 @@ static int rswitch_open(struct net_device *ndev)
static int rswitch_stop(struct net_device *ndev)
{
struct rswitch_device *rdev = netdev_priv(ndev);
- struct rswitch_gwca_ts_info *ts_info, *ts_info2;
+ struct sk_buff *ts_skb;
unsigned long flags;
+ unsigned int tag;
netif_tx_stop_all_queues(ndev);
- bitmap_clear(rdev->priv->opened_ports, rdev->port, 1);
-
- if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS))
- iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDID);
- list_for_each_entry_safe(ts_info, ts_info2, &rdev->priv->gwca.ts_info_list, list) {
- if (ts_info->port != rdev->port)
- continue;
- dev_kfree_skb_irq(ts_info->skb);
- list_del(&ts_info->list);
- kfree(ts_info);
- }
+ phy_stop(ndev->phydev);
spin_lock_irqsave(&rdev->priv->lock, flags);
rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false);
rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false);
+ bitmap_clear(rdev->priv->opened_ports, rdev->port, 1);
spin_unlock_irqrestore(&rdev->priv->lock, flags);
- phy_stop(ndev->phydev);
napi_disable(&rdev->napi);
+ if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS))
+ iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDID);
+
+ for (tag = find_first_bit(rdev->ts_skb_used, TS_TAGS_PER_PORT);
+ tag < TS_TAGS_PER_PORT;
+ tag = find_next_bit(rdev->ts_skb_used, TS_TAGS_PER_PORT, tag + 1)) {
+ ts_skb = xchg(&rdev->ts_skb[tag], NULL);
+ clear_bit(tag, rdev->ts_skb_used);
+ if (ts_skb)
+ dev_kfree_skb(ts_skb);
+ }
+
return 0;
};
@@ -1594,20 +1646,17 @@ static bool rswitch_ext_desc_set_info1(struct rswitch_device *rdev,
desc->info1 = cpu_to_le64(INFO1_DV(BIT(rdev->etha->index)) |
INFO1_IPV(GWCA_IPV_NUM) | INFO1_FMT);
if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
- struct rswitch_gwca_ts_info *ts_info;
+ unsigned int tag;
- ts_info = kzalloc(sizeof(*ts_info), GFP_ATOMIC);
- if (!ts_info)
+ tag = find_first_zero_bit(rdev->ts_skb_used, TS_TAGS_PER_PORT);
+ if (tag == TS_TAGS_PER_PORT)
return false;
+ smp_mb(); /* order bitmap read before rdev->ts_skb[] write */
+ rdev->ts_skb[tag] = skb_get(skb);
+ set_bit(tag, rdev->ts_skb_used);
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- rdev->ts_tag++;
- desc->info1 |= cpu_to_le64(INFO1_TSUN(rdev->ts_tag) | INFO1_TXC);
-
- ts_info->skb = skb_get(skb);
- ts_info->port = rdev->port;
- ts_info->tag = rdev->ts_tag;
- list_add_tail(&ts_info->list, &rdev->priv->gwca.ts_info_list);
+ desc->info1 |= cpu_to_le64(INFO1_TSUN(tag) | INFO1_TXC);
skb_tx_timestamp(skb);
}
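Together with the rswitch_ts() changes earlier in this patch, the per-GWCA list of rswitch_gwca_ts_info entries is replaced by a fixed per-port array of TS_TAGS_PER_PORT skb slots plus a used-tag bitmap: the transmit path claims a free tag and the timestamp interrupt releases it, with smp_mb() ordering the slot access against the bitmap update. A condensed sketch of the pairing (not literal driver code; types and fields come from rswitch.h):

/* TX side: claim a tag and park the skb until its timestamp arrives */
static int example_ts_tag_get(struct rswitch_device *rdev, struct sk_buff *skb)
{
	unsigned int tag;

	tag = find_first_zero_bit(rdev->ts_skb_used, TS_TAGS_PER_PORT);
	if (tag == TS_TAGS_PER_PORT)
		return -EBUSY;

	smp_mb();	/* order bitmap read before rdev->ts_skb[] write */
	rdev->ts_skb[tag] = skb_get(skb);
	set_bit(tag, rdev->ts_skb_used);

	return tag;
}

/* Completion side: fetch the parked skb and free the tag again */
static struct sk_buff *example_ts_tag_put(struct rswitch_device *rdev,
					  unsigned int tag)
{
	struct sk_buff *skb = xchg(&rdev->ts_skb[tag], NULL);

	smp_mb();	/* order rdev->ts_skb[] read before bitmap update */
	clear_bit(tag, rdev->ts_skb_used);

	return skb;
}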
@@ -1681,8 +1730,11 @@ static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *nd
if (dma_mapping_error(ndev->dev.parent, dma_addr_orig))
goto err_kfree;
- gq->skbs[gq->cur] = skb;
- gq->unmap_addrs[gq->cur] = dma_addr_orig;
+ /* Store the skb at the last descriptor to avoid freeing it before the hardware completes the send */
+ gq->skbs[(gq->cur + nr_desc - 1) % gq->ring_size] = skb;
+ gq->unmap_addrs[(gq->cur + nr_desc - 1) % gq->ring_size] = dma_addr_orig;
+
+ dma_wmb();
/* DT_FSTART should be set at last. So, this is reverse order. */
for (i = nr_desc; i-- > 0; ) {
@@ -1694,14 +1746,13 @@ static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *nd
goto err_unmap;
}
- wmb(); /* gq->cur must be incremented after die_dt was set */
-
gq->cur = rswitch_next_queue_index(gq, true, nr_desc);
rswitch_modify(rdev->addr, GWTRC(gq->index), 0, BIT(gq->index % 32));
return ret;
err_unmap:
+ gq->skbs[(gq->cur + nr_desc - 1) % gq->ring_size] = NULL;
dma_unmap_single(ndev->dev.parent, dma_addr_orig, skb->len, DMA_TO_DEVICE);
err_kfree:
@@ -1889,7 +1940,6 @@ static int rswitch_device_alloc(struct rswitch_private *priv, unsigned int index
rdev->np_port = rswitch_get_port_node(rdev);
rdev->disabled = !rdev->np_port;
err = of_get_ethdev_address(rdev->np_port, ndev);
- of_node_put(rdev->np_port);
if (err) {
if (is_valid_ether_addr(rdev->etha->mac_addr))
eth_hw_addr_set(ndev, rdev->etha->mac_addr);
@@ -1901,9 +1951,6 @@ static int rswitch_device_alloc(struct rswitch_private *priv, unsigned int index
if (err < 0)
goto out_get_params;
- if (rdev->priv->gwca.speed < rdev->etha->speed)
- rdev->priv->gwca.speed = rdev->etha->speed;
-
err = rswitch_rxdmac_alloc(ndev);
if (err < 0)
goto out_rxdmac;
@@ -1919,6 +1966,7 @@ out_txdmac:
out_rxdmac:
out_get_params:
+ of_node_put(rdev->np_port);
netif_napi_del(&rdev->napi);
free_netdev(ndev);
@@ -1932,6 +1980,7 @@ static void rswitch_device_free(struct rswitch_private *priv, unsigned int index
rswitch_txdmac_free(ndev);
rswitch_rxdmac_free(ndev);
+ of_node_put(rdev->np_port);
netif_napi_del(&rdev->napi);
free_netdev(ndev);
}
diff --git a/drivers/net/ethernet/renesas/rswitch.h b/drivers/net/ethernet/renesas/rswitch.h
index 72e3ff596d31..532192cbca4b 100644
--- a/drivers/net/ethernet/renesas/rswitch.h
+++ b/drivers/net/ethernet/renesas/rswitch.h
@@ -12,6 +12,7 @@
#define RSWITCH_MAX_NUM_QUEUES 128
+#define RSWITCH_NUM_AGENTS 5
#define RSWITCH_NUM_PORTS 3
#define rswitch_for_each_enabled_port(priv, i) \
for (i = 0; i < RSWITCH_NUM_PORTS; i++) \
@@ -724,35 +725,28 @@ enum rswitch_etha_mode {
#define EAVCC_VEM_SC_TAG (0x3 << 16)
-#define MPIC_PIS_MII 0x00
-#define MPIC_PIS_GMII 0x02
-#define MPIC_PIS_XGMII 0x04
-#define MPIC_LSC_SHIFT 3
-#define MPIC_LSC_100M (1 << MPIC_LSC_SHIFT)
-#define MPIC_LSC_1G (2 << MPIC_LSC_SHIFT)
-#define MPIC_LSC_2_5G (3 << MPIC_LSC_SHIFT)
-
-#define MDIO_READ_C45 0x03
-#define MDIO_WRITE_C45 0x01
+#define MPIC_PIS GENMASK(2, 0)
+#define MPIC_PIS_GMII 2
+#define MPIC_PIS_XGMII 4
+#define MPIC_LSC GENMASK(5, 3)
+#define MPIC_LSC_100M 1
+#define MPIC_LSC_1G 2
+#define MPIC_LSC_2_5G 3
+#define MPIC_PSMCS GENMASK(22, 16)
+#define MPIC_PSMHT GENMASK(26, 24)
#define MPSM_PSME BIT(0)
-#define MPSM_MFF_C45 BIT(2)
-#define MPSM_PRD_SHIFT 16
-#define MPSM_PRD_MASK GENMASK(31, MPSM_PRD_SHIFT)
-
-/* Completion flags */
-#define MMIS1_PAACS BIT(2) /* Address */
-#define MMIS1_PWACS BIT(1) /* Write */
-#define MMIS1_PRACS BIT(0) /* Read */
-#define MMIS1_CLEAR_FLAGS 0xf
-
-#define MPIC_PSMCS_SHIFT 16
-#define MPIC_PSMCS_MASK GENMASK(22, MPIC_PSMCS_SHIFT)
-#define MPIC_PSMCS(val) ((val) << MPIC_PSMCS_SHIFT)
-
-#define MPIC_PSMHT_SHIFT 24
-#define MPIC_PSMHT_MASK GENMASK(26, MPIC_PSMHT_SHIFT)
-#define MPIC_PSMHT(val) ((val) << MPIC_PSMHT_SHIFT)
+#define MPSM_MFF BIT(2)
+#define MPSM_MMF_C22 0
+#define MPSM_MMF_C45 1
+#define MPSM_PDA GENMASK(7, 3)
+#define MPSM_PRA GENMASK(12, 8)
+#define MPSM_POP GENMASK(14, 13)
+#define MPSM_POP_ADDRESS 0
+#define MPSM_POP_WRITE 1
+#define MPSM_POP_READ_C22 2
+#define MPSM_POP_READ_C45 3
+#define MPSM_PRD GENMASK(31, 16)
#define MLVC_PLV BIT(16)
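The MPIC/MPSM layout is now described with GENMASK() field masks and accessed through FIELD_PREP()/FIELD_GET() from <linux/bitfield.h> instead of hand-rolled shift macros. A small sketch of how a Clause-22 read request could be composed and its result extracted with these helpers, using the definitions from the block above (illustrative only):

#include <linux/bitfield.h>

static u32 example_mpsm_c22_read_cmd(unsigned int phyad, unsigned int regad)
{
	return MPSM_PSME |
	       FIELD_PREP(MPSM_MFF, MPSM_MMF_C22) |
	       FIELD_PREP(MPSM_PDA, phyad) |
	       FIELD_PREP(MPSM_PRA, regad) |
	       FIELD_PREP(MPSM_POP, MPSM_POP_READ_C22);
}

static u16 example_mpsm_read_data(u32 mpsm)
{
	return FIELD_GET(MPSM_PRD, mpsm);
}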
@@ -806,6 +800,7 @@ enum rswitch_gwca_mode {
#define CABPPFLC_INIT_VALUE 0x00800080
/* MFWD */
+#define FWPC0(i) (FWPC00 + (i) * 0x10)
#define FWPC0_LTHTA BIT(0)
#define FWPC0_IP4UE BIT(3)
#define FWPC0_IP4TE BIT(4)
@@ -819,15 +814,15 @@ enum rswitch_gwca_mode {
#define FWPC0_MACHMA BIT(27)
#define FWPC0_VLANSA BIT(28)
-#define FWPC0(i) (FWPC00 + (i) * 0x10)
-#define FWPC0_DEFAULT (FWPC0_LTHTA | FWPC0_IP4UE | FWPC0_IP4TE | \
- FWPC0_IP4OE | FWPC0_L2SE | FWPC0_IP4EA | \
- FWPC0_IPDSA | FWPC0_IPHLA | FWPC0_MACSDA | \
- FWPC0_MACHLA | FWPC0_MACHMA | FWPC0_VLANSA)
#define FWPC1(i) (FWPC10 + (i) * 0x10)
+#define FWCP1_LTHFW GENMASK(16 + (RSWITCH_NUM_AGENTS - 1), 16)
#define FWPC1_DDE BIT(0)
-#define FWPBFC(i) (FWPBFC0 + (i) * 0x10)
+#define FWPC2(i) (FWPC20 + (i) * 0x10)
+#define FWCP2_LTWFW GENMASK(16 + (RSWITCH_NUM_AGENTS - 1), 16)
+
+#define FWPBFC(i) (FWPBFC0 + (i) * 0x10)
+#define FWPBFC_PBDV GENMASK(RSWITCH_NUM_AGENTS - 1, 0)
#define FWPBFCSDC(j, i) (FWPBFCSDC00 + (i) * 0x10 + (j) * 0x04)
@@ -972,14 +967,6 @@ struct rswitch_gwca_queue {
};
};
-struct rswitch_gwca_ts_info {
- struct sk_buff *skb;
- struct list_head list;
-
- int port;
- u8 tag;
-};
-
#define RSWITCH_NUM_IRQ_REGS (RSWITCH_MAX_NUM_QUEUES / BITS_PER_TYPE(u32))
struct rswitch_gwca {
unsigned int index;
@@ -989,14 +976,13 @@ struct rswitch_gwca {
struct rswitch_gwca_queue *queues;
int num_queues;
struct rswitch_gwca_queue ts_queue;
- struct list_head ts_info_list;
DECLARE_BITMAP(used, RSWITCH_MAX_NUM_QUEUES);
u32 tx_irq_bits[RSWITCH_NUM_IRQ_REGS];
u32 rx_irq_bits[RSWITCH_NUM_IRQ_REGS];
- int speed;
};
#define NUM_QUEUES_PER_NDEV 2
+#define TS_TAGS_PER_PORT 256
struct rswitch_device {
struct rswitch_private *priv;
struct net_device *ndev;
@@ -1004,7 +990,8 @@ struct rswitch_device {
void __iomem *addr;
struct rswitch_gwca_queue *tx_queue;
struct rswitch_gwca_queue *rx_queue;
- u8 ts_tag;
+ struct sk_buff *ts_skb[TS_TAGS_PER_PORT];
+ DECLARE_BITMAP(ts_skb_used, TS_TAGS_PER_PORT);
bool disabled;
int port;
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 8887b8921009..5fc8027c92c7 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -3494,10 +3494,12 @@ static int sh_eth_suspend(struct device *dev)
netif_device_detach(ndev);
+ rtnl_lock();
if (mdp->wol_enabled)
ret = sh_eth_wol_setup(ndev);
else
ret = sh_eth_close(ndev);
+ rtnl_unlock();
return ret;
}
@@ -3511,10 +3513,12 @@ static int sh_eth_resume(struct device *dev)
if (!netif_running(ndev))
return 0;
+ rtnl_lock();
if (mdp->wol_enabled)
ret = sh_eth_wol_restore(ndev);
else
ret = sh_eth_open(ndev);
+ rtnl_unlock();
if (ret < 0)
return ret;
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index fe0bf1d3217a..36af94a2e062 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -2576,7 +2576,7 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
rocker_carrier_init(rocker_port);
dev->features |= NETIF_F_SG;
- dev->netns_local = true;
+ dev->netns_immutable = true;
/* MTU range: 68 - 9000 */
dev->min_mtu = ROCKER_PORT_MIN_MTU;
diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c
index 826990459fa4..61e50517c05b 100644
--- a/drivers/net/ethernet/rocker/rocker_ofdpa.c
+++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
@@ -1933,7 +1933,7 @@ static int ofdpa_port_fdb(struct ofdpa_port *ofdpa_port,
spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);
/* Check if adding and already exists, or removing and can't find */
- if (!found != !removing) {
+ if (!found == removing) {
kfree(fdb);
if (!found && removing)
return 0;
@@ -1982,7 +1982,7 @@ err_out:
static void ofdpa_fdb_cleanup(struct timer_list *t)
{
- struct ofdpa *ofdpa = from_timer(ofdpa, t, fdb_cleanup_timer);
+ struct ofdpa *ofdpa = timer_container_of(ofdpa, t, fdb_cleanup_timer);
struct ofdpa_port *ofdpa_port;
struct ofdpa_fdb_tbl_entry *entry;
struct hlist_node *tmp;
@@ -2386,7 +2386,7 @@ static void ofdpa_fini(struct rocker *rocker)
struct hlist_node *tmp;
int bkt;
- del_timer_sync(&ofdpa->fdb_cleanup_timer);
+ timer_delete_sync(&ofdpa->fdb_cleanup_timer);
flush_workqueue(rocker->rocker_owq);
spin_lock_irqsave(&ofdpa->flow_tbl_lock, flags);
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index 12c8396b6942..75bad561b352 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -91,7 +91,7 @@ void sxgbe_disable_eee_mode(struct sxgbe_priv_data * const priv)
{
/* Exit and disable EEE in case of we are in LPI state. */
priv->hw->mac->reset_eee_mode(priv->ioaddr);
- del_timer_sync(&priv->eee_ctrl_timer);
+ timer_delete_sync(&priv->eee_ctrl_timer);
priv->tx_path_in_lpi_mode = false;
}
@@ -104,7 +104,8 @@ void sxgbe_disable_eee_mode(struct sxgbe_priv_data * const priv)
*/
static void sxgbe_eee_ctrl_timer(struct timer_list *t)
{
- struct sxgbe_priv_data *priv = from_timer(priv, t, eee_ctrl_timer);
+ struct sxgbe_priv_data *priv = timer_container_of(priv, t,
+ eee_ctrl_timer);
sxgbe_enable_eee_mode(priv);
mod_timer(&priv->eee_ctrl_timer, SXGBE_LPI_TIMER(eee_timer));
@@ -1012,7 +1013,7 @@ static void sxgbe_disable_mtl_engine(struct sxgbe_priv_data *priv)
*/
static void sxgbe_tx_timer(struct timer_list *t)
{
- struct sxgbe_tx_queue *p = from_timer(p, t, txtimer);
+ struct sxgbe_tx_queue *p = timer_container_of(p, t, txtimer);
sxgbe_tx_queue_clean(p);
}
@@ -1044,7 +1045,7 @@ static void sxgbe_tx_del_timer(struct sxgbe_priv_data *priv)
SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
struct sxgbe_tx_queue *p = priv->txq[queue_num];
- del_timer_sync(&p->txtimer);
+ timer_delete_sync(&p->txtimer);
}
}
@@ -1208,7 +1209,7 @@ static int sxgbe_release(struct net_device *dev)
struct sxgbe_priv_data *priv = netdev_priv(dev);
if (priv->eee_enabled)
- del_timer_sync(&priv->eee_ctrl_timer);
+ timer_delete_sync(&priv->eee_ctrl_timer);
/* Stop and disconnect the PHY */
if (dev->phydev) {
diff --git a/drivers/net/ethernet/seeq/ether3.c b/drivers/net/ethernet/seeq/ether3.c
index 9319a2675e7b..20dad39b5ab9 100644
--- a/drivers/net/ethernet/seeq/ether3.c
+++ b/drivers/net/ethernet/seeq/ether3.c
@@ -170,7 +170,7 @@ ether3_setbuffer(struct net_device *dev, buffer_rw_t read, int start)
*/
static void ether3_ledoff(struct timer_list *t)
{
- struct dev_priv *private = from_timer(private, t, timer);
+ struct dev_priv *private = timer_container_of(private, t, timer);
struct net_device *dev = private->dev;
ether3_outw(priv(dev)->regs.config2 |= CFG2_CTRLO, REG_CONFIG2);
@@ -181,7 +181,7 @@ static void ether3_ledoff(struct timer_list *t)
*/
static inline void ether3_ledon(struct net_device *dev)
{
- del_timer(&priv(dev)->timer);
+ timer_delete(&priv(dev)->timer);
priv(dev)->timer.expires = jiffies + HZ / 50; /* leave on for 1/50th second */
add_timer(&priv(dev)->timer);
if (priv(dev)->regs.config2 & CFG2_CTRLO)
@@ -454,7 +454,7 @@ static void ether3_timeout(struct net_device *dev, unsigned int txqueue)
{
unsigned long flags;
- del_timer(&priv(dev)->timer);
+ timer_delete(&priv(dev)->timer);
local_irq_save(flags);
printk(KERN_ERR "%s: transmit timed out, network cable problem?\n", dev->name);
@@ -851,7 +851,7 @@ static void ether3_remove(struct expansion_card *ec)
ecard_set_drvdata(ec, NULL);
unregister_netdev(dev);
- del_timer_sync(&priv(dev)->timer);
+ timer_delete_sync(&priv(dev)->timer);
free_netdev(dev);
ecard_release_resources(ec);
}
diff --git a/drivers/net/ethernet/sfc/Kconfig b/drivers/net/ethernet/sfc/Kconfig
index 3eb55dcfa8a6..c4c43434f314 100644
--- a/drivers/net/ethernet/sfc/Kconfig
+++ b/drivers/net/ethernet/sfc/Kconfig
@@ -38,8 +38,9 @@ config SFC_MTD
default y
help
This exposes the on-board flash and/or EEPROM as MTD devices
- (e.g. /dev/mtd1). This is required to update the firmware or
- the boot configuration under Linux.
+ (e.g. /dev/mtd1). This is required to update the boot
+ configuration under Linux, or to use some older userland tools to
+ update the firmware.
config SFC_MCDI_MON
bool "Solarflare SFC9100-family hwmon support"
depends on SFC && HWMON && !(SFC=y && HWMON=m)
diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile
index 8f446b9bd5ee..d99039ec468d 100644
--- a/drivers/net/ethernet/sfc/Makefile
+++ b/drivers/net/ethernet/sfc/Makefile
@@ -7,7 +7,7 @@ sfc-y += efx.o efx_common.o efx_channels.o nic.o \
mcdi_functions.o mcdi_filters.o mcdi_mon.o \
ef100.o ef100_nic.o ef100_netdev.o \
ef100_ethtool.o ef100_rx.o ef100_tx.o \
- efx_devlink.o
+ efx_devlink.o efx_reflash.o
sfc-$(CONFIG_SFC_MTD) += mtd.o
sfc-$(CONFIG_SFC_SRIOV) += sriov.o ef10_sriov.o ef100_sriov.o ef100_rep.o \
mae.o tc.o tc_bindings.o tc_counters.o \
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 452009ed7a43..47349c148c0c 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -3501,7 +3501,7 @@ static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_METADATA_IN_LEN);
MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_METADATA_OUT_LENMAX);
const struct efx_ef10_nvram_type_info *info;
- size_t size, erase_size, outlen;
+ size_t size, erase_size, write_size, outlen;
int type_idx = 0;
bool protected;
int rc;
@@ -3516,7 +3516,8 @@ static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
if (info->port != efx_port_num(efx))
return -ENODEV;
- rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected);
+ rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &write_size,
+ &protected);
if (rc)
return rc;
if (protected &&
@@ -3561,6 +3562,8 @@ static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
if (!erase_size)
part->common.mtd.flags |= MTD_NO_ERASE;
+ part->common.mtd.writesize = write_size;
+
return 0;
}
@@ -4416,6 +4419,7 @@ const struct efx_nic_type efx_x4_nic_type = {
.can_rx_scatter = true,
.always_rx_scatter = true,
.option_descriptors = true,
+ .flash_auto_partition = true,
.min_interrupt_mode = EFX_INT_MODE_MSIX,
.timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
.offload_features = EF10_OFFLOAD_FEATURES,
diff --git a/drivers/net/ethernet/sfc/ef100_netdev.c b/drivers/net/ethernet/sfc/ef100_netdev.c
index 7f7d560cb2b4..3a06e3b1bd6b 100644
--- a/drivers/net/ethernet/sfc/ef100_netdev.c
+++ b/drivers/net/ethernet/sfc/ef100_netdev.c
@@ -450,9 +450,9 @@ int ef100_probe_netdev(struct efx_probe_data *probe_data)
net_dev->hw_enc_features |= efx->type->offload_features;
net_dev->vlan_features |= NETIF_F_HW_CSUM | NETIF_F_SG |
NETIF_F_HIGHDMA | NETIF_F_ALL_TSO;
- netif_set_tso_max_segs(net_dev,
- ESE_EF100_DP_GZ_TSO_MAX_HDR_NUM_SEGS_DEFAULT);
- efx->mdio.dev = net_dev;
+ nic_data = efx->nic_data;
+ netif_set_tso_max_size(efx->net_dev, nic_data->tso_max_payload_len);
+ netif_set_tso_max_segs(efx->net_dev, nic_data->tso_max_payload_num_segs);
rc = efx_ef100_init_datapath_caps(efx);
if (rc < 0)
@@ -478,7 +478,6 @@ int ef100_probe_netdev(struct efx_probe_data *probe_data)
/* Don't fail init if RSS setup doesn't work. */
efx_mcdi_push_default_indir_table(efx, efx->n_rx_channels);
- nic_data = efx->nic_data;
rc = ef100_get_mac_address(efx, net_dev->perm_addr, CLIENT_HANDLE_SELF,
efx->type->is_vf);
if (rc)
diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c
index 62e674d6ff60..3ad95a4c8af2 100644
--- a/drivers/net/ethernet/sfc/ef100_nic.c
+++ b/drivers/net/ethernet/sfc/ef100_nic.c
@@ -887,8 +887,7 @@ static int ef100_process_design_param(struct efx_nic *efx,
case ESE_EF100_DP_GZ_TSO_MAX_HDR_NUM_SEGS:
/* We always put HDR_NUM_SEGS=1 in our TSO descriptors */
if (!reader->value) {
- netif_err(efx, probe, efx->net_dev,
- "TSO_MAX_HDR_NUM_SEGS < 1\n");
+ pci_err(efx->pci_dev, "TSO_MAX_HDR_NUM_SEGS < 1\n");
return -EOPNOTSUPP;
}
return 0;
@@ -901,32 +900,28 @@ static int ef100_process_design_param(struct efx_nic *efx,
*/
if (!reader->value || reader->value > EFX_MIN_DMAQ_SIZE ||
EFX_MIN_DMAQ_SIZE % (u32)reader->value) {
- netif_err(efx, probe, efx->net_dev,
- "%s size granularity is %llu, can't guarantee safety\n",
- reader->type == ESE_EF100_DP_GZ_RXQ_SIZE_GRANULARITY ? "RXQ" : "TXQ",
- reader->value);
+ pci_err(efx->pci_dev,
+ "%s size granularity is %llu, can't guarantee safety\n",
+ reader->type == ESE_EF100_DP_GZ_RXQ_SIZE_GRANULARITY ? "RXQ" : "TXQ",
+ reader->value);
return -EOPNOTSUPP;
}
return 0;
case ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_LEN:
nic_data->tso_max_payload_len = min_t(u64, reader->value,
GSO_LEGACY_MAX_SIZE);
- netif_set_tso_max_size(efx->net_dev,
- nic_data->tso_max_payload_len);
return 0;
case ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_NUM_SEGS:
nic_data->tso_max_payload_num_segs = min_t(u64, reader->value, 0xffff);
- netif_set_tso_max_segs(efx->net_dev,
- nic_data->tso_max_payload_num_segs);
return 0;
case ESE_EF100_DP_GZ_TSO_MAX_NUM_FRAMES:
nic_data->tso_max_frames = min_t(u64, reader->value, 0xffff);
return 0;
case ESE_EF100_DP_GZ_COMPAT:
if (reader->value) {
- netif_err(efx, probe, efx->net_dev,
- "DP_COMPAT has unknown bits %#llx, driver not compatible with this hw\n",
- reader->value);
+ pci_err(efx->pci_dev,
+ "DP_COMPAT has unknown bits %#llx, driver not compatible with this hw\n",
+ reader->value);
return -EOPNOTSUPP;
}
return 0;
@@ -946,10 +941,10 @@ static int ef100_process_design_param(struct efx_nic *efx,
* So the value of this shouldn't matter.
*/
if (reader->value != ESE_EF100_DP_GZ_VI_STRIDES_DEFAULT)
- netif_dbg(efx, probe, efx->net_dev,
- "NIC has other than default VI_STRIDES (mask "
- "%#llx), early probing might use wrong one\n",
- reader->value);
+ pci_dbg(efx->pci_dev,
+ "NIC has other than default VI_STRIDES (mask "
+ "%#llx), early probing might use wrong one\n",
+ reader->value);
return 0;
case ESE_EF100_DP_GZ_RX_MAX_RUNT:
/* Driver doesn't look at L2_STATUS:LEN_ERR bit, so we don't
@@ -961,9 +956,9 @@ static int ef100_process_design_param(struct efx_nic *efx,
/* Host interface says "Drivers should ignore design parameters
* that they do not recognise."
*/
- netif_dbg(efx, probe, efx->net_dev,
- "Ignoring unrecognised design parameter %u\n",
- reader->type);
+ pci_dbg(efx->pci_dev,
+ "Ignoring unrecognised design parameter %u\n",
+ reader->type);
return 0;
}
}
@@ -999,13 +994,13 @@ static int ef100_check_design_params(struct efx_nic *efx)
*/
if (reader.state != EF100_TLV_TYPE) {
if (reader.state == EF100_TLV_TYPE_CONT)
- netif_err(efx, probe, efx->net_dev,
- "truncated design parameter (incomplete type %u)\n",
- reader.type);
+ pci_err(efx->pci_dev,
+ "truncated design parameter (incomplete type %u)\n",
+ reader.type);
else
- netif_err(efx, probe, efx->net_dev,
- "truncated design parameter %u\n",
- reader.type);
+ pci_err(efx->pci_dev,
+ "truncated design parameter %u\n",
+ reader.type);
rc = -EIO;
}
out:
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 650136dfc642..112e55b98ed3 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -476,28 +476,6 @@ void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
/**************************************************************************
*
- * ioctls
- *
- *************************************************************************/
-
-/* Net device ioctl
- * Context: process, rtnl_lock() held.
- */
-static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
-{
- struct efx_nic *efx = efx_netdev_priv(net_dev);
- struct mii_ioctl_data *data = if_mii(ifr);
-
- /* Convert phy_id from older PRTAD/DEVAD format */
- if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
- (data->phy_id & 0xfc00) == 0x0400)
- data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400;
-
- return mdio_mii_ioctl(&efx->mdio, data, cmd);
-}
-
-/**************************************************************************
- *
* Kernel net device interface
*
*************************************************************************/
@@ -593,7 +571,6 @@ static const struct net_device_ops efx_netdev_ops = {
.ndo_tx_timeout = efx_watchdog,
.ndo_start_xmit = efx_hard_start_xmit,
.ndo_validate_addr = eth_validate_addr,
- .ndo_eth_ioctl = efx_ioctl,
.ndo_change_mtu = efx_change_mtu,
.ndo_set_mac_address = efx_set_mac_address,
.ndo_set_rx_mode = efx_set_rx_mode,
@@ -1201,7 +1178,6 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
rc = efx_init_struct(efx, pci_dev);
if (rc)
goto fail1;
- efx->mdio.dev = net_dev;
pci_info(pci_dev, "Solarflare NIC detected\n");
diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c
index c88ec3e24836..5a14d94163b1 100644
--- a/drivers/net/ethernet/sfc/efx_common.c
+++ b/drivers/net/ethernet/sfc/efx_common.c
@@ -1003,6 +1003,7 @@ int efx_init_struct(struct efx_nic *efx, struct pci_dev *pci_dev)
INIT_LIST_HEAD(&efx->vf_reps);
INIT_WORK(&efx->mac_work, efx_mac_work);
init_waitqueue_head(&efx->flush_wq);
+ mutex_init(&efx->reflash_mutex);
efx->tx_queues_per_channel = 1;
efx->rxq_entries = EFX_DEFAULT_DMAQ_SIZE;
diff --git a/drivers/net/ethernet/sfc/efx_devlink.c b/drivers/net/ethernet/sfc/efx_devlink.c
index 3cd750820fdd..d842c60dfc10 100644
--- a/drivers/net/ethernet/sfc/efx_devlink.c
+++ b/drivers/net/ethernet/sfc/efx_devlink.c
@@ -19,6 +19,7 @@
#include "mae.h"
#include "ef100_rep.h"
#endif
+#include "efx_reflash.h"
struct efx_devlink {
struct efx_nic *efx;
@@ -615,7 +616,19 @@ static int efx_devlink_info_get(struct devlink *devlink,
return 0;
}
+static int efx_devlink_flash_update(struct devlink *devlink,
+ struct devlink_flash_update_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct efx_devlink *devlink_private = devlink_priv(devlink);
+ struct efx_nic *efx = devlink_private->efx;
+
+ return efx_reflash_flash_firmware(efx, params->fw, extack);
+}
+
static const struct devlink_ops sfc_devlink_ops = {
+ .supported_flash_update_params = 0,
+ .flash_update = efx_devlink_flash_update,
.info_get = efx_devlink_info_get,
};
diff --git a/drivers/net/ethernet/sfc/efx_reflash.c b/drivers/net/ethernet/sfc/efx_reflash.c
new file mode 100644
index 000000000000..b12e95f1c80a
--- /dev/null
+++ b/drivers/net/ethernet/sfc/efx_reflash.c
@@ -0,0 +1,522 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for AMD network controllers and boards
+ * Copyright (C) 2025, Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/crc32.h>
+#include <net/devlink.h>
+#include "efx_reflash.h"
+#include "net_driver.h"
+#include "fw_formats.h"
+#include "mcdi_pcol.h"
+#include "mcdi.h"
+
+/* Try to parse a Reflash header at the specified offset */
+static bool efx_reflash_parse_reflash_header(const struct firmware *fw,
+ size_t header_offset, u32 *type,
+ u32 *subtype, const u8 **data,
+ size_t *data_size)
+{
+ size_t header_end, trailer_offset, trailer_end;
+ u32 magic, version, payload_size, header_len;
+ const u8 *header, *trailer;
+ u32 expected_crc, crc;
+
+ if (check_add_overflow(header_offset, EFX_REFLASH_HEADER_LENGTH_OFST +
+ EFX_REFLASH_HEADER_LENGTH_LEN,
+ &header_end))
+ return false;
+ if (fw->size < header_end)
+ return false;
+
+ header = fw->data + header_offset;
+ magic = get_unaligned_le32(header + EFX_REFLASH_HEADER_MAGIC_OFST);
+ if (magic != EFX_REFLASH_HEADER_MAGIC_VALUE)
+ return false;
+
+ version = get_unaligned_le32(header + EFX_REFLASH_HEADER_VERSION_OFST);
+ if (version != EFX_REFLASH_HEADER_VERSION_VALUE)
+ return false;
+
+ payload_size = get_unaligned_le32(header + EFX_REFLASH_HEADER_PAYLOAD_SIZE_OFST);
+ header_len = get_unaligned_le32(header + EFX_REFLASH_HEADER_LENGTH_OFST);
+ if (check_add_overflow(header_offset, header_len, &trailer_offset) ||
+ check_add_overflow(trailer_offset, payload_size, &trailer_offset) ||
+ check_add_overflow(trailer_offset, EFX_REFLASH_TRAILER_LEN,
+ &trailer_end))
+ return false;
+ if (fw->size < trailer_end)
+ return false;
+
+ trailer = fw->data + trailer_offset;
+ expected_crc = get_unaligned_le32(trailer + EFX_REFLASH_TRAILER_CRC_OFST);
+ /* Addition could overflow u32, but not size_t since we already
+ * checked trailer_offset didn't overflow. So cast to size_t first.
+ */
+ crc = crc32_le(0, header, (size_t)header_len + payload_size);
+ if (crc != expected_crc)
+ return false;
+
+ *type = get_unaligned_le32(header + EFX_REFLASH_HEADER_FIRMWARE_TYPE_OFST);
+ *subtype = get_unaligned_le32(header + EFX_REFLASH_HEADER_FIRMWARE_SUBTYPE_OFST);
+ if (*type == EFX_REFLASH_FIRMWARE_TYPE_BUNDLE) {
+ /* All the bundle data is written verbatim to NVRAM */
+ *data = fw->data;
+ *data_size = fw->size;
+ } else {
+ /* Other payload types strip the reflash header and trailer
+ * from the data written to NVRAM
+ */
+ *data = header + header_len;
+ *data_size = payload_size;
+ }
+
+ return true;
+}
+
+/* Map from FIRMWARE_TYPE to NVRAM_PARTITION_TYPE */
+static int efx_reflash_partition_type(u32 type, u32 subtype,
+ u32 *partition_type,
+ u32 *partition_subtype)
+{
+ int rc = 0;
+
+ switch (type) {
+ case EFX_REFLASH_FIRMWARE_TYPE_BOOTROM:
+ *partition_type = NVRAM_PARTITION_TYPE_EXPANSION_ROM;
+ *partition_subtype = subtype;
+ break;
+ case EFX_REFLASH_FIRMWARE_TYPE_BUNDLE:
+ *partition_type = NVRAM_PARTITION_TYPE_BUNDLE;
+ *partition_subtype = subtype;
+ break;
+ default:
+ /* Not supported */
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+/* Try to parse a SmartNIC image header at the specified offset */
+static bool efx_reflash_parse_snic_header(const struct firmware *fw,
+ size_t header_offset,
+ u32 *partition_type,
+ u32 *partition_subtype,
+ const u8 **data, size_t *data_size)
+{
+ u32 magic, version, payload_size, header_len, expected_crc, crc;
+ size_t header_end, payload_end;
+ const u8 *header;
+
+ if (check_add_overflow(header_offset, EFX_SNICIMAGE_HEADER_MINLEN,
+ &header_end) ||
+ fw->size < header_end)
+ return false;
+
+ header = fw->data + header_offset;
+ magic = get_unaligned_le32(header + EFX_SNICIMAGE_HEADER_MAGIC_OFST);
+ if (magic != EFX_SNICIMAGE_HEADER_MAGIC_VALUE)
+ return false;
+
+ version = get_unaligned_le32(header + EFX_SNICIMAGE_HEADER_VERSION_OFST);
+ if (version != EFX_SNICIMAGE_HEADER_VERSION_VALUE)
+ return false;
+
+ header_len = get_unaligned_le32(header + EFX_SNICIMAGE_HEADER_LENGTH_OFST);
+ if (check_add_overflow(header_offset, header_len, &header_end))
+ return false;
+ payload_size = get_unaligned_le32(header + EFX_SNICIMAGE_HEADER_PAYLOAD_SIZE_OFST);
+ if (check_add_overflow(header_end, payload_size, &payload_end) ||
+ fw->size < payload_end)
+ return false;
+
+ expected_crc = get_unaligned_le32(header + EFX_SNICIMAGE_HEADER_CRC_OFST);
+
+ /* Calculate CRC omitting the expected CRC field itself */
+ crc = crc32_le(~0, header, EFX_SNICIMAGE_HEADER_CRC_OFST);
+ crc = ~crc32_le(crc,
+ header + EFX_SNICIMAGE_HEADER_CRC_OFST +
+ EFX_SNICIMAGE_HEADER_CRC_LEN,
+ header_len + payload_size - EFX_SNICIMAGE_HEADER_CRC_OFST -
+ EFX_SNICIMAGE_HEADER_CRC_LEN);
+ if (crc != expected_crc)
+ return false;
+
+ *partition_type =
+ get_unaligned_le32(header + EFX_SNICIMAGE_HEADER_PARTITION_TYPE_OFST);
+ *partition_subtype =
+ get_unaligned_le32(header + EFX_SNICIMAGE_HEADER_PARTITION_SUBTYPE_OFST);
+ *data = fw->data;
+ *data_size = fw->size;
+ return true;
+}
+
+/* Try to parse a SmartNIC bundle header at the specified offset */
+static bool efx_reflash_parse_snic_bundle_header(const struct firmware *fw,
+ size_t header_offset,
+ u32 *partition_type,
+ u32 *partition_subtype,
+ const u8 **data,
+ size_t *data_size)
+{
+ u32 magic, version, bundle_type, header_len, expected_crc, crc;
+ size_t header_end;
+ const u8 *header;
+
+ if (check_add_overflow(header_offset, EFX_SNICBUNDLE_HEADER_LEN,
+ &header_end))
+ return false;
+ if (fw->size < header_end)
+ return false;
+
+ header = fw->data + header_offset;
+ magic = get_unaligned_le32(header + EFX_SNICBUNDLE_HEADER_MAGIC_OFST);
+ if (magic != EFX_SNICBUNDLE_HEADER_MAGIC_VALUE)
+ return false;
+
+ version = get_unaligned_le32(header + EFX_SNICBUNDLE_HEADER_VERSION_OFST);
+ if (version != EFX_SNICBUNDLE_HEADER_VERSION_VALUE)
+ return false;
+
+ bundle_type = get_unaligned_le32(header + EFX_SNICBUNDLE_HEADER_BUNDLE_TYPE_OFST);
+ if (bundle_type != NVRAM_PARTITION_TYPE_BUNDLE)
+ return false;
+
+ header_len = get_unaligned_le32(header + EFX_SNICBUNDLE_HEADER_LENGTH_OFST);
+ if (header_len != EFX_SNICBUNDLE_HEADER_LEN)
+ return false;
+
+ expected_crc = get_unaligned_le32(header + EFX_SNICBUNDLE_HEADER_CRC_OFST);
+ crc = ~crc32_le(~0, header, EFX_SNICBUNDLE_HEADER_CRC_OFST);
+ if (crc != expected_crc)
+ return false;
+
+ *partition_type = NVRAM_PARTITION_TYPE_BUNDLE;
+ *partition_subtype = get_unaligned_le32(header + EFX_SNICBUNDLE_HEADER_BUNDLE_SUBTYPE_OFST);
+ *data = fw->data;
+ *data_size = fw->size;
+ return true;
+}
+
+/* Try to find a valid firmware payload in the firmware data.
+ * When we recognise a valid header, we parse it for the partition type
+ * (so we know where to ask the MC to write it to) and the location of
+ * the data blob to write.
+ */
+static int efx_reflash_parse_firmware_data(const struct firmware *fw,
+ u32 *partition_type,
+ u32 *partition_subtype,
+ const u8 **data, size_t *data_size)
+{
+ size_t header_offset;
+ u32 type, subtype;
+
+ /* Some packaging formats (such as CMS/PKCS#7 signed images)
+ * prepend a header for which finding the size is a non-trivial
+ * task, so step through the firmware data until we find a valid
+ * header.
+ *
+ * The checks are intended to reject firmware data that is clearly not
+ * in the expected format. They do not need to be exhaustive as the
+ * running firmware will perform its own comprehensive validity and
+ * compatibility checks during the update procedure.
+ *
+ * Firmware packages may contain multiple reflash images, e.g. a
+ * bundle containing one or more other images. Only check the
+ * outermost container by stopping after the first candidate image
+ * found, even if it is for an unsupported partition type.
+ */
+ for (header_offset = 0; header_offset < fw->size; header_offset++) {
+ if (efx_reflash_parse_snic_bundle_header(fw, header_offset,
+ partition_type,
+ partition_subtype,
+ data, data_size))
+ return 0;
+
+ if (efx_reflash_parse_snic_header(fw, header_offset,
+ partition_type,
+ partition_subtype, data,
+ data_size))
+ return 0;
+
+ if (efx_reflash_parse_reflash_header(fw, header_offset, &type,
+ &subtype, data, data_size))
+ return efx_reflash_partition_type(type, subtype,
+ partition_type,
+ partition_subtype);
+ }
+
+ return -EINVAL;
+}
+
+/* Limit the number of status updates during the erase or write phases */
+#define EFX_DEVLINK_STATUS_UPDATE_COUNT 50
+
+/* Expected timeout, in seconds, for efx_mcdi_nvram_update_finish_polled() */
+#define EFX_DEVLINK_UPDATE_FINISH_TIMEOUT 900
+
+/* Ideal erase chunk size. This is a balance between minimising the number of
+ * MCDI requests to erase an entire partition whilst avoiding tripping the MCDI
+ * RPC timeout.
+ */
+#define EFX_NVRAM_ERASE_IDEAL_CHUNK_SIZE (64 * 1024)
+
+static int efx_reflash_erase_partition(struct efx_nic *efx,
+ struct netlink_ext_ack *extack,
+ struct devlink *devlink, u32 type,
+ size_t partition_size,
+ size_t align)
+{
+ size_t chunk, offset, next_update;
+ int rc;
+
+ /* Partitions that cannot be erased or do not require erase before
+ * write are advertised with an erase alignment/sector size of zero.
+ */
+ if (align == 0)
+ /* Nothing to do */
+ return 0;
+
+ if (partition_size % align)
+ return -EINVAL;
+
+ /* Erase the entire NVRAM partition a chunk at a time to avoid
+ * potentially tripping the MCDI RPC timeout.
+ */
+ if (align >= EFX_NVRAM_ERASE_IDEAL_CHUNK_SIZE)
+ chunk = align;
+ else
+ chunk = rounddown(EFX_NVRAM_ERASE_IDEAL_CHUNK_SIZE, align);
+
+ for (offset = 0, next_update = 0; offset < partition_size; offset += chunk) {
+ if (offset >= next_update) {
+ devlink_flash_update_status_notify(devlink, "Erasing",
+ NULL, offset,
+ partition_size);
+ next_update += partition_size / EFX_DEVLINK_STATUS_UPDATE_COUNT;
+ }
+
+ chunk = min_t(size_t, partition_size - offset, chunk);
+ rc = efx_mcdi_nvram_erase(efx, type, offset, chunk);
+ if (rc) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Erase failed for NVRAM partition %#x at %#zx-%#zx",
+ type, offset, offset + chunk - 1);
+ return rc;
+ }
+ }
+
+ devlink_flash_update_status_notify(devlink, "Erasing", NULL,
+ partition_size, partition_size);
+
+ return 0;
+}
+
+static int efx_reflash_write_partition(struct efx_nic *efx,
+ struct netlink_ext_ack *extack,
+ struct devlink *devlink, u32 type,
+ const u8 *data, size_t data_size,
+ size_t align)
+{
+ size_t write_max, chunk, offset, next_update;
+ int rc;
+
+ if (align == 0)
+ return -EINVAL;
+
+ /* Write the NVRAM partition in chunks that are the largest multiple
+ * of the partition's required write alignment that will fit into the
+ * MCDI NVRAM_WRITE RPC payload.
+ */
+ if (efx->type->mcdi_max_ver < 2)
+ write_max = MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_LEN *
+ MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MAXNUM;
+ else
+ write_max = MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_LEN *
+ MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MAXNUM_MCDI2;
+ chunk = rounddown(write_max, align);
+
+ for (offset = 0, next_update = 0; offset + chunk <= data_size; offset += chunk) {
+ if (offset >= next_update) {
+ devlink_flash_update_status_notify(devlink, "Writing",
+ NULL, offset,
+ data_size);
+ next_update += data_size / EFX_DEVLINK_STATUS_UPDATE_COUNT;
+ }
+
+ rc = efx_mcdi_nvram_write(efx, type, offset, data + offset, chunk);
+ if (rc) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Write failed for NVRAM partition %#x at %#zx-%#zx",
+ type, offset, offset + chunk - 1);
+ return rc;
+ }
+ }
+
+ /* Round up left over data to satisfy write alignment */
+ if (offset < data_size) {
+ size_t remaining = data_size - offset;
+ u8 *buf;
+
+ if (offset >= next_update)
+ devlink_flash_update_status_notify(devlink, "Writing",
+ NULL, offset,
+ data_size);
+
+ chunk = roundup(remaining, align);
+ buf = kmalloc(chunk, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ memcpy(buf, data + offset, remaining);
+ memset(buf + remaining, 0xFF, chunk - remaining);
+ rc = efx_mcdi_nvram_write(efx, type, offset, buf, chunk);
+ kfree(buf);
+ if (rc) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Write failed for NVRAM partition %#x at %#zx-%#zx",
+ type, offset, offset + chunk - 1);
+ return rc;
+ }
+ }
+
+ devlink_flash_update_status_notify(devlink, "Writing", NULL, data_size,
+ data_size);
+
+ return 0;
+}
+
+int efx_reflash_flash_firmware(struct efx_nic *efx, const struct firmware *fw,
+ struct netlink_ext_ack *extack)
+{
+ size_t data_size, size, erase_align, write_align;
+ struct devlink *devlink = efx->devlink;
+ u32 type, data_subtype, subtype;
+ const u8 *data;
+ bool protected;
+ int rc;
+
+ if (!efx_has_cap(efx, BUNDLE_UPDATE)) {
+ NL_SET_ERR_MSG_MOD(extack, "NVRAM bundle updates are not supported by the firmware");
+ return -EOPNOTSUPP;
+ }
+
+ mutex_lock(&efx->reflash_mutex);
+
+ devlink_flash_update_status_notify(devlink, "Checking update", NULL, 0, 0);
+
+ if (efx->type->flash_auto_partition) {
+ /* NIC wants entire FW file including headers;
+ * FW will validate 'subtype' if there is one
+ */
+ type = NVRAM_PARTITION_TYPE_AUTO;
+ data = fw->data;
+ data_size = fw->size;
+ } else {
+ rc = efx_reflash_parse_firmware_data(fw, &type, &data_subtype, &data,
+ &data_size);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Firmware image validation check failed");
+ goto out_unlock;
+ }
+
+ rc = efx_mcdi_nvram_metadata(efx, type, &subtype, NULL, NULL, 0);
+ if (rc) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Metadata query for NVRAM partition %#x failed",
+ type);
+ goto out_unlock;
+ }
+
+ if (subtype != data_subtype) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Firmware image is not appropriate for this adapter");
+ rc = -EINVAL;
+ goto out_unlock;
+ }
+ }
+
+ rc = efx_mcdi_nvram_info(efx, type, &size, &erase_align, &write_align,
+ &protected);
+ if (rc) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Info query for NVRAM partition %#x failed",
+ type);
+ goto out_unlock;
+ }
+
+ if (protected) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "NVRAM partition %#x is protected",
+ type);
+ rc = -EPERM;
+ goto out_unlock;
+ }
+
+ if (write_align == 0) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "NVRAM partition %#x is not writable",
+ type);
+ rc = -EACCES;
+ goto out_unlock;
+ }
+
+ if (erase_align != 0 && size % erase_align) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "NVRAM partition %#x has a bad partition table entry, can't erase it",
+ type);
+ rc = -EACCES;
+ goto out_unlock;
+ }
+
+ if (data_size > size) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Firmware image is too big for NVRAM partition %#x",
+ type);
+ rc = -EFBIG;
+ goto out_unlock;
+ }
+
+ devlink_flash_update_status_notify(devlink, "Starting update", NULL, 0, 0);
+
+ rc = efx_mcdi_nvram_update_start(efx, type);
+ if (rc) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Update start request for NVRAM partition %#x failed",
+ type);
+ goto out_unlock;
+ }
+
+ rc = efx_reflash_erase_partition(efx, extack, devlink, type, size,
+ erase_align);
+ if (rc)
+ goto out_update_finish;
+
+ rc = efx_reflash_write_partition(efx, extack, devlink, type, data,
+ data_size, write_align);
+ if (rc)
+ goto out_update_finish;
+
+ devlink_flash_update_timeout_notify(devlink, "Finishing update", NULL,
+ EFX_DEVLINK_UPDATE_FINISH_TIMEOUT);
+
+out_update_finish:
+ if (rc)
+ /* Don't obscure the return code from an earlier failure */
+ efx_mcdi_nvram_update_finish(efx, type, EFX_UPDATE_FINISH_ABORT);
+ else
+ rc = efx_mcdi_nvram_update_finish_polled(efx, type);
+out_unlock:
+ mutex_unlock(&efx->reflash_mutex);
+ devlink_flash_update_status_notify(devlink, rc ? "Update failed" :
+ "Update complete",
+ NULL, 0, 0);
+ return rc;
+}
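The chunking and tail-padding arithmetic in efx_reflash_write_partition() above can be checked with a small stand-alone sketch; the write_max, align and data_size values below are made-up examples, not values taken from the MCDI protocol headers:

#include <stdio.h>

static size_t rounddown(size_t x, size_t a) { return x / a * a; }
static size_t roundup(size_t x, size_t a) { return (x + a - 1) / a * a; }

int main(void)
{
	size_t write_max = 2048;	/* assumed MCDI write payload limit */
	size_t align = 64;		/* assumed NVRAM write alignment */
	size_t data_size = 5000;	/* assumed firmware image size */
	size_t chunk = rounddown(write_max, align);
	size_t offset;

	for (offset = 0; offset + chunk <= data_size; offset += chunk)
		printf("aligned write: offset %zu, length %zu\n", offset, chunk);

	if (offset < data_size) {
		size_t remaining = data_size - offset;
		size_t padded = roundup(remaining, align);

		/* The driver copies 'remaining' real bytes and fills the
		 * final 'padded - remaining' bytes with 0xFF before the
		 * last MC_CMD_NVRAM_WRITE.
		 */
		printf("tail write: offset %zu, length %zu (%zu pad bytes)\n",
		       offset, padded, padded - remaining);
	}
	return 0;
}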
diff --git a/drivers/net/ethernet/sfc/efx_reflash.h b/drivers/net/ethernet/sfc/efx_reflash.h
new file mode 100644
index 000000000000..3dffac565161
--- /dev/null
+++ b/drivers/net/ethernet/sfc/efx_reflash.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for AMD network controllers and boards
+ * Copyright (C) 2025, Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef _EFX_REFLASH_H
+#define _EFX_REFLASH_H
+
+#include "net_driver.h"
+#include <linux/firmware.h>
+
+int efx_reflash_flash_firmware(struct efx_nic *efx, const struct firmware *fw,
+ struct netlink_ext_ack *extack);
+
+#endif /* _EFX_REFLASH_H */
diff --git a/drivers/net/ethernet/sfc/falcon/falcon.c b/drivers/net/ethernet/sfc/falcon/falcon.c
index 4af56333ea49..c44df8e4dd30 100644
--- a/drivers/net/ethernet/sfc/falcon/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon/falcon.c
@@ -1453,8 +1453,8 @@ static void falcon_stats_complete(struct ef4_nic *efx)
static void falcon_stats_timer_func(struct timer_list *t)
{
- struct falcon_nic_data *nic_data = from_timer(nic_data, t,
- stats_timer);
+ struct falcon_nic_data *nic_data = timer_container_of(nic_data, t,
+ stats_timer);
struct ef4_nic *efx = nic_data->efx;
spin_lock(&efx->stats_lock);
@@ -2657,7 +2657,7 @@ void falcon_stop_nic_stats(struct ef4_nic *efx)
++nic_data->stats_disable_count;
spin_unlock_bh(&efx->stats_lock);
- del_timer_sync(&nic_data->stats_timer);
+ timer_delete_sync(&nic_data->stats_timer);
/* Wait enough time for the most recent transfer to
* complete. */
diff --git a/drivers/net/ethernet/sfc/falcon/rx.c b/drivers/net/ethernet/sfc/falcon/rx.c
index 6bbdb5d2eebf..f69fcf6caca8 100644
--- a/drivers/net/ethernet/sfc/falcon/rx.c
+++ b/drivers/net/ethernet/sfc/falcon/rx.c
@@ -382,7 +382,8 @@ void ef4_fast_push_rx_descriptors(struct ef4_rx_queue *rx_queue, bool atomic)
void ef4_rx_slow_fill(struct timer_list *t)
{
- struct ef4_rx_queue *rx_queue = from_timer(rx_queue, t, slow_fill);
+ struct ef4_rx_queue *rx_queue = timer_container_of(rx_queue, t,
+ slow_fill);
/* Post an event to cause NAPI to run and refill the queue */
ef4_nic_generate_fill_event(rx_queue);
@@ -791,7 +792,7 @@ void ef4_fini_rx_queue(struct ef4_rx_queue *rx_queue)
netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
"shutting down RX queue %d\n", ef4_rx_queue_index(rx_queue));
- del_timer_sync(&rx_queue->slow_fill);
+ timer_delete_sync(&rx_queue->slow_fill);
/* Release RX buffers from the current read ptr to the write ptr */
if (rx_queue->buffer) {
diff --git a/drivers/net/ethernet/sfc/fw_formats.h b/drivers/net/ethernet/sfc/fw_formats.h
new file mode 100644
index 000000000000..cbc350c96013
--- /dev/null
+++ b/drivers/net/ethernet/sfc/fw_formats.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for AMD network controllers and boards
+ * Copyright (C) 2025, Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef _EFX_FW_FORMATS_H
+#define _EFX_FW_FORMATS_H
+
+/* Header layouts of firmware update images recognised by Efx NICs.
+ * The sources-of-truth for these layouts are AMD internal documents
+ * and sfregistry headers, neither of which are available externally
+ * nor usable directly by the driver.
+ *
+ * While each format includes a 'magic number', these are at different
+ * offsets in the various formats, and a legal header for one format
+ * could have the right value in whichever field occupies that offset
+ * to match another format's magic.
+ * Besides, some packaging formats (such as CMS/PKCS#7 signed images)
+ * prepend a header for which finding the size is a non-trivial task;
+ * rather than trying to parse those headers, we search byte-by-byte
+ * through the provided firmware image looking for a valid header.
+ * Thus, format recognition has to include validation of the checksum
+ * field, even though the firmware will validate that itself before
+ * applying the image.
+ */
+
+/* EF10 (Medford2, X2) "reflash" header format. Defined in SF-121352-AN */
+#define EFX_REFLASH_HEADER_MAGIC_OFST 0
+#define EFX_REFLASH_HEADER_MAGIC_LEN 4
+#define EFX_REFLASH_HEADER_MAGIC_VALUE 0x106F1A5
+
+#define EFX_REFLASH_HEADER_VERSION_OFST 4
+#define EFX_REFLASH_HEADER_VERSION_LEN 4
+#define EFX_REFLASH_HEADER_VERSION_VALUE 4
+
+#define EFX_REFLASH_HEADER_FIRMWARE_TYPE_OFST 8
+#define EFX_REFLASH_HEADER_FIRMWARE_TYPE_LEN 4
+#define EFX_REFLASH_FIRMWARE_TYPE_BOOTROM 0x2
+#define EFX_REFLASH_FIRMWARE_TYPE_BUNDLE 0xd
+
+#define EFX_REFLASH_HEADER_FIRMWARE_SUBTYPE_OFST 12
+#define EFX_REFLASH_HEADER_FIRMWARE_SUBTYPE_LEN 4
+
+#define EFX_REFLASH_HEADER_PAYLOAD_SIZE_OFST 16
+#define EFX_REFLASH_HEADER_PAYLOAD_SIZE_LEN 4
+
+#define EFX_REFLASH_HEADER_LENGTH_OFST 20
+#define EFX_REFLASH_HEADER_LENGTH_LEN 4
+
+/* Reflash trailer */
+#define EFX_REFLASH_TRAILER_CRC_OFST 0
+#define EFX_REFLASH_TRAILER_CRC_LEN 4
+
+#define EFX_REFLASH_TRAILER_LEN \
+ (EFX_REFLASH_TRAILER_CRC_OFST + EFX_REFLASH_TRAILER_CRC_LEN)
+
+/* EF100 "SmartNIC image" header format.
+ * Defined in sfregistry "src/layout/snic_image_hdr.h".
+ */
+#define EFX_SNICIMAGE_HEADER_MAGIC_OFST 16
+#define EFX_SNICIMAGE_HEADER_MAGIC_LEN 4
+#define EFX_SNICIMAGE_HEADER_MAGIC_VALUE 0x541C057A
+
+#define EFX_SNICIMAGE_HEADER_VERSION_OFST 20
+#define EFX_SNICIMAGE_HEADER_VERSION_LEN 4
+#define EFX_SNICIMAGE_HEADER_VERSION_VALUE 1
+
+#define EFX_SNICIMAGE_HEADER_LENGTH_OFST 24
+#define EFX_SNICIMAGE_HEADER_LENGTH_LEN 4
+
+#define EFX_SNICIMAGE_HEADER_PARTITION_TYPE_OFST 36
+#define EFX_SNICIMAGE_HEADER_PARTITION_TYPE_LEN 4
+
+#define EFX_SNICIMAGE_HEADER_PARTITION_SUBTYPE_OFST 40
+#define EFX_SNICIMAGE_HEADER_PARTITION_SUBTYPE_LEN 4
+
+#define EFX_SNICIMAGE_HEADER_PAYLOAD_SIZE_OFST 60
+#define EFX_SNICIMAGE_HEADER_PAYLOAD_SIZE_LEN 4
+
+#define EFX_SNICIMAGE_HEADER_CRC_OFST 64
+#define EFX_SNICIMAGE_HEADER_CRC_LEN 4
+
+#define EFX_SNICIMAGE_HEADER_MINLEN 256
+
+/* EF100 "SmartNIC bundle" header format. Defined in SF-122606-TC */
+#define EFX_SNICBUNDLE_HEADER_MAGIC_OFST 0
+#define EFX_SNICBUNDLE_HEADER_MAGIC_LEN 4
+#define EFX_SNICBUNDLE_HEADER_MAGIC_VALUE 0xB1001001
+
+#define EFX_SNICBUNDLE_HEADER_VERSION_OFST 4
+#define EFX_SNICBUNDLE_HEADER_VERSION_LEN 4
+#define EFX_SNICBUNDLE_HEADER_VERSION_VALUE 1
+
+#define EFX_SNICBUNDLE_HEADER_BUNDLE_TYPE_OFST 8
+#define EFX_SNICBUNDLE_HEADER_BUNDLE_TYPE_LEN 4
+
+#define EFX_SNICBUNDLE_HEADER_BUNDLE_SUBTYPE_OFST 12
+#define EFX_SNICBUNDLE_HEADER_BUNDLE_SUBTYPE_LEN 4
+
+#define EFX_SNICBUNDLE_HEADER_LENGTH_OFST 20
+#define EFX_SNICBUNDLE_HEADER_LENGTH_LEN 4
+
+#define EFX_SNICBUNDLE_HEADER_CRC_OFST 224
+#define EFX_SNICBUNDLE_HEADER_CRC_LEN 4
+
+#define EFX_SNICBUNDLE_HEADER_LEN \
+ (EFX_SNICBUNDLE_HEADER_CRC_OFST + EFX_SNICBUNDLE_HEADER_CRC_LEN)
+
+#endif /* _EFX_FW_FORMATS_H */
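As a concrete illustration of the EF10 reflash layout defined above, here is a hypothetical user-space helper (not part of the patch) that reads the header fields at those offsets; rd_le32() and dump_reflash_header() are made-up names, and the sketch assumes the header sits at the start of the file (i.e. no CMS/PKCS#7 wrapper):

#include <stdint.h>
#include <stdio.h>

static uint32_t rd_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
	       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

static void dump_reflash_header(const uint8_t *hdr)
{
	printf("magic    %#x (expect 0x106F1A5)\n", (unsigned int)rd_le32(hdr + 0));
	printf("version  %u (expect 4)\n", (unsigned int)rd_le32(hdr + 4));
	printf("fw type  %#x (0x2 BootROM, 0xd bundle)\n", (unsigned int)rd_le32(hdr + 8));
	printf("subtype  %#x\n", (unsigned int)rd_le32(hdr + 12));
	printf("payload  %u bytes\n", (unsigned int)rd_le32(hdr + 16));
	printf("hdr len  %u bytes\n", (unsigned int)rd_le32(hdr + 20));
}

int main(int argc, char **argv)
{
	uint8_t hdr[24];
	FILE *f;

	if (argc < 2 || !(f = fopen(argv[1], "rb")))
		return 1;
	if (fread(hdr, 1, sizeof(hdr), f) == sizeof(hdr))
		dump_reflash_header(hdr);
	fclose(f);
	return 0;
}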
diff --git a/drivers/net/ethernet/sfc/io.h b/drivers/net/ethernet/sfc/io.h
index 4cc7b501135f..ef374a8e05c3 100644
--- a/drivers/net/ethernet/sfc/io.h
+++ b/drivers/net/ethernet/sfc/io.h
@@ -217,28 +217,4 @@ _efx_writed_page(struct efx_nic *efx, const efx_dword_t *value,
(reg) != 0xa1c), \
page)
-/* Write TIMER_COMMAND. This is a page-mapped 32-bit CSR, but a bug
- * in the BIU means that writes to TIMER_COMMAND[0] invalidate the
- * collector register.
- */
-static inline void _efx_writed_page_locked(struct efx_nic *efx,
- const efx_dword_t *value,
- unsigned int reg,
- unsigned int page)
-{
- unsigned long flags __attribute__ ((unused));
-
- if (page == 0) {
- spin_lock_irqsave(&efx->biu_lock, flags);
- efx_writed(efx, value, efx_paged_reg(efx, page, reg));
- spin_unlock_irqrestore(&efx->biu_lock, flags);
- } else {
- efx_writed(efx, value, efx_paged_reg(efx, page, reg));
- }
-}
-#define efx_writed_page_locked(efx, value, reg, page) \
- _efx_writed_page_locked(efx, value, \
- reg + BUILD_BUG_ON_ZERO((reg) != 0x420), \
- page)
-
#endif /* EFX_IO_H */
diff --git a/drivers/net/ethernet/sfc/mae.c b/drivers/net/ethernet/sfc/mae.c
index 50f097487b14..6fd0c1e9a7d5 100644
--- a/drivers/net/ethernet/sfc/mae.c
+++ b/drivers/net/ethernet/sfc/mae.c
@@ -755,7 +755,7 @@ int efx_mae_match_check_caps_lhs(struct efx_nic *efx,
rc = efx_mae_match_check_cap_typ(supported_fields[MAE_FIELD_INGRESS_PORT],
ingress_port_mask_type);
if (rc) {
- NL_SET_ERR_MSG_FMT_MOD(extack, "No support for %s mask in field %s\n",
+ NL_SET_ERR_MSG_FMT_MOD(extack, "No support for %s mask in field %s",
mask_type_name(ingress_port_mask_type),
"ingress_port");
return rc;
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index d461b1a6ce81..5e9b8def5e42 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -530,7 +530,7 @@ static bool efx_mcdi_complete_async(struct efx_mcdi_iface *mcdi, bool timeout)
* of it aborting the next request.
*/
if (!timeout)
- del_timer_sync(&mcdi->async_timer);
+ timer_delete_sync(&mcdi->async_timer);
spin_lock(&mcdi->async_lock);
async = list_first_entry(&mcdi->async_list,
@@ -605,7 +605,7 @@ static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno,
static void efx_mcdi_timeout_async(struct timer_list *t)
{
- struct efx_mcdi_iface *mcdi = from_timer(mcdi, t, async_timer);
+ struct efx_mcdi_iface *mcdi = timer_container_of(mcdi, t, async_timer);
efx_mcdi_complete_async(mcdi, true);
}
@@ -1122,7 +1122,7 @@ void efx_mcdi_flush_async(struct efx_nic *efx)
/* We must be in poll or fail mode so no more requests can be queued */
BUG_ON(mcdi->mode == MCDI_MODE_EVENTS);
- del_timer_sync(&mcdi->async_timer);
+ timer_delete_sync(&mcdi->async_timer);
/* If a request is still running, make sure we give the MC
* time to complete it so that the response won't overwrite our
@@ -1625,12 +1625,15 @@ fail:
return rc;
}
+#define EFX_MCDI_NVRAM_DEFAULT_WRITE_LEN 128
+
int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
size_t *size_out, size_t *erase_size_out,
- bool *protected_out)
+ size_t *write_size_out, bool *protected_out)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_INFO_IN_LEN);
- MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_INFO_OUT_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_INFO_V2_OUT_LEN);
+ size_t write_size = 0;
size_t outlen;
int rc;
@@ -1645,6 +1648,12 @@ int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
goto fail;
}
+ if (outlen >= MC_CMD_NVRAM_INFO_V2_OUT_LEN)
+ write_size = MCDI_DWORD(outbuf, NVRAM_INFO_V2_OUT_WRITESIZE);
+ else
+ write_size = EFX_MCDI_NVRAM_DEFAULT_WRITE_LEN;
+
+ *write_size_out = write_size;
*size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE);
*erase_size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_ERASESIZE);
*protected_out = !!(MCDI_DWORD(outbuf, NVRAM_INFO_OUT_FLAGS) &
@@ -2163,11 +2172,9 @@ out_free:
return rc;
}
-#ifdef CONFIG_SFC_MTD
-
#define EFX_MCDI_NVRAM_LEN_MAX 128
-static int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
+int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_START_V2_IN_LEN);
int rc;
@@ -2185,6 +2192,8 @@ static int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
return rc;
}
+#ifdef CONFIG_SFC_MTD
+
static int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
loff_t offset, u8 *buffer, size_t length)
{
@@ -2209,13 +2218,20 @@ static int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
return 0;
}
-static int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
- loff_t offset, const u8 *buffer, size_t length)
+#endif /* CONFIG_SFC_MTD */
+
+int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
+ loff_t offset, const u8 *buffer, size_t length)
{
- MCDI_DECLARE_BUF(inbuf,
- MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX));
+ efx_dword_t *inbuf;
+ size_t inlen;
int rc;
+ inlen = ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4);
+ inbuf = kzalloc(inlen, GFP_KERNEL);
+ if (!inbuf)
+ return -ENOMEM;
+
MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type);
MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset);
MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, length);
@@ -2223,14 +2239,14 @@ static int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0);
- rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf,
- ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4),
- NULL, 0, NULL);
+ rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf, inlen, NULL, 0, NULL);
+ kfree(inbuf);
+
return rc;
}
-static int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
- loff_t offset, size_t length)
+int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type, loff_t offset,
+ size_t length)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_ERASE_IN_LEN);
int rc;
@@ -2246,7 +2262,8 @@ static int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
return rc;
}
-static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
+int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type,
+ enum efx_update_finish_mode mode)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_LEN);
MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN);
@@ -2254,22 +2271,41 @@ static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
int rc, rc2;
MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type);
- /* Always set this flag. Old firmware ignores it */
- MCDI_POPULATE_DWORD_1(inbuf, NVRAM_UPDATE_FINISH_V2_IN_FLAGS,
+
+ /* Old firmware doesn't support background update finish and abort
+ * operations. Fall back to waiting if the requested mode is not
+ * supported.
+ */
+ if (!efx_has_cap(efx, NVRAM_UPDATE_POLL_VERIFY_RESULT) ||
+ (!efx_has_cap(efx, NVRAM_UPDATE_ABORT_SUPPORTED) &&
+ mode == EFX_UPDATE_FINISH_ABORT))
+ mode = EFX_UPDATE_FINISH_WAIT;
+
+ MCDI_POPULATE_DWORD_4(inbuf, NVRAM_UPDATE_FINISH_V2_IN_FLAGS,
NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT,
- 1);
+ (mode != EFX_UPDATE_FINISH_ABORT),
+ NVRAM_UPDATE_FINISH_V2_IN_FLAG_RUN_IN_BACKGROUND,
+ (mode == EFX_UPDATE_FINISH_BACKGROUND),
+ NVRAM_UPDATE_FINISH_V2_IN_FLAG_POLL_VERIFY_RESULT,
+ (mode == EFX_UPDATE_FINISH_POLL),
+ NVRAM_UPDATE_FINISH_V2_IN_FLAG_ABORT,
+ (mode == EFX_UPDATE_FINISH_ABORT));
rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
if (!rc && outlen >= MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN) {
rc2 = MCDI_DWORD(outbuf, NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE);
- if (rc2 != MC_CMD_NVRAM_VERIFY_RC_SUCCESS)
+ if (rc2 != MC_CMD_NVRAM_VERIFY_RC_SUCCESS &&
+ rc2 != MC_CMD_NVRAM_VERIFY_RC_PENDING)
netif_err(efx, drv, efx->net_dev,
"NVRAM update failed verification with code 0x%x\n",
rc2);
switch (rc2) {
case MC_CMD_NVRAM_VERIFY_RC_SUCCESS:
break;
+ case MC_CMD_NVRAM_VERIFY_RC_PENDING:
+ rc = -EAGAIN;
+ break;
case MC_CMD_NVRAM_VERIFY_RC_CMS_CHECK_FAILED:
case MC_CMD_NVRAM_VERIFY_RC_MESSAGE_DIGEST_CHECK_FAILED:
case MC_CMD_NVRAM_VERIFY_RC_SIGNATURE_CHECK_FAILED:
@@ -2284,6 +2320,8 @@ static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
case MC_CMD_NVRAM_VERIFY_RC_NO_VALID_SIGNATURES:
case MC_CMD_NVRAM_VERIFY_RC_NO_TRUSTED_APPROVERS:
case MC_CMD_NVRAM_VERIFY_RC_NO_SIGNATURE_MATCH:
+ case MC_CMD_NVRAM_VERIFY_RC_REJECT_TEST_SIGNED:
+ case MC_CMD_NVRAM_VERIFY_RC_SECURITY_LEVEL_DOWNGRADE:
rc = -EPERM;
break;
default:
@@ -2296,6 +2334,42 @@ static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
return rc;
}
+#define EFX_MCDI_NVRAM_UPDATE_FINISH_INITIAL_POLL_DELAY_MS 5
+#define EFX_MCDI_NVRAM_UPDATE_FINISH_MAX_POLL_DELAY_MS 5000
+#define EFX_MCDI_NVRAM_UPDATE_FINISH_RETRIES 185
+
+int efx_mcdi_nvram_update_finish_polled(struct efx_nic *efx, unsigned int type)
+{
+ unsigned int delay = EFX_MCDI_NVRAM_UPDATE_FINISH_INITIAL_POLL_DELAY_MS;
+ unsigned int retry = 0;
+ int rc;
+
+ /* NVRAM updates can take a long time (e.g. up to 1 minute for bundle
+ * images). Polling for NVRAM update completion ensures that other MCDI
+ * commands can be issued before the background NVRAM update completes.
+ *
+ * The initial call either completes the update synchronously, or
+ * returns -EAGAIN to indicate processing is continuing. In the latter
+ * case, we poll for at least 900 seconds, at increasing intervals
+ * (5ms, 50ms, 500ms, 5s).
+ */
+ rc = efx_mcdi_nvram_update_finish(efx, type, EFX_UPDATE_FINISH_BACKGROUND);
+ while (rc == -EAGAIN) {
+ if (retry > EFX_MCDI_NVRAM_UPDATE_FINISH_RETRIES)
+ return -ETIMEDOUT;
+ retry++;
+
+ msleep(delay);
+ if (delay < EFX_MCDI_NVRAM_UPDATE_FINISH_MAX_POLL_DELAY_MS)
+ delay *= 10;
+
+ rc = efx_mcdi_nvram_update_finish(efx, type, EFX_UPDATE_FINISH_POLL);
+ }
+ return rc;
+}
+
+#ifdef CONFIG_SFC_MTD
+
int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start,
size_t len, size_t *retlen, u8 *buffer)
{
@@ -2389,7 +2463,8 @@ int efx_mcdi_mtd_sync(struct mtd_info *mtd)
if (part->updating) {
part->updating = false;
- rc = efx_mcdi_nvram_update_finish(efx, part->nvram_type);
+ rc = efx_mcdi_nvram_update_finish(efx, part->nvram_type,
+ EFX_UPDATE_FINISH_WAIT);
}
return rc;
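For reference, the 900 in EFX_DEVLINK_UPDATE_FINISH_TIMEOUT lines up with the retry constants added above. A minimal sketch of the worst-case polling time in efx_mcdi_nvram_update_finish_polled(), assuming msleep() sleeps for exactly the requested duration (in practice it can sleep longer):

#include <stdio.h>

int main(void)
{
	unsigned int delay_ms = 5;	/* INITIAL_POLL_DELAY_MS */
	unsigned long total_ms = 0;
	unsigned int i;

	/* One sleep per loop iteration; the loop gives up once 185
	 * (EFX_MCDI_NVRAM_UPDATE_FINISH_RETRIES) retries are exceeded,
	 * so at most 186 sleeps happen: 5 + 50 + 500 + 183 * 5000 ms.
	 */
	for (i = 0; i <= 185; i++) {
		total_ms += delay_ms;
		if (delay_ms < 5000)	/* MAX_POLL_DELAY_MS */
			delay_ms *= 10;
	}
	/* prints roughly 915 s, matching the "at least 900 seconds" comment */
	printf("worst case ~%lu s before -ETIMEDOUT\n", total_ms / 1000);
	return 0;
}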
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index cdb17d7c147f..3755cd3fe1e6 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -392,7 +392,7 @@ int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq);
int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out);
int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
size_t *size_out, size_t *erase_size_out,
- bool *protected_out);
+ size_t *write_size_out, bool *protected_out);
int efx_new_mcdi_nvram_test_all(struct efx_nic *efx);
int efx_mcdi_nvram_metadata(struct efx_nic *efx, unsigned int type,
u32 *subtype, u16 version[4], char *desc,
@@ -424,6 +424,26 @@ static inline int efx_mcdi_mon_probe(struct efx_nic *efx) { return 0; }
static inline void efx_mcdi_mon_remove(struct efx_nic *efx) {}
#endif
+int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type);
+int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
+ loff_t offset, const u8 *buffer, size_t length);
+int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
+ loff_t offset, size_t length);
+int efx_mcdi_nvram_metadata(struct efx_nic *efx, unsigned int type,
+ u32 *subtype, u16 version[4], char *desc,
+ size_t descsize);
+
+enum efx_update_finish_mode {
+ EFX_UPDATE_FINISH_WAIT,
+ EFX_UPDATE_FINISH_BACKGROUND,
+ EFX_UPDATE_FINISH_POLL,
+ EFX_UPDATE_FINISH_ABORT,
+};
+
+int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type,
+ enum efx_update_finish_mode mode);
+int efx_mcdi_nvram_update_finish_polled(struct efx_nic *efx, unsigned int type);
+
#ifdef CONFIG_SFC_MTD
int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start, size_t len,
size_t *retlen, u8 *buffer);
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index cd297e19cddc..9cb339c461fb 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -72,19 +72,19 @@
* | \------- Error
* \------------------------------ Resync (always set)
*
- * The client writes it's request into MC shared memory, and rings the
- * doorbell. Each request is completed by either by the MC writing
+ * The client writes its request into MC shared memory, and rings the
+ * doorbell. Each request is completed either by the MC writing
* back into shared memory, or by writing out an event.
*
* All MCDI commands support completion by shared memory response. Each
* request may also contain additional data (accounted for by HEADER.LEN),
- * and some response's may also contain additional data (again, accounted
+ * and some responses may also contain additional data (again, accounted
* for by HEADER.LEN).
*
* Some MCDI commands support completion by event, in which any associated
* response data is included in the event.
*
- * The protocol requires one response to be delivered for every request, a
+ * The protocol requires one response to be delivered for every request; a
* request should not be sent unless the response for the previous request
* has been received (either by polling shared memory, or by receiving
* an event).
@@ -165,6 +165,7 @@
#define FSE_AZ_EV_CODE_MCDI_EVRESPONSE 0xc
+
#define MC_CMD_ERR_CODE_OFST 0
#define MC_CMD_ERR_PROXY_PENDING_HANDLE_OFST 4
@@ -321,7 +322,7 @@
/* enum: The requesting client is not a function */
#define MC_CMD_ERR_CLIENT_NOT_FN 0x100c
/* enum: The requested operation might require the command to be passed between
- * MCs, and thetransport doesn't support that. Should only ever been seen over
+ * MCs, and the transport doesn't support that. Should only ever be seen over
* the UART.
*/
#define MC_CMD_ERR_TRANSPORT_NOPROXY 0x100d
@@ -358,7 +359,7 @@
* sub-variant switching.
*/
#define MC_CMD_ERR_FILTERS_PRESENT 0x1014
-/* enum: The clock whose frequency you've attempted to set set doesn't exist on
+/* enum: The clock whose frequency you've attempted to set doesn't exist on
* this NIC
*/
#define MC_CMD_ERR_NO_CLOCK 0x1015
@@ -387,25 +388,6 @@
*/
#define MC_CMD_ERR_PIOBUFS_PRESENT 0x101b
-/* MC_CMD_RESOURCE_SPECIFIER enum */
-/* enum: Any */
-#define MC_CMD_RESOURCE_INSTANCE_ANY 0xffffffff
-#define MC_CMD_RESOURCE_INSTANCE_NONE 0xfffffffe /* enum */
-
-/* MC_CMD_FPGA_FLASH_INDEX enum */
-#define MC_CMD_FPGA_FLASH_PRIMARY 0x0 /* enum */
-#define MC_CMD_FPGA_FLASH_SECONDARY 0x1 /* enum */
-
-/* MC_CMD_EXTERNAL_MAE_LINK_MODE enum */
-/* enum: Legacy mode as described in XN-200039-TC. */
-#define MC_CMD_EXTERNAL_MAE_LINK_MODE_LEGACY 0x0
-/* enum: Switchdev mode as described in XN-200039-TC. */
-#define MC_CMD_EXTERNAL_MAE_LINK_MODE_SWITCHDEV 0x1
-/* enum: Bootstrap mode as described in XN-200039-TC. */
-#define MC_CMD_EXTERNAL_MAE_LINK_MODE_BOOTSTRAP 0x2
-/* enum: Link-mode change is in-progress as described in XN-200039-TC. */
-#define MC_CMD_EXTERNAL_MAE_LINK_MODE_PENDING 0xf
-
/* PCIE_INTERFACE enum: From EF100 onwards, SFC products can have multiple PCIe
* interfaces. There is a need to refer to interfaces explicitly from drivers
* (for example, a management driver on one interface administering a function
@@ -424,6 +406,14 @@
* an on-NIC ARM module is expected to be connected.
*/
#define PCIE_INTERFACE_NIC_EMBEDDED 0x1
+/* enum: The PCIe logical interface 0. It is an alias for HOST_PRIMARY. */
+#define PCIE_INTERFACE_PCIE_HOST_INTF_0 0x0
+/* enum: The PCIe logical interface 1. */
+#define PCIE_INTERFACE_PCIE_HOST_INTF_1 0x2
+/* enum: The PCIe logical interface 2. */
+#define PCIE_INTERFACE_PCIE_HOST_INTF_2 0x3
+/* enum: The PCIe logical interface 3. */
+#define PCIE_INTERFACE_PCIE_HOST_INTF_3 0x4
/* enum: For MCDI commands issued over a PCIe interface, this value is
* translated into the interface over which the command was issued. Not
* meaningful for other MCDI transports.
@@ -640,7 +630,11 @@
* be allocated by different counter blocks, so e.g. AR counter 42 is different
* from CT counter 42. Generation counts are also type-specific. This value is
* also present in the header of streaming counter packets, in the IDENTIFIER
- * field (see packetiser packet format definitions).
+ * field (see packetiser packet format definitions). Also note that LACP
+ * counter IDs are not allocated individually, instead the counter IDs are
+ * directly tied to the LACP balance table indices. These in turn are allocated
+ * in large contiguous blocks as a LAG config. Calling MAE_COUNTER_ALLOC/FREE
+ * with an LACP counter type will return EPERM.
*/
/* enum: Action Rule counters - can be referenced in AR response. */
#define MAE_COUNTER_TYPE_AR 0x0
@@ -648,6 +642,14 @@
#define MAE_COUNTER_TYPE_CT 0x1
/* enum: Outer Rule counters - can be referenced in OR response. */
#define MAE_COUNTER_TYPE_OR 0x2
+/* enum: LACP counters - linked to LACP balance table entries. */
+#define MAE_COUNTER_TYPE_LACP 0x3
+
+/* MAE_COUNTER_ID enum: ID of allocated counter or counter list. */
+/* enum: A counter ID that is guaranteed never to represent a real counter or
+ * counter list.
+ */
+#define MAE_COUNTER_ID_NULL 0xffffffff
/* TABLE_ID enum: Unique IDs for tables. The 32-bit ID values have been
* structured with bits [31:24] reserved (0), [23:16] indicating which major
@@ -656,7 +658,9 @@
* variations of the same table. (All of the tables currently defined within
* the streaming engines are listed here, but this does not imply that they are
* all supported - MC_CMD_TABLE_LIST returns the list of actually supported
- * tables.)
+ * tables.) The DPU offload engines' enumerators follow a deliberate pattern:
+ * 0x01010000 + is_dpu_net * 0x10000 + is_wr_or_tx * 0x8000 + is_lite_pipe *
+ * 0x1000 + oe_engine_type * 0x100 + oe_instance_within_pipe * 0x10
*/
/* enum: Outer_Rule_Table in the MAE - refer to SF-123102-TC. */
#define TABLE_ID_OUTER_RULE_TABLE 0x10000
@@ -694,45 +698,70 @@
#define TABLE_ID_RSS_CONTEXT_TABLE 0x20200
/* enum: Indirection_Table in VNIC Rx - refer to SF-123102-TC. */
#define TABLE_ID_INDIRECTION_TABLE 0x20300
-
-/* TABLE_COMPRESSED_VLAN enum: Compressed VLAN TPID as used by some field
- * types; can be calculated by (((ether_type_msb >> 2) & 0x4) ^ 0x4) |
- * (ether_type_msb & 0x3);
- */
-#define TABLE_COMPRESSED_VLAN_TPID_8100 0x5 /* enum */
-#define TABLE_COMPRESSED_VLAN_TPID_88A8 0x4 /* enum */
-#define TABLE_COMPRESSED_VLAN_TPID_9100 0x1 /* enum */
-#define TABLE_COMPRESSED_VLAN_TPID_9200 0x2 /* enum */
-#define TABLE_COMPRESSED_VLAN_TPID_9300 0x3 /* enum */
-
-/* TABLE_NAT_DIR enum: NAT direction. */
-#define TABLE_NAT_DIR_SOURCE 0x0 /* enum */
-#define TABLE_NAT_DIR_DEST 0x1 /* enum */
-
-/* TABLE_RSS_KEY_MODE enum: Defines how the value for Toeplitz hashing for RSS
- * is constructed as a concatenation (indicated here by "++") of packet header
- * fields.
- */
-/* enum: IP src addr ++ IP dst addr */
-#define TABLE_RSS_KEY_MODE_SA_DA 0x0
-/* enum: IP src addr ++ IP dst addr ++ TCP/UDP src port ++ TCP/UDP dst port */
-#define TABLE_RSS_KEY_MODE_SA_DA_SP_DP 0x1
-/* enum: IP src addr */
-#define TABLE_RSS_KEY_MODE_SA 0x2
-/* enum: IP dst addr */
-#define TABLE_RSS_KEY_MODE_DA 0x3
-/* enum: IP src addr ++ TCP/UDP src port */
-#define TABLE_RSS_KEY_MODE_SA_SP 0x4
-/* enum: IP dest addr ++ TCP dest port */
-#define TABLE_RSS_KEY_MODE_DA_DP 0x5
-/* enum: Nothing (produces input of 0, resulting in output hash of 0) */
-#define TABLE_RSS_KEY_MODE_NONE 0x7
-
-/* TABLE_RSS_SPREAD_MODE enum: RSS spreading mode. */
-/* enum: RSS uses Indirection_Table lookup. */
-#define TABLE_RSS_SPREAD_MODE_INDIRECTION 0x0
-/* enum: RSS uses even spreading calculation. */
-#define TABLE_RSS_SPREAD_MODE_EVEN 0x1
+/* enum: DPU.host read pipe first CRC offload engine profiles - refer to
+ * XN-200147-AN.
+ */
+#define TABLE_ID_DPU_HOST_RD_CRC0_OE_PROFILE 0x1010000
+/* enum: DPU.host read pipe second CRC offload engine profiles - refer to
+ * XN-200147-AN.
+ */
+#define TABLE_ID_DPU_HOST_RD_CRC1_OE_PROFILE 0x1010010
+/* enum: DPU.host write pipe first CRC offload engine profiles - refer to
+ * XN-200147-AN.
+ */
+#define TABLE_ID_DPU_HOST_WR_CRC0_OE_PROFILE 0x1018000
+/* enum: DPU.host write pipe second CRC offload engine profiles - refer to
+ * XN-200147-AN.
+ */
+#define TABLE_ID_DPU_HOST_WR_CRC1_OE_PROFILE 0x1018010
+/* enum: DPU.net 'full' receive pipe CRC offload engine profiles - refer to
+ * XN-200147-AN.
+ */
+#define TABLE_ID_DPU_NET_RX_CRC0_OE_PROFILE 0x1020000
+/* enum: DPU.net 'full' receive pipe first checksum offload engine profiles -
+ * refer to XN-200147-AN.
+ */
+#define TABLE_ID_DPU_NET_RX_CSUM0_OE_PROFILE 0x1020100
+/* enum: DPU.net 'full' receive pipe second checksum offload engine profiles -
+ * refer to XN-200147-AN.
+ */
+#define TABLE_ID_DPU_NET_RX_CSUM1_OE_PROFILE 0x1020110
+/* enum: DPU.net 'full' receive pipe AES-GCM offload engine profiles - refer to
+ * XN-200147-AN.
+ */
+#define TABLE_ID_DPU_NET_RX_AES_GCM0_OE_PROFILE 0x1020200
+/* enum: DPU.net 'lite' receive pipe CRC offload engine profiles - refer to
+ * XN-200147-AN.
+ */
+#define TABLE_ID_DPU_NET_RXLITE_CRC0_OE_PROFILE 0x1021000
+/* enum: DPU.net 'lite' receive pipe checksum offload engine profiles - refer
+ * to XN-200147-AN.
+ */
+#define TABLE_ID_DPU_NET_RXLITE_CSUM0_OE_PROFILE 0x1021100
+/* enum: DPU.net 'full' transmit pipe CRC offload engine profiles - refer to
+ * XN-200147-AN.
+ */
+#define TABLE_ID_DPU_NET_TX_CRC0_OE_PROFILE 0x1028000
+/* enum: DPU.net 'full' transmit pipe first checksum offload engine profiles -
+ * refer to XN-200147-AN.
+ */
+#define TABLE_ID_DPU_NET_TX_CSUM0_OE_PROFILE 0x1028100
+/* enum: DPU.net 'full' transmit pipe second checksum offload engine profiles -
+ * refer to XN-200147-AN.
+ */
+#define TABLE_ID_DPU_NET_TX_CSUM1_OE_PROFILE 0x1028110
+/* enum: DPU.net 'full' transmit pipe AES-GCM offload engine profiles - refer
+ * to XN-200147-AN.
+ */
+#define TABLE_ID_DPU_NET_TX_AES_GCM0_OE_PROFILE 0x1028200
+/* enum: DPU.net 'lite' transmit pipe CRC offload engine profiles - refer to
+ * XN-200147-AN.
+ */
+#define TABLE_ID_DPU_NET_TXLITE_CRC0_OE_PROFILE 0x1029000
+/* enum: DPU.net 'lite' transmit pipe checksum offload engine profiles - refer
+ * to XN-200147-AN.
+ */
+#define TABLE_ID_DPU_NET_TXLITE_CSUM0_OE_PROFILE 0x1029100
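A quick illustrative check (not part of the diff) that the DPU table IDs above follow the documented pattern; the engine-type encoding (CRC = 0, checksum = 1, AES-GCM = 2) is inferred from the listed values rather than stated explicitly:

#include <assert.h>
#include <stdio.h>

static unsigned int dpu_table_id(int is_dpu_net, int is_wr_or_tx,
				 int is_lite_pipe, int oe_engine_type,
				 int oe_instance_within_pipe)
{
	return 0x01010000 + is_dpu_net * 0x10000 + is_wr_or_tx * 0x8000 +
	       is_lite_pipe * 0x1000 + oe_engine_type * 0x100 +
	       oe_instance_within_pipe * 0x10;
}

int main(void)
{
	/* DPU.net, transmit, lite pipe, checksum engine, instance 0 */
	assert(dpu_table_id(1, 1, 1, 1, 0) == 0x1029100);	/* TXLITE_CSUM0 */
	/* DPU.host, write pipe, CRC engine, instance 1 */
	assert(dpu_table_id(0, 1, 0, 0, 1) == 0x1018010);	/* HOST_WR_CRC1 */
	printf("pattern holds\n");
	return 0;
}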
/* TABLE_FIELD_ID enum: Unique IDs for fields. Related concepts have been
* loosely grouped together into blocks with gaps for expansion, but the values
@@ -1026,6 +1055,16 @@
#define TABLE_FIELD_ID_BAL_TBL_BASE_DIV64 0xde
/* enum: Length of balance table region: 0=>64, 1=>128, 2=>256. */
#define TABLE_FIELD_ID_BAL_TBL_LEN_ID 0xdf
+/* enum: LACP LAG ID (i.e. the low 3 bits of LACP LAG mport ID), indexing
+ * LACP_LAG_Config_Table. Refer to SF-123102-TC.
+ */
+#define TABLE_FIELD_ID_LACP_LAG_ID 0xe0
+/* enum: Address in LACP_Balance_Table. The balance table is partitioned
+ * between LAGs according to the settings in LACP_LAG_Config_Table and then
+ * indexed by the LACP hash, providing the mapping to destination mports. Refer
+ * to SF-123102-TC.
+ */
+#define TABLE_FIELD_ID_BAL_TBL_ADDR 0xe1
/* enum: UDP port to match for UDP-based encapsulations; required to be 0 for
* other encapsulation types.
*/
@@ -1082,6 +1121,55 @@
#define TABLE_FIELD_ID_INDIR_TBL_LEN_ID 0x105
/* enum: An offset to be applied to the base destination queue ID. */
#define TABLE_FIELD_ID_INDIR_OFFSET 0x106
+/* enum: DPU offload engine profile ID to address. */
+#define TABLE_FIELD_ID_OE_PROFILE 0x3e8
+/* enum: Width of the CRC to calculate - see CRC_VARIANT enum. */
+#define TABLE_FIELD_ID_CRC_VARIANT 0x3f2
+/* enum: If set, reflect the bits of each input byte, bit 7 is LSB, bit 0 is
+ * MSB. If clear, bit 7 is MSB, bit 0 is LSB.
+ */
+#define TABLE_FIELD_ID_CRC_REFIN 0x3f3
+/* enum: If set, reflect the bits of each output byte, bit 7 is LSB, bit 0 is
+ * MSB. If clear, bit 7 is MSB, bit 0 is LSB.
+ */
+#define TABLE_FIELD_ID_CRC_REFOUT 0x3f4
+/* enum: If set, invert every bit of the output value. */
+#define TABLE_FIELD_ID_CRC_INVOUT 0x3f5
+/* enum: The CRC polynomial to use for checksumming, in normal form. */
+#define TABLE_FIELD_ID_CRC_POLY 0x3f6
+/* enum: Operation for the checksum engine to perform - see DPU_CSUM_OP enum.
+ */
+#define TABLE_FIELD_ID_CSUM_OP 0x410
+/* enum: Byte offset of checksum relative to region_start (for VALIDATE_*
+ * operations only).
+ */
+#define TABLE_FIELD_ID_CSUM_OFFSET 0x411
+/* enum: Indicates there is additional data on OPR bus that needs to be
+ * incorporated into the payload checksum.
+ */
+#define TABLE_FIELD_ID_CSUM_OPR_ADDITIONAL_DATA 0x412
+/* enum: Log2 data size of additional data on OPR bus. */
+#define TABLE_FIELD_ID_CSUM_OPR_DATA_SIZE_LOG2 0x413
+/* enum: 4 byte offset of where to find the additional data on the OPR bus. */
+#define TABLE_FIELD_ID_CSUM_OPR_4B_OFF 0x414
+/* enum: Operation type for the AES-GCM core - see GCM_OP_CODE enum. */
+#define TABLE_FIELD_ID_GCM_OP_CODE 0x41a
+/* enum: Key length - AES_KEY_LEN enum. */
+#define TABLE_FIELD_ID_GCM_KEY_LEN 0x41b
+/* enum: OPR 4 byte offset for ICV or GHASH output (only in BULK_* mode) or
+ * IPSEC decrypt output.
+ */
+#define TABLE_FIELD_ID_GCM_OPR_4B_OFFSET 0x41c
+/* enum: If OP_CODE is BULK_*, indicates Emit GHASH (Fragment mode). Else,
+ * indicates IPSEC-ESN mode.
+ */
+#define TABLE_FIELD_ID_GCM_EMIT_GHASH_ISESN 0x41d
+/* enum: Replay Protection Enable. */
+#define TABLE_FIELD_ID_GCM_REPLAY_PROTECT_EN 0x41e
+/* enum: IPSEC Encrypt ESP trailer NEXT_HEADER byte. */
+#define TABLE_FIELD_ID_GCM_NEXT_HDR 0x41f
+/* enum: Replay Window Size. */
+#define TABLE_FIELD_ID_GCM_REPLAY_WIN_SIZE 0x420
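/* Illustrative sketch (not part of this header): the field values one would
 * expect for an Ethernet-FCS-style CRC-32 profile, expressed in terms of the
 * TABLE_FIELD_ID_CRC_* fields above. The CRC_VARIANT encoding is not visible
 * in this hunk, so it is left as a placeholder; the polynomial and reflection
 * settings are the standard CRC-32 parameters. Kernel types are assumed.
 */
struct example_crc_profile {
	u32 crc_variant;	/* TABLE_FIELD_ID_CRC_VARIANT: CRC width */
	bool refin;		/* TABLE_FIELD_ID_CRC_REFIN */
	bool refout;		/* TABLE_FIELD_ID_CRC_REFOUT */
	bool invout;		/* TABLE_FIELD_ID_CRC_INVOUT */
	u32 poly;		/* TABLE_FIELD_ID_CRC_POLY, normal form */
};

static const struct example_crc_profile example_crc32_eth = {
	.crc_variant = 0,	/* placeholder: 32-bit CRC_VARIANT value */
	.refin = true,		/* CRC-32 reflects each input byte */
	.refout = true,		/* ...and the output */
	.invout = true,		/* final XOR with all-ones */
	.poly = 0x04C11DB7,	/* CRC-32 polynomial, normal form */
};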
/* MCDI_EVENT structuredef: The structure of an MCDI_EVENT on Siena/EF10/EF100
* platforms
@@ -1138,6 +1226,24 @@
#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_OFST 0
#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_LBN 24
#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_WIDTH 8
+#define MCDI_EVENT_PORT_LINKCHANGE_PORT_HANDLE_OFST 0
+#define MCDI_EVENT_PORT_LINKCHANGE_PORT_HANDLE_LBN 0
+#define MCDI_EVENT_PORT_LINKCHANGE_PORT_HANDLE_WIDTH 24
+#define MCDI_EVENT_PORT_LINKCHANGE_SEQ_NUM_OFST 0
+#define MCDI_EVENT_PORT_LINKCHANGE_SEQ_NUM_LBN 24
+#define MCDI_EVENT_PORT_LINKCHANGE_SEQ_NUM_WIDTH 7
+#define MCDI_EVENT_PORT_LINKCHANGE_LINK_UP_OFST 0
+#define MCDI_EVENT_PORT_LINKCHANGE_LINK_UP_LBN 31
+#define MCDI_EVENT_PORT_LINKCHANGE_LINK_UP_WIDTH 1
+#define MCDI_EVENT_PORT_MODULECHANGE_PORT_HANDLE_OFST 0
+#define MCDI_EVENT_PORT_MODULECHANGE_PORT_HANDLE_LBN 0
+#define MCDI_EVENT_PORT_MODULECHANGE_PORT_HANDLE_WIDTH 24
+#define MCDI_EVENT_PORT_MODULECHANGE_SEQ_NUM_OFST 0
+#define MCDI_EVENT_PORT_MODULECHANGE_SEQ_NUM_LBN 24
+#define MCDI_EVENT_PORT_MODULECHANGE_SEQ_NUM_WIDTH 7
+#define MCDI_EVENT_PORT_MODULECHANGE_MDI_CONNECTED_OFST 0
+#define MCDI_EVENT_PORT_MODULECHANGE_MDI_CONNECTED_LBN 31
+#define MCDI_EVENT_PORT_MODULECHANGE_MDI_CONNECTED_WIDTH 1
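/* Illustrative sketch (not part of this header): decoding the 32-bit DATA
 * word of a PORT_LINKCHANGE event using the LBN/WIDTH constants above. The
 * function name is hypothetical and kernel types (u32, bool) are assumed;
 * a PORT_MODULECHANGE event decodes the same way with MDI_CONNECTED in place
 * of LINK_UP.
 */
static inline void example_decode_port_linkchange(u32 data, u32 *port_handle,
						  u32 *seq_num, bool *link_up)
{
	*port_handle = (data >> MCDI_EVENT_PORT_LINKCHANGE_PORT_HANDLE_LBN) &
		       ((1u << MCDI_EVENT_PORT_LINKCHANGE_PORT_HANDLE_WIDTH) - 1);
	*seq_num = (data >> MCDI_EVENT_PORT_LINKCHANGE_SEQ_NUM_LBN) &
		   ((1u << MCDI_EVENT_PORT_LINKCHANGE_SEQ_NUM_WIDTH) - 1);
	*link_up = !!(data & (1u << MCDI_EVENT_PORT_LINKCHANGE_LINK_UP_LBN));
}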
#define MCDI_EVENT_SENSOREVT_MONITOR_OFST 0
#define MCDI_EVENT_SENSOREVT_MONITOR_LBN 0
#define MCDI_EVENT_SENSOREVT_MONITOR_WIDTH 8
@@ -1237,7 +1343,7 @@
#define MCDI_EVENT_AOE_FPGA_LOAD_FAILED 0xe
/* enum: Notify that invalid flash type detected */
#define MCDI_EVENT_AOE_INVALID_FPGA_FLASH_TYPE 0xf
-/* enum: Notify that the attempt to run FPGA Controller firmware timedout */
+/* enum: Notify that the attempt to run FPGA Controller firmware timed out */
#define MCDI_EVENT_AOE_FC_RUN_TIMEDOUT 0x10
/* enum: Failure to probe one or more FPGA boot flash chips */
#define MCDI_EVENT_AOE_FPGA_BOOT_FLASH_INVALID 0x11
@@ -1255,7 +1361,7 @@
#define MCDI_EVENT_AOE_ERR_FC_ASSERT_INFO_WIDTH 8
/* enum: FC Assert happened, but the register information is not available */
#define MCDI_EVENT_AOE_ERR_FC_ASSERT_SEEN 0x0
-/* enum: The register information for FC Assert is ready for readinng by driver
+/* enum: The register information for FC Assert is ready for reading by driver
*/
#define MCDI_EVENT_AOE_ERR_FC_ASSERT_DATA_READY 0x1
#define MCDI_EVENT_AOE_ERR_CODE_FPGA_HEADER_VERIFY_FAILED_OFST 0
@@ -1364,6 +1470,12 @@
#define MCDI_EVENT_MODULECHANGE_SEQ_OFST 0
#define MCDI_EVENT_MODULECHANGE_SEQ_LBN 30
#define MCDI_EVENT_MODULECHANGE_SEQ_WIDTH 2
+#define MCDI_EVENT_DESC_PROXY_VIRTQ_VI_ID_OFST 0
+#define MCDI_EVENT_DESC_PROXY_VIRTQ_VI_ID_LBN 0
+#define MCDI_EVENT_DESC_PROXY_VIRTQ_VI_ID_WIDTH 16
+#define MCDI_EVENT_DESC_PROXY_VIRTQ_ID_OFST 0
+#define MCDI_EVENT_DESC_PROXY_VIRTQ_ID_LBN 16
+#define MCDI_EVENT_DESC_PROXY_VIRTQ_ID_WIDTH 16
#define MCDI_EVENT_DATA_LBN 0
#define MCDI_EVENT_DATA_WIDTH 32
/* Alias for PTP_DATA. */
@@ -1500,6 +1612,31 @@
* change to the journal.
*/
#define MCDI_EVENT_CODE_MPORT_JOURNAL_CHANGE 0x27
+/* enum: Notification that a source queue is enabled and attached to its proxy
+ * sink queue. SRC field contains the handle of the affected descriptor proxy
+ * function. DATA field contains the relative source queue number and absolute
+ * VI ID.
+ */
+#define MCDI_EVENT_CODE_DESC_PROXY_FUNC_QUEUE_START 0x28
+/* enum: Notification of a change in link state and/or link speed of a network
+ * port link. This event applies to a network port identified by a handle,
+ * PORT_HANDLE, which is discovered by the driver using the MC_CMD_ENUM_PORTS
+ * command.
+ */
+#define MCDI_EVENT_CODE_PORT_LINKCHANGE 0x29
+/* enum: Notification of a change in the state of an MDI (external connector)
+ * of a network port. This typically corresponds to module plug/unplug for
+ * modular interfaces (e.g., SFP/QSFP and similar) or cable connect/disconnect.
+ * This event applies to a network port identified by a handle, PORT_HANDLE,
+ * which is discovered by the driver using the MC_CMD_ENUM_PORTS command.
+ */
+#define MCDI_EVENT_CODE_PORT_MODULECHANGE 0x2a
+/* enum: Notification that the port enumeration journal has changed since it
+ * was last read and updates can be read using the MC_CMD_ENUM_PORTS command.
+ * The firmware may moderate the events so that an event is not sent for every
+ * change to the journal.
+ */
+#define MCDI_EVENT_CODE_ENUM_PORTS_CHANGE 0x2b
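/* Illustrative sketch (not part of this header): how a driver's event handler
 * might dispatch the three event codes added above. Helper names are
 * hypothetical; the DATA layouts are the PORT_LINKCHANGE/PORT_MODULECHANGE
 * fields defined earlier in MCDI_EVENT.
 */
static inline void example_handle_port_event(unsigned int code, u32 data)
{
	switch (code) {
	case MCDI_EVENT_CODE_PORT_LINKCHANGE:
		/* decode PORT_HANDLE/SEQ_NUM/LINK_UP from DATA */
		break;
	case MCDI_EVENT_CODE_PORT_MODULECHANGE:
		/* decode PORT_HANDLE/SEQ_NUM/MDI_CONNECTED from DATA */
		break;
	case MCDI_EVENT_CODE_ENUM_PORTS_CHANGE:
		/* journal changed: re-read ports via MC_CMD_ENUM_PORTS */
		break;
	}
}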
/* enum: Artificial event generated by host and posted via MC for test
* purposes.
*/
@@ -1512,6 +1649,14 @@
#define MCDI_EVENT_LINKCHANGE_DATA_LEN 4
#define MCDI_EVENT_LINKCHANGE_DATA_LBN 0
#define MCDI_EVENT_LINKCHANGE_DATA_WIDTH 32
+#define MCDI_EVENT_PORT_LINKCHANGE_DATA_OFST 0
+#define MCDI_EVENT_PORT_LINKCHANGE_DATA_LEN 4
+#define MCDI_EVENT_PORT_LINKCHANGE_DATA_LBN 0
+#define MCDI_EVENT_PORT_LINKCHANGE_DATA_WIDTH 32
+#define MCDI_EVENT_PORT_MODULECHANGE_DATA_OFST 0
+#define MCDI_EVENT_PORT_MODULECHANGE_DATA_LEN 4
+#define MCDI_EVENT_PORT_MODULECHANGE_DATA_LBN 0
+#define MCDI_EVENT_PORT_MODULECHANGE_DATA_WIDTH 32
#define MCDI_EVENT_SENSOREVT_DATA_OFST 0
#define MCDI_EVENT_SENSOREVT_DATA_LEN 4
#define MCDI_EVENT_SENSOREVT_DATA_LBN 0
@@ -1668,247 +1813,6 @@
#define MCDI_EVENT_DESC_PROXY_VIRTIO_FEATURES_LBN 0
#define MCDI_EVENT_DESC_PROXY_VIRTIO_FEATURES_WIDTH 32
-/* FCDI_EVENT structuredef */
-#define FCDI_EVENT_LEN 8
-#define FCDI_EVENT_CONT_LBN 32
-#define FCDI_EVENT_CONT_WIDTH 1
-#define FCDI_EVENT_LEVEL_LBN 33
-#define FCDI_EVENT_LEVEL_WIDTH 3
-/* enum: Info. */
-#define FCDI_EVENT_LEVEL_INFO 0x0
-/* enum: Warning. */
-#define FCDI_EVENT_LEVEL_WARN 0x1
-/* enum: Error. */
-#define FCDI_EVENT_LEVEL_ERR 0x2
-/* enum: Fatal. */
-#define FCDI_EVENT_LEVEL_FATAL 0x3
-#define FCDI_EVENT_DATA_OFST 0
-#define FCDI_EVENT_DATA_LEN 4
-#define FCDI_EVENT_LINK_STATE_STATUS_OFST 0
-#define FCDI_EVENT_LINK_STATE_STATUS_LBN 0
-#define FCDI_EVENT_LINK_STATE_STATUS_WIDTH 1
-#define FCDI_EVENT_LINK_DOWN 0x0 /* enum */
-#define FCDI_EVENT_LINK_UP 0x1 /* enum */
-#define FCDI_EVENT_DATA_LBN 0
-#define FCDI_EVENT_DATA_WIDTH 32
-#define FCDI_EVENT_SRC_LBN 36
-#define FCDI_EVENT_SRC_WIDTH 8
-#define FCDI_EVENT_EV_CODE_LBN 60
-#define FCDI_EVENT_EV_CODE_WIDTH 4
-#define FCDI_EVENT_CODE_LBN 44
-#define FCDI_EVENT_CODE_WIDTH 8
-/* enum: The FC was rebooted. */
-#define FCDI_EVENT_CODE_REBOOT 0x1
-/* enum: Bad assert. */
-#define FCDI_EVENT_CODE_ASSERT 0x2
-/* enum: DDR3 test result. */
-#define FCDI_EVENT_CODE_DDR_TEST_RESULT 0x3
-/* enum: Link status. */
-#define FCDI_EVENT_CODE_LINK_STATE 0x4
-/* enum: A timed read is ready to be serviced. */
-#define FCDI_EVENT_CODE_TIMED_READ 0x5
-/* enum: One or more PPS IN events */
-#define FCDI_EVENT_CODE_PPS_IN 0x6
-/* enum: Tick event from PTP clock */
-#define FCDI_EVENT_CODE_PTP_TICK 0x7
-/* enum: ECC error counters */
-#define FCDI_EVENT_CODE_DDR_ECC_STATUS 0x8
-/* enum: Current status of PTP */
-#define FCDI_EVENT_CODE_PTP_STATUS 0x9
-/* enum: Port id config to map MC-FC port idx */
-#define FCDI_EVENT_CODE_PORT_CONFIG 0xa
-/* enum: Boot result or error code */
-#define FCDI_EVENT_CODE_BOOT_RESULT 0xb
-#define FCDI_EVENT_REBOOT_SRC_LBN 36
-#define FCDI_EVENT_REBOOT_SRC_WIDTH 8
-#define FCDI_EVENT_REBOOT_FC_FW 0x0 /* enum */
-#define FCDI_EVENT_REBOOT_FC_BOOTLOADER 0x1 /* enum */
-#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_OFST 0
-#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LEN 4
-#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LBN 0
-#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_WIDTH 32
-#define FCDI_EVENT_ASSERT_TYPE_LBN 36
-#define FCDI_EVENT_ASSERT_TYPE_WIDTH 8
-#define FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_LBN 36
-#define FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_WIDTH 8
-#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_OFST 0
-#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_LEN 4
-#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_LBN 0
-#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_WIDTH 32
-#define FCDI_EVENT_LINK_STATE_DATA_OFST 0
-#define FCDI_EVENT_LINK_STATE_DATA_LEN 4
-#define FCDI_EVENT_LINK_STATE_DATA_LBN 0
-#define FCDI_EVENT_LINK_STATE_DATA_WIDTH 32
-#define FCDI_EVENT_PTP_STATE_OFST 0
-#define FCDI_EVENT_PTP_STATE_LEN 4
-#define FCDI_EVENT_PTP_UNDEFINED 0x0 /* enum */
-#define FCDI_EVENT_PTP_SETUP_FAILED 0x1 /* enum */
-#define FCDI_EVENT_PTP_OPERATIONAL 0x2 /* enum */
-#define FCDI_EVENT_PTP_STATE_LBN 0
-#define FCDI_EVENT_PTP_STATE_WIDTH 32
-#define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_LBN 36
-#define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_WIDTH 8
-#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_OFST 0
-#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_LEN 4
-#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_LBN 0
-#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_WIDTH 32
-/* Index of MC port being referred to */
-#define FCDI_EVENT_PORT_CONFIG_SRC_LBN 36
-#define FCDI_EVENT_PORT_CONFIG_SRC_WIDTH 8
-/* FC Port index that matches the MC port index in SRC */
-#define FCDI_EVENT_PORT_CONFIG_DATA_OFST 0
-#define FCDI_EVENT_PORT_CONFIG_DATA_LEN 4
-#define FCDI_EVENT_PORT_CONFIG_DATA_LBN 0
-#define FCDI_EVENT_PORT_CONFIG_DATA_WIDTH 32
-#define FCDI_EVENT_BOOT_RESULT_OFST 0
-#define FCDI_EVENT_BOOT_RESULT_LEN 4
-/* Enum values, see field(s): */
-/* MC_CMD_AOE/MC_CMD_AOE_OUT_INFO/FC_BOOT_RESULT */
-#define FCDI_EVENT_BOOT_RESULT_LBN 0
-#define FCDI_EVENT_BOOT_RESULT_WIDTH 32
-
-/* FCDI_EXTENDED_EVENT_PPS structuredef: Extended FCDI event to send PPS events
- * to the MC. Note that this structure | is overlayed over a normal FCDI event
- * such that bits 32-63 containing | event code, level, source etc remain the
- * same. In this case the data | field of the header is defined to be the
- * number of timestamps
- */
-#define FCDI_EXTENDED_EVENT_PPS_LENMIN 16
-#define FCDI_EXTENDED_EVENT_PPS_LENMAX 248
-#define FCDI_EXTENDED_EVENT_PPS_LENMAX_MCDI2 1016
-#define FCDI_EXTENDED_EVENT_PPS_LEN(num) (8+8*(num))
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_NUM(len) (((len)-8)/8)
-/* Number of timestamps following */
-#define FCDI_EXTENDED_EVENT_PPS_COUNT_OFST 0
-#define FCDI_EXTENDED_EVENT_PPS_COUNT_LEN 4
-#define FCDI_EXTENDED_EVENT_PPS_COUNT_LBN 0
-#define FCDI_EXTENDED_EVENT_PPS_COUNT_WIDTH 32
-/* Seconds field of a timestamp record */
-#define FCDI_EXTENDED_EVENT_PPS_SECONDS_OFST 8
-#define FCDI_EXTENDED_EVENT_PPS_SECONDS_LEN 4
-#define FCDI_EXTENDED_EVENT_PPS_SECONDS_LBN 64
-#define FCDI_EXTENDED_EVENT_PPS_SECONDS_WIDTH 32
-/* Nanoseconds field of a timestamp record */
-#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_OFST 12
-#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LEN 4
-#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LBN 96
-#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_WIDTH 32
-/* Timestamp records comprising the event */
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_OFST 8
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LEN 8
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LO_OFST 8
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LO_LEN 4
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LO_LBN 64
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LO_WIDTH 32
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_HI_OFST 12
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_HI_LEN 4
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_HI_LBN 96
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_HI_WIDTH 32
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MINNUM 1
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MAXNUM 30
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MAXNUM_MCDI2 126
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LBN 64
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_WIDTH 64
-
-/* MUM_EVENT structuredef */
-#define MUM_EVENT_LEN 8
-#define MUM_EVENT_CONT_LBN 32
-#define MUM_EVENT_CONT_WIDTH 1
-#define MUM_EVENT_LEVEL_LBN 33
-#define MUM_EVENT_LEVEL_WIDTH 3
-/* enum: Info. */
-#define MUM_EVENT_LEVEL_INFO 0x0
-/* enum: Warning. */
-#define MUM_EVENT_LEVEL_WARN 0x1
-/* enum: Error. */
-#define MUM_EVENT_LEVEL_ERR 0x2
-/* enum: Fatal. */
-#define MUM_EVENT_LEVEL_FATAL 0x3
-#define MUM_EVENT_DATA_OFST 0
-#define MUM_EVENT_DATA_LEN 4
-#define MUM_EVENT_SENSOR_ID_OFST 0
-#define MUM_EVENT_SENSOR_ID_LBN 0
-#define MUM_EVENT_SENSOR_ID_WIDTH 8
-/* Enum values, see field(s): */
-/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
-#define MUM_EVENT_SENSOR_STATE_OFST 0
-#define MUM_EVENT_SENSOR_STATE_LBN 8
-#define MUM_EVENT_SENSOR_STATE_WIDTH 8
-#define MUM_EVENT_PORT_PHY_READY_OFST 0
-#define MUM_EVENT_PORT_PHY_READY_LBN 0
-#define MUM_EVENT_PORT_PHY_READY_WIDTH 1
-#define MUM_EVENT_PORT_PHY_LINK_UP_OFST 0
-#define MUM_EVENT_PORT_PHY_LINK_UP_LBN 1
-#define MUM_EVENT_PORT_PHY_LINK_UP_WIDTH 1
-#define MUM_EVENT_PORT_PHY_TX_LOL_OFST 0
-#define MUM_EVENT_PORT_PHY_TX_LOL_LBN 2
-#define MUM_EVENT_PORT_PHY_TX_LOL_WIDTH 1
-#define MUM_EVENT_PORT_PHY_RX_LOL_OFST 0
-#define MUM_EVENT_PORT_PHY_RX_LOL_LBN 3
-#define MUM_EVENT_PORT_PHY_RX_LOL_WIDTH 1
-#define MUM_EVENT_PORT_PHY_TX_LOS_OFST 0
-#define MUM_EVENT_PORT_PHY_TX_LOS_LBN 4
-#define MUM_EVENT_PORT_PHY_TX_LOS_WIDTH 1
-#define MUM_EVENT_PORT_PHY_RX_LOS_OFST 0
-#define MUM_EVENT_PORT_PHY_RX_LOS_LBN 5
-#define MUM_EVENT_PORT_PHY_RX_LOS_WIDTH 1
-#define MUM_EVENT_PORT_PHY_TX_FAULT_OFST 0
-#define MUM_EVENT_PORT_PHY_TX_FAULT_LBN 6
-#define MUM_EVENT_PORT_PHY_TX_FAULT_WIDTH 1
-#define MUM_EVENT_DATA_LBN 0
-#define MUM_EVENT_DATA_WIDTH 32
-#define MUM_EVENT_SRC_LBN 36
-#define MUM_EVENT_SRC_WIDTH 8
-#define MUM_EVENT_EV_CODE_LBN 60
-#define MUM_EVENT_EV_CODE_WIDTH 4
-#define MUM_EVENT_CODE_LBN 44
-#define MUM_EVENT_CODE_WIDTH 8
-/* enum: The MUM was rebooted. */
-#define MUM_EVENT_CODE_REBOOT 0x1
-/* enum: Bad assert. */
-#define MUM_EVENT_CODE_ASSERT 0x2
-/* enum: Sensor failure. */
-#define MUM_EVENT_CODE_SENSOR 0x3
-/* enum: Link fault has been asserted, or has cleared. */
-#define MUM_EVENT_CODE_QSFP_LASI_INTERRUPT 0x4
-#define MUM_EVENT_SENSOR_DATA_OFST 0
-#define MUM_EVENT_SENSOR_DATA_LEN 4
-#define MUM_EVENT_SENSOR_DATA_LBN 0
-#define MUM_EVENT_SENSOR_DATA_WIDTH 32
-#define MUM_EVENT_PORT_PHY_FLAGS_OFST 0
-#define MUM_EVENT_PORT_PHY_FLAGS_LEN 4
-#define MUM_EVENT_PORT_PHY_FLAGS_LBN 0
-#define MUM_EVENT_PORT_PHY_FLAGS_WIDTH 32
-#define MUM_EVENT_PORT_PHY_COPPER_LEN_OFST 0
-#define MUM_EVENT_PORT_PHY_COPPER_LEN_LEN 4
-#define MUM_EVENT_PORT_PHY_COPPER_LEN_LBN 0
-#define MUM_EVENT_PORT_PHY_COPPER_LEN_WIDTH 32
-#define MUM_EVENT_PORT_PHY_CAPS_OFST 0
-#define MUM_EVENT_PORT_PHY_CAPS_LEN 4
-#define MUM_EVENT_PORT_PHY_CAPS_LBN 0
-#define MUM_EVENT_PORT_PHY_CAPS_WIDTH 32
-#define MUM_EVENT_PORT_PHY_TECH_OFST 0
-#define MUM_EVENT_PORT_PHY_TECH_LEN 4
-#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_UNKNOWN 0x0 /* enum */
-#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_OPTICAL 0x1 /* enum */
-#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_PASSIVE 0x2 /* enum */
-#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_PASSIVE_EQUALIZED 0x3 /* enum */
-#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_ACTIVE_LIMITING 0x4 /* enum */
-#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_ACTIVE_LINEAR 0x5 /* enum */
-#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_BASE_T 0x6 /* enum */
-#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_LOOPBACK_PASSIVE 0x7 /* enum */
-#define MUM_EVENT_PORT_PHY_TECH_LBN 0
-#define MUM_EVENT_PORT_PHY_TECH_WIDTH 32
-#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_LBN 36
-#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_WIDTH 4
-#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_FLAGS 0x0 /* enum */
-#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_COPPER_LEN 0x1 /* enum */
-#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_CAPS 0x2 /* enum */
-#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_TECH 0x3 /* enum */
-#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_MAX 0x4 /* enum */
-#define MUM_EVENT_PORT_PHY_SRC_PORT_NO_LBN 40
-#define MUM_EVENT_PORT_PHY_SRC_PORT_NO_WIDTH 4
-
/***********************************/
/* MC_CMD_READ32
@@ -1969,90 +1873,6 @@
/***********************************/
-/* MC_CMD_COPYCODE
- * Copy MC code between two locations and jump. Note - this command really
- * belongs to INSECURE category but is required by shmboot. The command handler
- * has additional checks to reject insecure calls.
- */
-#define MC_CMD_COPYCODE 0x3
-#undef MC_CMD_0x3_PRIVILEGE_CTG
-
-#define MC_CMD_0x3_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_COPYCODE_IN msgrequest */
-#define MC_CMD_COPYCODE_IN_LEN 16
-/* Source address
- *
- * The main image should be entered via a copy of a single word from and to a
- * magic address, which controls various aspects of the boot. The magic address
- * is a bitfield, with each bit as documented below.
- */
-#define MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0
-#define MC_CMD_COPYCODE_IN_SRC_ADDR_LEN 4
-/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT (see below) */
-#define MC_CMD_COPYCODE_HUNT_NO_MAGIC_ADDR 0x10000
-/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT and
- * BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED (see below)
- */
-#define MC_CMD_COPYCODE_HUNT_NO_DATAPATH_MAGIC_ADDR 0x1d0d0
-/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT,
- * BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED and BOOT_MAGIC_IGNORE_CONFIG (see
- * below)
- */
-#define MC_CMD_COPYCODE_HUNT_IGNORE_CONFIG_MAGIC_ADDR 0x1badc
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_OFST 0
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_LBN 17
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_WIDTH 1
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_OFST 0
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_LBN 2
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_WIDTH 1
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_OFST 0
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_LBN 3
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_WIDTH 1
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_OFST 0
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_LBN 4
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_WIDTH 1
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_OFST 0
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_LBN 5
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_WIDTH 1
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_DISABLE_XIP_OFST 0
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_DISABLE_XIP_LBN 6
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_DISABLE_XIP_WIDTH 1
-/* Destination address */
-#define MC_CMD_COPYCODE_IN_DEST_ADDR_OFST 4
-#define MC_CMD_COPYCODE_IN_DEST_ADDR_LEN 4
-#define MC_CMD_COPYCODE_IN_NUMWORDS_OFST 8
-#define MC_CMD_COPYCODE_IN_NUMWORDS_LEN 4
-/* Address of where to jump after copy. */
-#define MC_CMD_COPYCODE_IN_JUMP_OFST 12
-#define MC_CMD_COPYCODE_IN_JUMP_LEN 4
-/* enum: Control should return to the caller rather than jumping */
-#define MC_CMD_COPYCODE_JUMP_NONE 0x1
-
-/* MC_CMD_COPYCODE_OUT msgresponse */
-#define MC_CMD_COPYCODE_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_SET_FUNC
- * Select function for function-specific commands.
- */
-#define MC_CMD_SET_FUNC 0x4
-#undef MC_CMD_0x4_PRIVILEGE_CTG
-
-#define MC_CMD_0x4_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_SET_FUNC_IN msgrequest */
-#define MC_CMD_SET_FUNC_IN_LEN 4
-/* Set function */
-#define MC_CMD_SET_FUNC_IN_FUNC_OFST 0
-#define MC_CMD_SET_FUNC_IN_FUNC_LEN 4
-
-/* MC_CMD_SET_FUNC_OUT msgresponse */
-#define MC_CMD_SET_FUNC_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_GET_BOOT_STATUS
* Get the instruction address from which the MC booted.
*/
@@ -2259,6 +2079,7 @@
/* Log destination */
#define MC_CMD_LOG_CTRL_IN_LOG_DEST_OFST 0
#define MC_CMD_LOG_CTRL_IN_LOG_DEST_LEN 4
+/* enum property: bitmask */
/* enum: UART. */
#define MC_CMD_LOG_CTRL_IN_LOG_DEST_UART 0x1
/* enum: Event queue. */
@@ -2304,6 +2125,9 @@
/* MC_CMD_GET_VERSION_OUT msgresponse */
#define MC_CMD_GET_VERSION_OUT_LEN 32
+/* This is normally the UTC build time in seconds since epoch or one of the
+ * special values listed
+ */
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
/* Enum values, see field(s): */
@@ -2326,6 +2150,9 @@
/* MC_CMD_GET_VERSION_EXT_OUT msgresponse */
#define MC_CMD_GET_VERSION_EXT_OUT_LEN 48
+/* This is normally the UTC build time in seconds since epoch or one of the
+ * special values listed
+ */
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
/* Enum values, see field(s): */
@@ -2356,6 +2183,9 @@
* (depending on which components exist on a particular adapter)
*/
#define MC_CMD_GET_VERSION_V2_OUT_LEN 304
+/* This is normally the UTC build time in seconds since epoch or one of the
+ * special values listed
+ */
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
/* Enum values, see field(s): */
@@ -2495,6 +2325,9 @@
* (depending on which components exist on a particular adapter)
*/
#define MC_CMD_GET_VERSION_V3_OUT_LEN 328
+/* This is normally the UTC build time in seconds since epoch or one of the
+ * special values listed
+ */
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
/* Enum values, see field(s): */
@@ -2641,6 +2474,9 @@
* version information
*/
#define MC_CMD_GET_VERSION_V4_OUT_LEN 392
+/* This is normally the UTC build time in seconds since epoch or one of the
+ * special values listed
+ */
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
/* Enum values, see field(s): */
@@ -2803,6 +2639,9 @@
* and board version information
*/
#define MC_CMD_GET_VERSION_V5_OUT_LEN 424
+/* This is normally the UTC build time in seconds since epoch or one of the
+ * special values listed
+ */
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
/* Enum values, see field(s): */
@@ -3065,8 +2904,18 @@
* subscribers.
*/
#define MC_CMD_PTP_OP_SET_SYNC_STATUS 0x1b
-/* enum: Above this for future use. */
-#define MC_CMD_PTP_OP_MAX 0x1c
+/* enum: X4 and later adapters should use this instead of
+ * PTP_OP_TIME_EVENT_SUBSCRIBE. Subscribe to receive periodic time events
+ * indicating the current NIC time
+ */
+#define MC_CMD_PTP_OP_TIME_EVENT_SUBSCRIBE_V2 0x1c
+/* enum: For X4 and later NICs. Packet timestamps and time sync events carry
+ * IS_SET and IN_SYNC flags that indicate whether the time has been set and
+ * whether it is in sync with the host. Once set, the IN_SYNC flag is cleared
+ * by hardware after a software-configurable timeout. This operation lets the
+ * host driver query both the currently configured timeout and the maximum
+ * supported value.
+ */
+#define MC_CMD_PTP_OP_GET_SYNC_TIMEOUT 0x1d
/* MC_CMD_PTP_IN_ENABLE msgrequest */
#define MC_CMD_PTP_IN_ENABLE_LEN 16
@@ -3507,6 +3356,22 @@
#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE_OFST 12
#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE_LEN 4
+/* MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2 msgrequest */
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2_LEN 16
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Event queue ID */
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2_QUEUE_ID_OFST 8
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2_QUEUE_ID_LEN 4
+/* Space for flags. */
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2_FLAGS_OFST 12
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2_FLAGS_LEN 4
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2_REPORT_SYNC_STATUS_OFST 12
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2_REPORT_SYNC_STATUS_LBN 31
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2_REPORT_SYNC_STATUS_WIDTH 1
+
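/* Illustrative sketch (not part of this header): building a
 * TIME_EVENT_SUBSCRIBE_V2 request by hand with the offsets above. A real
 * driver would normally use its MCDI helper macros instead; here the buffer
 * (at least MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2_LEN bytes) is filled with
 * the kernel's put_unaligned_le32() helper and BIT() from <linux/bits.h>.
 * The function name is hypothetical.
 */
static inline void example_build_subscribe_v2(u8 *buf, u32 evq_id,
					      bool report_sync_status)
{
	u32 flags = report_sync_status ?
		BIT(MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2_REPORT_SYNC_STATUS_LBN) : 0;

	/* PTP sub-operation and peripheral ID occupy the first two dwords */
	put_unaligned_le32(MC_CMD_PTP_OP_TIME_EVENT_SUBSCRIBE_V2,
			   buf + MC_CMD_PTP_IN_CMD_OFST);
	put_unaligned_le32(0, buf + MC_CMD_PTP_IN_PERIPH_ID_OFST);
	put_unaligned_le32(evq_id,
			   buf + MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2_QUEUE_ID_OFST);
	put_unaligned_le32(flags,
			   buf + MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2_FLAGS_OFST);
}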
/* MC_CMD_PTP_IN_MANFTEST_PPS msgrequest */
#define MC_CMD_PTP_IN_MANFTEST_PPS_LEN 12
/* MC_CMD_PTP_IN_CMD_OFST 0 */
@@ -3540,6 +3405,13 @@
#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED1_OFST 20
#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED1_LEN 4
+/* MC_CMD_PTP_IN_GET_SYNC_TIMEOUT msgrequest */
+#define MC_CMD_PTP_IN_GET_SYNC_TIMEOUT_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+
/* MC_CMD_PTP_OUT msgresponse */
#define MC_CMD_PTP_OUT_LEN 0
@@ -3939,416 +3811,14 @@
/* MC_CMD_PTP_OUT_SET_SYNC_STATUS msgresponse */
#define MC_CMD_PTP_OUT_SET_SYNC_STATUS_LEN 0
-
-/***********************************/
-/* MC_CMD_CSR_READ32
- * Read 32bit words from the indirect memory map.
- */
-#define MC_CMD_CSR_READ32 0xc
-#undef MC_CMD_0xc_PRIVILEGE_CTG
-
-#define MC_CMD_0xc_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_CSR_READ32_IN msgrequest */
-#define MC_CMD_CSR_READ32_IN_LEN 12
-/* Address */
-#define MC_CMD_CSR_READ32_IN_ADDR_OFST 0
-#define MC_CMD_CSR_READ32_IN_ADDR_LEN 4
-#define MC_CMD_CSR_READ32_IN_STEP_OFST 4
-#define MC_CMD_CSR_READ32_IN_STEP_LEN 4
-#define MC_CMD_CSR_READ32_IN_NUMWORDS_OFST 8
-#define MC_CMD_CSR_READ32_IN_NUMWORDS_LEN 4
-
-/* MC_CMD_CSR_READ32_OUT msgresponse */
-#define MC_CMD_CSR_READ32_OUT_LENMIN 4
-#define MC_CMD_CSR_READ32_OUT_LENMAX 252
-#define MC_CMD_CSR_READ32_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_CSR_READ32_OUT_LEN(num) (0+4*(num))
-#define MC_CMD_CSR_READ32_OUT_BUFFER_NUM(len) (((len)-0)/4)
-/* The last dword is the status, not a value read */
-#define MC_CMD_CSR_READ32_OUT_BUFFER_OFST 0
-#define MC_CMD_CSR_READ32_OUT_BUFFER_LEN 4
-#define MC_CMD_CSR_READ32_OUT_BUFFER_MINNUM 1
-#define MC_CMD_CSR_READ32_OUT_BUFFER_MAXNUM 63
-#define MC_CMD_CSR_READ32_OUT_BUFFER_MAXNUM_MCDI2 255
-
-
-/***********************************/
-/* MC_CMD_CSR_WRITE32
- * Write 32bit dwords to the indirect memory map.
- */
-#define MC_CMD_CSR_WRITE32 0xd
-#undef MC_CMD_0xd_PRIVILEGE_CTG
-
-#define MC_CMD_0xd_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_CSR_WRITE32_IN msgrequest */
-#define MC_CMD_CSR_WRITE32_IN_LENMIN 12
-#define MC_CMD_CSR_WRITE32_IN_LENMAX 252
-#define MC_CMD_CSR_WRITE32_IN_LENMAX_MCDI2 1020
-#define MC_CMD_CSR_WRITE32_IN_LEN(num) (8+4*(num))
-#define MC_CMD_CSR_WRITE32_IN_BUFFER_NUM(len) (((len)-8)/4)
-/* Address */
-#define MC_CMD_CSR_WRITE32_IN_ADDR_OFST 0
-#define MC_CMD_CSR_WRITE32_IN_ADDR_LEN 4
-#define MC_CMD_CSR_WRITE32_IN_STEP_OFST 4
-#define MC_CMD_CSR_WRITE32_IN_STEP_LEN 4
-#define MC_CMD_CSR_WRITE32_IN_BUFFER_OFST 8
-#define MC_CMD_CSR_WRITE32_IN_BUFFER_LEN 4
-#define MC_CMD_CSR_WRITE32_IN_BUFFER_MINNUM 1
-#define MC_CMD_CSR_WRITE32_IN_BUFFER_MAXNUM 61
-#define MC_CMD_CSR_WRITE32_IN_BUFFER_MAXNUM_MCDI2 253
-
-/* MC_CMD_CSR_WRITE32_OUT msgresponse */
-#define MC_CMD_CSR_WRITE32_OUT_LEN 4
-#define MC_CMD_CSR_WRITE32_OUT_STATUS_OFST 0
-#define MC_CMD_CSR_WRITE32_OUT_STATUS_LEN 4
-
-
-/***********************************/
-/* MC_CMD_HP
- * These commands are used for HP related features. They are grouped under one
- * MCDI command to avoid creating too many MCDI commands.
- */
-#define MC_CMD_HP 0x54
-#undef MC_CMD_0x54_PRIVILEGE_CTG
-
-#define MC_CMD_0x54_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_HP_IN msgrequest */
-#define MC_CMD_HP_IN_LEN 16
-/* HP OCSD sub-command. When address is not NULL, request activation of OCSD at
- * the specified address with the specified interval.When address is NULL,
- * INTERVAL is interpreted as a command: 0: stop OCSD / 1: Report OCSD current
- * state / 2: (debug) Show temperature reported by one of the supported
- * sensors.
- */
-#define MC_CMD_HP_IN_SUBCMD_OFST 0
-#define MC_CMD_HP_IN_SUBCMD_LEN 4
-/* enum: OCSD (Option Card Sensor Data) sub-command. */
-#define MC_CMD_HP_IN_OCSD_SUBCMD 0x0
-/* enum: Last known valid HP sub-command. */
-#define MC_CMD_HP_IN_LAST_SUBCMD 0x0
-/* The address to the array of sensor fields. (Or NULL to use a sub-command.)
- */
-#define MC_CMD_HP_IN_OCSD_ADDR_OFST 4
-#define MC_CMD_HP_IN_OCSD_ADDR_LEN 8
-#define MC_CMD_HP_IN_OCSD_ADDR_LO_OFST 4
-#define MC_CMD_HP_IN_OCSD_ADDR_LO_LEN 4
-#define MC_CMD_HP_IN_OCSD_ADDR_LO_LBN 32
-#define MC_CMD_HP_IN_OCSD_ADDR_LO_WIDTH 32
-#define MC_CMD_HP_IN_OCSD_ADDR_HI_OFST 8
-#define MC_CMD_HP_IN_OCSD_ADDR_HI_LEN 4
-#define MC_CMD_HP_IN_OCSD_ADDR_HI_LBN 64
-#define MC_CMD_HP_IN_OCSD_ADDR_HI_WIDTH 32
-/* The requested update interval, in seconds. (Or the sub-command if ADDR is
- * NULL.)
- */
-#define MC_CMD_HP_IN_OCSD_INTERVAL_OFST 12
-#define MC_CMD_HP_IN_OCSD_INTERVAL_LEN 4
-
-/* MC_CMD_HP_OUT msgresponse */
-#define MC_CMD_HP_OUT_LEN 4
-#define MC_CMD_HP_OUT_OCSD_STATUS_OFST 0
-#define MC_CMD_HP_OUT_OCSD_STATUS_LEN 4
-/* enum: OCSD stopped for this card. */
-#define MC_CMD_HP_OUT_OCSD_STOPPED 0x1
-/* enum: OCSD was successfully started with the address provided. */
-#define MC_CMD_HP_OUT_OCSD_STARTED 0x2
-/* enum: OCSD was already started for this card. */
-#define MC_CMD_HP_OUT_OCSD_ALREADY_STARTED 0x3
-
-
-/***********************************/
-/* MC_CMD_STACKINFO
- * Get stack information.
- */
-#define MC_CMD_STACKINFO 0xf
-#undef MC_CMD_0xf_PRIVILEGE_CTG
-
-#define MC_CMD_0xf_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_STACKINFO_IN msgrequest */
-#define MC_CMD_STACKINFO_IN_LEN 0
-
-/* MC_CMD_STACKINFO_OUT msgresponse */
-#define MC_CMD_STACKINFO_OUT_LENMIN 12
-#define MC_CMD_STACKINFO_OUT_LENMAX 252
-#define MC_CMD_STACKINFO_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_STACKINFO_OUT_LEN(num) (0+12*(num))
-#define MC_CMD_STACKINFO_OUT_THREAD_INFO_NUM(len) (((len)-0)/12)
-/* (thread ptr, stack size, free space) for each thread in system */
-#define MC_CMD_STACKINFO_OUT_THREAD_INFO_OFST 0
-#define MC_CMD_STACKINFO_OUT_THREAD_INFO_LEN 12
-#define MC_CMD_STACKINFO_OUT_THREAD_INFO_MINNUM 1
-#define MC_CMD_STACKINFO_OUT_THREAD_INFO_MAXNUM 21
-#define MC_CMD_STACKINFO_OUT_THREAD_INFO_MAXNUM_MCDI2 85
-
-
-/***********************************/
-/* MC_CMD_MDIO_READ
- * MDIO register read.
- */
-#define MC_CMD_MDIO_READ 0x10
-#undef MC_CMD_0x10_PRIVILEGE_CTG
-
-#define MC_CMD_0x10_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_MDIO_READ_IN msgrequest */
-#define MC_CMD_MDIO_READ_IN_LEN 16
-/* Bus number; there are two MDIO buses: one for the internal PHY, and one for
- * external devices.
- */
-#define MC_CMD_MDIO_READ_IN_BUS_OFST 0
-#define MC_CMD_MDIO_READ_IN_BUS_LEN 4
-/* enum: Internal. */
-#define MC_CMD_MDIO_BUS_INTERNAL 0x0
-/* enum: External. */
-#define MC_CMD_MDIO_BUS_EXTERNAL 0x1
-/* Port address */
-#define MC_CMD_MDIO_READ_IN_PRTAD_OFST 4
-#define MC_CMD_MDIO_READ_IN_PRTAD_LEN 4
-/* Device Address or clause 22. */
-#define MC_CMD_MDIO_READ_IN_DEVAD_OFST 8
-#define MC_CMD_MDIO_READ_IN_DEVAD_LEN 4
-/* enum: By default all the MCDI MDIO operations perform clause45 mode. If you
- * want to use clause22 then set DEVAD = MC_CMD_MDIO_CLAUSE22.
- */
-#define MC_CMD_MDIO_CLAUSE22 0x20
-/* Address */
-#define MC_CMD_MDIO_READ_IN_ADDR_OFST 12
-#define MC_CMD_MDIO_READ_IN_ADDR_LEN 4
-
-/* MC_CMD_MDIO_READ_OUT msgresponse */
-#define MC_CMD_MDIO_READ_OUT_LEN 8
-/* Value */
-#define MC_CMD_MDIO_READ_OUT_VALUE_OFST 0
-#define MC_CMD_MDIO_READ_OUT_VALUE_LEN 4
-/* Status the MDIO commands return the raw status bits from the MDIO block. A
- * "good" transaction should have the DONE bit set and all other bits clear.
- */
-#define MC_CMD_MDIO_READ_OUT_STATUS_OFST 4
-#define MC_CMD_MDIO_READ_OUT_STATUS_LEN 4
-/* enum: Good. */
-#define MC_CMD_MDIO_STATUS_GOOD 0x8
-
-
-/***********************************/
-/* MC_CMD_MDIO_WRITE
- * MDIO register write.
- */
-#define MC_CMD_MDIO_WRITE 0x11
-#undef MC_CMD_0x11_PRIVILEGE_CTG
-
-#define MC_CMD_0x11_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_MDIO_WRITE_IN msgrequest */
-#define MC_CMD_MDIO_WRITE_IN_LEN 20
-/* Bus number; there are two MDIO buses: one for the internal PHY, and one for
- * external devices.
- */
-#define MC_CMD_MDIO_WRITE_IN_BUS_OFST 0
-#define MC_CMD_MDIO_WRITE_IN_BUS_LEN 4
-/* enum: Internal. */
-/* MC_CMD_MDIO_BUS_INTERNAL 0x0 */
-/* enum: External. */
-/* MC_CMD_MDIO_BUS_EXTERNAL 0x1 */
-/* Port address */
-#define MC_CMD_MDIO_WRITE_IN_PRTAD_OFST 4
-#define MC_CMD_MDIO_WRITE_IN_PRTAD_LEN 4
-/* Device Address or clause 22. */
-#define MC_CMD_MDIO_WRITE_IN_DEVAD_OFST 8
-#define MC_CMD_MDIO_WRITE_IN_DEVAD_LEN 4
-/* enum: By default all the MCDI MDIO operations perform clause45 mode. If you
- * want to use clause22 then set DEVAD = MC_CMD_MDIO_CLAUSE22.
- */
-/* MC_CMD_MDIO_CLAUSE22 0x20 */
-/* Address */
-#define MC_CMD_MDIO_WRITE_IN_ADDR_OFST 12
-#define MC_CMD_MDIO_WRITE_IN_ADDR_LEN 4
-/* Value */
-#define MC_CMD_MDIO_WRITE_IN_VALUE_OFST 16
-#define MC_CMD_MDIO_WRITE_IN_VALUE_LEN 4
-
-/* MC_CMD_MDIO_WRITE_OUT msgresponse */
-#define MC_CMD_MDIO_WRITE_OUT_LEN 4
-/* Status; the MDIO commands return the raw status bits from the MDIO block. A
- * "good" transaction should have the DONE bit set and all other bits clear.
- */
-#define MC_CMD_MDIO_WRITE_OUT_STATUS_OFST 0
-#define MC_CMD_MDIO_WRITE_OUT_STATUS_LEN 4
-/* enum: Good. */
-/* MC_CMD_MDIO_STATUS_GOOD 0x8 */
-
-
-/***********************************/
-/* MC_CMD_DBI_WRITE
- * Write DBI register(s).
- */
-#define MC_CMD_DBI_WRITE 0x12
-#undef MC_CMD_0x12_PRIVILEGE_CTG
-
-#define MC_CMD_0x12_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_DBI_WRITE_IN msgrequest */
-#define MC_CMD_DBI_WRITE_IN_LENMIN 12
-#define MC_CMD_DBI_WRITE_IN_LENMAX 252
-#define MC_CMD_DBI_WRITE_IN_LENMAX_MCDI2 1020
-#define MC_CMD_DBI_WRITE_IN_LEN(num) (0+12*(num))
-#define MC_CMD_DBI_WRITE_IN_DBIWROP_NUM(len) (((len)-0)/12)
-/* Each write op consists of an address (offset 0), byte enable/VF/CS2 (offset
- * 32) and value (offset 64). See MC_CMD_DBIWROP_TYPEDEF.
- */
-#define MC_CMD_DBI_WRITE_IN_DBIWROP_OFST 0
-#define MC_CMD_DBI_WRITE_IN_DBIWROP_LEN 12
-#define MC_CMD_DBI_WRITE_IN_DBIWROP_MINNUM 1
-#define MC_CMD_DBI_WRITE_IN_DBIWROP_MAXNUM 21
-#define MC_CMD_DBI_WRITE_IN_DBIWROP_MAXNUM_MCDI2 85
-
-/* MC_CMD_DBI_WRITE_OUT msgresponse */
-#define MC_CMD_DBI_WRITE_OUT_LEN 0
-
-/* MC_CMD_DBIWROP_TYPEDEF structuredef */
-#define MC_CMD_DBIWROP_TYPEDEF_LEN 12
-#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_OFST 0
-#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_LEN 4
-#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_LBN 0
-#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_WIDTH 32
-#define MC_CMD_DBIWROP_TYPEDEF_PARMS_OFST 4
-#define MC_CMD_DBIWROP_TYPEDEF_PARMS_LEN 4
-#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_OFST 4
-#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_LBN 16
-#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_WIDTH 16
-#define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_OFST 4
-#define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_LBN 15
-#define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_WIDTH 1
-#define MC_CMD_DBIWROP_TYPEDEF_CS2_OFST 4
-#define MC_CMD_DBIWROP_TYPEDEF_CS2_LBN 14
-#define MC_CMD_DBIWROP_TYPEDEF_CS2_WIDTH 1
-#define MC_CMD_DBIWROP_TYPEDEF_PARMS_LBN 32
-#define MC_CMD_DBIWROP_TYPEDEF_PARMS_WIDTH 32
-#define MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST 8
-#define MC_CMD_DBIWROP_TYPEDEF_VALUE_LEN 4
-#define MC_CMD_DBIWROP_TYPEDEF_VALUE_LBN 64
-#define MC_CMD_DBIWROP_TYPEDEF_VALUE_WIDTH 32
-
-
-/***********************************/
-/* MC_CMD_PORT_READ32
- * Read a 32-bit register from the indirect port register map. The port to
- * access is implied by the Shared memory channel used.
- */
-#define MC_CMD_PORT_READ32 0x14
-
-/* MC_CMD_PORT_READ32_IN msgrequest */
-#define MC_CMD_PORT_READ32_IN_LEN 4
-/* Address */
-#define MC_CMD_PORT_READ32_IN_ADDR_OFST 0
-#define MC_CMD_PORT_READ32_IN_ADDR_LEN 4
-
-/* MC_CMD_PORT_READ32_OUT msgresponse */
-#define MC_CMD_PORT_READ32_OUT_LEN 8
-/* Value */
-#define MC_CMD_PORT_READ32_OUT_VALUE_OFST 0
-#define MC_CMD_PORT_READ32_OUT_VALUE_LEN 4
-/* Status */
-#define MC_CMD_PORT_READ32_OUT_STATUS_OFST 4
-#define MC_CMD_PORT_READ32_OUT_STATUS_LEN 4
-
-
-/***********************************/
-/* MC_CMD_PORT_WRITE32
- * Write a 32-bit register to the indirect port register map. The port to
- * access is implied by the Shared memory channel used.
- */
-#define MC_CMD_PORT_WRITE32 0x15
-
-/* MC_CMD_PORT_WRITE32_IN msgrequest */
-#define MC_CMD_PORT_WRITE32_IN_LEN 8
-/* Address */
-#define MC_CMD_PORT_WRITE32_IN_ADDR_OFST 0
-#define MC_CMD_PORT_WRITE32_IN_ADDR_LEN 4
-/* Value */
-#define MC_CMD_PORT_WRITE32_IN_VALUE_OFST 4
-#define MC_CMD_PORT_WRITE32_IN_VALUE_LEN 4
-
-/* MC_CMD_PORT_WRITE32_OUT msgresponse */
-#define MC_CMD_PORT_WRITE32_OUT_LEN 4
-/* Status */
-#define MC_CMD_PORT_WRITE32_OUT_STATUS_OFST 0
-#define MC_CMD_PORT_WRITE32_OUT_STATUS_LEN 4
-
-
-/***********************************/
-/* MC_CMD_PORT_READ128
- * Read a 128-bit register from the indirect port register map. The port to
- * access is implied by the Shared memory channel used.
- */
-#define MC_CMD_PORT_READ128 0x16
-
-/* MC_CMD_PORT_READ128_IN msgrequest */
-#define MC_CMD_PORT_READ128_IN_LEN 4
-/* Address */
-#define MC_CMD_PORT_READ128_IN_ADDR_OFST 0
-#define MC_CMD_PORT_READ128_IN_ADDR_LEN 4
-
-/* MC_CMD_PORT_READ128_OUT msgresponse */
-#define MC_CMD_PORT_READ128_OUT_LEN 20
-/* Value */
-#define MC_CMD_PORT_READ128_OUT_VALUE_OFST 0
-#define MC_CMD_PORT_READ128_OUT_VALUE_LEN 16
-/* Status */
-#define MC_CMD_PORT_READ128_OUT_STATUS_OFST 16
-#define MC_CMD_PORT_READ128_OUT_STATUS_LEN 4
-
-
-/***********************************/
-/* MC_CMD_PORT_WRITE128
- * Write a 128-bit register to the indirect port register map. The port to
- * access is implied by the Shared memory channel used.
- */
-#define MC_CMD_PORT_WRITE128 0x17
-
-/* MC_CMD_PORT_WRITE128_IN msgrequest */
-#define MC_CMD_PORT_WRITE128_IN_LEN 20
-/* Address */
-#define MC_CMD_PORT_WRITE128_IN_ADDR_OFST 0
-#define MC_CMD_PORT_WRITE128_IN_ADDR_LEN 4
-/* Value */
-#define MC_CMD_PORT_WRITE128_IN_VALUE_OFST 4
-#define MC_CMD_PORT_WRITE128_IN_VALUE_LEN 16
-
-/* MC_CMD_PORT_WRITE128_OUT msgresponse */
-#define MC_CMD_PORT_WRITE128_OUT_LEN 4
-/* Status */
-#define MC_CMD_PORT_WRITE128_OUT_STATUS_OFST 0
-#define MC_CMD_PORT_WRITE128_OUT_STATUS_LEN 4
-
-/* MC_CMD_CAPABILITIES structuredef */
-#define MC_CMD_CAPABILITIES_LEN 4
-/* Small buf table. */
-#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_LBN 0
-#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_WIDTH 1
-/* Turbo mode (for Maranello). */
-#define MC_CMD_CAPABILITIES_TURBO_LBN 1
-#define MC_CMD_CAPABILITIES_TURBO_WIDTH 1
-/* Turbo mode active (for Maranello). */
-#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_LBN 2
-#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_WIDTH 1
-/* PTP offload. */
-#define MC_CMD_CAPABILITIES_PTP_LBN 3
-#define MC_CMD_CAPABILITIES_PTP_WIDTH 1
-/* AOE mode. */
-#define MC_CMD_CAPABILITIES_AOE_LBN 4
-#define MC_CMD_CAPABILITIES_AOE_WIDTH 1
-/* AOE mode active. */
-#define MC_CMD_CAPABILITIES_AOE_ACTIVE_LBN 5
-#define MC_CMD_CAPABILITIES_AOE_ACTIVE_WIDTH 1
-/* AOE mode active. */
-#define MC_CMD_CAPABILITIES_FC_ACTIVE_LBN 6
-#define MC_CMD_CAPABILITIES_FC_ACTIVE_WIDTH 1
-#define MC_CMD_CAPABILITIES_RESERVED_LBN 7
-#define MC_CMD_CAPABILITIES_RESERVED_WIDTH 25
+/* MC_CMD_PTP_OUT_GET_SYNC_TIMEOUT msgresponse */
+#define MC_CMD_PTP_OUT_GET_SYNC_TIMEOUT_LEN 8
+/* Current value set in NIC, in seconds */
+#define MC_CMD_PTP_OUT_GET_SYNC_TIMEOUT_CURRENT_OFST 0
+#define MC_CMD_PTP_OUT_GET_SYNC_TIMEOUT_CURRENT_LEN 4
+/* Maximum supported by NIC, in seconds */
+#define MC_CMD_PTP_OUT_GET_SYNC_TIMEOUT_MAXIMUM_OFST 4
+#define MC_CMD_PTP_OUT_GET_SYNC_TIMEOUT_MAXIMUM_LEN 4
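/* Illustrative sketch (not part of this header): extracting the two fields of
 * the GET_SYNC_TIMEOUT response from a raw little-endian response buffer
 * using the kernel's get_unaligned_le32() helper. The function name is
 * hypothetical.
 */
static inline void example_parse_sync_timeout(const u8 *buf, u32 *current_s,
					      u32 *maximum_s)
{
	*current_s = get_unaligned_le32(buf +
			MC_CMD_PTP_OUT_GET_SYNC_TIMEOUT_CURRENT_OFST);
	*maximum_s = get_unaligned_le32(buf +
			MC_CMD_PTP_OUT_GET_SYNC_TIMEOUT_MAXIMUM_OFST);
}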
/***********************************/
@@ -4427,112 +3897,6 @@
/***********************************/
-/* MC_CMD_DBI_READX
- * Read DBI register(s) -- extended functionality
- */
-#define MC_CMD_DBI_READX 0x19
-#undef MC_CMD_0x19_PRIVILEGE_CTG
-
-#define MC_CMD_0x19_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_DBI_READX_IN msgrequest */
-#define MC_CMD_DBI_READX_IN_LENMIN 8
-#define MC_CMD_DBI_READX_IN_LENMAX 248
-#define MC_CMD_DBI_READX_IN_LENMAX_MCDI2 1016
-#define MC_CMD_DBI_READX_IN_LEN(num) (0+8*(num))
-#define MC_CMD_DBI_READX_IN_DBIRDOP_NUM(len) (((len)-0)/8)
-/* Each Read op consists of an address (offset 0), VF/CS2) */
-#define MC_CMD_DBI_READX_IN_DBIRDOP_OFST 0
-#define MC_CMD_DBI_READX_IN_DBIRDOP_LEN 8
-#define MC_CMD_DBI_READX_IN_DBIRDOP_LO_OFST 0
-#define MC_CMD_DBI_READX_IN_DBIRDOP_LO_LEN 4
-#define MC_CMD_DBI_READX_IN_DBIRDOP_LO_LBN 0
-#define MC_CMD_DBI_READX_IN_DBIRDOP_LO_WIDTH 32
-#define MC_CMD_DBI_READX_IN_DBIRDOP_HI_OFST 4
-#define MC_CMD_DBI_READX_IN_DBIRDOP_HI_LEN 4
-#define MC_CMD_DBI_READX_IN_DBIRDOP_HI_LBN 32
-#define MC_CMD_DBI_READX_IN_DBIRDOP_HI_WIDTH 32
-#define MC_CMD_DBI_READX_IN_DBIRDOP_MINNUM 1
-#define MC_CMD_DBI_READX_IN_DBIRDOP_MAXNUM 31
-#define MC_CMD_DBI_READX_IN_DBIRDOP_MAXNUM_MCDI2 127
-
-/* MC_CMD_DBI_READX_OUT msgresponse */
-#define MC_CMD_DBI_READX_OUT_LENMIN 4
-#define MC_CMD_DBI_READX_OUT_LENMAX 252
-#define MC_CMD_DBI_READX_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_DBI_READX_OUT_LEN(num) (0+4*(num))
-#define MC_CMD_DBI_READX_OUT_VALUE_NUM(len) (((len)-0)/4)
-/* Value */
-#define MC_CMD_DBI_READX_OUT_VALUE_OFST 0
-#define MC_CMD_DBI_READX_OUT_VALUE_LEN 4
-#define MC_CMD_DBI_READX_OUT_VALUE_MINNUM 1
-#define MC_CMD_DBI_READX_OUT_VALUE_MAXNUM 63
-#define MC_CMD_DBI_READX_OUT_VALUE_MAXNUM_MCDI2 255
-
-/* MC_CMD_DBIRDOP_TYPEDEF structuredef */
-#define MC_CMD_DBIRDOP_TYPEDEF_LEN 8
-#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_OFST 0
-#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_LEN 4
-#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_LBN 0
-#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_WIDTH 32
-#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_OFST 4
-#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_LEN 4
-#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_OFST 4
-#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_LBN 16
-#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_WIDTH 16
-#define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_OFST 4
-#define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_LBN 15
-#define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_WIDTH 1
-#define MC_CMD_DBIRDOP_TYPEDEF_CS2_OFST 4
-#define MC_CMD_DBIRDOP_TYPEDEF_CS2_LBN 14
-#define MC_CMD_DBIRDOP_TYPEDEF_CS2_WIDTH 1
-#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_LBN 32
-#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_WIDTH 32
-
-
-/***********************************/
-/* MC_CMD_SET_RAND_SEED
- * Set the 16byte seed for the MC pseudo-random generator.
- */
-#define MC_CMD_SET_RAND_SEED 0x1a
-#undef MC_CMD_0x1a_PRIVILEGE_CTG
-
-#define MC_CMD_0x1a_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_SET_RAND_SEED_IN msgrequest */
-#define MC_CMD_SET_RAND_SEED_IN_LEN 16
-/* Seed value. */
-#define MC_CMD_SET_RAND_SEED_IN_SEED_OFST 0
-#define MC_CMD_SET_RAND_SEED_IN_SEED_LEN 16
-
-/* MC_CMD_SET_RAND_SEED_OUT msgresponse */
-#define MC_CMD_SET_RAND_SEED_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_LTSSM_HIST
- * Retrieve the history of the LTSSM, if the build supports it.
- */
-#define MC_CMD_LTSSM_HIST 0x1b
-
-/* MC_CMD_LTSSM_HIST_IN msgrequest */
-#define MC_CMD_LTSSM_HIST_IN_LEN 0
-
-/* MC_CMD_LTSSM_HIST_OUT msgresponse */
-#define MC_CMD_LTSSM_HIST_OUT_LENMIN 0
-#define MC_CMD_LTSSM_HIST_OUT_LENMAX 252
-#define MC_CMD_LTSSM_HIST_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_LTSSM_HIST_OUT_LEN(num) (0+4*(num))
-#define MC_CMD_LTSSM_HIST_OUT_DATA_NUM(len) (((len)-0)/4)
-/* variable number of LTSSM values, as bytes. The history is read-to-clear. */
-#define MC_CMD_LTSSM_HIST_OUT_DATA_OFST 0
-#define MC_CMD_LTSSM_HIST_OUT_DATA_LEN 4
-#define MC_CMD_LTSSM_HIST_OUT_DATA_MINNUM 0
-#define MC_CMD_LTSSM_HIST_OUT_DATA_MAXNUM 63
-#define MC_CMD_LTSSM_HIST_OUT_DATA_MAXNUM_MCDI2 255
-
-
-/***********************************/
/* MC_CMD_DRV_ATTACH
* Inform MCPU that this port is managed on the host (i.e. driver active). For
* Huntington, also request the preferred datapath firmware to use if possible
@@ -4705,6 +4069,7 @@
/* Flags associated with this function */
#define MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_OFST 4
#define MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_LEN 4
+/* enum property: bitshift */
/* enum: Labels the lowest-numbered function visible to the OS */
#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY 0x0
/* enum: The function can control the link state of the physical port it is
@@ -4732,22 +4097,6 @@
/***********************************/
-/* MC_CMD_SHMUART
- * Route UART output to circular buffer in shared memory instead.
- */
-#define MC_CMD_SHMUART 0x1f
-
-/* MC_CMD_SHMUART_IN msgrequest */
-#define MC_CMD_SHMUART_IN_LEN 4
-/* ??? */
-#define MC_CMD_SHMUART_IN_FLAG_OFST 0
-#define MC_CMD_SHMUART_IN_FLAG_LEN 4
-
-/* MC_CMD_SHMUART_OUT msgresponse */
-#define MC_CMD_SHMUART_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_PORT_RESET
* Generic per-port reset. There is no equivalent for per-board reset. Locks
* required: None; Return code: 0, ETIME. NOTE: This command is deprecated -
@@ -4790,100 +4139,6 @@
/***********************************/
-/* MC_CMD_PCIE_CREDITS
- * Read instantaneous and minimum flow control thresholds.
- */
-#define MC_CMD_PCIE_CREDITS 0x21
-
-/* MC_CMD_PCIE_CREDITS_IN msgrequest */
-#define MC_CMD_PCIE_CREDITS_IN_LEN 8
-/* poll period. 0 is disabled */
-#define MC_CMD_PCIE_CREDITS_IN_POLL_PERIOD_OFST 0
-#define MC_CMD_PCIE_CREDITS_IN_POLL_PERIOD_LEN 4
-/* wipe statistics */
-#define MC_CMD_PCIE_CREDITS_IN_WIPE_OFST 4
-#define MC_CMD_PCIE_CREDITS_IN_WIPE_LEN 4
-
-/* MC_CMD_PCIE_CREDITS_OUT msgresponse */
-#define MC_CMD_PCIE_CREDITS_OUT_LEN 16
-#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_HDR_OFST 0
-#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_HDR_LEN 2
-#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_DATA_OFST 2
-#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_DATA_LEN 2
-#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_HDR_OFST 4
-#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_HDR_LEN 2
-#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_DATA_OFST 6
-#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_DATA_LEN 2
-#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_HDR_OFST 8
-#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_HDR_LEN 2
-#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_DATA_OFST 10
-#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_DATA_LEN 2
-#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_HDR_OFST 12
-#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_HDR_LEN 2
-#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_DATA_OFST 14
-#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_DATA_LEN 2
-
-
-/***********************************/
-/* MC_CMD_RXD_MONITOR
- * Get histogram of RX queue fill level.
- */
-#define MC_CMD_RXD_MONITOR 0x22
-
-/* MC_CMD_RXD_MONITOR_IN msgrequest */
-#define MC_CMD_RXD_MONITOR_IN_LEN 12
-#define MC_CMD_RXD_MONITOR_IN_QID_OFST 0
-#define MC_CMD_RXD_MONITOR_IN_QID_LEN 4
-#define MC_CMD_RXD_MONITOR_IN_POLL_PERIOD_OFST 4
-#define MC_CMD_RXD_MONITOR_IN_POLL_PERIOD_LEN 4
-#define MC_CMD_RXD_MONITOR_IN_WIPE_OFST 8
-#define MC_CMD_RXD_MONITOR_IN_WIPE_LEN 4
-
-/* MC_CMD_RXD_MONITOR_OUT msgresponse */
-#define MC_CMD_RXD_MONITOR_OUT_LEN 80
-#define MC_CMD_RXD_MONITOR_OUT_QID_OFST 0
-#define MC_CMD_RXD_MONITOR_OUT_QID_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_RING_FILL_OFST 4
-#define MC_CMD_RXD_MONITOR_OUT_RING_FILL_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_FILL_OFST 8
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_FILL_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_1_OFST 12
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_1_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_2_OFST 16
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_2_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_4_OFST 20
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_4_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_8_OFST 24
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_8_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_16_OFST 28
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_16_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_32_OFST 32
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_32_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_64_OFST 36
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_64_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_128_OFST 40
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_128_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_256_OFST 44
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_256_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_RING_GE_256_OFST 48
-#define MC_CMD_RXD_MONITOR_OUT_RING_GE_256_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_1_OFST 52
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_1_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_2_OFST 56
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_2_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_4_OFST 60
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_4_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_8_OFST 64
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_8_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_16_OFST 68
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_16_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_32_OFST 72
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_32_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_GE_32_OFST 76
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_GE_32_LEN 4
-
-
-/***********************************/
/* MC_CMD_PUTS
* Copy the given ASCII string out onto UART and/or out of the network port.
*/
@@ -4931,6 +4186,54 @@
/* MC_CMD_GET_PHY_CFG_IN msgrequest */
#define MC_CMD_GET_PHY_CFG_IN_LEN 0
+/* MC_CMD_GET_PHY_CFG_IN_V2 msgrequest */
+#define MC_CMD_GET_PHY_CFG_IN_V2_LEN 8
+/* Target port to request PHY state for. Uses MAE_LINK_ENDPOINT_SELECTOR which
+ * identifies a real or virtual network port by MAE port and link end. See the
+ * structure definition for more details
+ */
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_OFST 0
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_LEN 8
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_LO_OFST 0
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_LO_LEN 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_LO_LBN 0
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_LO_WIDTH 32
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_HI_OFST 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_HI_LEN 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_HI_LBN 32
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_HI_WIDTH 32
+/* See structuredef: MAE_LINK_ENDPOINT_SELECTOR */
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_OFST 0
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_LEN 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FLAT_OFST 0
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FLAT_LEN 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_TYPE_OFST 3
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_TYPE_LEN 1
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_OFST 0
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_LEN 3
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_LBN 0
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_WIDTH 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_LBN 20
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_WIDTH 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_LBN 16
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_WIDTH 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_OFST 2
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_LEN 1
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_OFST 0
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_LEN 2
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_LINK_END_OFST 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_LINK_END_LEN 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_OFST 0
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_LEN 8
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_LO_OFST 0
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_LO_LEN 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_LO_LBN 0
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_LO_WIDTH 32
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_HI_OFST 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_HI_LEN 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_HI_LBN 32
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_HI_WIDTH 32
+
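/* Illustrative sketch (not part of this header): filling in the V2 TARGET
 * field from an already-encoded MAE m-port selector and link end value. The
 * MAE_LINK_ENDPOINT_SELECTOR encodings themselves are defined elsewhere, so
 * both inputs are treated as opaque dwords here; the function name is
 * hypothetical and put_unaligned_le32() is the kernel helper. The same layout
 * applies to the GET_LOOPBACK_MODES_IN_V2 TARGET field below.
 */
static inline void example_set_phy_cfg_target(u8 *buf, u32 mport_selector,
					      u32 link_end)
{
	put_unaligned_le32(mport_selector,
			   buf + MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_OFST);
	put_unaligned_le32(link_end,
			   buf + MC_CMD_GET_PHY_CFG_IN_V2_TARGET_LINK_END_OFST);
}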
/* MC_CMD_GET_PHY_CFG_OUT msgresponse */
#define MC_CMD_GET_PHY_CFG_OUT_LEN 72
/* flags */
@@ -5026,6 +4329,9 @@
#define MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_OFST 8
#define MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_LBN 21
#define MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_WIDTH 1
+#define MC_CMD_PHY_CAP_200000FDX_OFST 8
+#define MC_CMD_PHY_CAP_200000FDX_LBN 22
+#define MC_CMD_PHY_CAP_200000FDX_WIDTH 1
/* ?? */
#define MC_CMD_GET_PHY_CFG_OUT_CHANNEL_OFST 12
#define MC_CMD_GET_PHY_CFG_OUT_CHANNEL_LEN 4
@@ -5059,6 +4365,7 @@
#define MC_CMD_MEDIA_DSFP 0x8
#define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_OFST 48
#define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_LEN 4
+/* enum property: bitshift */
/* enum: Native clause 22 */
#define MC_CMD_MMD_CLAUSE22 0x0
#define MC_CMD_MMD_CLAUSE45_PMAPMD 0x1 /* enum */
@@ -5084,7 +4391,7 @@
#define MC_CMD_START_BIST 0x25
#undef MC_CMD_0x25_PRIVILEGE_CTG
-#define MC_CMD_0x25_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x25_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_START_BIST_IN msgrequest */
#define MC_CMD_START_BIST_IN_LEN 4
@@ -5124,7 +4431,7 @@
#define MC_CMD_POLL_BIST 0x26
#undef MC_CMD_0x26_PRIVILEGE_CTG
-#define MC_CMD_0x26_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x26_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_POLL_BIST_IN msgrequest */
#define MC_CMD_POLL_BIST_IN_LEN 0
@@ -5282,33 +4589,6 @@
/***********************************/
-/* MC_CMD_FLUSH_RX_QUEUES
- * Flush receive queue(s). If SRIOV is enabled (via MC_CMD_SRIOV), then RXQ
- * flushes should be initiated via this MCDI operation, rather than via
- * directly writing FLUSH_CMD.
- *
- * The flush is completed (either done/fail) asynchronously (after this command
- * returns). The driver must still wait for flush done/failure events as usual.
- */
-#define MC_CMD_FLUSH_RX_QUEUES 0x27
-
-/* MC_CMD_FLUSH_RX_QUEUES_IN msgrequest */
-#define MC_CMD_FLUSH_RX_QUEUES_IN_LENMIN 4
-#define MC_CMD_FLUSH_RX_QUEUES_IN_LENMAX 252
-#define MC_CMD_FLUSH_RX_QUEUES_IN_LENMAX_MCDI2 1020
-#define MC_CMD_FLUSH_RX_QUEUES_IN_LEN(num) (0+4*(num))
-#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_NUM(len) (((len)-0)/4)
-#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_OFST 0
-#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_LEN 4
-#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MINNUM 1
-#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM 63
-#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM_MCDI2 255
-
-/* MC_CMD_FLUSH_RX_QUEUES_OUT msgresponse */
-#define MC_CMD_FLUSH_RX_QUEUES_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_GET_LOOPBACK_MODES
* Returns a bitmask of loopback modes available at each speed.
*/
@@ -5320,6 +4600,54 @@
/* MC_CMD_GET_LOOPBACK_MODES_IN msgrequest */
#define MC_CMD_GET_LOOPBACK_MODES_IN_LEN 0
+/* MC_CMD_GET_LOOPBACK_MODES_IN_V2 msgrequest */
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_LEN 8
+/* Target port to request loopback modes for. Uses MAE_LINK_ENDPOINT_SELECTOR
+ * which identifies a real or virtual network port by MAE port and link end.
+ * See the structure definition for more details
+ */
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_LO_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_LO_LBN 0
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_HI_OFST 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_HI_LBN 32
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_HI_WIDTH 32
+/* See structuredef: MAE_LINK_ENDPOINT_SELECTOR */
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FLAT_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FLAT_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_TYPE_OFST 3
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_TYPE_LEN 1
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_LEN 3
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_LBN 0
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_WIDTH 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_LBN 20
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_WIDTH 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_LBN 16
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_WIDTH 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_OFST 2
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_LEN 1
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_LEN 2
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_LINK_END_OFST 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_LINK_END_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_LO_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_LO_LBN 0
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_HI_OFST 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_HI_LBN 32
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_HI_WIDTH 32
+
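/* Illustrative sketch (editorial, not part of the MCDI definitions): one way
 * a caller might assemble the TARGET field of MC_CMD_GET_LOOPBACK_MODES_IN_V2
 * by hand. It assumes MCDI dwords are little-endian at the OFST offsets
 * defined above; put_le32() is a hypothetical helper, and a real driver would
 * normally use its own MCDI buffer macros instead of open-coding this.
 */
#include <stdint.h>
#include <string.h>

static void put_le32(uint8_t *buf, unsigned int ofst, uint32_t val)
{
	buf[ofst + 0] = val & 0xff;
	buf[ofst + 1] = (val >> 8) & 0xff;
	buf[ofst + 2] = (val >> 16) & 0xff;
	buf[ofst + 3] = (val >> 24) & 0xff;
}

static void build_loopback_modes_v2_req(uint8_t *buf,
					uint32_t mport_selector,
					uint32_t link_end)
{
	memset(buf, 0, MC_CMD_GET_LOOPBACK_MODES_IN_V2_LEN);
	/* The MAE m-port selector occupies the low dword of TARGET... */
	put_le32(buf, MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_OFST,
		 mport_selector);
	/* ...and the link end (which end of the link) the high dword. */
	put_le32(buf, MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_LINK_END_OFST,
		 link_end);
}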
/* MC_CMD_GET_LOOPBACK_MODES_OUT msgresponse */
#define MC_CMD_GET_LOOPBACK_MODES_OUT_LEN 40
/* Supported loopbacks. */
@@ -5333,6 +4661,7 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_HI_LBN 32
#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_HI_WIDTH 32
+/* enum property: bitshift */
/* enum: None. */
#define MC_CMD_LOOPBACK_NONE 0x0
/* enum: Data. */
@@ -5422,6 +4751,7 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_HI_LBN 96
#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_HI_WIDTH 32
+/* enum property: bitshift */
/* Enum values, see field(s): */
/* 100M */
/* Supported loopbacks. */
@@ -5435,6 +4765,7 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_HI_LBN 160
#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_HI_WIDTH 32
+/* enum property: bitshift */
/* Enum values, see field(s): */
/* 100M */
/* Supported loopbacks. */
@@ -5448,6 +4779,7 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_HI_LBN 224
#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_HI_WIDTH 32
+/* enum property: bitshift */
/* Enum values, see field(s): */
/* 100M */
/* Supported loopbacks. */
@@ -5461,6 +4793,7 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_HI_LBN 288
#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_HI_WIDTH 32
+/* enum property: bitshift */
/* Enum values, see field(s): */
/* 100M */
@@ -5479,6 +4812,7 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_HI_LBN 32
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_HI_WIDTH 32
+/* enum property: bitshift */
/* enum: None. */
/* MC_CMD_LOOPBACK_NONE 0x0 */
/* enum: Data. */
@@ -5568,6 +4902,7 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_HI_LBN 96
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_HI_WIDTH 32
+/* enum property: bitshift */
/* Enum values, see field(s): */
/* 100M */
/* Supported loopbacks. */
@@ -5581,6 +4916,7 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_HI_LBN 160
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_HI_WIDTH 32
+/* enum property: bitshift */
/* Enum values, see field(s): */
/* 100M */
/* Supported loopbacks. */
@@ -5594,6 +4930,7 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_HI_LBN 224
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_HI_WIDTH 32
+/* enum property: bitshift */
/* Enum values, see field(s): */
/* 100M */
/* Supported loopbacks. */
@@ -5607,6 +4944,7 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_HI_LBN 288
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_HI_WIDTH 32
+/* enum property: bitshift */
/* Enum values, see field(s): */
/* 100M */
/* Supported 25G loopbacks. */
@@ -5620,6 +4958,7 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_HI_LBN 352
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_HI_WIDTH 32
+/* enum property: bitshift */
/* Enum values, see field(s): */
/* 100M */
/* Supported 50 loopbacks. */
@@ -5633,6 +4972,7 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_HI_LBN 416
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_HI_WIDTH 32
+/* enum property: bitshift */
/* Enum values, see field(s): */
/* 100M */
/* Supported 100G loopbacks. */
@@ -5646,6 +4986,214 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_HI_LBN 480
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_HI_WIDTH 32
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* 100M */
+
+/* MC_CMD_GET_LOOPBACK_MODES_OUT_V3 msgresponse: Supported loopback modes for
+ * newer NICs with 200G support
+ */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_LEN 72
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_LO_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_LO_LBN 0
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_HI_OFST 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_HI_LBN 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_HI_WIDTH 32
+/* enum property: bitshift */
+/* enum: None. */
+/* MC_CMD_LOOPBACK_NONE 0x0 */
+/* enum: Data. */
+/* MC_CMD_LOOPBACK_DATA 0x1 */
+/* enum: GMAC. */
+/* MC_CMD_LOOPBACK_GMAC 0x2 */
+/* enum: XGMII. */
+/* MC_CMD_LOOPBACK_XGMII 0x3 */
+/* enum: XGXS. */
+/* MC_CMD_LOOPBACK_XGXS 0x4 */
+/* enum: XAUI. */
+/* MC_CMD_LOOPBACK_XAUI 0x5 */
+/* enum: GMII. */
+/* MC_CMD_LOOPBACK_GMII 0x6 */
+/* enum: SGMII. */
+/* MC_CMD_LOOPBACK_SGMII 0x7 */
+/* enum: XGBR. */
+/* MC_CMD_LOOPBACK_XGBR 0x8 */
+/* enum: XFI. */
+/* MC_CMD_LOOPBACK_XFI 0x9 */
+/* enum: XAUI Far. */
+/* MC_CMD_LOOPBACK_XAUI_FAR 0xa */
+/* enum: GMII Far. */
+/* MC_CMD_LOOPBACK_GMII_FAR 0xb */
+/* enum: SGMII Far. */
+/* MC_CMD_LOOPBACK_SGMII_FAR 0xc */
+/* enum: XFI Far. */
+/* MC_CMD_LOOPBACK_XFI_FAR 0xd */
+/* enum: GPhy. */
+/* MC_CMD_LOOPBACK_GPHY 0xe */
+/* enum: PhyXS. */
+/* MC_CMD_LOOPBACK_PHYXS 0xf */
+/* enum: PCS. */
+/* MC_CMD_LOOPBACK_PCS 0x10 */
+/* enum: PMA-PMD. */
+/* MC_CMD_LOOPBACK_PMAPMD 0x11 */
+/* enum: Cross-Port. */
+/* MC_CMD_LOOPBACK_XPORT 0x12 */
+/* enum: XGMII-Wireside. */
+/* MC_CMD_LOOPBACK_XGMII_WS 0x13 */
+/* enum: XAUI Wireside. */
+/* MC_CMD_LOOPBACK_XAUI_WS 0x14 */
+/* enum: XAUI Wireside Far. */
+/* MC_CMD_LOOPBACK_XAUI_WS_FAR 0x15 */
+/* enum: XAUI Wireside near. */
+/* MC_CMD_LOOPBACK_XAUI_WS_NEAR 0x16 */
+/* enum: GMII Wireside. */
+/* MC_CMD_LOOPBACK_GMII_WS 0x17 */
+/* enum: XFI Wireside. */
+/* MC_CMD_LOOPBACK_XFI_WS 0x18 */
+/* enum: XFI Wireside Far. */
+/* MC_CMD_LOOPBACK_XFI_WS_FAR 0x19 */
+/* enum: PhyXS Wireside. */
+/* MC_CMD_LOOPBACK_PHYXS_WS 0x1a */
+/* enum: PMA lanes MAC-Serdes. */
+/* MC_CMD_LOOPBACK_PMA_INT 0x1b */
+/* enum: KR Serdes Parallel (Encoder). */
+/* MC_CMD_LOOPBACK_SD_NEAR 0x1c */
+/* enum: KR Serdes Serial. */
+/* MC_CMD_LOOPBACK_SD_FAR 0x1d */
+/* enum: PMA lanes MAC-Serdes Wireside. */
+/* MC_CMD_LOOPBACK_PMA_INT_WS 0x1e */
+/* enum: KR Serdes Parallel Wireside (Full PCS). */
+/* MC_CMD_LOOPBACK_SD_FEP2_WS 0x1f */
+/* enum: KR Serdes Parallel Wireside (Sym Aligner to TX). */
+/* MC_CMD_LOOPBACK_SD_FEP1_5_WS 0x20 */
+/* enum: KR Serdes Parallel Wireside (Deserializer to Serializer). */
+/* MC_CMD_LOOPBACK_SD_FEP_WS 0x21 */
+/* enum: KR Serdes Serial Wireside. */
+/* MC_CMD_LOOPBACK_SD_FES_WS 0x22 */
+/* enum: Near side of AOE Siena side port */
+/* MC_CMD_LOOPBACK_AOE_INT_NEAR 0x23 */
+/* enum: Medford Wireside datapath loopback */
+/* MC_CMD_LOOPBACK_DATA_WS 0x24 */
+/* enum: Force link up without setting up any physical loopback (snapper use
+ * only)
+ */
+/* MC_CMD_LOOPBACK_FORCE_EXT_LINK 0x25 */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_OFST 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_LO_OFST 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_LO_LBN 64
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_HI_OFST 12
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_HI_LBN 96
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_HI_WIDTH 32
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_OFST 16
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_LO_OFST 16
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_LO_LBN 128
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_HI_OFST 20
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_HI_LBN 160
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_HI_WIDTH 32
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_OFST 24
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_LO_OFST 24
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_LO_LBN 192
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_HI_OFST 28
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_HI_LBN 224
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_HI_WIDTH 32
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_OFST 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_LO_OFST 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_LO_LBN 256
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_HI_OFST 36
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_HI_LBN 288
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_HI_WIDTH 32
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported 25G loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_OFST 40
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_LO_OFST 40
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_LO_LBN 320
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_HI_OFST 44
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_HI_LBN 352
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_HI_WIDTH 32
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported 50G loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_OFST 48
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_LO_OFST 48
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_LO_LBN 384
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_HI_OFST 52
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_HI_LBN 416
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_HI_WIDTH 32
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported 100G loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_OFST 56
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_LO_OFST 56
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_LO_LBN 448
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_HI_OFST 60
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_HI_LBN 480
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_HI_WIDTH 32
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported 200G loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_OFST 64
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_LO_OFST 64
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_LO_LBN 512
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_HI_OFST 68
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_HI_LBN 544
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_HI_WIDTH 32
+/* enum property: bitshift */
/* Enum values, see field(s): */
/* 100M */
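/* Illustrative sketch (editorial, not part of the MCDI definitions): each
 * per-speed field of MC_CMD_GET_LOOPBACK_MODES_OUT_V3 is a 64-bit mask in
 * which every MC_CMD_LOOPBACK_* enum value is a bit position ("enum property:
 * bitshift"). Assuming the response dwords are little-endian, a check for a
 * given mode being supported at 200G might look like this; get_le32() is a
 * hypothetical helper.
 */
#include <stdbool.h>
#include <stdint.h>

static uint32_t get_le32(const uint8_t *buf, unsigned int ofst)
{
	return (uint32_t)buf[ofst] |
	       ((uint32_t)buf[ofst + 1] << 8) |
	       ((uint32_t)buf[ofst + 2] << 16) |
	       ((uint32_t)buf[ofst + 3] << 24);
}

static bool loopback_supported_at_200g(const uint8_t *outbuf, unsigned int mode)
{
	uint64_t mask;

	/* Reassemble the 64-bit 200G mask from its LO/HI dwords. */
	mask = get_le32(outbuf, MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_LO_OFST) |
	       ((uint64_t)get_le32(outbuf,
				   MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_HI_OFST) << 32);
	return mode < 64 && (mask & (1ULL << mode));
}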
@@ -5673,13 +5221,835 @@
#define FEC_TYPE_TYPE_LEN 4
/* enum: No FEC */
#define MC_CMD_FEC_NONE 0x0
-/* enum: Clause 74 BASE-R FEC (a.k.a Firecode) */
+/* enum: IEEE 802.3, Clause 74 BASE-R FEC (a.k.a Firecode) */
#define MC_CMD_FEC_BASER 0x1
-/* enum: Clause 91/Clause 108 Reed-Solomon FEC */
+/* enum: IEEE 802.3, Clause 91/Clause 108 Reed-Solomon FEC */
#define MC_CMD_FEC_RS 0x2
+/* enum: IEEE 802.3, Clause 161, interleaved RS-FEC sublayer for 100GBASE-R
+ * PHYs
+ */
+#define MC_CMD_FEC_IEEE_RS_INT 0x3
+/* enum: Ethernet Consortium, Low Latency RS-FEC. RS(272, 258). Replaces FEC
+ * specified in Clause 119 for 100/200G PHY. Replaces FEC specified in Clause
+ * 134 for 50G PHY.
+ */
+#define MC_CMD_FEC_ETCS_RS_LL 0x4
+/* enum: FEC mode selected automatically */
+#define MC_CMD_FEC_AUTO 0x5
#define FEC_TYPE_TYPE_LBN 0
#define FEC_TYPE_TYPE_WIDTH 32
+/* MC_CMD_ETH_TECH structuredef: Ethernet technology as defined by IEEE 802.3,
+ * the Ethernet Technology Consortium, or proprietary technologies. The driver must
+ * not use technologies labelled NONE and AUTO.
+ */
+#define MC_CMD_ETH_TECH_LEN 16
+/* The enums in this field can be used either as bitwise indices into a tech
+ * mask (e.g. see MC_CMD_ETH_AN_FIELDS/TECH_MASK) or as regular
+ * enums (e.g. see MC_CMD_LINK_CTRL_IN/ADVERTISED_TECH_ABILITIES_MASK). This
+ * structure must be updated to add new technologies when projects that need
+ * them arise. An incomplete list of possible future expansions includes:
+ * 100GBASE_KP4, 800GBASE_CR8, 800GBASE_KR8, 800GBASE_DR8, 800GBASE_SR8
+ * 800GBASE_VR8
+ */
+#define MC_CMD_ETH_TECH_TECH_OFST 0
+#define MC_CMD_ETH_TECH_TECH_LEN 16
+/* enum: 1000BASE-KX - 1000BASE-X PCS/PMA over an electrical backplane PMD. See
+ * IEEE 802.3 Clause 70
+ */
+#define MC_CMD_ETH_TECH_1000BASEKX 0x0
+/* enum: 10GBASE-R - PCS/PMA over an electrical backplane PMD. Refer to IEEE
+ * 802.3 Clause 72
+ */
+#define MC_CMD_ETH_TECH_10GBASE_KR 0x1
+/* enum: 40GBASE-R PCS/PMA over an electrical backplane PMD. See IEEE 802.3
+ * Clause 84.
+ */
+#define MC_CMD_ETH_TECH_40GBASE_KR4 0x2
+/* enum: 40GBASE-R PCS/PMA over 4 lane shielded copper balanced cable PMD. See
+ * IEEE 802.3 Clause 85
+ */
+#define MC_CMD_ETH_TECH_40GBASE_CR4 0x3
+/* enum: 40GBASE-R PCS/PMA over 4 lane multimode fiber PMD as specified in
+ * Clause 86
+ */
+#define MC_CMD_ETH_TECH_40GBASE_SR4 0x4
+/* enum: 40GBASE-R PCS/PMA over 4 WDM lane single mode fiber PMD with long
+ * reach. See IEEE 802.3 Clause 87
+ */
+#define MC_CMD_ETH_TECH_40GBASE_LR4 0x5
+/* enum: 25GBASE-R PCS/PMA over shielded balanced copper cable PMD. See IEEE
+ * 802.3 Clause 110
+ */
+#define MC_CMD_ETH_TECH_25GBASE_CR 0x6
+/* enum: 25GBASE-R PCS/PMA over an electrical backplane PMD. See IEEE 802.3
+ * Clause 111
+ */
+#define MC_CMD_ETH_TECH_25GBASE_KR 0x7
+/* enum: 25GBASE-R PCS/PMA over multimode fiber PMD. Refer to IEEE 802.3 Clause
+ * 112
+ */
+#define MC_CMD_ETH_TECH_25GBASE_SR 0x8
+/* enum: An Ethernet Physical layer operating at 50 Gb/s on twin-axial copper
+ * cable. Refer to Ethernet Technology Consortium 25/50G Ethernet Spec.
+ */
+#define MC_CMD_ETH_TECH_50GBASE_CR2 0x9
+/* enum: An Ethernet Physical layer operating at 50 Gb/s on copper backplane.
+ * Refer to Ethernet Technology Consortium 25/50G Ethernet Spec.
+ */
+#define MC_CMD_ETH_TECH_50GBASE_KR2 0xa
+/* enum: 100GBASE-R PCS/PMA over an electrical backplane PMD. See IEEE 802.3
+ * Clause 93
+ */
+#define MC_CMD_ETH_TECH_100GBASE_KR4 0xb
+/* enum: 100GBASE-R PCS/PMA over 4 lane multimode fiber PMD. See IEEE 802.3
+ * Clause 95
+ */
+#define MC_CMD_ETH_TECH_100GBASE_SR4 0xc
+/* enum: 100GBASE-R PCS/PMA over 4 lane shielded copper balanced cable PMD. See
+ * IEEE 802.3 Clause 92
+ */
+#define MC_CMD_ETH_TECH_100GBASE_CR4 0xd
+/* enum: 100GBASE-R PCS/PMA over 4 WDM lane single mode fiber PMD, with
+ * long/extended reach. See IEEE 802.3 Clause 88
+ */
+#define MC_CMD_ETH_TECH_100GBASE_LR4_ER4 0xe
+/* enum: An Ethernet Physical layer operating at 50 Gb/s on short reach fiber.
+ * Refer to Ethernet Technology Consortium 25/50G Ethernet Spec.
+ */
+#define MC_CMD_ETH_TECH_50GBASE_SR2 0xf
+/* enum: 1000BASEX PCS/PMA. See IEEE 802.3 Clause 36 over undefined PMD, duplex
+ * mode unknown
+ */
+#define MC_CMD_ETH_TECH_1000BASEX 0x10
+/* enum: Non-standardised. 10G direct attach */
+#define MC_CMD_ETH_TECH_10GBASE_CR 0x11
+/* enum: 10GBASE-SR fiber over 850nm optics. See IEEE 802.3 Clause 52 */
+#define MC_CMD_ETH_TECH_10GBASE_SR 0x12
+/* enum: 10GBASE-LR fiber over 1310nm optics. See IEEE 802.3 Clause 52 */
+#define MC_CMD_ETH_TECH_10GBASE_LR 0x13
+/* enum: 10GBASE-LRM fiber over 1310 nm optics. See IEEE 802.3 Clause 68 */
+#define MC_CMD_ETH_TECH_10GBASE_LRM 0x14
+/* enum: 10GBASE-ER fiber over 1550nm optics. See IEEE 802.3 Clause 52 */
+#define MC_CMD_ETH_TECH_10GBASE_ER 0x15
+/* enum: 50GBASE-R PCS/PMA over an electrical backplane PMD. See IEEE 802.3
+ * Clause 137
+ */
+#define MC_CMD_ETH_TECH_50GBASE_KR 0x16
+/* enum: 50GBASE-SR PCS/PMA over multimode fiber PMD as specified in Clause 138
+ */
+#define MC_CMD_ETH_TECH_50GBASE_SR 0x17
+/* enum: 50GBASE-CR PCS/PMA over shielded copper balanced cable PMD. See IEEE
+ * 802.3 Clause 136
+ */
+#define MC_CMD_ETH_TECH_50GBASE_CR 0x18
+/* enum: 50GBASE-R PCS/PMA over single mode fiber PMD as specified in Clause
+ * 139.
+ */
+#define MC_CMD_ETH_TECH_50GBASE_LR_ER_FR 0x19
+/* enum: 100 Gb/s PHY using 100GBASE-R encoding over single-mode fiber with
+ * reach up to at least 500 m (see IEEE 802.3 Clause 140)
+ */
+#define MC_CMD_ETH_TECH_50GBASE_DR 0x1a
+/* enum: 100GBASE-R PCS/PMA over an electrical backplane PMD. See IEEE 802.3
+ * Clause 137
+ */
+#define MC_CMD_ETH_TECH_100GBASE_KR2 0x1b
+/* enum: 100GBASE-R PCS/PMA over 2 lane multimode fiber PMD. See IEEE 802.3
+ * Clause 138
+ */
+#define MC_CMD_ETH_TECH_100GBASE_SR2 0x1c
+/* enum: 100GBASE-R PCS/PMA over 2 lane shielded copper balanced cable PMD. See
+ * IEEE 802.3 Clause 136
+ */
+#define MC_CMD_ETH_TECH_100GBASE_CR2 0x1d
+/* enum: Unknown source */
+#define MC_CMD_ETH_TECH_100GBASE_LR2_ER2_FR2 0x1e
+/* enum: Unknown source */
+#define MC_CMD_ETH_TECH_100GBASE_DR2 0x1f
+/* enum: 200GBASE-R PCS/PMA over an electrical backplane PMD. See IEEE 802.3
+ * Clause 137
+ */
+#define MC_CMD_ETH_TECH_200GBASE_KR4 0x20
+/* enum: 200GBASE-R PCS/PMA over 4 lane multimode fiber PMD. See IEEE 802.3
+ * Clause 138
+ */
+#define MC_CMD_ETH_TECH_200GBASE_SR4 0x21
+/* enum: 200GBASE-R PCS/PMA over 4 WDM lane single-mode fiber PMD as specified
+ * in Clause 122
+ */
+#define MC_CMD_ETH_TECH_200GBASE_LR4_ER4_FR4 0x22
+/* enum: 200GBASE-R PCS/PMA over 4-lane single-mode fiber PMD. See IEEE 802.3
+ * Clause 121
+ */
+#define MC_CMD_ETH_TECH_200GBASE_DR4 0x23
+/* enum: 200GBASE-R PCS/PMA over 4 lane shielded copper balanced cable PMD as
+ * specified in Clause 136
+ */
+#define MC_CMD_ETH_TECH_200GBASE_CR4 0x24
+/* enum: Ethernet Technology Consortium 400G AN Spec. 400GBASE-KR8 PMD uses
+ * 802.3 Clause 137, but the number of PMD lanes is 8.
+ */
+#define MC_CMD_ETH_TECH_400GBASE_KR8 0x25
+/* enum: 400GBASE-R PCS/PMA over 8-lane multimode fiber PMD. See IEEE 802.3
+ * Clause 138
+ */
+#define MC_CMD_ETH_TECH_400GBASE_SR8 0x26
+/* enum: 400GBASE-R PCS/PMA over 8 WDM lane single-mode fiber PMD. See IEEE
+ * 802.3 Clause 122
+ */
+#define MC_CMD_ETH_TECH_400GBASE_LR8_ER8_FR8 0x27
+/* enum: Unknown source */
+#define MC_CMD_ETH_TECH_400GBASE_DR8 0x28
+/* enum: Ethernet Technology Consortium 400G AN Spec. 400GBASE-CR8 PMD uses
+ * IEEE 802.3 Clause 136, but the number of PMD lanes is 8.
+ */
+#define MC_CMD_ETH_TECH_400GBASE_CR8 0x29
+/* enum: 100GBASE-R PCS/PMA over an electrical backplane PMD. See IEEE 802.3ck
+ * Clause 163.
+ */
+#define MC_CMD_ETH_TECH_100GBASE_KR 0x2a
+/* enum: IEEE 802.3ck. 100G PHY with PMD as specified in Clause 167 over short
+ * reach fiber
+ */
+#define MC_CMD_ETH_TECH_100GBASE_SR 0x2b
+/* enum: 100G PMD together with single-mode fiber medium. See IEEE 802.3 Clause
+ * 140
+ */
+#define MC_CMD_ETH_TECH_100GBASE_LR_ER_FR 0x2c
+/* enum: 100GBASE-R PCS/PMA over shielded balanced copper cable PMD. See IEEE
+ * 802.3ck Clause 162.
+ */
+#define MC_CMD_ETH_TECH_100GBASE_CR 0x2d
+/* enum: 100G PMD together with single-mode fiber medium. See IEEE 802.3 Clause
+ * 140
+ */
+#define MC_CMD_ETH_TECH_100GBASE_DR 0x2e
+/* enum: 200GBASE-R PCS/PMA over an electrical backplane PMD as specified in
+ * Clause 163 IEEE 802.3ck
+ */
+#define MC_CMD_ETH_TECH_200GBASE_KR2 0x2f
+/* enum: 200G PHY with PMD as specified in Clause 167 over short reach fiber
+ * IEEE 802.3ck
+ */
+#define MC_CMD_ETH_TECH_200GBASE_SR2 0x30
+/* enum: Unknown source */
+#define MC_CMD_ETH_TECH_200GBASE_LR2_ER2_FR2 0x31
+/* enum: Unknown source */
+#define MC_CMD_ETH_TECH_200GBASE_DR2 0x32
+/* enum: 200GBASE-R PCS/PMA over 2 lane shielded balanced copper cable PMD as
+ * specified in Clause 162 IEEE 802.3ck.
+ */
+#define MC_CMD_ETH_TECH_200GBASE_CR2 0x33
+/* enum: 400GBASE-R PCS/PMA over an electrical backplane PMD. See IEEE 802.3
+ * Clause 163 IEEE 802.3ck.
+ */
+#define MC_CMD_ETH_TECH_400GBASE_KR4 0x34
+/* enum: 400G PHY with PMD over short reach fiber. See Clause 167 of IEEE
+ * 802.3ck.
+ */
+#define MC_CMD_ETH_TECH_400GBASE_SR4 0x35
+/* enum: 400GBASE-R PCS/PMA over 4 WDM lane single-mode fiber PMD. See IEEE
+ * 802.3 Clause 151
+ */
+#define MC_CMD_ETH_TECH_400GBASE_LR4_ER4_FR4 0x36
+/* enum: 400GBASE-R PCS/PMA over 4-lane single-mode fiber PMD as specified in
+ * Clause 124
+ */
+#define MC_CMD_ETH_TECH_400GBASE_DR4 0x37
+/* enum: 400GBASE-R PCS/PMA over 4 lane shielded balanced copper cable PMD as
+ * specified in Clause 162 of IEEE 802.3ck.
+ */
+#define MC_CMD_ETH_TECH_400GBASE_CR4 0x38
+/* enum: Automatic tech mode. The driver must not use this. */
+#define MC_CMD_ETH_TECH_AUTO 0x39
+/* enum: See IEEE 802.3cc-2017 Clause 114 */
+#define MC_CMD_ETH_TECH_25GBASE_LR_ER 0x3a
+/* enum: Up to 7 m over twinaxial copper cable assembly (10 lanes, 10 Gbit/s
+ * each) See IEEE 802.3ba-2010 Clause 85
+ */
+#define MC_CMD_ETH_TECH_100GBASE_CR10 0x3b
+/* enum: Invalid tech mode. The driver must not use this. */
+#define MC_CMD_ETH_TECH_NONE 0x7f
+#define MC_CMD_ETH_TECH_TECH_LBN 0
+#define MC_CMD_ETH_TECH_TECH_WIDTH 128
+
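/* Illustrative sketch (editorial, not part of the MCDI definitions): the
 * MC_CMD_ETH_TECH_* enums serve both as bit indices into a 128-bit technology
 * mask (e.g. ADVERTISED_TECH_ABILITIES_MASK) and as plain values (e.g. the
 * LINK_TECHNOLOGY field). A minimal helper for the bitmask use, assuming the
 * 16-byte mask is stored little-endian so bit N lives in byte N/8:
 */
#include <stdint.h>

static void eth_tech_mask_set(uint8_t mask[MC_CMD_ETH_TECH_TECH_LEN],
			      unsigned int tech)
{
	if (tech < 8 * MC_CMD_ETH_TECH_TECH_LEN)
		mask[tech / 8] |= 1u << (tech % 8);
}

/* Example: mark 25GBASE-KR and 100GBASE-CR4 as advertised abilities. */
static void advertise_example(uint8_t mask[MC_CMD_ETH_TECH_TECH_LEN])
{
	eth_tech_mask_set(mask, MC_CMD_ETH_TECH_25GBASE_KR);
	eth_tech_mask_set(mask, MC_CMD_ETH_TECH_100GBASE_CR4);
}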
+/* MC_CMD_LINK_STATUS_FLAGS structuredef */
+#define MC_CMD_LINK_STATUS_FLAGS_LEN 8
+/* Flags used to report the current configuration/state of the link. */
+#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_OFST 0
+#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_LEN 8
+#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_LO_OFST 0
+#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_LO_LEN 4
+#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_LO_LBN 0
+#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_LO_WIDTH 32
+#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_HI_OFST 4
+#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_HI_LEN 4
+#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_HI_LBN 32
+#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_HI_WIDTH 32
+/* enum property: bitshift */
+/* enum: Whether we have overall link up */
+#define MC_CMD_LINK_STATUS_FLAGS_LINK_UP 0x0
+/* enum: If set, the PHY has no external RX link synchronisation */
+#define MC_CMD_LINK_STATUS_FLAGS_NO_PHY_LINK 0x1
+/* enum: If set, PMD/MDI is not connected (e.g. cable disconnected, module cage
+ * empty)
+ */
+#define MC_CMD_LINK_STATUS_FLAGS_PMD_MDI_DISCONNECTED 0x2
+/* enum: Set on error while decoding module data (e.g. module EEPROM does not
+ * contain valid values, has checksum errors, etc.)
+ */
+#define MC_CMD_LINK_STATUS_FLAGS_PMD_BAD 0x3
+/* enum: Set when module unsupported (e.g. unsupported link rate or link
+ * technology)
+ */
+#define MC_CMD_LINK_STATUS_FLAGS_PMD_UNSUPPORTED 0x4
+/* enum: Set on error while communicating with the module (e.g. I2C errors
+ * while reading EEPROM)
+ */
+#define MC_CMD_LINK_STATUS_FLAGS_PMD_COMMS_FAULT 0x5
+/* enum: Set on module overcurrent/overvoltage condition */
+#define MC_CMD_LINK_STATUS_FLAGS_PMD_POWER_FAULT 0x6
+/* enum: Set on module overtemperature condition */
+#define MC_CMD_LINK_STATUS_FLAGS_PMD_THERMAL_FAULT 0x7
+/* enum: If set, the module is indicating Loss of Signal */
+#define MC_CMD_LINK_STATUS_FLAGS_PMD_LOS 0x8
+/* enum: If set, PMA is indicating loss of CDR lock (clock sync) */
+#define MC_CMD_LINK_STATUS_FLAGS_PMA_NO_CDR_LOCK 0x9
+/* enum: If set, PMA is indicating loss of analog signal */
+#define MC_CMD_LINK_STATUS_FLAGS_PMA_LOS 0xa
+/* enum: If set, PCS is indicating loss of block lock */
+#define MC_CMD_LINK_STATUS_FLAGS_PCS_NO_BLOCK_LOCK 0xb
+/* enum: If set, PCS is indicating loss of alignment marker lock on one or more
+ * lanes
+ */
+#define MC_CMD_LINK_STATUS_FLAGS_PCS_NO_AM_LOCK 0xc
+/* enum: If set, PCS is indicating loss of overall alignment lock */
+#define MC_CMD_LINK_STATUS_FLAGS_PCS_NO_ALIGN_LOCK 0xd
+/* enum: If set, PCS is indicating high bit error rate condition. */
+#define MC_CMD_LINK_STATUS_FLAGS_PCS_HI_BER 0xe
+/* enum: If set, FEC is indicating loss of FEC lock */
+#define MC_CMD_LINK_STATUS_FLAGS_FEC_NO_LOCK 0xf
+/* enum: If set, indicates that the number of symbol errors in an 8192-codeword
+ * window has exceeded the threshold K (417).
+ */
+#define MC_CMD_LINK_STATUS_FLAGS_FEC_HI_SER 0x10
+/* enum: If set, the receiver has detected the local FEC has degraded. */
+#define MC_CMD_LINK_STATUS_FLAGS_FEC_LOCAL_DEGRADED 0x11
+/* enum: If set, the receiver has detected the remote FEC has degraded. */
+#define MC_CMD_LINK_STATUS_FLAGS_FEC_RM_DEGRADED 0x12
+/* enum: If set, the number of symbol errors is over an internal threshold. */
+#define MC_CMD_LINK_STATUS_FLAGS_FEC_DEGRADED_SER 0x13
+/* enum: If set, autonegotiation has detected an auto-negotiation capable link
+ * partner
+ */
+#define MC_CMD_LINK_STATUS_FLAGS_AN_ABLE 0x14
+/* enum: If set, autonegotiation base page exchange has failed */
+#define MC_CMD_LINK_STATUS_FLAGS_AN_BP_FAILED 0x15
+/* enum: If set, autonegotiation next page exchange has failed */
+#define MC_CMD_LINK_STATUS_FLAGS_AN_NP_FAILED 0x16
+/* enum: If set, autonegotiation has failed to negotiate a common set of
+ * capabilities
+ */
+#define MC_CMD_LINK_STATUS_FLAGS_AN_NO_HCD 0x17
+/* enum: If set, local end link training has failed to establish link training
+ * frame lock on one or more lanes
+ */
+#define MC_CMD_LINK_STATUS_FLAGS_LT_NO_LOCAL_FRAME_LOCK 0x18
+/* enum: If set, remote end link training has failed to establish link training
+ * frame lock on one or more lanes
+ */
+#define MC_CMD_LINK_STATUS_FLAGS_LT_NO_RM_FRAME_LOCK 0x19
+/* enum: If set, remote end has failed to assert Receiver Ready (link training
+ * success) within the designated timeout
+ */
+#define MC_CMD_LINK_STATUS_FLAGS_LT_NO_RX_READY 0x1a
+#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_LBN 0
+#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_WIDTH 64
+
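/* Illustrative sketch (editorial, not part of the MCDI definitions):
 * STATUS_FLAGS is a 64-bit mask in which each MC_CMD_LINK_STATUS_FLAGS_* enum
 * is a bit position. Given the 64-bit value already assembled from the LO/HI
 * dwords, overall link-up can be tested as:
 */
#include <stdbool.h>
#include <stdint.h>

static bool link_is_up(uint64_t status_flags)
{
	return status_flags & (1ULL << MC_CMD_LINK_STATUS_FLAGS_LINK_UP);
}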
+/* MC_CMD_PAUSE_MODE structuredef */
+#define MC_CMD_PAUSE_MODE_LEN 1
+#define MC_CMD_PAUSE_MODE_TYPE_OFST 0
+#define MC_CMD_PAUSE_MODE_TYPE_LEN 1
+/* enum: See IEEE 802.3 Clause 73.6.6 */
+#define MC_CMD_PAUSE_MODE_AN_PAUSE 0x0
+/* enum: See IEEE 802.3 Clause 73.6.6 */
+#define MC_CMD_PAUSE_MODE_AN_ASYM_DIR 0x1
+#define MC_CMD_PAUSE_MODE_TYPE_LBN 0
+#define MC_CMD_PAUSE_MODE_TYPE_WIDTH 8
+
+/* MC_CMD_ETH_AN_FIELDS structuredef: Fields used for IEEE 802.3 Clause 73
+ * Auto-Negotiation. Warning - This is fixed size and cannot be extended. This
+ * structure is used to define autonegotiable abilities (advertised, link
+ * partner and supported abilities).
+ */
+#define MC_CMD_ETH_AN_FIELDS_LEN 25
+/* Mask of Ethernet technologies. The bit indices in this mask are taken from
+ * the TECH field in the MC_CMD_ETH_TECH structure.
+ */
+#define MC_CMD_ETH_AN_FIELDS_TECH_MASK_OFST 0
+#define MC_CMD_ETH_AN_FIELDS_TECH_MASK_LEN 16
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_ETH_TECH/TECH */
+#define MC_CMD_ETH_AN_FIELDS_TECH_MASK_LBN 0
+#define MC_CMD_ETH_AN_FIELDS_TECH_MASK_WIDTH 128
+/* Mask of supported FEC modes */
+#define MC_CMD_ETH_AN_FIELDS_FEC_MASK_OFST 16
+#define MC_CMD_ETH_AN_FIELDS_FEC_MASK_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* FEC_TYPE/TYPE */
+#define MC_CMD_ETH_AN_FIELDS_FEC_MASK_LBN 128
+#define MC_CMD_ETH_AN_FIELDS_FEC_MASK_WIDTH 32
+/* Mask of requested FEC modes */
+#define MC_CMD_ETH_AN_FIELDS_FEC_REQ_OFST 20
+#define MC_CMD_ETH_AN_FIELDS_FEC_REQ_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* FEC_TYPE/TYPE */
+#define MC_CMD_ETH_AN_FIELDS_FEC_REQ_LBN 160
+#define MC_CMD_ETH_AN_FIELDS_FEC_REQ_WIDTH 32
+/* Bitmask of negotiated pause modes */
+#define MC_CMD_ETH_AN_FIELDS_PAUSE_MASK_OFST 24
+#define MC_CMD_ETH_AN_FIELDS_PAUSE_MASK_LEN 1
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_PAUSE_MODE/TYPE */
+#define MC_CMD_ETH_AN_FIELDS_PAUSE_MASK_LBN 192
+#define MC_CMD_ETH_AN_FIELDS_PAUSE_MASK_WIDTH 8
+
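/* Illustrative sketch (editorial, not part of the MCDI definitions):
 * MC_CMD_ETH_AN_FIELDS packs a 128-bit technology mask, 32-bit FEC
 * ability/request masks and an 8-bit pause mask into 25 bytes. Assuming the
 * FEC_REQ dword is little-endian, a check for the partner requesting RS-FEC
 * could be written as below; the caller would pass a pointer to the start of
 * an embedded MC_CMD_ETH_AN_FIELDS instance (for example
 * outbuf + MC_CMD_LINK_STATE_OUT_LINK_PARTNER_ABILITIES_OFST).
 */
#include <stdbool.h>
#include <stdint.h>

static bool an_fields_fec_rs_requested(const uint8_t *an)
{
	const uint8_t *p = an + MC_CMD_ETH_AN_FIELDS_FEC_REQ_OFST;
	uint32_t fec_req = (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
			   ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);

	/* FEC_REQ is a bitmask indexed by the FEC_TYPE/TYPE enum values. */
	return fec_req & (1u << MC_CMD_FEC_RS);
}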
+/* MC_CMD_LOOPBACK_V2 structuredef: Loopback modes for use with the new
+ * MC_CMD_LINK_CTRL and MC_CMD_LINK_STATE. These loopback modes are not
+ * supported in other getlink/setlink commands.
+ */
+#define MC_CMD_LOOPBACK_V2_LEN 4
+#define MC_CMD_LOOPBACK_V2_MODE_OFST 0
+#define MC_CMD_LOOPBACK_V2_MODE_LEN 4
+/* enum: No loopback */
+#define MC_CMD_LOOPBACK_V2_NONE 0x0
+/* enum: Let firmware choose a supported loopback mode */
+#define MC_CMD_LOOPBACK_V2_AUTO 0x1
+/* enum: Loopback after the MAC */
+#define MC_CMD_LOOPBACK_V2_POST_MAC 0x2
+/* enum: Loopback after the PCS */
+#define MC_CMD_LOOPBACK_V2_POST_PCS 0x3
+/* enum: Loopback after the PMA */
+#define MC_CMD_LOOPBACK_V2_POST_PMA 0x4
+/* enum: Loopback after the MDI Wireside */
+#define MC_CMD_LOOPBACK_V2_POST_MDI_WS 0x5
+/* enum: Loopback after the PMA Wireside */
+#define MC_CMD_LOOPBACK_V2_POST_PMA_WS 0x6
+/* enum: Loopback after the PCS Wireside */
+#define MC_CMD_LOOPBACK_V2_POST_PCS_WS 0x7
+/* enum: Loopback after the MAC Wireside */
+#define MC_CMD_LOOPBACK_V2_POST_MAC_WS 0x8
+/* enum: Loopback after the MAC FIFOs (before the MAC) */
+#define MC_CMD_LOOPBACK_V2_PRE_MAC 0x9
+#define MC_CMD_LOOPBACK_V2_MODE_LBN 0
+#define MC_CMD_LOOPBACK_V2_MODE_WIDTH 32
+
+/* MC_CMD_FCNTL structuredef */
+#define MC_CMD_FCNTL_LEN 4
+#define MC_CMD_FCNTL_MASK_OFST 0
+#define MC_CMD_FCNTL_MASK_LEN 4
+/* enum: Flow control is off. */
+#define MC_CMD_FCNTL_OFF 0x0
+/* enum: Respond to flow control. */
+#define MC_CMD_FCNTL_RESPOND 0x1
+/* enum: Respond to and Issue flow control. */
+#define MC_CMD_FCNTL_BIDIR 0x2
+/* enum: Auto negotiate flow control. */
+#define MC_CMD_FCNTL_AUTO 0x3
+/* enum: Priority flow control. This is only supported on KSB. */
+#define MC_CMD_FCNTL_QBB 0x4
+/* enum: Issue flow control. */
+#define MC_CMD_FCNTL_GENERATE 0x5
+#define MC_CMD_FCNTL_MASK_LBN 0
+#define MC_CMD_FCNTL_MASK_WIDTH 32
+
+/* MC_CMD_LINK_FLAGS structuredef */
+#define MC_CMD_LINK_FLAGS_LEN 4
+/* The enums defined in this field are used as indices into the
+ * MC_CMD_LINK_FLAGS bitmask.
+ */
+#define MC_CMD_LINK_FLAGS_MASK_OFST 0
+#define MC_CMD_LINK_FLAGS_MASK_LEN 4
+/* enum property: bitshift */
+/* enum: Enable auto-negotiation. If AN is enabled, link technology and FEC
+ * mode are determined by advertised capabilities and requested FEC modes,
+ * combined with link partner capabilities. If AN is disabled, link technology
+ * is forced to LINK_TECHNOLOGY and FEC mode is forced to FEC_MODE. Not valid
+ * if loopback is enabled
+ */
+#define MC_CMD_LINK_FLAGS_AUTONEG_EN 0x0
+/* enum: Enable parallel detect. In addition to AN, try to sense partner forced
+ * speed/FEC mode (when partner AN disabled). Only valid if AN is enabled.
+ */
+#define MC_CMD_LINK_FLAGS_PARALLEL_DETECT_EN 0x1
+/* enum: Force link down, in electrical idle. */
+#define MC_CMD_LINK_FLAGS_LINK_DISABLE 0x2
+/* enum: Ignore the sequence number and always apply. */
+#define MC_CMD_LINK_FLAGS_IGNORE_MODULE_SEQ 0x3
+#define MC_CMD_LINK_FLAGS_MASK_LBN 0
+#define MC_CMD_LINK_FLAGS_MASK_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_LINK_CTRL
+ * Write the unified MAC/PHY link configuration. Locks required: None. Return
+ * code: 0, EINVAL, ETIME, EAGAIN
+ */
+#define MC_CMD_LINK_CTRL 0x6b
+#undef MC_CMD_0x6b_PRIVILEGE_CTG
+
+#define MC_CMD_0x6b_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_LINK_CTRL_IN msgrequest */
+#define MC_CMD_LINK_CTRL_IN_LEN 40
+/* Handle to the port to set link state for. */
+#define MC_CMD_LINK_CTRL_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_LINK_CTRL_IN_PORT_HANDLE_LEN 4
+/* Control flags */
+#define MC_CMD_LINK_CTRL_IN_CONTROL_FLAGS_OFST 4
+#define MC_CMD_LINK_CTRL_IN_CONTROL_FLAGS_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_LINK_FLAGS/MASK */
+/* Reserved for future expansion, and included to provide padding for alignment
+ * purposes.
+ */
+#define MC_CMD_LINK_CTRL_IN_RESERVED_OFST 8
+#define MC_CMD_LINK_CTRL_IN_RESERVED_LEN 8
+#define MC_CMD_LINK_CTRL_IN_RESERVED_LO_OFST 8
+#define MC_CMD_LINK_CTRL_IN_RESERVED_LO_LEN 4
+#define MC_CMD_LINK_CTRL_IN_RESERVED_LO_LBN 64
+#define MC_CMD_LINK_CTRL_IN_RESERVED_LO_WIDTH 32
+#define MC_CMD_LINK_CTRL_IN_RESERVED_HI_OFST 12
+#define MC_CMD_LINK_CTRL_IN_RESERVED_HI_LEN 4
+#define MC_CMD_LINK_CTRL_IN_RESERVED_HI_LBN 96
+#define MC_CMD_LINK_CTRL_IN_RESERVED_HI_WIDTH 32
+/* Technology abilities to advertise during auto-negotiation */
+#define MC_CMD_LINK_CTRL_IN_ADVERTISED_TECH_ABILITIES_MASK_OFST 16
+#define MC_CMD_LINK_CTRL_IN_ADVERTISED_TECH_ABILITIES_MASK_LEN 16
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_ETH_TECH/TECH */
+/* Pause abilities to advertise during auto-negotiation. Valid when auto-
+ * negotation is enabled and MC_CMD_SET_MAC_IN/FCTL is set to
+ * MC_CMD_FCNTL_AUTO. If auto-negotiation is disabled the driver must
+ * explicitly configure pause mode with MC_CMD_SET_MAC.
+ */
+#define MC_CMD_LINK_CTRL_IN_ADVERTISED_PAUSE_ABILITIES_MASK_OFST 32
+#define MC_CMD_LINK_CTRL_IN_ADVERTISED_PAUSE_ABILITIES_MASK_LEN 1
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_PAUSE_MODE/TYPE */
+/* When auto-negotiation is enabled, this is the FEC mode to request. Note that
+ * a weaker FEC mode may get negotiated, depending on what the link partner
+ * supports. The driver should subsequently use MC_CMD_GET_LINK to check the
+ * actual negotiated FEC mode. When auto-negotiation is disabled, this is the
+ * forced FEC mode.
+ */
+#define MC_CMD_LINK_CTRL_IN_FEC_MODE_OFST 33
+#define MC_CMD_LINK_CTRL_IN_FEC_MODE_LEN 1
+/* enum property: value */
+/* Enum values, see field(s): */
+/* FEC_TYPE/TYPE */
+/* This is only to be used when auto-negotiation is disabled (forced speed or
+ * loopback mode). If the specified value does not align with the values
+ * defined in the enum MC_CMD_ETH_TECH/TECH, it is considered invalid.
+ */
+#define MC_CMD_LINK_CTRL_IN_LINK_TECHNOLOGY_OFST 36
+#define MC_CMD_LINK_CTRL_IN_LINK_TECHNOLOGY_LEN 2
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_ETH_TECH/TECH */
+/* The sequence number of the last MODULECHANGE event. If this doesn't match,
+ * fail with EAGAIN.
+ */
+#define MC_CMD_LINK_CTRL_IN_MODULE_SEQ_OFST 38
+#define MC_CMD_LINK_CTRL_IN_MODULE_SEQ_LEN 1
+/* Loopback Mode. Only valid when auto-negotiation is disabled. */
+#define MC_CMD_LINK_CTRL_IN_LOOPBACK_OFST 39
+#define MC_CMD_LINK_CTRL_IN_LOOPBACK_LEN 1
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_LOOPBACK_V2/MODE */
+
+/* MC_CMD_LINK_CTRL_OUT msgresponse */
+#define MC_CMD_LINK_CTRL_OUT_LEN 0
+
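/* Illustrative sketch (editorial, not part of the MCDI definitions):
 * populating an MC_CMD_LINK_CTRL_IN request for an auto-negotiated link.
 * put_le32() and eth_tech_mask_set() are the hypothetical helpers from the
 * earlier sketches; a real driver would use its own MCDI buffer macros.
 */
#include <stdint.h>
#include <string.h>

static void build_link_ctrl_autoneg(uint8_t *buf, uint32_t port_handle,
				    uint8_t module_seq)
{
	memset(buf, 0, MC_CMD_LINK_CTRL_IN_LEN);
	put_le32(buf, MC_CMD_LINK_CTRL_IN_PORT_HANDLE_OFST, port_handle);
	/* CONTROL_FLAGS is a bitmask indexed by the MC_CMD_LINK_FLAGS enums. */
	put_le32(buf, MC_CMD_LINK_CTRL_IN_CONTROL_FLAGS_OFST,
		 1u << MC_CMD_LINK_FLAGS_AUTONEG_EN);
	/* Advertise a couple of technologies in the 128-bit mask at offset 16. */
	eth_tech_mask_set(buf + MC_CMD_LINK_CTRL_IN_ADVERTISED_TECH_ABILITIES_MASK_OFST,
			  MC_CMD_ETH_TECH_100GBASE_CR4);
	eth_tech_mask_set(buf + MC_CMD_LINK_CTRL_IN_ADVERTISED_TECH_ABILITIES_MASK_OFST,
			  MC_CMD_ETH_TECH_25GBASE_CR);
	/* Advertise symmetric pause and request RS-FEC (FEC_MODE is a value,
	 * not a bitmask, when auto-negotiation is enabled).
	 */
	buf[MC_CMD_LINK_CTRL_IN_ADVERTISED_PAUSE_ABILITIES_MASK_OFST] =
		1u << MC_CMD_PAUSE_MODE_AN_PAUSE;
	buf[MC_CMD_LINK_CTRL_IN_FEC_MODE_OFST] = MC_CMD_FEC_RS;
	/* LINK_TECHNOLOGY and LOOPBACK stay zero: both are only meaningful
	 * with auto-negotiation disabled. Echo the last MODULECHANGE sequence
	 * number so the firmware can detect a stale request.
	 */
	buf[MC_CMD_LINK_CTRL_IN_MODULE_SEQ_OFST] = module_seq;
}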
+
+/***********************************/
+/* MC_CMD_LINK_STATE
+ */
+#define MC_CMD_LINK_STATE 0x6c
+#undef MC_CMD_0x6c_PRIVILEGE_CTG
+
+#define MC_CMD_0x6c_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_LINK_STATE_IN msgrequest */
+#define MC_CMD_LINK_STATE_IN_LEN 4
+/* Handle to the port to get link state for. */
+#define MC_CMD_LINK_STATE_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_LINK_STATE_IN_PORT_HANDLE_LEN 4
+
+/* MC_CMD_LINK_STATE_OUT msgresponse */
+#define MC_CMD_LINK_STATE_OUT_LEN 114
+/* Flags used to report the current configuration/state of the link. */
+#define MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_OFST 0
+#define MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_LEN 8
+#define MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_LO_OFST 0
+#define MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_LO_LEN 4
+#define MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_LO_LBN 0
+#define MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_LO_WIDTH 32
+#define MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_HI_OFST 4
+#define MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_HI_LEN 4
+#define MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_HI_LBN 32
+#define MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_HI_WIDTH 32
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_LINK_STATUS_FLAGS/STATUS_FLAGS */
+/* Configured technology. If the specified value does not align with the values
+ * defined in the enum MC_CMD_ETH_TECH/TECH, it is considered invalid.
+ */
+#define MC_CMD_LINK_STATE_OUT_LINK_TECHNOLOGY_OFST 8
+#define MC_CMD_LINK_STATE_OUT_LINK_TECHNOLOGY_LEN 2
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_ETH_TECH/TECH */
+/* Configured FEC mode */
+#define MC_CMD_LINK_STATE_OUT_FEC_MODE_OFST 10
+#define MC_CMD_LINK_STATE_OUT_FEC_MODE_LEN 1
+/* enum property: value */
+/* Enum values, see field(s): */
+/* FEC_TYPE/TYPE */
+/* Bitmask of auto-negotiated pause modes */
+#define MC_CMD_LINK_STATE_OUT_PAUSE_MASK_OFST 11
+#define MC_CMD_LINK_STATE_OUT_PAUSE_MASK_LEN 1
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_PAUSE_MODE/TYPE */
+/* Configured loopback mode */
+#define MC_CMD_LINK_STATE_OUT_LOOPBACK_OFST 12
+#define MC_CMD_LINK_STATE_OUT_LOOPBACK_LEN 1
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_LOOPBACK_V2/MODE */
+/* Abilities requested by the driver to advertise during auto-negotiation */
+#define MC_CMD_LINK_STATE_OUT_ADVERTISED_ABILITIES_OFST 16
+#define MC_CMD_LINK_STATE_OUT_ADVERTISED_ABILITIES_LEN 32
+/* See structuredef: MC_CMD_ETH_AN_FIELDS */
+#define MC_CMD_LINK_STATE_OUT_ADVERTISED_ABILITIES_TECH_MASK_OFST 16
+#define MC_CMD_LINK_STATE_OUT_ADVERTISED_ABILITIES_TECH_MASK_LEN 16
+#define MC_CMD_LINK_STATE_OUT_ADVERTISED_ABILITIES_FEC_MASK_OFST 32
+#define MC_CMD_LINK_STATE_OUT_ADVERTISED_ABILITIES_FEC_MASK_LEN 4
+#define MC_CMD_LINK_STATE_OUT_ADVERTISED_ABILITIES_FEC_REQ_OFST 36
+#define MC_CMD_LINK_STATE_OUT_ADVERTISED_ABILITIES_FEC_REQ_LEN 4
+#define MC_CMD_LINK_STATE_OUT_ADVERTISED_ABILITIES_PAUSE_MASK_OFST 40
+#define MC_CMD_LINK_STATE_OUT_ADVERTISED_ABILITIES_PAUSE_MASK_LEN 1
+/* Abilities advertised by the link partner during auto-negotiation */
+#define MC_CMD_LINK_STATE_OUT_LINK_PARTNER_ABILITIES_OFST 48
+#define MC_CMD_LINK_STATE_OUT_LINK_PARTNER_ABILITIES_LEN 32
+/* See structuredef: MC_CMD_ETH_AN_FIELDS */
+#define MC_CMD_LINK_STATE_OUT_LINK_PARTNER_ABILITIES_TECH_MASK_OFST 48
+#define MC_CMD_LINK_STATE_OUT_LINK_PARTNER_ABILITIES_TECH_MASK_LEN 16
+#define MC_CMD_LINK_STATE_OUT_LINK_PARTNER_ABILITIES_FEC_MASK_OFST 64
+#define MC_CMD_LINK_STATE_OUT_LINK_PARTNER_ABILITIES_FEC_MASK_LEN 4
+#define MC_CMD_LINK_STATE_OUT_LINK_PARTNER_ABILITIES_FEC_REQ_OFST 68
+#define MC_CMD_LINK_STATE_OUT_LINK_PARTNER_ABILITIES_FEC_REQ_LEN 4
+#define MC_CMD_LINK_STATE_OUT_LINK_PARTNER_ABILITIES_PAUSE_MASK_OFST 72
+#define MC_CMD_LINK_STATE_OUT_LINK_PARTNER_ABILITIES_PAUSE_MASK_LEN 1
+/* Abilities supported by the local device (including cable abilities). For
+ * fixed local device capabilities see MC_CMD_GET_LOCAL_DEVICE_INFO.
+ */
+#define MC_CMD_LINK_STATE_OUT_SUPPORTED_ABILITIES_OFST 80
+#define MC_CMD_LINK_STATE_OUT_SUPPORTED_ABILITIES_LEN 28
+/* See structuredef: MC_CMD_ETH_AN_FIELDS */
+#define MC_CMD_LINK_STATE_OUT_SUPPORTED_ABILITIES_TECH_MASK_OFST 80
+#define MC_CMD_LINK_STATE_OUT_SUPPORTED_ABILITIES_TECH_MASK_LEN 16
+#define MC_CMD_LINK_STATE_OUT_SUPPORTED_ABILITIES_FEC_MASK_OFST 96
+#define MC_CMD_LINK_STATE_OUT_SUPPORTED_ABILITIES_FEC_MASK_LEN 4
+#define MC_CMD_LINK_STATE_OUT_SUPPORTED_ABILITIES_FEC_REQ_OFST 100
+#define MC_CMD_LINK_STATE_OUT_SUPPORTED_ABILITIES_FEC_REQ_LEN 4
+#define MC_CMD_LINK_STATE_OUT_SUPPORTED_ABILITIES_PAUSE_MASK_OFST 104
+#define MC_CMD_LINK_STATE_OUT_SUPPORTED_ABILITIES_PAUSE_MASK_LEN 1
+/* Control flags */
+#define MC_CMD_LINK_STATE_OUT_CONTROL_FLAGS_OFST 108
+#define MC_CMD_LINK_STATE_OUT_CONTROL_FLAGS_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_LINK_FLAGS/MASK */
+/* Sequence number to synchronize link change events */
+#define MC_CMD_LINK_STATE_OUT_PORT_LINKCHANGE_SEQ_NUM_OFST 112
+#define MC_CMD_LINK_STATE_OUT_PORT_LINKCHANGE_SEQ_NUM_LEN 1
+/* Sequence number to synchronize module change events */
+#define MC_CMD_LINK_STATE_OUT_PORT_MODULECHANGE_SEQ_NUM_OFST 113
+#define MC_CMD_LINK_STATE_OUT_PORT_MODULECHANGE_SEQ_NUM_LEN 1
+
+/* MC_CMD_LINK_STATE_OUT_V2 msgresponse: Updated LINK_STATE_OUT with
+ * LOCAL_AN_SUPPORT
+ */
+#define MC_CMD_LINK_STATE_OUT_V2_LEN 120
+/* Flags used to report the current configuration/state of the link. */
+#define MC_CMD_LINK_STATE_OUT_V2_STATUS_FLAGS_OFST 0
+#define MC_CMD_LINK_STATE_OUT_V2_STATUS_FLAGS_LEN 8
+#define MC_CMD_LINK_STATE_OUT_V2_STATUS_FLAGS_LO_OFST 0
+#define MC_CMD_LINK_STATE_OUT_V2_STATUS_FLAGS_LO_LEN 4
+#define MC_CMD_LINK_STATE_OUT_V2_STATUS_FLAGS_LO_LBN 0
+#define MC_CMD_LINK_STATE_OUT_V2_STATUS_FLAGS_LO_WIDTH 32
+#define MC_CMD_LINK_STATE_OUT_V2_STATUS_FLAGS_HI_OFST 4
+#define MC_CMD_LINK_STATE_OUT_V2_STATUS_FLAGS_HI_LEN 4
+#define MC_CMD_LINK_STATE_OUT_V2_STATUS_FLAGS_HI_LBN 32
+#define MC_CMD_LINK_STATE_OUT_V2_STATUS_FLAGS_HI_WIDTH 32
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_LINK_STATUS_FLAGS/STATUS_FLAGS */
+/* Configured technology. If the specified value does not align with the values
+ * defined in the enum MC_CMD_ETH_TECH/TECH, it is considered invalid.
+ */
+#define MC_CMD_LINK_STATE_OUT_V2_LINK_TECHNOLOGY_OFST 8
+#define MC_CMD_LINK_STATE_OUT_V2_LINK_TECHNOLOGY_LEN 2
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_ETH_TECH/TECH */
+/* Configured FEC mode */
+#define MC_CMD_LINK_STATE_OUT_V2_FEC_MODE_OFST 10
+#define MC_CMD_LINK_STATE_OUT_V2_FEC_MODE_LEN 1
+/* enum property: value */
+/* Enum values, see field(s): */
+/* FEC_TYPE/TYPE */
+/* Bitmask of auto-negotiated pause modes */
+#define MC_CMD_LINK_STATE_OUT_V2_PAUSE_MASK_OFST 11
+#define MC_CMD_LINK_STATE_OUT_V2_PAUSE_MASK_LEN 1
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_PAUSE_MODE/TYPE */
+/* Configured loopback mode */
+#define MC_CMD_LINK_STATE_OUT_V2_LOOPBACK_OFST 12
+#define MC_CMD_LINK_STATE_OUT_V2_LOOPBACK_LEN 1
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_LOOPBACK_V2/MODE */
+/* Abilities requested by the driver to advertise during auto-negotiation */
+#define MC_CMD_LINK_STATE_OUT_V2_ADVERTISED_ABILITIES_OFST 16
+#define MC_CMD_LINK_STATE_OUT_V2_ADVERTISED_ABILITIES_LEN 32
+/* Abilities advertised by the link partner during auto-negotiation */
+#define MC_CMD_LINK_STATE_OUT_V2_LINK_PARTNER_ABILITIES_OFST 48
+#define MC_CMD_LINK_STATE_OUT_V2_LINK_PARTNER_ABILITIES_LEN 32
+/* Abilities supported by the local device (including cable abilities). For
+ * fixed local device capabilities see MC_CMD_GET_LOCAL_DEVICE_INFO.
+ */
+#define MC_CMD_LINK_STATE_OUT_V2_SUPPORTED_ABILITIES_OFST 80
+#define MC_CMD_LINK_STATE_OUT_V2_SUPPORTED_ABILITIES_LEN 28
+/* Control flags */
+#define MC_CMD_LINK_STATE_OUT_V2_CONTROL_FLAGS_OFST 108
+#define MC_CMD_LINK_STATE_OUT_V2_CONTROL_FLAGS_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_LINK_FLAGS/MASK */
+/* Sequence number to synchronize link change events */
+#define MC_CMD_LINK_STATE_OUT_V2_PORT_LINKCHANGE_SEQ_NUM_OFST 112
+#define MC_CMD_LINK_STATE_OUT_V2_PORT_LINKCHANGE_SEQ_NUM_LEN 1
+/* Sequence number to synchronize module change events */
+#define MC_CMD_LINK_STATE_OUT_V2_PORT_MODULECHANGE_SEQ_NUM_OFST 113
+#define MC_CMD_LINK_STATE_OUT_V2_PORT_MODULECHANGE_SEQ_NUM_LEN 1
+/* Reports the auto-negotiation supported by the local device. This depends on
+ * the port and module properties.
+ */
+#define MC_CMD_LINK_STATE_OUT_V2_LOCAL_AN_SUPPORT_OFST 116
+#define MC_CMD_LINK_STATE_OUT_V2_LOCAL_AN_SUPPORT_LEN 4
+/* Enum values, see field(s): */
+/* AN_TYPE/TYPE */
+
+/* MC_CMD_LINK_STATE_OUT_V3 msgresponse: Updated LINK_STATE_OUT_V2 for explicit
+ * reporting of the link speed and duplex mode.
+ */
+#define MC_CMD_LINK_STATE_OUT_V3_LEN 128
+/* Flags used to report the current configuration/state of the link. */
+#define MC_CMD_LINK_STATE_OUT_V3_STATUS_FLAGS_OFST 0
+#define MC_CMD_LINK_STATE_OUT_V3_STATUS_FLAGS_LEN 8
+#define MC_CMD_LINK_STATE_OUT_V3_STATUS_FLAGS_LO_OFST 0
+#define MC_CMD_LINK_STATE_OUT_V3_STATUS_FLAGS_LO_LEN 4
+#define MC_CMD_LINK_STATE_OUT_V3_STATUS_FLAGS_LO_LBN 0
+#define MC_CMD_LINK_STATE_OUT_V3_STATUS_FLAGS_LO_WIDTH 32
+#define MC_CMD_LINK_STATE_OUT_V3_STATUS_FLAGS_HI_OFST 4
+#define MC_CMD_LINK_STATE_OUT_V3_STATUS_FLAGS_HI_LEN 4
+#define MC_CMD_LINK_STATE_OUT_V3_STATUS_FLAGS_HI_LBN 32
+#define MC_CMD_LINK_STATE_OUT_V3_STATUS_FLAGS_HI_WIDTH 32
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_LINK_STATUS_FLAGS/STATUS_FLAGS */
+/* Configured technology. If the specified value does not align with the values
+ * defined in the enum MC_CMD_ETH_TECH/TECH, it is considered invalid.
+ */
+#define MC_CMD_LINK_STATE_OUT_V3_LINK_TECHNOLOGY_OFST 8
+#define MC_CMD_LINK_STATE_OUT_V3_LINK_TECHNOLOGY_LEN 2
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_ETH_TECH/TECH */
+/* Configured FEC mode */
+#define MC_CMD_LINK_STATE_OUT_V3_FEC_MODE_OFST 10
+#define MC_CMD_LINK_STATE_OUT_V3_FEC_MODE_LEN 1
+/* enum property: value */
+/* Enum values, see field(s): */
+/* FEC_TYPE/TYPE */
+/* Bitmask of auto-negotiated pause modes */
+#define MC_CMD_LINK_STATE_OUT_V3_PAUSE_MASK_OFST 11
+#define MC_CMD_LINK_STATE_OUT_V3_PAUSE_MASK_LEN 1
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_PAUSE_MODE/TYPE */
+/* Configured loopback mode */
+#define MC_CMD_LINK_STATE_OUT_V3_LOOPBACK_OFST 12
+#define MC_CMD_LINK_STATE_OUT_V3_LOOPBACK_LEN 1
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_LOOPBACK_V2/MODE */
+/* Abilities requested by the driver to advertise during auto-negotiation */
+#define MC_CMD_LINK_STATE_OUT_V3_ADVERTISED_ABILITIES_OFST 16
+#define MC_CMD_LINK_STATE_OUT_V3_ADVERTISED_ABILITIES_LEN 32
+/* Abilities advertised by the link partner during auto-negotiation */
+#define MC_CMD_LINK_STATE_OUT_V3_LINK_PARTNER_ABILITIES_OFST 48
+#define MC_CMD_LINK_STATE_OUT_V3_LINK_PARTNER_ABILITIES_LEN 32
+/* Abilities supported by the local device (including cable abilities). For
+ * fixed local device capabilities see MC_CMD_GET_LOCAL_DEVICE_INFO.
+ */
+#define MC_CMD_LINK_STATE_OUT_V3_SUPPORTED_ABILITIES_OFST 80
+#define MC_CMD_LINK_STATE_OUT_V3_SUPPORTED_ABILITIES_LEN 28
+/* Control flags */
+#define MC_CMD_LINK_STATE_OUT_V3_CONTROL_FLAGS_OFST 108
+#define MC_CMD_LINK_STATE_OUT_V3_CONTROL_FLAGS_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_LINK_FLAGS/MASK */
+/* Sequence number to synchronize link change events */
+#define MC_CMD_LINK_STATE_OUT_V3_PORT_LINKCHANGE_SEQ_NUM_OFST 112
+#define MC_CMD_LINK_STATE_OUT_V3_PORT_LINKCHANGE_SEQ_NUM_LEN 1
+/* Sequence number to synchronize module change events */
+#define MC_CMD_LINK_STATE_OUT_V3_PORT_MODULECHANGE_SEQ_NUM_OFST 113
+#define MC_CMD_LINK_STATE_OUT_V3_PORT_MODULECHANGE_SEQ_NUM_LEN 1
+/* Reports the auto-negotiation supported by the local device. This depends on
+ * the port and module properties.
+ */
+#define MC_CMD_LINK_STATE_OUT_V3_LOCAL_AN_SUPPORT_OFST 116
+#define MC_CMD_LINK_STATE_OUT_V3_LOCAL_AN_SUPPORT_LEN 4
+/* Enum values, see field(s): */
+/* AN_TYPE/TYPE */
+/* Auto-negotiated speed in Mbit/s. The link may still be down even if this
+ * reads non-zero. The LINK_SPEED field is intended for drivers without the
+ * most up-to-date MCDI definitions, which cannot deduce the link speed from
+ * the reported LINK_TECHNOLOGY field.
+ */
+#define MC_CMD_LINK_STATE_OUT_V3_LINK_SPEED_OFST 120
+#define MC_CMD_LINK_STATE_OUT_V3_LINK_SPEED_LEN 4
+#define MC_CMD_LINK_STATE_OUT_V3_FLAGS_OFST 124
+#define MC_CMD_LINK_STATE_OUT_V3_FLAGS_LEN 4
+#define MC_CMD_LINK_STATE_OUT_V3_FULL_DUPLEX_OFST 124
+#define MC_CMD_LINK_STATE_OUT_V3_FULL_DUPLEX_LBN 0
+#define MC_CMD_LINK_STATE_OUT_V3_FULL_DUPLEX_WIDTH 1
+
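/* Illustrative sketch (editorial, not part of the MCDI definitions):
 * extracting the explicit speed and duplex reported by
 * MC_CMD_LINK_STATE_OUT_V3, for drivers that cannot map LINK_TECHNOLOGY to a
 * speed themselves. get_le32() is the hypothetical little-endian reader from
 * the earlier sketch.
 */
#include <stdbool.h>
#include <stdint.h>

struct link_speed_duplex {
	uint32_t speed_mbps;	/* 0 if no speed has been negotiated yet */
	bool full_duplex;
};

static struct link_speed_duplex parse_link_state_v3_speed(const uint8_t *outbuf)
{
	struct link_speed_duplex res;
	uint32_t flags = get_le32(outbuf, MC_CMD_LINK_STATE_OUT_V3_FLAGS_OFST);

	res.speed_mbps = get_le32(outbuf, MC_CMD_LINK_STATE_OUT_V3_LINK_SPEED_OFST);
	res.full_duplex = flags & (1u << MC_CMD_LINK_STATE_OUT_V3_FULL_DUPLEX_LBN);
	return res;
}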
/***********************************/
/* MC_CMD_GET_LINK
@@ -5694,6 +6064,54 @@
/* MC_CMD_GET_LINK_IN msgrequest */
#define MC_CMD_GET_LINK_IN_LEN 0
+/* MC_CMD_GET_LINK_IN_V2 msgrequest */
+#define MC_CMD_GET_LINK_IN_V2_LEN 8
+/* Target port to request link state for. Uses MAE_LINK_ENDPOINT_SELECTOR which
+ * identifies a real or virtual network port by MAE port and link end. See the
+ * structure definition for more details.
+ */
+#define MC_CMD_GET_LINK_IN_V2_TARGET_OFST 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_LEN 8
+#define MC_CMD_GET_LINK_IN_V2_TARGET_LO_OFST 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_LO_LEN 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_LO_LBN 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_LO_WIDTH 32
+#define MC_CMD_GET_LINK_IN_V2_TARGET_HI_OFST 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_HI_LEN 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_HI_LBN 32
+#define MC_CMD_GET_LINK_IN_V2_TARGET_HI_WIDTH 32
+/* See structuredef: MAE_LINK_ENDPOINT_SELECTOR */
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_OFST 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_LEN 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FLAT_OFST 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FLAT_LEN 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_TYPE_OFST 3
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_TYPE_LEN 1
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_OFST 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_LEN 3
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_LBN 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_WIDTH 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_LBN 20
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_WIDTH 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_LBN 16
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_WIDTH 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_OFST 2
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_LEN 1
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_OFST 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_LEN 2
+#define MC_CMD_GET_LINK_IN_V2_TARGET_LINK_END_OFST 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_LINK_END_LEN 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_OFST 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_LEN 8
+#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_LO_OFST 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_LO_LEN 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_LO_LBN 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_LO_WIDTH 32
+#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_HI_OFST 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_HI_LEN 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_HI_LBN 32
+#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_HI_WIDTH 32
+
/* MC_CMD_GET_LINK_OUT msgresponse */
#define MC_CMD_GET_LINK_OUT_LEN 28
/* Near-side advertised capabilities. Refer to
@@ -5745,6 +6163,7 @@
/* This returns the negotiated flow control value. */
#define MC_CMD_GET_LINK_OUT_FCNTL_OFST 20
#define MC_CMD_GET_LINK_OUT_FCNTL_LEN 4
+/* enum property: value */
/* Enum values, see field(s): */
/* MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */
#define MC_CMD_GET_LINK_OUT_MAC_FAULT_OFST 24
@@ -5813,6 +6232,7 @@
/* This returns the negotiated flow control value. */
#define MC_CMD_GET_LINK_OUT_V2_FCNTL_OFST 20
#define MC_CMD_GET_LINK_OUT_V2_FCNTL_LEN 4
+/* enum property: value */
/* Enum values, see field(s): */
/* MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */
#define MC_CMD_GET_LINK_OUT_V2_MAC_FAULT_OFST 24
@@ -5969,6 +6389,95 @@
#define MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_IGNORE_LBN 7
#define MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_IGNORE_WIDTH 1
+/* MC_CMD_SET_LINK_IN_V3 msgrequest */
+#define MC_CMD_SET_LINK_IN_V3_LEN 28
+/* Near-side advertised capabilities. Refer to
+ * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions.
+ */
+#define MC_CMD_SET_LINK_IN_V3_CAP_OFST 0
+#define MC_CMD_SET_LINK_IN_V3_CAP_LEN 4
+/* Flags */
+#define MC_CMD_SET_LINK_IN_V3_FLAGS_OFST 4
+#define MC_CMD_SET_LINK_IN_V3_FLAGS_LEN 4
+#define MC_CMD_SET_LINK_IN_V3_LOWPOWER_OFST 4
+#define MC_CMD_SET_LINK_IN_V3_LOWPOWER_LBN 0
+#define MC_CMD_SET_LINK_IN_V3_LOWPOWER_WIDTH 1
+#define MC_CMD_SET_LINK_IN_V3_POWEROFF_OFST 4
+#define MC_CMD_SET_LINK_IN_V3_POWEROFF_LBN 1
+#define MC_CMD_SET_LINK_IN_V3_POWEROFF_WIDTH 1
+#define MC_CMD_SET_LINK_IN_V3_TXDIS_OFST 4
+#define MC_CMD_SET_LINK_IN_V3_TXDIS_LBN 2
+#define MC_CMD_SET_LINK_IN_V3_TXDIS_WIDTH 1
+#define MC_CMD_SET_LINK_IN_V3_LINKDOWN_OFST 4
+#define MC_CMD_SET_LINK_IN_V3_LINKDOWN_LBN 3
+#define MC_CMD_SET_LINK_IN_V3_LINKDOWN_WIDTH 1
+/* Loopback mode. */
+#define MC_CMD_SET_LINK_IN_V3_LOOPBACK_MODE_OFST 8
+#define MC_CMD_SET_LINK_IN_V3_LOOPBACK_MODE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
+/* A loopback speed of "0" is supported, and means (choose any available
+ * speed).
+ */
+#define MC_CMD_SET_LINK_IN_V3_LOOPBACK_SPEED_OFST 12
+#define MC_CMD_SET_LINK_IN_V3_LOOPBACK_SPEED_LEN 4
+#define MC_CMD_SET_LINK_IN_V3_MODULE_SEQ_OFST 16
+#define MC_CMD_SET_LINK_IN_V3_MODULE_SEQ_LEN 1
+#define MC_CMD_SET_LINK_IN_V3_MODULE_SEQ_NUMBER_OFST 16
+#define MC_CMD_SET_LINK_IN_V3_MODULE_SEQ_NUMBER_LBN 0
+#define MC_CMD_SET_LINK_IN_V3_MODULE_SEQ_NUMBER_WIDTH 7
+#define MC_CMD_SET_LINK_IN_V3_MODULE_SEQ_IGNORE_OFST 16
+#define MC_CMD_SET_LINK_IN_V3_MODULE_SEQ_IGNORE_LBN 7
+#define MC_CMD_SET_LINK_IN_V3_MODULE_SEQ_IGNORE_WIDTH 1
+/* Padding */
+#define MC_CMD_SET_LINK_IN_V3_RESERVED_OFST 17
+#define MC_CMD_SET_LINK_IN_V3_RESERVED_LEN 3
+/* Target port to set link state for. Uses MAE_LINK_ENDPOINT_SELECTOR which
+ * identifies a real or virtual network port by MAE port and link end. See the
+ * structure definition for more details.
+ */
+#define MC_CMD_SET_LINK_IN_V3_TARGET_OFST 20
+#define MC_CMD_SET_LINK_IN_V3_TARGET_LEN 8
+#define MC_CMD_SET_LINK_IN_V3_TARGET_LO_OFST 20
+#define MC_CMD_SET_LINK_IN_V3_TARGET_LO_LEN 4
+#define MC_CMD_SET_LINK_IN_V3_TARGET_LO_LBN 160
+#define MC_CMD_SET_LINK_IN_V3_TARGET_LO_WIDTH 32
+#define MC_CMD_SET_LINK_IN_V3_TARGET_HI_OFST 24
+#define MC_CMD_SET_LINK_IN_V3_TARGET_HI_LEN 4
+#define MC_CMD_SET_LINK_IN_V3_TARGET_HI_LBN 192
+#define MC_CMD_SET_LINK_IN_V3_TARGET_HI_WIDTH 32
+/* See structuredef: MAE_LINK_ENDPOINT_SELECTOR */
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_OFST 20
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_LEN 4
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FLAT_OFST 20
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FLAT_LEN 4
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_TYPE_OFST 23
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_TYPE_LEN 1
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_MPORT_ID_OFST 20
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_MPORT_ID_LEN 3
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_PPORT_ID_LBN 160
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_PPORT_ID_WIDTH 4
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_LBN 180
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_WIDTH 4
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_LBN 176
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_WIDTH 4
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FUNC_PF_ID_OFST 22
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FUNC_PF_ID_LEN 1
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FUNC_VF_ID_OFST 20
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FUNC_VF_ID_LEN 2
+#define MC_CMD_SET_LINK_IN_V3_TARGET_LINK_END_OFST 24
+#define MC_CMD_SET_LINK_IN_V3_TARGET_LINK_END_LEN 4
+#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_OFST 20
+#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_LEN 8
+#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_LO_OFST 20
+#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_LO_LEN 4
+#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_LO_LBN 160
+#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_LO_WIDTH 32
+#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_HI_OFST 24
+#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_HI_LEN 4
+#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_HI_LBN 192
+#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_HI_WIDTH 32
+
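The new V3 request only differs by the trailing TARGET field, which carries an MAE_LINK_ENDPOINT_SELECTOR. The sketch below is illustrative rather than driver code: it assumes the MC_CMD_SET_LINK_IN_V3_* constants above are in scope and that MCDI payload fields are encoded little-endian (as the sfc driver does); write_dword(), build_set_link_v3() and their parameters are hypothetical names.

#include <stdint.h>
#include <string.h>

/* Hypothetical helper: store a 32-bit MCDI field little-endian at OFST. */
static void write_dword(uint8_t *buf, unsigned int ofst, uint32_t val)
{
	buf[ofst + 0] = val & 0xff;
	buf[ofst + 1] = (val >> 8) & 0xff;
	buf[ofst + 2] = (val >> 16) & 0xff;
	buf[ofst + 3] = (val >> 24) & 0xff;
}

/* Fill an MC_CMD_SET_LINK_IN_V3 payload for a given endpoint. */
static void build_set_link_v3(uint8_t *buf, uint32_t cap, uint32_t flags,
			      uint32_t mport_selector, uint32_t link_end)
{
	memset(buf, 0, MC_CMD_SET_LINK_IN_V3_LEN);
	write_dword(buf, MC_CMD_SET_LINK_IN_V3_CAP_OFST, cap);
	write_dword(buf, MC_CMD_SET_LINK_IN_V3_FLAGS_OFST, flags);
	/* Loopback speed 0 asks the firmware to pick any available speed. */
	write_dword(buf, MC_CMD_SET_LINK_IN_V3_LOOPBACK_SPEED_OFST, 0);
	/* TARGET: MAE m-port selector in the low word, link end in the high word. */
	write_dword(buf, MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_OFST,
		    mport_selector);
	write_dword(buf, MC_CMD_SET_LINK_IN_V3_TARGET_LINK_END_OFST, link_end);
}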
/* MC_CMD_SET_LINK_OUT msgresponse */
#define MC_CMD_SET_LINK_OUT_LEN 0
@@ -6034,17 +6543,17 @@
#define MC_CMD_SET_MAC_IN_FCNTL_OFST 20
#define MC_CMD_SET_MAC_IN_FCNTL_LEN 4
/* enum: Flow control is off. */
-#define MC_CMD_FCNTL_OFF 0x0
+/* MC_CMD_FCNTL_OFF 0x0 */
/* enum: Respond to flow control. */
-#define MC_CMD_FCNTL_RESPOND 0x1
+/* MC_CMD_FCNTL_RESPOND 0x1 */
/* enum: Respond to and Issue flow control. */
-#define MC_CMD_FCNTL_BIDIR 0x2
-/* enum: Auto neg flow control. */
-#define MC_CMD_FCNTL_AUTO 0x3
-/* enum: Priority flow control (eftest builds only). */
-#define MC_CMD_FCNTL_QBB 0x4
+/* MC_CMD_FCNTL_BIDIR 0x2 */
+/* enum: Auto negotiate flow control. */
+/* MC_CMD_FCNTL_AUTO 0x3 */
+/* enum: Priority flow control. This is only supported on KSB. */
+/* MC_CMD_FCNTL_QBB 0x4 */
/* enum: Issue flow control. */
-#define MC_CMD_FCNTL_GENERATE 0x5
+/* MC_CMD_FCNTL_GENERATE 0x5 */
#define MC_CMD_SET_MAC_IN_FLAGS_OFST 24
#define MC_CMD_SET_MAC_IN_FLAGS_LEN 4
#define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_OFST 24
@@ -6086,9 +6595,9 @@
/* MC_CMD_FCNTL_RESPOND 0x1 */
/* enum: Respond to and Issue flow control. */
/* MC_CMD_FCNTL_BIDIR 0x2 */
-/* enum: Auto neg flow control. */
+/* enum: Auto negotiate flow control. */
/* MC_CMD_FCNTL_AUTO 0x3 */
-/* enum: Priority flow control (eftest builds only). */
+/* enum: Priority flow control. This is only supported on KSB. */
/* MC_CMD_FCNTL_QBB 0x4 */
/* enum: Issue flow control. */
/* MC_CMD_FCNTL_GENERATE 0x5 */
@@ -6155,9 +6664,9 @@
/* MC_CMD_FCNTL_RESPOND 0x1 */
/* enum: Respond to and Issue flow control. */
/* MC_CMD_FCNTL_BIDIR 0x2 */
-/* enum: Auto neg flow control. */
+/* enum: Auto negotiate flow control. */
/* MC_CMD_FCNTL_AUTO 0x3 */
-/* enum: Priority flow control (eftest builds only). */
+/* enum: Priority flow control. This is only supported on KSB. */
/* MC_CMD_FCNTL_QBB 0x4 */
/* enum: Issue flow control. */
/* MC_CMD_FCNTL_GENERATE 0x5 */
@@ -6188,19 +6697,9 @@
#define MC_CMD_SET_MAC_V3_IN_CFG_FCS_OFST 28
#define MC_CMD_SET_MAC_V3_IN_CFG_FCS_LBN 4
#define MC_CMD_SET_MAC_V3_IN_CFG_FCS_WIDTH 1
-/* Identifies the MAC to update by the specifying the end of a logical MAE
- * link. Setting TARGET to MAE_LINK_ENDPOINT_COMPAT is equivalent to using the
- * previous version of the command (MC_CMD_SET_MAC_EXT). Not all possible
- * combinations of MPORT_END and MPORT_SELECTOR in TARGET will work in all
- * circumstances. 1. Some will always work (e.g. a VF can always address its
- * logical MAC using MPORT_SELECTOR=ASSIGNED,LINK_END=VNIC), 2. Some are not
- * meaningful and will always fail with EINVAL (e.g. attempting to address the
- * VNIC end of a link to a physical port), 3. Some are meaningful but require
- * the MCDI client to have the required permission and fail with EPERM
- * otherwise (e.g. trying to set the MAC on a VF the caller cannot administer),
- * and 4. Some could be implementation-specific and fail with ENOTSUP if not
- * available (no examples exist right now). See SF-123581-TC section 4.3 for
- * more details.
+/* Target port to set MAC state for. Uses MAE_LINK_ENDPOINT_SELECTOR which
+ * identifies a real or virtual network port by MAE port and link end. See the
+ * structure definition for more details.
*/
#define MC_CMD_SET_MAC_V3_IN_TARGET_OFST 32
#define MC_CMD_SET_MAC_V3_IN_TARGET_LEN 8
@@ -6212,6 +6711,7 @@
#define MC_CMD_SET_MAC_V3_IN_TARGET_HI_LEN 4
#define MC_CMD_SET_MAC_V3_IN_TARGET_HI_LBN 288
#define MC_CMD_SET_MAC_V3_IN_TARGET_HI_WIDTH 32
+/* See structuredef: MAE_LINK_ENDPOINT_SELECTOR */
#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_OFST 32
#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_LEN 4
#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_FLAT_OFST 32
@@ -6405,6 +6905,98 @@
#define MC_CMD_MAC_STATS_IN_PORT_ID_OFST 16
#define MC_CMD_MAC_STATS_IN_PORT_ID_LEN 4
+/* MC_CMD_MAC_STATS_V2_IN msgrequest */
+#define MC_CMD_MAC_STATS_V2_IN_LEN 28
+/* DMA address of the host buffer that statistics are written to when DMA is
+ * requested.
+ */
+#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_OFST 0
+#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_LEN 8
+#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_LO_OFST 0
+#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_LO_LEN 4
+#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_LO_LBN 0
+#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_LO_WIDTH 32
+#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_HI_OFST 4
+#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_HI_LEN 4
+#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_HI_LBN 32
+#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_HI_WIDTH 32
+#define MC_CMD_MAC_STATS_V2_IN_CMD_OFST 8
+#define MC_CMD_MAC_STATS_V2_IN_CMD_LEN 4
+#define MC_CMD_MAC_STATS_V2_IN_DMA_OFST 8
+#define MC_CMD_MAC_STATS_V2_IN_DMA_LBN 0
+#define MC_CMD_MAC_STATS_V2_IN_DMA_WIDTH 1
+#define MC_CMD_MAC_STATS_V2_IN_CLEAR_OFST 8
+#define MC_CMD_MAC_STATS_V2_IN_CLEAR_LBN 1
+#define MC_CMD_MAC_STATS_V2_IN_CLEAR_WIDTH 1
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_CHANGE_OFST 8
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_CHANGE_LBN 2
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_CHANGE_WIDTH 1
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_ENABLE_OFST 8
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_ENABLE_LBN 3
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_ENABLE_WIDTH 1
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_CLEAR_OFST 8
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_CLEAR_LBN 4
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_CLEAR_WIDTH 1
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_NOEVENT_OFST 8
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_NOEVENT_LBN 5
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_NOEVENT_WIDTH 1
+#define MC_CMD_MAC_STATS_V2_IN_PERIOD_MS_OFST 8
+#define MC_CMD_MAC_STATS_V2_IN_PERIOD_MS_LBN 16
+#define MC_CMD_MAC_STATS_V2_IN_PERIOD_MS_WIDTH 16
+/* DMA length. Should be set to MAC_STATS_NUM_STATS * sizeof(uint64_t), as
+ * returned by MC_CMD_GET_CAPABILITIES_V4_OUT. For legacy firmware not
+ * supporting MC_CMD_GET_CAPABILITIES_V4_OUT, DMA_LEN should be set to
+ * MC_CMD_MAC_NSTATS * sizeof(uint64_t).
+ */
+#define MC_CMD_MAC_STATS_V2_IN_DMA_LEN_OFST 12
+#define MC_CMD_MAC_STATS_V2_IN_DMA_LEN_LEN 4
+/* Port ID so that vadapter stats can be provided. */
+#define MC_CMD_MAC_STATS_V2_IN_PORT_ID_OFST 16
+#define MC_CMD_MAC_STATS_V2_IN_PORT_ID_LEN 4
+/* Target port to request statistics for. Uses MAE_LINK_ENDPOINT_SELECTOR which
+ * identifies a real or virtual network port by MAE port and link end. See the
+ * structure definition for more details.
+ */
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_OFST 20
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_LEN 8
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_LO_OFST 20
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_LO_LEN 4
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_LO_LBN 160
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_LO_WIDTH 32
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_HI_OFST 24
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_HI_LEN 4
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_HI_LBN 192
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_HI_WIDTH 32
+/* See structuredef: MAE_LINK_ENDPOINT_SELECTOR */
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_OFST 20
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_LEN 4
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FLAT_OFST 20
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FLAT_LEN 4
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_TYPE_OFST 23
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_TYPE_LEN 1
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_MPORT_ID_OFST 20
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_MPORT_ID_LEN 3
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_PPORT_ID_LBN 160
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_PPORT_ID_WIDTH 4
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_LBN 180
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_WIDTH 4
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_LBN 176
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_WIDTH 4
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FUNC_PF_ID_OFST 22
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FUNC_PF_ID_LEN 1
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FUNC_VF_ID_OFST 20
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FUNC_VF_ID_LEN 2
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_LINK_END_OFST 24
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_LINK_END_LEN 4
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_OFST 20
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_LEN 8
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_LO_OFST 20
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_LO_LEN 4
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_LO_LBN 160
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_LO_WIDTH 32
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_HI_OFST 24
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_HI_LEN 4
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_HI_LBN 192
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_HI_WIDTH 32
+
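The CMD word at offset 8 packs the control bits described above by LBN/WIDTH, and DMA_LEN follows the rule given in the DMA length comment. A minimal sketch, assuming the MC_CMD_MAC_STATS_V2_IN_* constants above are in scope; num_stats would come from MC_CMD_GET_CAPABILITIES_V4_OUT (or default to MC_CMD_MAC_NSTATS) and is not derived here, and the helper names are illustrative.

#include <stdint.h>

/* Illustrative: compose the CMD word for periodic DMA statistics collection. */
static uint32_t mac_stats_v2_cmd(unsigned int period_ms)
{
	uint32_t cmd = 0;

	cmd |= 1u << MC_CMD_MAC_STATS_V2_IN_DMA_LBN;
	cmd |= 1u << MC_CMD_MAC_STATS_V2_IN_PERIODIC_CHANGE_LBN;
	cmd |= 1u << MC_CMD_MAC_STATS_V2_IN_PERIODIC_ENABLE_LBN;
	cmd |= (period_ms & ((1u << MC_CMD_MAC_STATS_V2_IN_PERIOD_MS_WIDTH) - 1))
	       << MC_CMD_MAC_STATS_V2_IN_PERIOD_MS_LBN;
	return cmd;
}

/* DMA_LEN: one 64-bit counter per statistic reported by the firmware. */
static uint32_t mac_stats_v2_dma_len(unsigned int num_stats)
{
	return num_stats * (uint32_t)sizeof(uint64_t);
}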
/* MC_CMD_MAC_STATS_OUT_DMA msgresponse */
#define MC_CMD_MAC_STATS_OUT_DMA_LEN 0
@@ -6421,6 +7013,7 @@
#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_HI_LBN 32
#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_HI_WIDTH 32
#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS
+/* enum property: index */
#define MC_CMD_MAC_GENERATION_START 0x0 /* enum */
#define MC_CMD_MAC_DMABUF_START 0x1 /* enum */
#define MC_CMD_MAC_TX_PKTS 0x1 /* enum */
@@ -6583,6 +7176,7 @@
#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_HI_LBN 32
#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_HI_WIDTH 32
#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V2
+/* enum property: index */
/* enum: Start of FEC stats buffer space, Medford2 and up */
#define MC_CMD_MAC_FEC_DMABUF_START 0x61
/* enum: Number of uncorrected FEC codewords on link (RS-FEC only for Medford2)
@@ -6622,6 +7216,7 @@
#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_HI_LBN 32
#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_HI_WIDTH 32
#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V3
+/* enum property: index */
/* enum: Start of CTPIO stats buffer space, Medford2 and up */
#define MC_CMD_MAC_CTPIO_DMABUF_START 0x68
/* enum: Number of CTPIO fallbacks because a DMA packet was in progress on the
@@ -6702,6 +7297,7 @@
#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_HI_LBN 32
#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_HI_WIDTH 32
#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V4
+/* enum property: index */
/* enum: Start of V4 stats buffer space */
#define MC_CMD_MAC_V4_DMABUF_START 0x79
/* enum: RXDP counter: Number of packets truncated because scattering was
@@ -6723,112 +7319,35 @@
/* Other enum values, see field(s): */
/* MC_CMD_MAC_STATS_V3_OUT_NO_DMA/STATISTICS */
-
-/***********************************/
-/* MC_CMD_SRIOV
- * to be documented
- */
-#define MC_CMD_SRIOV 0x30
-
-/* MC_CMD_SRIOV_IN msgrequest */
-#define MC_CMD_SRIOV_IN_LEN 12
-#define MC_CMD_SRIOV_IN_ENABLE_OFST 0
-#define MC_CMD_SRIOV_IN_ENABLE_LEN 4
-#define MC_CMD_SRIOV_IN_VI_BASE_OFST 4
-#define MC_CMD_SRIOV_IN_VI_BASE_LEN 4
-#define MC_CMD_SRIOV_IN_VF_COUNT_OFST 8
-#define MC_CMD_SRIOV_IN_VF_COUNT_LEN 4
-
-/* MC_CMD_SRIOV_OUT msgresponse */
-#define MC_CMD_SRIOV_OUT_LEN 8
-#define MC_CMD_SRIOV_OUT_VI_SCALE_OFST 0
-#define MC_CMD_SRIOV_OUT_VI_SCALE_LEN 4
-#define MC_CMD_SRIOV_OUT_VF_TOTAL_OFST 4
-#define MC_CMD_SRIOV_OUT_VF_TOTAL_LEN 4
-
-/* MC_CMD_MEMCPY_RECORD_TYPEDEF structuredef */
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LEN 32
-/* this is only used for the first record */
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_OFST 0
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_LEN 4
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_LBN 0
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_WIDTH 32
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_OFST 4
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_LEN 4
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_LBN 32
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_WIDTH 32
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_OFST 8
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LEN 8
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO_OFST 8
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO_LEN 4
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO_LBN 64
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO_WIDTH 32
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI_OFST 12
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI_LEN 4
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI_LBN 96
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI_WIDTH 32
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LBN 64
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_WIDTH 64
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_OFST 16
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_LEN 4
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE 0x100 /* enum */
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_LBN 128
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_WIDTH 32
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_OFST 20
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LEN 8
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO_OFST 20
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO_LEN 4
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO_LBN 160
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO_WIDTH 32
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI_OFST 24
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI_LEN 4
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI_LBN 192
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI_WIDTH 32
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LBN 160
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_WIDTH 64
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_OFST 28
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_LEN 4
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_LBN 224
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_WIDTH 32
-
-
-/***********************************/
-/* MC_CMD_MEMCPY
- * DMA write data into (Rid,Addr), either by dma reading (Rid,Addr), or by data
- * embedded directly in the command.
- *
- * A common pattern is for a client to use generation counts to signal a dma
- * update of a datastructure. To facilitate this, this MCDI operation can
- * contain multiple requests which are executed in strict order. Requests take
- * the form of duplicating the entire MCDI request continuously (including the
- * requests record, which is ignored in all but the first structure)
- *
- * The source data can either come from a DMA from the host, or it can be
- * embedded within the request directly, thereby eliminating a DMA read. To
- * indicate this, the client sets FROM_RID=%RID_INLINE, ADDR_HI=0, and
- * ADDR_LO=offset, and inserts the data at %offset from the start of the
- * payload. It's the callers responsibility to ensure that the embedded data
- * doesn't overlap the records.
- *
- * Returns: 0, EINVAL (invalid RID)
- */
-#define MC_CMD_MEMCPY 0x31
-
-/* MC_CMD_MEMCPY_IN msgrequest */
-#define MC_CMD_MEMCPY_IN_LENMIN 32
-#define MC_CMD_MEMCPY_IN_LENMAX 224
-#define MC_CMD_MEMCPY_IN_LENMAX_MCDI2 992
-#define MC_CMD_MEMCPY_IN_LEN(num) (0+32*(num))
-#define MC_CMD_MEMCPY_IN_RECORD_NUM(len) (((len)-0)/32)
-/* see MC_CMD_MEMCPY_RECORD_TYPEDEF */
-#define MC_CMD_MEMCPY_IN_RECORD_OFST 0
-#define MC_CMD_MEMCPY_IN_RECORD_LEN 32
-#define MC_CMD_MEMCPY_IN_RECORD_MINNUM 1
-#define MC_CMD_MEMCPY_IN_RECORD_MAXNUM 7
-#define MC_CMD_MEMCPY_IN_RECORD_MAXNUM_MCDI2 31
-
-/* MC_CMD_MEMCPY_OUT msgresponse */
-#define MC_CMD_MEMCPY_OUT_LEN 0
+/* MC_CMD_MAC_STATS_V5_OUT_DMA msgresponse */
+#define MC_CMD_MAC_STATS_V5_OUT_DMA_LEN 0
+
+/* MC_CMD_MAC_STATS_V5_OUT_NO_DMA msgresponse */
+#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS_V5*64))>>3)
+#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_OFST 0
+#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_LEN 8
+#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_LO_OFST 0
+#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_LO_LEN 4
+#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_LO_LBN 0
+#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_LO_WIDTH 32
+#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_HI_OFST 4
+#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_HI_LEN 4
+#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_HI_LBN 32
+#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_HI_WIDTH 32
+#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V5
+/* enum property: index */
+/* enum: Start of V5 stats buffer space */
+#define MC_CMD_MAC_V5_DMABUF_START 0x7c
+/* enum: Link toggle counter: Number of times the link has toggled between
+ * up/down and down/up
+ */
+#define MC_CMD_MAC_LINK_TOGGLES 0x7c
+/* enum: This includes the space at offset 125, which holds the final
+ * GENERATION_END in a MAC_STATS_V5 response and is otherwise unused.
+ */
+#define MC_CMD_MAC_NSTATS_V5 0x7e
+/* Other enum values, see field(s): */
+/* MC_CMD_MAC_STATS_V4_OUT_NO_DMA/STATISTICS */
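The V5 NO_DMA response is simply an array of MC_CMD_MAC_NSTATS_V5 64-bit counters indexed by the stat enums, so the new link-toggle counter is read by index. A short sketch under that assumption; the function name is illustrative and it assumes the counters have already been converted to host-endian 64-bit values.

#include <stdint.h>

/* Illustrative: pull the link-toggle counter out of a V5 stats buffer. */
static uint64_t mac_stats_v5_link_toggles(const uint64_t stats[MC_CMD_MAC_NSTATS_V5])
{
	return stats[MC_CMD_MAC_LINK_TOGGLES];
}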
/***********************************/
@@ -6984,6 +7503,7 @@
#define MC_CMD_WOL_FILTER_RESET_IN_LEN 4
#define MC_CMD_WOL_FILTER_RESET_IN_MASK_OFST 0
#define MC_CMD_WOL_FILTER_RESET_IN_MASK_LEN 4
+/* enum property: bitmask */
#define MC_CMD_WOL_FILTER_RESET_IN_WAKE_FILTERS 0x1 /* enum */
#define MC_CMD_WOL_FILTER_RESET_IN_LIGHTSOUT_OFFLOADS 0x2 /* enum */
@@ -6992,23 +7512,6 @@
/***********************************/
-/* MC_CMD_SET_MCAST_HASH
- * Set the MCAST hash value without otherwise reconfiguring the MAC
- */
-#define MC_CMD_SET_MCAST_HASH 0x35
-
-/* MC_CMD_SET_MCAST_HASH_IN msgrequest */
-#define MC_CMD_SET_MCAST_HASH_IN_LEN 32
-#define MC_CMD_SET_MCAST_HASH_IN_HASH0_OFST 0
-#define MC_CMD_SET_MCAST_HASH_IN_HASH0_LEN 16
-#define MC_CMD_SET_MCAST_HASH_IN_HASH1_OFST 16
-#define MC_CMD_SET_MCAST_HASH_IN_HASH1_LEN 16
-
-/* MC_CMD_SET_MCAST_HASH_OUT msgresponse */
-#define MC_CMD_SET_MCAST_HASH_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_NVRAM_TYPES
* Return bitfield indicating available types of virtual NVRAM partitions.
* Locks required: none. Returns: 0
@@ -7026,6 +7529,7 @@
/* Bit mask of supported types. */
#define MC_CMD_NVRAM_TYPES_OUT_TYPES_OFST 0
#define MC_CMD_NVRAM_TYPES_OUT_TYPES_LEN 4
+/* enum property: bitshift */
/* enum: Disabled callisto. */
#define MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO 0x0
/* enum: MC firmware. */
@@ -7152,6 +7656,12 @@
#define MC_CMD_NVRAM_INFO_V2_OUT_A_B_OFST 12
#define MC_CMD_NVRAM_INFO_V2_OUT_A_B_LBN 7
#define MC_CMD_NVRAM_INFO_V2_OUT_A_B_WIDTH 1
+#define MC_CMD_NVRAM_INFO_V2_OUT_WRITE_ONLY_OFST 12
+#define MC_CMD_NVRAM_INFO_V2_OUT_WRITE_ONLY_LBN 8
+#define MC_CMD_NVRAM_INFO_V2_OUT_WRITE_ONLY_WIDTH 1
+#define MC_CMD_NVRAM_INFO_V2_OUT_SEQUENTIAL_WRITE_OFST 12
+#define MC_CMD_NVRAM_INFO_V2_OUT_SEQUENTIAL_WRITE_LBN 9
+#define MC_CMD_NVRAM_INFO_V2_OUT_SEQUENTIAL_WRITE_WIDTH 1
#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSDEV_OFST 16
#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSDEV_LEN 4
#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSADDR_OFST 20
@@ -7499,6 +8009,128 @@
#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_COMPONENT_COPY_FAILED 0x19
/* enum: The update operation is in-progress. */
#define MC_CMD_NVRAM_VERIFY_RC_PENDING 0x1a
+/* enum: The update was an invalid user configuration file. */
+#define MC_CMD_NVRAM_VERIFY_RC_BAD_CONFIG 0x1b
+/* enum: The write was to the AUTO partition but the data was not recognised as
+ * a valid partition.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_UNKNOWN_TYPE 0x1c
+
+/* MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT msgresponse */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_LEN 88
+/* Result of NVRAM update completion processing. Result codes that indicate an
+ * internal build failure, and are therefore not expected to be seen by
+ * customers in the field, are marked with the prefix 'Internal-error'.
+ */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_RESULT_CODE_OFST 0
+#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_RESULT_CODE_LEN 4
+/* enum: Invalid return code; only non-zero values are defined. Defined as
+ * unknown for backwards compatibility with NVRAM_UPDATE_FINISH_OUT.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_UNKNOWN 0x0 */
+/* enum: Verify succeeded without any errors. */
+/* MC_CMD_NVRAM_VERIFY_RC_SUCCESS 0x1 */
+/* enum: CMS format verification failed due to an internal error. */
+/* MC_CMD_NVRAM_VERIFY_RC_CMS_CHECK_FAILED 0x2 */
+/* enum: Invalid CMS format in image metadata. */
+/* MC_CMD_NVRAM_VERIFY_RC_INVALID_CMS_FORMAT 0x3 */
+/* enum: Message digest verification failed due to an internal error. */
+/* MC_CMD_NVRAM_VERIFY_RC_MESSAGE_DIGEST_CHECK_FAILED 0x4 */
+/* enum: Error in message digest calculated over the reflash-header, payload
+ * and reflash-trailer.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_BAD_MESSAGE_DIGEST 0x5 */
+/* enum: Signature verification failed due to an internal error. */
+/* MC_CMD_NVRAM_VERIFY_RC_SIGNATURE_CHECK_FAILED 0x6 */
+/* enum: There are no valid signatures in the image. */
+/* MC_CMD_NVRAM_VERIFY_RC_NO_VALID_SIGNATURES 0x7 */
+/* enum: Trusted approvers verification failed due to an internal error. */
+/* MC_CMD_NVRAM_VERIFY_RC_TRUSTED_APPROVERS_CHECK_FAILED 0x8 */
+/* enum: The Trusted approver's list is empty. */
+/* MC_CMD_NVRAM_VERIFY_RC_NO_TRUSTED_APPROVERS 0x9 */
+/* enum: Signature chain verification failed due to an internal error. */
+/* MC_CMD_NVRAM_VERIFY_RC_SIGNATURE_CHAIN_CHECK_FAILED 0xa */
+/* enum: The signers of the signatures in the image are not listed in the
+ * Trusted approver's list.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_NO_SIGNATURE_MATCH 0xb */
+/* enum: The image contains a test-signed certificate, but the adapter accepts
+ * only production signed images.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_REJECT_TEST_SIGNED 0xc */
+/* enum: The image has a lower security level than the current firmware. */
+/* MC_CMD_NVRAM_VERIFY_RC_SECURITY_LEVEL_DOWNGRADE 0xd */
+/* enum: Internal-error. The signed image is missing the 'contents' section,
+ * where the 'contents' section holds the actual image payload to be applied.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_CONTENT_NOT_FOUND 0xe */
+/* enum: Internal-error. The bundle header is invalid. */
+/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_CONTENT_HEADER_INVALID 0xf */
+/* enum: Internal-error. The bundle does not have a valid reflash image layout.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_REFLASH_IMAGE_INVALID 0x10 */
+/* enum: Internal-error. The bundle has an inconsistent layout of components or
+ * incorrect checksum.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_IMAGE_LAYOUT_INVALID 0x11 */
+/* enum: Internal-error. The bundle manifest is inconsistent with components in
+ * the bundle.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_INVALID 0x12 */
+/* enum: Internal-error. The number of components in a bundle do not match the
+ * number of components advertised by the bundle manifest.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_NUM_COMPONENTS_MISMATCH 0x13 */
+/* enum: Internal-error. The bundle contains too many components for the MC
+ * firmware to process.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_TOO_MANY_COMPONENTS 0x14 */
+/* enum: Internal-error. The bundle manifest has an invalid/inconsistent
+ * component.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_COMPONENT_INVALID 0x15 */
+/* enum: Internal-error. The hash of a component does not match the hash stored
+ * in the bundle manifest.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_COMPONENT_HASH_MISMATCH 0x16 */
+/* enum: Internal-error. Component hash calculation failed. */
+/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_COMPONENT_HASH_FAILED 0x17 */
+/* enum: Internal-error. The component does not have a valid reflash image
+ * layout.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_COMPONENT_REFLASH_IMAGE_INVALID 0x18 */
+/* enum: The bundle processing code failed to copy a component to its target
+ * partition.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_COMPONENT_COPY_FAILED 0x19 */
+/* enum: The update operation is in-progress. */
+/* MC_CMD_NVRAM_VERIFY_RC_PENDING 0x1a */
+/* enum: The update was an invalid user configuration file. */
+/* MC_CMD_NVRAM_VERIFY_RC_BAD_CONFIG 0x1b */
+/* enum: The write was to the AUTO partition but the data was not recognised as
+ * a valid partition.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_UNKNOWN_TYPE 0x1c */
+/* If the update was a user configuration, what action(s) the user must take to
+ * apply the new configuration.
+ */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_ACTIONS_REQUIRED_OFST 4
+#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_ACTIONS_REQUIRED_LEN 4
+/* enum: No action required. */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_NONE 0x0
+/* enum: The MC firmware must be rebooted (e.g. with MC_CMD_REBOOT). */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_FIRMWARE_REBOOT 0x1
+/* enum: The host must be rebooted. */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_HOST_REBOOT 0x2
+/* enum: The firmware and host must be rebooted (in either order). */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_FIRMWARE_AND_HOST_REBOOT 0x3
+/* enum: The host must be fully powered off. */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_HOST_POWERCYCLE 0x4
+/* If the update failed with MC_CMD_NVRAM_VERIFY_RC_BAD_CONFIG, a null-
+ * terminated US-ASCII string suitable for showing to the user.
+ */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_ERROR_STRING_OFST 8
+#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_ERROR_STRING_LEN 80
/***********************************/
@@ -7522,7 +8154,7 @@
#define MC_CMD_REBOOT 0x3d
#undef MC_CMD_0x3d_PRIVILEGE_CTG
-#define MC_CMD_0x3d_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x3d_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_REBOOT_IN msgrequest */
#define MC_CMD_REBOOT_IN_LEN 4
@@ -7535,65 +8167,6 @@
/***********************************/
-/* MC_CMD_SCHEDINFO
- * Request scheduler info. Locks required: NONE. Returns: An array of
- * (timeslice,maximum overrun), one for each thread, in ascending order of
- * thread address.
- */
-#define MC_CMD_SCHEDINFO 0x3e
-#undef MC_CMD_0x3e_PRIVILEGE_CTG
-
-#define MC_CMD_0x3e_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_SCHEDINFO_IN msgrequest */
-#define MC_CMD_SCHEDINFO_IN_LEN 0
-
-/* MC_CMD_SCHEDINFO_OUT msgresponse */
-#define MC_CMD_SCHEDINFO_OUT_LENMIN 4
-#define MC_CMD_SCHEDINFO_OUT_LENMAX 252
-#define MC_CMD_SCHEDINFO_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_SCHEDINFO_OUT_LEN(num) (0+4*(num))
-#define MC_CMD_SCHEDINFO_OUT_DATA_NUM(len) (((len)-0)/4)
-#define MC_CMD_SCHEDINFO_OUT_DATA_OFST 0
-#define MC_CMD_SCHEDINFO_OUT_DATA_LEN 4
-#define MC_CMD_SCHEDINFO_OUT_DATA_MINNUM 1
-#define MC_CMD_SCHEDINFO_OUT_DATA_MAXNUM 63
-#define MC_CMD_SCHEDINFO_OUT_DATA_MAXNUM_MCDI2 255
-
-
-/***********************************/
-/* MC_CMD_REBOOT_MODE
- * Set the mode for the next MC reboot. Locks required: NONE. Sets the reboot
- * mode to the specified value. Returns the old mode.
- */
-#define MC_CMD_REBOOT_MODE 0x3f
-#undef MC_CMD_0x3f_PRIVILEGE_CTG
-
-#define MC_CMD_0x3f_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_REBOOT_MODE_IN msgrequest */
-#define MC_CMD_REBOOT_MODE_IN_LEN 4
-#define MC_CMD_REBOOT_MODE_IN_VALUE_OFST 0
-#define MC_CMD_REBOOT_MODE_IN_VALUE_LEN 4
-/* enum: Normal. */
-#define MC_CMD_REBOOT_MODE_NORMAL 0x0
-/* enum: Power-on Reset. */
-#define MC_CMD_REBOOT_MODE_POR 0x2
-/* enum: Snapper. */
-#define MC_CMD_REBOOT_MODE_SNAPPER 0x3
-/* enum: snapper fake POR */
-#define MC_CMD_REBOOT_MODE_SNAPPER_POR 0x4
-#define MC_CMD_REBOOT_MODE_IN_FAKE_OFST 0
-#define MC_CMD_REBOOT_MODE_IN_FAKE_LBN 7
-#define MC_CMD_REBOOT_MODE_IN_FAKE_WIDTH 1
-
-/* MC_CMD_REBOOT_MODE_OUT msgresponse */
-#define MC_CMD_REBOOT_MODE_OUT_LEN 4
-#define MC_CMD_REBOOT_MODE_OUT_VALUE_OFST 0
-#define MC_CMD_REBOOT_MODE_OUT_VALUE_LEN 4
-
-
-/***********************************/
/* MC_CMD_SENSOR_INFO
* Returns information about every available sensor.
*
@@ -8061,6 +8634,54 @@
/* MC_CMD_GET_PHY_STATE_IN msgrequest */
#define MC_CMD_GET_PHY_STATE_IN_LEN 0
+/* MC_CMD_GET_PHY_STATE_IN_V2 msgrequest */
+#define MC_CMD_GET_PHY_STATE_IN_V2_LEN 8
+/* Target port to request PHY state for. Uses MAE_LINK_ENDPOINT_SELECTOR which
+ * identifies a real or virtual network port by MAE port and link end. See the
+ * structure definition for more details.
+ */
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_OFST 0
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_LEN 8
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_LO_OFST 0
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_LO_LEN 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_LO_LBN 0
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_LO_WIDTH 32
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_HI_OFST 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_HI_LEN 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_HI_LBN 32
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_HI_WIDTH 32
+/* See structuredef: MAE_LINK_ENDPOINT_SELECTOR */
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_OFST 0
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_LEN 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FLAT_OFST 0
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FLAT_LEN 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_TYPE_OFST 3
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_TYPE_LEN 1
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_OFST 0
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_LEN 3
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_LBN 0
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_WIDTH 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_LBN 20
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_WIDTH 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_LBN 16
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_WIDTH 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_OFST 2
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_LEN 1
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_OFST 0
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_LEN 2
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_LINK_END_OFST 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_LINK_END_LEN 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_OFST 0
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_LEN 8
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_LO_OFST 0
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_LO_LEN 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_LO_LBN 0
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_LO_WIDTH 32
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_HI_OFST 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_HI_LEN 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_HI_LBN 32
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_HI_WIDTH 32
+
/* MC_CMD_GET_PHY_STATE_OUT msgresponse */
#define MC_CMD_GET_PHY_STATE_OUT_LEN 4
#define MC_CMD_GET_PHY_STATE_OUT_STATE_OFST 0
@@ -8072,22 +8693,6 @@
/***********************************/
-/* MC_CMD_SETUP_8021QBB
- * 802.1Qbb control. 8 Tx queues that map to priorities 0 - 7. Use all 1s to
- * disable 802.Qbb for a given priority.
- */
-#define MC_CMD_SETUP_8021QBB 0x44
-
-/* MC_CMD_SETUP_8021QBB_IN msgrequest */
-#define MC_CMD_SETUP_8021QBB_IN_LEN 32
-#define MC_CMD_SETUP_8021QBB_IN_TXQS_OFST 0
-#define MC_CMD_SETUP_8021QBB_IN_TXQS_LEN 32
-
-/* MC_CMD_SETUP_8021QBB_OUT msgresponse */
-#define MC_CMD_SETUP_8021QBB_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_WOL_FILTER_GET
* Retrieve ID of any WoL filters. Locks required: None. Returns: 0, ENOSYS
*/
@@ -8106,133 +8711,6 @@
/***********************************/
-/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD
- * Add a protocol offload to NIC for lights-out state. Locks required: None.
- * Returns: 0, ENOSYS
- */
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD 0x46
-#undef MC_CMD_0x46_PRIVILEGE_CTG
-
-#define MC_CMD_0x46_PRIVILEGE_CTG SRIOV_CTG_LINK
-
-/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN msgrequest */
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMIN 8
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMAX 252
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMAX_MCDI2 1020
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LEN(num) (4+4*(num))
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_NUM(len) (((len)-4)/4)
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4
-#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_ARP 0x1 /* enum */
-#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_NS 0x2 /* enum */
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_OFST 4
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_LEN 4
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MINNUM 1
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MAXNUM 62
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MAXNUM_MCDI2 254
-
-/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP msgrequest */
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_LEN 14
-/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */
-/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4 */
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_OFST 4
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_LEN 6
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_IP_OFST 10
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_IP_LEN 4
-
-/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS msgrequest */
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_LEN 42
-/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */
-/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4 */
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_OFST 4
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_LEN 6
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_SNIPV6_OFST 10
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_SNIPV6_LEN 16
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_IPV6_OFST 26
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_IPV6_LEN 16
-
-/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT msgresponse */
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_LEN 4
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID_OFST 0
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID_LEN 4
-
-
-/***********************************/
-/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD
- * Remove a protocol offload from NIC for lights-out state. Locks required:
- * None. Returns: 0, ENOSYS
- */
-#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD 0x47
-#undef MC_CMD_0x47_PRIVILEGE_CTG
-
-#define MC_CMD_0x47_PRIVILEGE_CTG SRIOV_CTG_LINK
-
-/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN msgrequest */
-#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_LEN 8
-#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0
-#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4
-#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID_OFST 4
-#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID_LEN 4
-
-/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT msgresponse */
-#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_MAC_RESET_RESTORE
- * Restore MAC after block reset. Locks required: None. Returns: 0.
- */
-#define MC_CMD_MAC_RESET_RESTORE 0x48
-
-/* MC_CMD_MAC_RESET_RESTORE_IN msgrequest */
-#define MC_CMD_MAC_RESET_RESTORE_IN_LEN 0
-
-/* MC_CMD_MAC_RESET_RESTORE_OUT msgresponse */
-#define MC_CMD_MAC_RESET_RESTORE_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_TESTASSERT
- * Deliberately trigger an assert-detonation in the firmware for testing
- * purposes (i.e. to allow tests that the driver copes gracefully). Locks
- * required: None Returns: 0
- */
-#define MC_CMD_TESTASSERT 0x49
-#undef MC_CMD_0x49_PRIVILEGE_CTG
-
-#define MC_CMD_0x49_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_TESTASSERT_IN msgrequest */
-#define MC_CMD_TESTASSERT_IN_LEN 0
-
-/* MC_CMD_TESTASSERT_OUT msgresponse */
-#define MC_CMD_TESTASSERT_OUT_LEN 0
-
-/* MC_CMD_TESTASSERT_V2_IN msgrequest */
-#define MC_CMD_TESTASSERT_V2_IN_LEN 4
-/* How to provoke the assertion */
-#define MC_CMD_TESTASSERT_V2_IN_TYPE_OFST 0
-#define MC_CMD_TESTASSERT_V2_IN_TYPE_LEN 4
-/* enum: Assert using the FAIL_ASSERTION_WITH_USEFUL_VALUES macro. Unless
- * you're testing firmware, this is what you want.
- */
-#define MC_CMD_TESTASSERT_V2_IN_FAIL_ASSERTION_WITH_USEFUL_VALUES 0x0
-/* enum: Assert using assert(0); */
-#define MC_CMD_TESTASSERT_V2_IN_ASSERT_FALSE 0x1
-/* enum: Deliberately trigger a watchdog */
-#define MC_CMD_TESTASSERT_V2_IN_WATCHDOG 0x2
-/* enum: Deliberately trigger a trap by loading from an invalid address */
-#define MC_CMD_TESTASSERT_V2_IN_LOAD_TRAP 0x3
-/* enum: Deliberately trigger a trap by storing to an invalid address */
-#define MC_CMD_TESTASSERT_V2_IN_STORE_TRAP 0x4
-/* enum: Jump to an invalid address */
-#define MC_CMD_TESTASSERT_V2_IN_JUMP_TRAP 0x5
-
-/* MC_CMD_TESTASSERT_V2_OUT msgresponse */
-#define MC_CMD_TESTASSERT_V2_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_WORKAROUND
* Enable/Disable a given workaround. The mcfw will return EINVAL if it doesn't
* understand the given workaround number - which should not be treated as a
@@ -8324,6 +8802,62 @@
#define MC_CMD_GET_PHY_MEDIA_INFO_IN_DSFP_BANK_LBN 16
#define MC_CMD_GET_PHY_MEDIA_INFO_IN_DSFP_BANK_WIDTH 16
+/* MC_CMD_GET_PHY_MEDIA_INFO_IN_V2 msgrequest */
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_LEN 12
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_PAGE_OFST 0
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_PAGE_LEN 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_DSFP_PAGE_OFST 0
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_DSFP_PAGE_LBN 0
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_DSFP_PAGE_WIDTH 16
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_DSFP_BANK_OFST 0
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_DSFP_BANK_LBN 16
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_DSFP_BANK_WIDTH 16
+/* Target port to request PHY media information for. Uses
+ * MAE_LINK_ENDPOINT_SELECTOR which identifies a real or virtual network port
+ * by MAE port and link end. See the structure definition for more details.
+ */
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_OFST 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_LEN 8
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_LO_OFST 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_LO_LEN 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_LO_LBN 32
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_LO_WIDTH 32
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_HI_OFST 8
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_HI_LEN 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_HI_LBN 64
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_HI_WIDTH 32
+/* See structuredef: MAE_LINK_ENDPOINT_SELECTOR */
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_OFST 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_LEN 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FLAT_OFST 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FLAT_LEN 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_TYPE_OFST 7
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_TYPE_LEN 1
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_OFST 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_LEN 3
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_LBN 32
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_WIDTH 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_LBN 52
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_WIDTH 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_LBN 48
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_WIDTH 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_OFST 6
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_LEN 1
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_OFST 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_LEN 2
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_LINK_END_OFST 8
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_LINK_END_LEN 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_OFST 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_LEN 8
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_LO_OFST 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_LO_LEN 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_LO_LBN 32
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_LO_WIDTH 32
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_HI_OFST 8
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_HI_LEN 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_HI_LBN 64
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_HI_WIDTH 32
+
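The first dword of the V2 request still packs the DSFP page in the low 16 bits and the bank in the high 16 bits, with the endpoint selector appended after it. A hedged sketch of just that PAGE word, assuming the constants above are in scope; the function name is illustrative.

#include <stdint.h>

/* Illustrative: pack DSFP page/bank into the PAGE dword per the LBN/WIDTH above. */
static uint32_t phy_media_info_v2_page(uint16_t page, uint16_t bank)
{
	uint32_t word = 0;

	word |= (uint32_t)page << MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_DSFP_PAGE_LBN;
	word |= (uint32_t)bank << MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_DSFP_BANK_LBN;
	return word;
}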
/* MC_CMD_GET_PHY_MEDIA_INFO_OUT msgresponse */
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMIN 5
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX 252
@@ -8348,7 +8882,7 @@
#define MC_CMD_NVRAM_TEST 0x4c
#undef MC_CMD_0x4c_PRIVILEGE_CTG
-#define MC_CMD_0x4c_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x4c_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_NVRAM_TEST_IN msgrequest */
#define MC_CMD_NVRAM_TEST_IN_LEN 4
@@ -8370,103 +8904,6 @@
/***********************************/
-/* MC_CMD_MRSFP_TWEAK
- * Read status and/or set parameters for the 'mrsfp' driver in mr_rusty builds.
- * I2C I/O expander bits are always read; if equaliser parameters are supplied,
- * they are configured first. Locks required: None. Return code: 0, EINVAL.
- */
-#define MC_CMD_MRSFP_TWEAK 0x4d
-
-/* MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG msgrequest */
-#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_LEN 16
-/* 0-6 low->high de-emph. */
-#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_LEVEL_OFST 0
-#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_LEVEL_LEN 4
-/* 0-8 low->high ref.V */
-#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_DT_CFG_OFST 4
-#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_DT_CFG_LEN 4
-/* 0-8 0-8 low->high boost */
-#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_BOOST_OFST 8
-#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_BOOST_LEN 4
-/* 0-8 low->high ref.V */
-#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_DT_CFG_OFST 12
-#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_DT_CFG_LEN 4
-
-/* MC_CMD_MRSFP_TWEAK_IN_READ_ONLY msgrequest */
-#define MC_CMD_MRSFP_TWEAK_IN_READ_ONLY_LEN 0
-
-/* MC_CMD_MRSFP_TWEAK_OUT msgresponse */
-#define MC_CMD_MRSFP_TWEAK_OUT_LEN 12
-/* input bits */
-#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_OFST 0
-#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_LEN 4
-/* output bits */
-#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_OFST 4
-#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_LEN 4
-/* direction */
-#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OFST 8
-#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_LEN 4
-/* enum: Out. */
-#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OUT 0x0
-/* enum: In. */
-#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_IN 0x1
-
-
-/***********************************/
-/* MC_CMD_SENSOR_SET_LIMS
- * Adjusts the sensor limits. This is a warranty-voiding operation. Returns:
- * ENOENT if the sensor specified does not exist, EINVAL if the limits are out
- * of range.
- */
-#define MC_CMD_SENSOR_SET_LIMS 0x4e
-#undef MC_CMD_0x4e_PRIVILEGE_CTG
-
-#define MC_CMD_0x4e_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_SENSOR_SET_LIMS_IN msgrequest */
-#define MC_CMD_SENSOR_SET_LIMS_IN_LEN 20
-#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_OFST 0
-#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_LEN 4
-/* Enum values, see field(s): */
-/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
-/* interpretation is is sensor-specific. */
-#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_OFST 4
-#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_LEN 4
-/* interpretation is is sensor-specific. */
-#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_OFST 8
-#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_LEN 4
-/* interpretation is is sensor-specific. */
-#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_OFST 12
-#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_LEN 4
-/* interpretation is is sensor-specific. */
-#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_OFST 16
-#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_LEN 4
-
-/* MC_CMD_SENSOR_SET_LIMS_OUT msgresponse */
-#define MC_CMD_SENSOR_SET_LIMS_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_GET_RESOURCE_LIMITS
- */
-#define MC_CMD_GET_RESOURCE_LIMITS 0x4f
-
-/* MC_CMD_GET_RESOURCE_LIMITS_IN msgrequest */
-#define MC_CMD_GET_RESOURCE_LIMITS_IN_LEN 0
-
-/* MC_CMD_GET_RESOURCE_LIMITS_OUT msgresponse */
-#define MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN 16
-#define MC_CMD_GET_RESOURCE_LIMITS_OUT_BUFTBL_OFST 0
-#define MC_CMD_GET_RESOURCE_LIMITS_OUT_BUFTBL_LEN 4
-#define MC_CMD_GET_RESOURCE_LIMITS_OUT_EVQ_OFST 4
-#define MC_CMD_GET_RESOURCE_LIMITS_OUT_EVQ_LEN 4
-#define MC_CMD_GET_RESOURCE_LIMITS_OUT_RXQ_OFST 8
-#define MC_CMD_GET_RESOURCE_LIMITS_OUT_RXQ_LEN 4
-#define MC_CMD_GET_RESOURCE_LIMITS_OUT_TXQ_OFST 12
-#define MC_CMD_GET_RESOURCE_LIMITS_OUT_TXQ_LEN 4
-
-
-/***********************************/
/* MC_CMD_NVRAM_PARTITIONS
* Reads the list of available virtual NVRAM partition types. Locks required:
* none. Returns: 0, EINVAL (bad type).
@@ -8582,806 +9019,6 @@
#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_OFST 12
#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_LEN 4
-
-/***********************************/
-/* MC_CMD_CLP
- * Perform a CLP related operation, see SF-110495-PS for details of CLP
- * processing. This command has been extended to accomodate the requirements of
- * different manufacturers which are to be found in SF-119187-TC, SF-119186-TC,
- * SF-120509-TC and SF-117282-PS.
- */
-#define MC_CMD_CLP 0x56
-#undef MC_CMD_0x56_PRIVILEGE_CTG
-
-#define MC_CMD_0x56_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_CLP_IN msgrequest */
-#define MC_CMD_CLP_IN_LEN 4
-/* Sub operation */
-#define MC_CMD_CLP_IN_OP_OFST 0
-#define MC_CMD_CLP_IN_OP_LEN 4
-/* enum: Return to factory default settings */
-#define MC_CMD_CLP_OP_DEFAULT 0x1
-/* enum: Set MAC address */
-#define MC_CMD_CLP_OP_SET_MAC 0x2
-/* enum: Get MAC address */
-#define MC_CMD_CLP_OP_GET_MAC 0x3
-/* enum: Set UEFI/GPXE boot mode */
-#define MC_CMD_CLP_OP_SET_BOOT 0x4
-/* enum: Get UEFI/GPXE boot mode */
-#define MC_CMD_CLP_OP_GET_BOOT 0x5
-
-/* MC_CMD_CLP_OUT msgresponse */
-#define MC_CMD_CLP_OUT_LEN 0
-
-/* MC_CMD_CLP_IN_DEFAULT msgrequest */
-#define MC_CMD_CLP_IN_DEFAULT_LEN 4
-/* MC_CMD_CLP_IN_OP_OFST 0 */
-/* MC_CMD_CLP_IN_OP_LEN 4 */
-
-/* MC_CMD_CLP_OUT_DEFAULT msgresponse */
-#define MC_CMD_CLP_OUT_DEFAULT_LEN 0
-
-/* MC_CMD_CLP_IN_SET_MAC msgrequest */
-#define MC_CMD_CLP_IN_SET_MAC_LEN 12
-/* MC_CMD_CLP_IN_OP_OFST 0 */
-/* MC_CMD_CLP_IN_OP_LEN 4 */
-/* The MAC address assigned to port. A zero MAC address of 00:00:00:00:00:00
- * restores the permanent (factory-programmed) MAC address associated with the
- * port. A non-zero MAC address persists until a PCIe reset or a power cycle.
- */
-#define MC_CMD_CLP_IN_SET_MAC_ADDR_OFST 4
-#define MC_CMD_CLP_IN_SET_MAC_ADDR_LEN 6
-/* Padding */
-#define MC_CMD_CLP_IN_SET_MAC_RESERVED_OFST 10
-#define MC_CMD_CLP_IN_SET_MAC_RESERVED_LEN 2
-
-/* MC_CMD_CLP_OUT_SET_MAC msgresponse */
-#define MC_CMD_CLP_OUT_SET_MAC_LEN 0
-
-/* MC_CMD_CLP_IN_SET_MAC_V2 msgrequest */
-#define MC_CMD_CLP_IN_SET_MAC_V2_LEN 16
-/* MC_CMD_CLP_IN_OP_OFST 0 */
-/* MC_CMD_CLP_IN_OP_LEN 4 */
-/* The MAC address assigned to port. A zero MAC address of 00:00:00:00:00:00
- * restores the permanent (factory-programmed) MAC address associated with the
- * port. A non-zero MAC address persists until a PCIe reset or a power cycle.
- */
-#define MC_CMD_CLP_IN_SET_MAC_V2_ADDR_OFST 4
-#define MC_CMD_CLP_IN_SET_MAC_V2_ADDR_LEN 6
-/* Padding */
-#define MC_CMD_CLP_IN_SET_MAC_V2_RESERVED_OFST 10
-#define MC_CMD_CLP_IN_SET_MAC_V2_RESERVED_LEN 2
-#define MC_CMD_CLP_IN_SET_MAC_V2_FLAGS_OFST 12
-#define MC_CMD_CLP_IN_SET_MAC_V2_FLAGS_LEN 4
-#define MC_CMD_CLP_IN_SET_MAC_V2_VIRTUAL_OFST 12
-#define MC_CMD_CLP_IN_SET_MAC_V2_VIRTUAL_LBN 0
-#define MC_CMD_CLP_IN_SET_MAC_V2_VIRTUAL_WIDTH 1
-
-/* MC_CMD_CLP_IN_GET_MAC msgrequest */
-#define MC_CMD_CLP_IN_GET_MAC_LEN 4
-/* MC_CMD_CLP_IN_OP_OFST 0 */
-/* MC_CMD_CLP_IN_OP_LEN 4 */
-
-/* MC_CMD_CLP_IN_GET_MAC_V2 msgrequest */
-#define MC_CMD_CLP_IN_GET_MAC_V2_LEN 8
-/* MC_CMD_CLP_IN_OP_OFST 0 */
-/* MC_CMD_CLP_IN_OP_LEN 4 */
-#define MC_CMD_CLP_IN_GET_MAC_V2_FLAGS_OFST 4
-#define MC_CMD_CLP_IN_GET_MAC_V2_FLAGS_LEN 4
-#define MC_CMD_CLP_IN_GET_MAC_V2_PERMANENT_OFST 4
-#define MC_CMD_CLP_IN_GET_MAC_V2_PERMANENT_LBN 0
-#define MC_CMD_CLP_IN_GET_MAC_V2_PERMANENT_WIDTH 1
-
-/* MC_CMD_CLP_OUT_GET_MAC msgresponse */
-#define MC_CMD_CLP_OUT_GET_MAC_LEN 8
-/* MAC address assigned to port */
-#define MC_CMD_CLP_OUT_GET_MAC_ADDR_OFST 0
-#define MC_CMD_CLP_OUT_GET_MAC_ADDR_LEN 6
-/* Padding */
-#define MC_CMD_CLP_OUT_GET_MAC_RESERVED_OFST 6
-#define MC_CMD_CLP_OUT_GET_MAC_RESERVED_LEN 2
-
-/* MC_CMD_CLP_IN_SET_BOOT msgrequest */
-#define MC_CMD_CLP_IN_SET_BOOT_LEN 5
-/* MC_CMD_CLP_IN_OP_OFST 0 */
-/* MC_CMD_CLP_IN_OP_LEN 4 */
-/* Boot flag */
-#define MC_CMD_CLP_IN_SET_BOOT_FLAG_OFST 4
-#define MC_CMD_CLP_IN_SET_BOOT_FLAG_LEN 1
-
-/* MC_CMD_CLP_OUT_SET_BOOT msgresponse */
-#define MC_CMD_CLP_OUT_SET_BOOT_LEN 0
-
-/* MC_CMD_CLP_IN_GET_BOOT msgrequest */
-#define MC_CMD_CLP_IN_GET_BOOT_LEN 4
-/* MC_CMD_CLP_IN_OP_OFST 0 */
-/* MC_CMD_CLP_IN_OP_LEN 4 */
-
-/* MC_CMD_CLP_OUT_GET_BOOT msgresponse */
-#define MC_CMD_CLP_OUT_GET_BOOT_LEN 4
-/* Boot flag */
-#define MC_CMD_CLP_OUT_GET_BOOT_FLAG_OFST 0
-#define MC_CMD_CLP_OUT_GET_BOOT_FLAG_LEN 1
-/* Padding */
-#define MC_CMD_CLP_OUT_GET_BOOT_RESERVED_OFST 1
-#define MC_CMD_CLP_OUT_GET_BOOT_RESERVED_LEN 3
-
-
-/***********************************/
-/* MC_CMD_MUM
- * Perform a MUM operation
- */
-#define MC_CMD_MUM 0x57
-#undef MC_CMD_0x57_PRIVILEGE_CTG
-
-#define MC_CMD_0x57_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_MUM_IN msgrequest */
-#define MC_CMD_MUM_IN_LEN 4
-#define MC_CMD_MUM_IN_OP_HDR_OFST 0
-#define MC_CMD_MUM_IN_OP_HDR_LEN 4
-#define MC_CMD_MUM_IN_OP_OFST 0
-#define MC_CMD_MUM_IN_OP_LBN 0
-#define MC_CMD_MUM_IN_OP_WIDTH 8
-/* enum: NULL MCDI command to MUM */
-#define MC_CMD_MUM_OP_NULL 0x1
-/* enum: Get MUM version */
-#define MC_CMD_MUM_OP_GET_VERSION 0x2
-/* enum: Issue raw I2C command to MUM */
-#define MC_CMD_MUM_OP_RAW_CMD 0x3
-/* enum: Read from registers on devices connected to MUM. */
-#define MC_CMD_MUM_OP_READ 0x4
-/* enum: Write to registers on devices connected to MUM. */
-#define MC_CMD_MUM_OP_WRITE 0x5
-/* enum: Control UART logging. */
-#define MC_CMD_MUM_OP_LOG 0x6
-/* enum: Operations on MUM GPIO lines */
-#define MC_CMD_MUM_OP_GPIO 0x7
-/* enum: Get sensor readings from MUM */
-#define MC_CMD_MUM_OP_READ_SENSORS 0x8
-/* enum: Initiate clock programming on the MUM */
-#define MC_CMD_MUM_OP_PROGRAM_CLOCKS 0x9
-/* enum: Initiate FPGA load from flash on the MUM */
-#define MC_CMD_MUM_OP_FPGA_LOAD 0xa
-/* enum: Request sensor reading from MUM ADC resulting from earlier request via
- * MUM ATB
- */
-#define MC_CMD_MUM_OP_READ_ATB_SENSOR 0xb
-/* enum: Send commands relating to the QSFP ports via the MUM for PHY
- * operations
- */
-#define MC_CMD_MUM_OP_QSFP 0xc
-/* enum: Request discrete and SODIMM DDR info (type, size, speed grade, voltage
- * level) from MUM
- */
-#define MC_CMD_MUM_OP_READ_DDR_INFO 0xd
-
-/* MC_CMD_MUM_IN_NULL msgrequest */
-#define MC_CMD_MUM_IN_NULL_LEN 4
-/* MUM cmd header */
-#define MC_CMD_MUM_IN_CMD_OFST 0
-#define MC_CMD_MUM_IN_CMD_LEN 4
-
-/* MC_CMD_MUM_IN_GET_VERSION msgrequest */
-#define MC_CMD_MUM_IN_GET_VERSION_LEN 4
-/* MUM cmd header */
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-
-/* MC_CMD_MUM_IN_READ msgrequest */
-#define MC_CMD_MUM_IN_READ_LEN 16
-/* MUM cmd header */
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-/* ID of (device connected to MUM) to read from registers of */
-#define MC_CMD_MUM_IN_READ_DEVICE_OFST 4
-#define MC_CMD_MUM_IN_READ_DEVICE_LEN 4
-/* enum: Hittite HMC1035 clock generator on Sorrento board */
-#define MC_CMD_MUM_DEV_HITTITE 0x1
-/* enum: Hittite HMC1035 clock generator for NIC-side on Sorrento board */
-#define MC_CMD_MUM_DEV_HITTITE_NIC 0x2
-/* 32-bit address to read from */
-#define MC_CMD_MUM_IN_READ_ADDR_OFST 8
-#define MC_CMD_MUM_IN_READ_ADDR_LEN 4
-/* Number of words to read. */
-#define MC_CMD_MUM_IN_READ_NUMWORDS_OFST 12
-#define MC_CMD_MUM_IN_READ_NUMWORDS_LEN 4
-
-/* MC_CMD_MUM_IN_WRITE msgrequest */
-#define MC_CMD_MUM_IN_WRITE_LENMIN 16
-#define MC_CMD_MUM_IN_WRITE_LENMAX 252
-#define MC_CMD_MUM_IN_WRITE_LENMAX_MCDI2 1020
-#define MC_CMD_MUM_IN_WRITE_LEN(num) (12+4*(num))
-#define MC_CMD_MUM_IN_WRITE_BUFFER_NUM(len) (((len)-12)/4)
-/* MUM cmd header */
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-/* ID of (device connected to MUM) to write to registers of */
-#define MC_CMD_MUM_IN_WRITE_DEVICE_OFST 4
-#define MC_CMD_MUM_IN_WRITE_DEVICE_LEN 4
-/* enum: Hittite HMC1035 clock generator on Sorrento board */
-/* MC_CMD_MUM_DEV_HITTITE 0x1 */
-/* 32-bit address to write to */
-#define MC_CMD_MUM_IN_WRITE_ADDR_OFST 8
-#define MC_CMD_MUM_IN_WRITE_ADDR_LEN 4
-/* Words to write */
-#define MC_CMD_MUM_IN_WRITE_BUFFER_OFST 12
-#define MC_CMD_MUM_IN_WRITE_BUFFER_LEN 4
-#define MC_CMD_MUM_IN_WRITE_BUFFER_MINNUM 1
-#define MC_CMD_MUM_IN_WRITE_BUFFER_MAXNUM 60
-#define MC_CMD_MUM_IN_WRITE_BUFFER_MAXNUM_MCDI2 252
-
-/* MC_CMD_MUM_IN_RAW_CMD msgrequest */
-#define MC_CMD_MUM_IN_RAW_CMD_LENMIN 17
-#define MC_CMD_MUM_IN_RAW_CMD_LENMAX 252
-#define MC_CMD_MUM_IN_RAW_CMD_LENMAX_MCDI2 1020
-#define MC_CMD_MUM_IN_RAW_CMD_LEN(num) (16+1*(num))
-#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_NUM(len) (((len)-16)/1)
-/* MUM cmd header */
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-/* MUM I2C cmd code */
-#define MC_CMD_MUM_IN_RAW_CMD_CMD_CODE_OFST 4
-#define MC_CMD_MUM_IN_RAW_CMD_CMD_CODE_LEN 4
-/* Number of bytes to write */
-#define MC_CMD_MUM_IN_RAW_CMD_NUM_WRITE_OFST 8
-#define MC_CMD_MUM_IN_RAW_CMD_NUM_WRITE_LEN 4
-/* Number of bytes to read */
-#define MC_CMD_MUM_IN_RAW_CMD_NUM_READ_OFST 12
-#define MC_CMD_MUM_IN_RAW_CMD_NUM_READ_LEN 4
-/* Bytes to write */
-#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_OFST 16
-#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_LEN 1
-#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MINNUM 1
-#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MAXNUM 236
-#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MAXNUM_MCDI2 1004
-
-/* MC_CMD_MUM_IN_LOG msgrequest */
-#define MC_CMD_MUM_IN_LOG_LEN 8
-/* MUM cmd header */
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_LOG_OP_OFST 4
-#define MC_CMD_MUM_IN_LOG_OP_LEN 4
-#define MC_CMD_MUM_IN_LOG_OP_UART 0x1 /* enum */
-
-/* MC_CMD_MUM_IN_LOG_OP_UART msgrequest */
-#define MC_CMD_MUM_IN_LOG_OP_UART_LEN 12
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-/* MC_CMD_MUM_IN_LOG_OP_OFST 4 */
-/* MC_CMD_MUM_IN_LOG_OP_LEN 4 */
-/* Enable/disable debug output to UART */
-#define MC_CMD_MUM_IN_LOG_OP_UART_ENABLE_OFST 8
-#define MC_CMD_MUM_IN_LOG_OP_UART_ENABLE_LEN 4
-
-/* MC_CMD_MUM_IN_GPIO msgrequest */
-#define MC_CMD_MUM_IN_GPIO_LEN 8
-/* MUM cmd header */
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_GPIO_HDR_OFST 4
-#define MC_CMD_MUM_IN_GPIO_HDR_LEN 4
-#define MC_CMD_MUM_IN_GPIO_OPCODE_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OPCODE_LBN 0
-#define MC_CMD_MUM_IN_GPIO_OPCODE_WIDTH 8
-#define MC_CMD_MUM_IN_GPIO_IN_READ 0x0 /* enum */
-#define MC_CMD_MUM_IN_GPIO_OUT_WRITE 0x1 /* enum */
-#define MC_CMD_MUM_IN_GPIO_OUT_READ 0x2 /* enum */
-#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE 0x3 /* enum */
-#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ 0x4 /* enum */
-#define MC_CMD_MUM_IN_GPIO_OP 0x5 /* enum */
-
-/* MC_CMD_MUM_IN_GPIO_IN_READ msgrequest */
-#define MC_CMD_MUM_IN_GPIO_IN_READ_LEN 8
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_GPIO_IN_READ_HDR_OFST 4
-#define MC_CMD_MUM_IN_GPIO_IN_READ_HDR_LEN 4
-
-/* MC_CMD_MUM_IN_GPIO_OUT_WRITE msgrequest */
-#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_LEN 16
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_HDR_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_HDR_LEN 4
-/* The first 32-bit word to be written to the GPIO OUT register. */
-#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK1_OFST 8
-#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK1_LEN 4
-/* The second 32-bit word to be written to the GPIO OUT register. */
-#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK2_OFST 12
-#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK2_LEN 4
-
-/* MC_CMD_MUM_IN_GPIO_OUT_READ msgrequest */
-#define MC_CMD_MUM_IN_GPIO_OUT_READ_LEN 8
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_GPIO_OUT_READ_HDR_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OUT_READ_HDR_LEN 4
-
-/* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE msgrequest */
-#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_LEN 16
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_HDR_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_HDR_LEN 4
-/* The first 32-bit word to be written to the GPIO OUT ENABLE register. */
-#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK1_OFST 8
-#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK1_LEN 4
-/* The second 32-bit word to be written to the GPIO OUT ENABLE register. */
-#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK2_OFST 12
-#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK2_LEN 4
-
-/* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ msgrequest */
-#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_LEN 8
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_HDR_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_HDR_LEN 4
-
-/* MC_CMD_MUM_IN_GPIO_OP msgrequest */
-#define MC_CMD_MUM_IN_GPIO_OP_LEN 8
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_GPIO_OP_HDR_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OP_HDR_LEN 4
-#define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_LBN 8
-#define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_WIDTH 8
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ 0x0 /* enum */
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE 0x1 /* enum */
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG 0x2 /* enum */
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE 0x3 /* enum */
-#define MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_LBN 16
-#define MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_WIDTH 8
-
-/* MC_CMD_MUM_IN_GPIO_OP_OUT_READ msgrequest */
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_LEN 8
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_HDR_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_HDR_LEN 4
-
-/* MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE msgrequest */
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_LEN 8
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_HDR_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_HDR_LEN 4
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_LBN 24
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_WIDTH 8
-
-/* MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG msgrequest */
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_LEN 8
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_HDR_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_HDR_LEN 4
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_LBN 24
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_WIDTH 8
-
-/* MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE msgrequest */
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_LEN 8
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_HDR_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_HDR_LEN 4
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_LBN 24
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_WIDTH 8
-
-/* MC_CMD_MUM_IN_READ_SENSORS msgrequest */
-#define MC_CMD_MUM_IN_READ_SENSORS_LEN 8
-/* MUM cmd header */
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_READ_SENSORS_PARAMS_OFST 4
-#define MC_CMD_MUM_IN_READ_SENSORS_PARAMS_LEN 4
-#define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_OFST 4
-#define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_LBN 0
-#define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_WIDTH 8
-#define MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_OFST 4
-#define MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_LBN 8
-#define MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_WIDTH 8
-
-/* MC_CMD_MUM_IN_PROGRAM_CLOCKS msgrequest */
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_LEN 12
-/* MUM cmd header */
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-/* Bit-mask of clocks to be programmed */
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_MASK_OFST 4
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_MASK_LEN 4
-#define MC_CMD_MUM_CLOCK_ID_FPGA 0x0 /* enum */
-#define MC_CMD_MUM_CLOCK_ID_DDR 0x1 /* enum */
-#define MC_CMD_MUM_CLOCK_ID_NIC 0x2 /* enum */
-/* Control flags for clock programming */
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_OFST 8
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_LEN 4
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_OFST 8
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_LBN 0
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_WIDTH 1
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_OFST 8
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_LBN 1
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_WIDTH 1
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_REF_FROM_XO_OFST 8
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_REF_FROM_XO_LBN 2
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_REF_FROM_XO_WIDTH 1
-
-/* MC_CMD_MUM_IN_FPGA_LOAD msgrequest */
-#define MC_CMD_MUM_IN_FPGA_LOAD_LEN 8
-/* MUM cmd header */
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-/* Enable/Disable FPGA config from flash */
-#define MC_CMD_MUM_IN_FPGA_LOAD_ENABLE_OFST 4
-#define MC_CMD_MUM_IN_FPGA_LOAD_ENABLE_LEN 4
-
-/* MC_CMD_MUM_IN_READ_ATB_SENSOR msgrequest */
-#define MC_CMD_MUM_IN_READ_ATB_SENSOR_LEN 4
-/* MUM cmd header */
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-
-/* MC_CMD_MUM_IN_QSFP msgrequest */
-#define MC_CMD_MUM_IN_QSFP_LEN 12
-/* MUM cmd header */
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_QSFP_HDR_OFST 4
-#define MC_CMD_MUM_IN_QSFP_HDR_LEN 4
-#define MC_CMD_MUM_IN_QSFP_OPCODE_OFST 4
-#define MC_CMD_MUM_IN_QSFP_OPCODE_LBN 0
-#define MC_CMD_MUM_IN_QSFP_OPCODE_WIDTH 4
-#define MC_CMD_MUM_IN_QSFP_INIT 0x0 /* enum */
-#define MC_CMD_MUM_IN_QSFP_RECONFIGURE 0x1 /* enum */
-#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP 0x2 /* enum */
-#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO 0x3 /* enum */
-#define MC_CMD_MUM_IN_QSFP_FILL_STATS 0x4 /* enum */
-#define MC_CMD_MUM_IN_QSFP_POLL_BIST 0x5 /* enum */
-#define MC_CMD_MUM_IN_QSFP_IDX_OFST 8
-#define MC_CMD_MUM_IN_QSFP_IDX_LEN 4
-
-/* MC_CMD_MUM_IN_QSFP_INIT msgrequest */
-#define MC_CMD_MUM_IN_QSFP_INIT_LEN 16
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_QSFP_INIT_HDR_OFST 4
-#define MC_CMD_MUM_IN_QSFP_INIT_HDR_LEN 4
-#define MC_CMD_MUM_IN_QSFP_INIT_IDX_OFST 8
-#define MC_CMD_MUM_IN_QSFP_INIT_IDX_LEN 4
-#define MC_CMD_MUM_IN_QSFP_INIT_CAGE_OFST 12
-#define MC_CMD_MUM_IN_QSFP_INIT_CAGE_LEN 4
-
-/* MC_CMD_MUM_IN_QSFP_RECONFIGURE msgrequest */
-#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_LEN 24
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_HDR_OFST 4
-#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_HDR_LEN 4
-#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_IDX_OFST 8
-#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_IDX_LEN 4
-#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_TX_DISABLE_OFST 12
-#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_TX_DISABLE_LEN 4
-#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LANES_OFST 16
-#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LANES_LEN 4
-#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LINK_SPEED_OFST 20
-#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LINK_SPEED_LEN 4
-
-/* MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP msgrequest */
-#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_LEN 12
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_HDR_OFST 4
-#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_HDR_LEN 4
-#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_IDX_OFST 8
-#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_IDX_LEN 4
-
-/* MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO msgrequest */
-#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_LEN 16
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_HDR_OFST 4
-#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_HDR_LEN 4
-#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_IDX_OFST 8
-#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_IDX_LEN 4
-#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_PAGE_OFST 12
-#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_PAGE_LEN 4
-
-/* MC_CMD_MUM_IN_QSFP_FILL_STATS msgrequest */
-#define MC_CMD_MUM_IN_QSFP_FILL_STATS_LEN 12
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_QSFP_FILL_STATS_HDR_OFST 4
-#define MC_CMD_MUM_IN_QSFP_FILL_STATS_HDR_LEN 4
-#define MC_CMD_MUM_IN_QSFP_FILL_STATS_IDX_OFST 8
-#define MC_CMD_MUM_IN_QSFP_FILL_STATS_IDX_LEN 4
-
-/* MC_CMD_MUM_IN_QSFP_POLL_BIST msgrequest */
-#define MC_CMD_MUM_IN_QSFP_POLL_BIST_LEN 12
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_OFST 4
-#define MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_LEN 4
-#define MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_OFST 8
-#define MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_LEN 4
-
-/* MC_CMD_MUM_IN_READ_DDR_INFO msgrequest */
-#define MC_CMD_MUM_IN_READ_DDR_INFO_LEN 4
-/* MUM cmd header */
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-
-/* MC_CMD_MUM_OUT msgresponse */
-#define MC_CMD_MUM_OUT_LEN 0
-
-/* MC_CMD_MUM_OUT_NULL msgresponse */
-#define MC_CMD_MUM_OUT_NULL_LEN 0
-
-/* MC_CMD_MUM_OUT_GET_VERSION msgresponse */
-#define MC_CMD_MUM_OUT_GET_VERSION_LEN 12
-#define MC_CMD_MUM_OUT_GET_VERSION_FIRMWARE_OFST 0
-#define MC_CMD_MUM_OUT_GET_VERSION_FIRMWARE_LEN 4
-#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_OFST 4
-#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LEN 8
-#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_OFST 4
-#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_LEN 4
-#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_LBN 32
-#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_WIDTH 32
-#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_HI_OFST 8
-#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_HI_LEN 4
-#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_HI_LBN 64
-#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_HI_WIDTH 32
-
-/* MC_CMD_MUM_OUT_RAW_CMD msgresponse */
-#define MC_CMD_MUM_OUT_RAW_CMD_LENMIN 1
-#define MC_CMD_MUM_OUT_RAW_CMD_LENMAX 252
-#define MC_CMD_MUM_OUT_RAW_CMD_LENMAX_MCDI2 1020
-#define MC_CMD_MUM_OUT_RAW_CMD_LEN(num) (0+1*(num))
-#define MC_CMD_MUM_OUT_RAW_CMD_DATA_NUM(len) (((len)-0)/1)
-/* returned data */
-#define MC_CMD_MUM_OUT_RAW_CMD_DATA_OFST 0
-#define MC_CMD_MUM_OUT_RAW_CMD_DATA_LEN 1
-#define MC_CMD_MUM_OUT_RAW_CMD_DATA_MINNUM 1
-#define MC_CMD_MUM_OUT_RAW_CMD_DATA_MAXNUM 252
-#define MC_CMD_MUM_OUT_RAW_CMD_DATA_MAXNUM_MCDI2 1020
-
-/* MC_CMD_MUM_OUT_READ msgresponse */
-#define MC_CMD_MUM_OUT_READ_LENMIN 4
-#define MC_CMD_MUM_OUT_READ_LENMAX 252
-#define MC_CMD_MUM_OUT_READ_LENMAX_MCDI2 1020
-#define MC_CMD_MUM_OUT_READ_LEN(num) (0+4*(num))
-#define MC_CMD_MUM_OUT_READ_BUFFER_NUM(len) (((len)-0)/4)
-#define MC_CMD_MUM_OUT_READ_BUFFER_OFST 0
-#define MC_CMD_MUM_OUT_READ_BUFFER_LEN 4
-#define MC_CMD_MUM_OUT_READ_BUFFER_MINNUM 1
-#define MC_CMD_MUM_OUT_READ_BUFFER_MAXNUM 63
-#define MC_CMD_MUM_OUT_READ_BUFFER_MAXNUM_MCDI2 255
-
-/* MC_CMD_MUM_OUT_WRITE msgresponse */
-#define MC_CMD_MUM_OUT_WRITE_LEN 0
-
-/* MC_CMD_MUM_OUT_LOG msgresponse */
-#define MC_CMD_MUM_OUT_LOG_LEN 0
-
-/* MC_CMD_MUM_OUT_LOG_OP_UART msgresponse */
-#define MC_CMD_MUM_OUT_LOG_OP_UART_LEN 0
-
-/* MC_CMD_MUM_OUT_GPIO_IN_READ msgresponse */
-#define MC_CMD_MUM_OUT_GPIO_IN_READ_LEN 8
-/* The first 32-bit word read from the GPIO IN register. */
-#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK1_OFST 0
-#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK1_LEN 4
-/* The second 32-bit word read from the GPIO IN register. */
-#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK2_OFST 4
-#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK2_LEN 4
-
-/* MC_CMD_MUM_OUT_GPIO_OUT_WRITE msgresponse */
-#define MC_CMD_MUM_OUT_GPIO_OUT_WRITE_LEN 0
-
-/* MC_CMD_MUM_OUT_GPIO_OUT_READ msgresponse */
-#define MC_CMD_MUM_OUT_GPIO_OUT_READ_LEN 8
-/* The first 32-bit word read from the GPIO OUT register. */
-#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK1_OFST 0
-#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK1_LEN 4
-/* The second 32-bit word read from the GPIO OUT register. */
-#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK2_OFST 4
-#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK2_LEN 4
-
-/* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE msgresponse */
-#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE_LEN 0
-
-/* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ msgresponse */
-#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_LEN 8
-#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK1_OFST 0
-#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK1_LEN 4
-#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK2_OFST 4
-#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK2_LEN 4
-
-/* MC_CMD_MUM_OUT_GPIO_OP_OUT_READ msgresponse */
-#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_LEN 4
-#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_BIT_READ_OFST 0
-#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_BIT_READ_LEN 4
-
-/* MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE msgresponse */
-#define MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE_LEN 0
-
-/* MC_CMD_MUM_OUT_GPIO_OP_OUT_CONFIG msgresponse */
-#define MC_CMD_MUM_OUT_GPIO_OP_OUT_CONFIG_LEN 0
-
-/* MC_CMD_MUM_OUT_GPIO_OP_OUT_ENABLE msgresponse */
-#define MC_CMD_MUM_OUT_GPIO_OP_OUT_ENABLE_LEN 0
-
-/* MC_CMD_MUM_OUT_READ_SENSORS msgresponse */
-#define MC_CMD_MUM_OUT_READ_SENSORS_LENMIN 4
-#define MC_CMD_MUM_OUT_READ_SENSORS_LENMAX 252
-#define MC_CMD_MUM_OUT_READ_SENSORS_LENMAX_MCDI2 1020
-#define MC_CMD_MUM_OUT_READ_SENSORS_LEN(num) (0+4*(num))
-#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_NUM(len) (((len)-0)/4)
-#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_OFST 0
-#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_LEN 4
-#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_MINNUM 1
-#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_MAXNUM 63
-#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_MAXNUM_MCDI2 255
-#define MC_CMD_MUM_OUT_READ_SENSORS_READING_OFST 0
-#define MC_CMD_MUM_OUT_READ_SENSORS_READING_LBN 0
-#define MC_CMD_MUM_OUT_READ_SENSORS_READING_WIDTH 16
-#define MC_CMD_MUM_OUT_READ_SENSORS_STATE_OFST 0
-#define MC_CMD_MUM_OUT_READ_SENSORS_STATE_LBN 16
-#define MC_CMD_MUM_OUT_READ_SENSORS_STATE_WIDTH 8
-#define MC_CMD_MUM_OUT_READ_SENSORS_TYPE_OFST 0
-#define MC_CMD_MUM_OUT_READ_SENSORS_TYPE_LBN 24
-#define MC_CMD_MUM_OUT_READ_SENSORS_TYPE_WIDTH 8
-
-/* MC_CMD_MUM_OUT_PROGRAM_CLOCKS msgresponse */
-#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_LEN 4
-#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_OK_MASK_OFST 0
-#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_OK_MASK_LEN 4
-
-/* MC_CMD_MUM_OUT_FPGA_LOAD msgresponse */
-#define MC_CMD_MUM_OUT_FPGA_LOAD_LEN 0
-
-/* MC_CMD_MUM_OUT_READ_ATB_SENSOR msgresponse */
-#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_LEN 4
-#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_RESULT_OFST 0
-#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_RESULT_LEN 4
-
-/* MC_CMD_MUM_OUT_QSFP_INIT msgresponse */
-#define MC_CMD_MUM_OUT_QSFP_INIT_LEN 0
-
-/* MC_CMD_MUM_OUT_QSFP_RECONFIGURE msgresponse */
-#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_LEN 8
-#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LP_CAP_OFST 0
-#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LP_CAP_LEN 4
-#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_FLAGS_OFST 4
-#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_FLAGS_LEN 4
-#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_OFST 4
-#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_LBN 0
-#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_WIDTH 1
-#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_OFST 4
-#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_LBN 1
-#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_WIDTH 1
-
-/* MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP msgresponse */
-#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_LEN 4
-#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_PORT_PHY_LP_CAP_OFST 0
-#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_PORT_PHY_LP_CAP_LEN 4
-
-/* MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO msgresponse */
-#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMIN 5
-#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMAX 252
-#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMAX_MCDI2 1020
-#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LEN(num) (4+1*(num))
-#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_NUM(len) (((len)-4)/1)
-/* in bytes */
-#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_OFST 0
-#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_LEN 4
-#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_OFST 4
-#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_LEN 1
-#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MINNUM 1
-#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MAXNUM 248
-#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MAXNUM_MCDI2 1016
-
-/* MC_CMD_MUM_OUT_QSFP_FILL_STATS msgresponse */
-#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_LEN 8
-#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PMA_PMD_LINK_UP_OFST 0
-#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PMA_PMD_LINK_UP_LEN 4
-#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PCS_LINK_UP_OFST 4
-#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PCS_LINK_UP_LEN 4
-
-/* MC_CMD_MUM_OUT_QSFP_POLL_BIST msgresponse */
-#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_LEN 4
-#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_OFST 0
-#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_LEN 4
-
-/* MC_CMD_MUM_OUT_READ_DDR_INFO msgresponse */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_LENMIN 24
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_LENMAX 248
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_LENMAX_MCDI2 1016
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_LEN(num) (8+8*(num))
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_NUM(len) (((len)-8)/8)
-/* Discrete (soldered) DDR resistor strap info */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_DISCRETE_DDR_INFO_OFST 0
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_DISCRETE_DDR_INFO_LEN 4
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_OFST 0
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_LBN 0
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_WIDTH 16
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_OFST 0
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_LBN 16
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_WIDTH 16
-/* Number of SODIMM info records */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_RECORDS_OFST 4
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_RECORDS_LEN 4
-/* Array of SODIMM info records */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_OFST 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LEN 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LO_OFST 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LO_LEN 4
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LO_LBN 64
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LO_WIDTH 32
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_HI_OFST 12
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_HI_LEN 4
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_HI_LBN 96
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_HI_WIDTH 32
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MINNUM 2
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MAXNUM 30
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MAXNUM_MCDI2 126
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK_ID_OFST 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK_ID_LBN 0
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK_ID_WIDTH 8
-/* enum: SODIMM bank 1 (Top SODIMM for Sorrento) */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK1 0x0
-/* enum: SODIMM bank 2 (Bottom SODDIMM for Sorrento) */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK2 0x1
-/* enum: Total number of SODIMM banks */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_BANKS 0x2
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_TYPE_OFST 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_TYPE_LBN 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_TYPE_WIDTH 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_RANK_OFST 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_RANK_LBN 16
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_RANK_WIDTH 4
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_VOLTAGE_OFST 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_VOLTAGE_LBN 20
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_VOLTAGE_WIDTH 4
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_NOT_POWERED 0x0 /* enum */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V25 0x1 /* enum */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V35 0x2 /* enum */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V5 0x3 /* enum */
-/* enum: Values 5-15 are reserved for future usage */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V8 0x4
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SIZE_OFST 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SIZE_LBN 24
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SIZE_WIDTH 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SPEED_OFST 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SPEED_LBN 32
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SPEED_WIDTH 16
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_OFST 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_LBN 48
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_WIDTH 4
-/* enum: No module present */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_ABSENT 0x0
-/* enum: Module present supported and powered on */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_POWERED 0x1
-/* enum: Module present but bad type */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_TYPE 0x2
-/* enum: Module present but incompatible voltage */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_VOLTAGE 0x3
-/* enum: Module present but unknown SPD */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_SPD 0x4
-/* enum: Module present but slot cannot support it */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_SLOT 0x5
-/* enum: Modules may or may not be present, but cannot establish contact by I2C
- */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_NOT_REACHABLE 0x6
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED2_OFST 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED2_LBN 52
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED2_WIDTH 12
-
/* MC_CMD_DYNAMIC_SENSORS_LIMITS structuredef: Set of sensor limits. This
* should match the equivalent structure in the sensor_query SPHINX service.
*/
@@ -9500,27 +9137,22 @@
* and a generation count for this version of the sensor table. On systems
* advertising the DYNAMIC_SENSORS capability bit, this replaces the
* MC_CMD_READ_SENSORS command. On multi-MC systems this may include sensors
- * added by the NMC.
- *
- * Sensor handles are persistent for the lifetime of the sensor and are used to
- * identify sensors in MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS and
- * MC_CMD_DYNAMIC_SENSORS_GET_VALUES.
- *
- * The generation count is maintained by the MC, is persistent across reboots
- * and will be incremented each time the sensor table is modified. When the
- * table is modified, a CODE_DYNAMIC_SENSORS_CHANGE event will be generated
- * containing the new generation count. The driver should compare this against
- * the current generation count, and if it is different, call
- * MC_CMD_DYNAMIC_SENSORS_LIST again to update it's copy of the sensor table.
- *
- * The sensor count is provided to allow a future path to supporting more than
+ * added by the NMC. Sensor handles are persistent for the lifetime of the
+ * sensor and are used to identify sensors in
+ * MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS and
+ * MC_CMD_DYNAMIC_SENSORS_GET_VALUES. The generation count is maintained by the
+ * MC, is persistent across reboots and will be incremented each time the
+ * sensor table is modified. When the table is modified, a
+ * CODE_DYNAMIC_SENSORS_CHANGE event will be generated containing the new
+ * generation count. The driver should compare this against the current
+ * generation count, and if it is different, call MC_CMD_DYNAMIC_SENSORS_LIST
+ * again to update its copy of the sensor table. The sensor count is provided
+ * to allow a future path to supporting more than
* MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_HANDLES_MAXNUM_MCDI2 sensors, i.e.
* the maximum number that will fit in a single response. As this is a fairly
* large number (253) it is not anticipated that this will be needed in the
- * near future, so can currently be ignored.
- *
- * On Riverhead this command is implemented as a wrapper for `list` in the
- * sensor_query SPHINX service.
+ * near future, so can currently be ignored. On Riverhead this command is
+ * implemented as a wrapper for `list` in the sensor_query SPHINX service.
*/
#define MC_CMD_DYNAMIC_SENSORS_LIST 0x66
#undef MC_CMD_0x66_PRIVILEGE_CTG
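The generation-count protocol described above reduces to a simple driver-side rule: cache the count returned by MC_CMD_DYNAMIC_SENSORS_LIST, and whenever a CODE_DYNAMIC_SENSORS_CHANGE event carries a different value, re-issue the LIST command before trusting any cached handles. A minimal sketch of that check, using hypothetical helper names (the LIST response layout itself is defined elsewhere in this header):

#include <stdint.h>

/* Hypothetical: issues MC_CMD_DYNAMIC_SENSORS_LIST, refreshes the cached
 * handle table and stores the generation count it returned.
 */
int refresh_sensor_table(uint32_t *cached_generation);

/* Called from the event handler for CODE_DYNAMIC_SENSORS_CHANGE. */
int handle_sensors_change_event(uint32_t event_generation,
				uint32_t *cached_generation)
{
	if (event_generation == *cached_generation)
		return 0;	/* table unchanged, nothing to do */
	/* Table was modified by the MC (or NMC); re-list before using handles. */
	return refresh_sensor_table(cached_generation);
}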
@@ -9557,15 +9189,13 @@
/***********************************/
/* MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS
* Get descriptions for a set of sensors, specified as an array of sensor
- * handles as returned by MC_CMD_DYNAMIC_SENSORS_LIST
- *
- * Any handles which do not correspond to a sensor currently managed by the MC
- * will be dropped from from the response. This may happen when a sensor table
- * update is in progress, and effectively means the set of usable sensors is
- * the intersection between the sets of sensors known to the driver and the MC.
- *
- * On Riverhead this command is implemented as a wrapper for
- * `get_descriptions` in the sensor_query SPHINX service.
+ * handles as returned by MC_CMD_DYNAMIC_SENSORS_LIST. Any handles which do not
+ * correspond to a sensor currently managed by the MC will be dropped from from
+ * the response. This may happen when a sensor table update is in progress, and
+ * effectively means the set of usable sensors is the intersection between the
+ * sets of sensors known to the driver and the MC. On Riverhead this command is
+ * implemented as a wrapper for `get_descriptions` in the sensor_query SPHINX
+ * service.
*/
#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS 0x67
#undef MC_CMD_0x67_PRIVILEGE_CTG
@@ -9602,19 +9232,15 @@
/***********************************/
/* MC_CMD_DYNAMIC_SENSORS_GET_READINGS
* Read the state and value for a set of sensors, specified as an array of
- * sensor handles as returned by MC_CMD_DYNAMIC_SENSORS_LIST.
- *
- * In the case of a broken sensor, then the state of the response's
- * MC_CMD_DYNAMIC_SENSORS_VALUE entry will be set to BROKEN, and any value
- * provided should be treated as erroneous.
- *
- * Any handles which do not correspond to a sensor currently managed by the MC
- * will be dropped from from the response. This may happen when a sensor table
- * update is in progress, and effectively means the set of usable sensors is
- * the intersection between the sets of sensors known to the driver and the MC.
- *
- * On Riverhead this command is implemented as a wrapper for `get_readings`
- * in the sensor_query SPHINX service.
+ * sensor handles as returned by MC_CMD_DYNAMIC_SENSORS_LIST. In the case of a
+ * broken sensor, then the state of the response's MC_CMD_DYNAMIC_SENSORS_VALUE
+ * entry will be set to BROKEN, and any value provided should be treated as
+ * erroneous. Any handles which do not correspond to a sensor currently managed
+ * by the MC will be dropped from the response. This may happen when a
+ * sensor table update is in progress, and effectively means the set of usable
+ * sensors is the intersection between the sets of sensors known to the driver
+ * and the MC. On Riverhead this command is implemented as a wrapper for
+ * `get_readings` in the sensor_query SPHINX service.
*/
#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS 0x68
#undef MC_CMD_0x68_PRIVILEGE_CTG
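Because handles may be silently dropped from the response and a reading may come back in the BROKEN state, a driver should match readings back to the handles it asked for rather than assuming positional correspondence. A sketch of that defensive lookup, with a hypothetical decoded entry structure and placeholder state constant (the real layout is MC_CMD_DYNAMIC_SENSORS_VALUE, defined elsewhere in this header):

#include <stdint.h>
#include <stdbool.h>

/* Hypothetical decoded form of one OUT_VALUES entry. */
struct sensor_reading {
	uint32_t handle;
	uint32_t state;
	int32_t value;
};
#define SENSOR_STATE_BROKEN 0xffffffffu	/* placeholder, not from this header */

/* Look up the reading for one requested handle; tolerates the MC silently
 * dropping unknown handles and sensors reported as BROKEN.
 */
static bool find_reading(const struct sensor_reading *vals, unsigned int n,
			 uint32_t handle, int32_t *value)
{
	for (unsigned int i = 0; i < n; i++) {
		if (vals[i].handle != handle)
			continue;	/* dropped handles simply never match */
		if (vals[i].state == SENSOR_STATE_BROKEN)
			return false;	/* value must be treated as erroneous */
		*value = vals[i].value;
		return true;
	}
	return false;
}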
@@ -9647,45 +9273,1286 @@
#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_MAXNUM 21
#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_MAXNUM_MCDI2 85
+/* MC_CMD_MAC_FLAGS structuredef */
+#define MC_CMD_MAC_FLAGS_LEN 4
+/* The enums defined in this field are used as indices into the
+ * MC_CMD_MAC_FLAGS bitmask.
+ */
+#define MC_CMD_MAC_FLAGS_MASK_OFST 0
+#define MC_CMD_MAC_FLAGS_MASK_LEN 4
+/* enum property: bitshift */
+/* enum: Include the FCS in the packet data delivered to the host. Ignored if
+ * RX_INCLUDE_FCS not set in capabilities.
+ */
+#define MC_CMD_MAC_FLAGS_FLAG_INCLUDE_FCS 0x0
+#define MC_CMD_MAC_FLAGS_MASK_LBN 0
+#define MC_CMD_MAC_FLAGS_MASK_WIDTH 32
+
+/* MC_CMD_TRANSMISSION_MODE structuredef */
+#define MC_CMD_TRANSMISSION_MODE_LEN 4
+#define MC_CMD_TRANSMISSION_MODE_MASK_OFST 0
+#define MC_CMD_TRANSMISSION_MODE_MASK_LEN 4
+/* enum property: value */
+#define MC_CMD_TRANSMISSION_MODE_PROMSC_MODE 0x0 /* enum */
+#define MC_CMD_TRANSMISSION_MODE_UNCST_MODE 0x1 /* enum */
+#define MC_CMD_TRANSMISSION_MODE_BRDCST_MODE 0x2 /* enum */
+#define MC_CMD_TRANSMISSION_MODE_MASK_LBN 0
+#define MC_CMD_TRANSMISSION_MODE_MASK_WIDTH 32
+
+/* MC_CMD_MAC_CONFIG_OPTIONS structuredef */
+#define MC_CMD_MAC_CONFIG_OPTIONS_LEN 4
+#define MC_CMD_MAC_CONFIG_OPTIONS_MASK_OFST 0
+#define MC_CMD_MAC_CONFIG_OPTIONS_MASK_LEN 4
+/* enum property: bitmask */
+/* enum: Configure the MAC address. */
+#define MC_CMD_MAC_CONFIG_OPTIONS_CFG_ADDR 0x0
+/* enum: Configure the maximum frame length. */
+#define MC_CMD_MAC_CONFIG_OPTIONS_CFG_MAX_FRAME_LEN 0x1
+/* enum: Configure flow control. */
+#define MC_CMD_MAC_CONFIG_OPTIONS_CFG_FCNTL 0x2
+/* enum: Configure the transmission mode. */
+#define MC_CMD_MAC_CONFIG_OPTIONS_CFG_TRANSMISSION_MODE 0x3
+/* enum: Configure FCS. */
+#define MC_CMD_MAC_CONFIG_OPTIONS_CFG_INCLUDE_FCS 0x4
+#define MC_CMD_MAC_CONFIG_OPTIONS_MASK_LBN 0
+#define MC_CMD_MAC_CONFIG_OPTIONS_MASK_WIDTH 32
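The "enum property: bitshift"/"bitmask" annotations above mean these enum values are bit positions (indices into the mask), not literal mask values, so a flags field is built by shifting. A minimal illustration in plain C over the defines above:

#include <stdint.h>

/* Ask MC_CMD_MAC_CTRL to modify only the maximum frame length and flow
 * control: each CFG_* enum is a bit position, not a mask value.
 */
uint32_t control_flags = (1u << MC_CMD_MAC_CONFIG_OPTIONS_CFG_MAX_FRAME_LEN) |
			 (1u << MC_CMD_MAC_CONFIG_OPTIONS_CFG_FCNTL);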
+
+
+/***********************************/
+/* MC_CMD_MAC_CTRL
+ * Set MAC configuration. Return code: 0, EINVAL, ENOTSUP
+ */
+#define MC_CMD_MAC_CTRL 0x1df
+#undef MC_CMD_0x1df_PRIVILEGE_CTG
+
+#define MC_CMD_0x1df_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_MAC_CTRL_IN msgrequest */
+#define MC_CMD_MAC_CTRL_IN_LEN 32
+/* Handle for selected network port. */
+#define MC_CMD_MAC_CTRL_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_MAC_CTRL_IN_PORT_HANDLE_LEN 4
+/* Select which parameters to configure. A parameter will only be modified if
+ * the corresponding control flag is set.
+ */
+#define MC_CMD_MAC_CTRL_IN_CONTROL_FLAGS_OFST 4
+#define MC_CMD_MAC_CTRL_IN_CONTROL_FLAGS_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_MAC_CONFIG_OPTIONS/MASK */
+/* MAC address of the device. */
+#define MC_CMD_MAC_CTRL_IN_ADDR_OFST 8
+#define MC_CMD_MAC_CTRL_IN_ADDR_LEN 8
+#define MC_CMD_MAC_CTRL_IN_ADDR_LO_OFST 8
+#define MC_CMD_MAC_CTRL_IN_ADDR_LO_LEN 4
+#define MC_CMD_MAC_CTRL_IN_ADDR_LO_LBN 64
+#define MC_CMD_MAC_CTRL_IN_ADDR_LO_WIDTH 32
+#define MC_CMD_MAC_CTRL_IN_ADDR_HI_OFST 12
+#define MC_CMD_MAC_CTRL_IN_ADDR_HI_LEN 4
+#define MC_CMD_MAC_CTRL_IN_ADDR_HI_LBN 96
+#define MC_CMD_MAC_CTRL_IN_ADDR_HI_WIDTH 32
+/* Includes the ethernet header, optional VLAN tags, payload and FCS. */
+#define MC_CMD_MAC_CTRL_IN_MAX_FRAME_LEN_OFST 16
+#define MC_CMD_MAC_CTRL_IN_MAX_FRAME_LEN_LEN 4
+/* Settings for flow control. */
+#define MC_CMD_MAC_CTRL_IN_FCNTL_OFST 20
+#define MC_CMD_MAC_CTRL_IN_FCNTL_LEN 4
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_FCNTL/MASK */
+/* Configure the MAC to transmit in one of promiscuous, unicast or broadcast
+ * mode.
+ */
+#define MC_CMD_MAC_CTRL_IN_TRANSMISSION_MODE_OFST 24
+#define MC_CMD_MAC_CTRL_IN_TRANSMISSION_MODE_LEN 4
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_TRANSMISSION_MODE/MASK */
+/* Flags to control and expand the configuration of the MAC. */
+#define MC_CMD_MAC_CTRL_IN_FLAGS_OFST 28
+#define MC_CMD_MAC_CTRL_IN_FLAGS_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_MAC_FLAGS/MASK */
+
+/* MC_CMD_MAC_CTRL_IN_V2 msgrequest: Updated MAC_CTRL with QBB mask */
+#define MC_CMD_MAC_CTRL_IN_V2_LEN 33
+/* Handle for selected network port. */
+#define MC_CMD_MAC_CTRL_IN_V2_PORT_HANDLE_OFST 0
+#define MC_CMD_MAC_CTRL_IN_V2_PORT_HANDLE_LEN 4
+/* Select which parameters to configure. A parameter will only be modified if
+ * the corresponding control flag is set.
+ */
+#define MC_CMD_MAC_CTRL_IN_V2_CONTROL_FLAGS_OFST 4
+#define MC_CMD_MAC_CTRL_IN_V2_CONTROL_FLAGS_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_MAC_CONFIG_OPTIONS/MASK */
+/* MAC address of the device. */
+#define MC_CMD_MAC_CTRL_IN_V2_ADDR_OFST 8
+#define MC_CMD_MAC_CTRL_IN_V2_ADDR_LEN 8
+#define MC_CMD_MAC_CTRL_IN_V2_ADDR_LO_OFST 8
+#define MC_CMD_MAC_CTRL_IN_V2_ADDR_LO_LEN 4
+#define MC_CMD_MAC_CTRL_IN_V2_ADDR_LO_LBN 64
+#define MC_CMD_MAC_CTRL_IN_V2_ADDR_LO_WIDTH 32
+#define MC_CMD_MAC_CTRL_IN_V2_ADDR_HI_OFST 12
+#define MC_CMD_MAC_CTRL_IN_V2_ADDR_HI_LEN 4
+#define MC_CMD_MAC_CTRL_IN_V2_ADDR_HI_LBN 96
+#define MC_CMD_MAC_CTRL_IN_V2_ADDR_HI_WIDTH 32
+/* Includes the ethernet header, optional VLAN tags, payload and FCS. */
+#define MC_CMD_MAC_CTRL_IN_V2_MAX_FRAME_LEN_OFST 16
+#define MC_CMD_MAC_CTRL_IN_V2_MAX_FRAME_LEN_LEN 4
+/* Settings for flow control. */
+#define MC_CMD_MAC_CTRL_IN_V2_FCNTL_OFST 20
+#define MC_CMD_MAC_CTRL_IN_V2_FCNTL_LEN 4
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_FCNTL/MASK */
+/* Configure the MAC to transmit in one of promiscuous, unicast or broadcast
+ * mode.
+ */
+#define MC_CMD_MAC_CTRL_IN_V2_TRANSMISSION_MODE_OFST 24
+#define MC_CMD_MAC_CTRL_IN_V2_TRANSMISSION_MODE_LEN 4
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_TRANSMISSION_MODE/MASK */
+/* Flags to control and expand the configuration of the MAC. */
+#define MC_CMD_MAC_CTRL_IN_V2_FLAGS_OFST 28
+#define MC_CMD_MAC_CTRL_IN_V2_FLAGS_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_MAC_FLAGS/MASK */
+/* Priority-based flow control mask (QBB). PRIO7 corresponds to the highest
+ * priority, and PRIO0 to the lowest. This field is only used when CFG_FCNTL is
+ * set and FCNTL is QBB.
+ */
+#define MC_CMD_MAC_CTRL_IN_V2_PRIO_FCNTL_MASK_OFST 32
+#define MC_CMD_MAC_CTRL_IN_V2_PRIO_FCNTL_MASK_LEN 1
+/* enum property: bitmask */
+#define MC_CMD_MAC_CTRL_IN_V2_QBB_PRIO0 0x0 /* enum */
+#define MC_CMD_MAC_CTRL_IN_V2_QBB_PRIO1 0x1 /* enum */
+#define MC_CMD_MAC_CTRL_IN_V2_QBB_PRIO2 0x2 /* enum */
+#define MC_CMD_MAC_CTRL_IN_V2_QBB_PRIO3 0x3 /* enum */
+#define MC_CMD_MAC_CTRL_IN_V2_QBB_PRIO4 0x4 /* enum */
+#define MC_CMD_MAC_CTRL_IN_V2_QBB_PRIO5 0x5 /* enum */
+#define MC_CMD_MAC_CTRL_IN_V2_QBB_PRIO6 0x6 /* enum */
+#define MC_CMD_MAC_CTRL_IN_V2_QBB_PRIO7 0x7 /* enum */
+
+/* MC_CMD_MAC_CTRL_OUT msgresponse */
+#define MC_CMD_MAC_CTRL_OUT_LEN 0
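A sketch of assembling an MC_CMD_MAC_CTRL_IN request that changes only the maximum frame length, using the offsets defined above. put_le32() and mcdi_rpc() are hypothetical helpers standing in for whatever MCDI transport a driver uses; MCDI dword fields are assumed little-endian here, as in the existing sfc MCDI helpers:

#include <stdint.h>
#include <string.h>

/* Hypothetical MCDI transport wrapper; any real driver has its own. */
int mcdi_rpc(unsigned cmd, const void *in, size_t inlen, void *out, size_t outlen);

static void put_le32(uint8_t *buf, unsigned int ofst, uint32_t v)
{
	buf[ofst + 0] = v & 0xff;
	buf[ofst + 1] = (v >> 8) & 0xff;
	buf[ofst + 2] = (v >> 16) & 0xff;
	buf[ofst + 3] = (v >> 24) & 0xff;
}

int mac_set_max_frame_len(uint32_t port_handle, uint32_t max_frame_len)
{
	uint8_t inbuf[MC_CMD_MAC_CTRL_IN_LEN];

	memset(inbuf, 0, sizeof(inbuf));
	put_le32(inbuf, MC_CMD_MAC_CTRL_IN_PORT_HANDLE_OFST, port_handle);
	/* Only fields whose CFG_* bit is set in CONTROL_FLAGS are modified. */
	put_le32(inbuf, MC_CMD_MAC_CTRL_IN_CONTROL_FLAGS_OFST,
		 1u << MC_CMD_MAC_CONFIG_OPTIONS_CFG_MAX_FRAME_LEN);
	put_le32(inbuf, MC_CMD_MAC_CTRL_IN_MAX_FRAME_LEN_OFST, max_frame_len);

	return mcdi_rpc(MC_CMD_MAC_CTRL, inbuf, sizeof(inbuf), NULL, 0);
}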
+
+
+/***********************************/
+/* MC_CMD_MAC_STATE
+ * Read the MAC state. Return code: 0, ETIME.
+ */
+#define MC_CMD_MAC_STATE 0x1e0
+#undef MC_CMD_0x1e0_PRIVILEGE_CTG
+
+#define MC_CMD_0x1e0_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_MAC_STATE_IN msgrequest */
+#define MC_CMD_MAC_STATE_IN_LEN 4
+/* Handle for selected network port. */
+#define MC_CMD_MAC_STATE_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_MAC_STATE_IN_PORT_HANDLE_LEN 4
+
+/* MC_CMD_MAC_STATE_OUT msgresponse */
+#define MC_CMD_MAC_STATE_OUT_LEN 32
+/* The configured maximum frame length of the MAC. */
+#define MC_CMD_MAC_STATE_OUT_MAX_FRAME_LEN_OFST 0
+#define MC_CMD_MAC_STATE_OUT_MAX_FRAME_LEN_LEN 4
+/* This returns the negotiated flow control value. */
+#define MC_CMD_MAC_STATE_OUT_FCNTL_OFST 4
+#define MC_CMD_MAC_STATE_OUT_FCNTL_LEN 4
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_FCNTL/MASK */
+/* MAC address of the device. */
+#define MC_CMD_MAC_STATE_OUT_ADDR_OFST 8
+#define MC_CMD_MAC_STATE_OUT_ADDR_LEN 8
+#define MC_CMD_MAC_STATE_OUT_ADDR_LO_OFST 8
+#define MC_CMD_MAC_STATE_OUT_ADDR_LO_LEN 4
+#define MC_CMD_MAC_STATE_OUT_ADDR_LO_LBN 64
+#define MC_CMD_MAC_STATE_OUT_ADDR_LO_WIDTH 32
+#define MC_CMD_MAC_STATE_OUT_ADDR_HI_OFST 12
+#define MC_CMD_MAC_STATE_OUT_ADDR_HI_LEN 4
+#define MC_CMD_MAC_STATE_OUT_ADDR_HI_LBN 96
+#define MC_CMD_MAC_STATE_OUT_ADDR_HI_WIDTH 32
+/* Flags indicating MAC faults. */
+#define MC_CMD_MAC_STATE_OUT_MAC_FAULT_FLAGS_OFST 16
+#define MC_CMD_MAC_STATE_OUT_MAC_FAULT_FLAGS_LEN 4
+/* enum property: bitshift */
+/* enum: Indicates a local MAC fault. */
+#define MC_CMD_MAC_STATE_OUT_LOCAL 0x0
+/* enum: Indicates a remote MAC fault. */
+#define MC_CMD_MAC_STATE_OUT_REMOTE 0x1
+/* enum: Indicates a pending reconfiguration of the MAC. */
+#define MC_CMD_MAC_STATE_OUT_PENDING_RECONFIG 0x2
+/* The flags that were used to configure the MAC. This is a copy of the FLAGS
+ * field in the MC_CMD_MAC_CTRL_IN command.
+ */
+#define MC_CMD_MAC_STATE_OUT_FLAGS_OFST 20
+#define MC_CMD_MAC_STATE_OUT_FLAGS_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_MAC_FLAGS/MASK */
+/* The transmission mode that was used to configure the MAC. This is a copy of
+ * the TRANSMISSION_MODE field in the MC_CMD_MAC_CTRL_IN command.
+ */
+#define MC_CMD_MAC_STATE_OUT_TRANSMISSION_MODE_OFST 24
+#define MC_CMD_MAC_STATE_OUT_TRANSMISSION_MODE_LEN 4
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_TRANSMISSION_MODE/MASK */
+/* The control flags that were used to configure the MAC. This is a copy of the
+ * CONTROL field in the MC_CMD_MAC_CTRL_IN command.
+ */
+#define MC_CMD_MAC_STATE_OUT_CONTROL_FLAGS_OFST 28
+#define MC_CMD_MAC_STATE_OUT_CONTROL_FLAGS_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_MAC_CONFIG_OPTIONS/MASK */
+
+
+/***********************************/
+/* MC_CMD_GET_ASSIGNED_PORT_HANDLE
+ * Obtain a handle that can be operated on to configure and query the status of
+ * the link. ENOENT is returned when no port is assigned to the client. Return
+ * code: 0, ENOENT
+ */
+#define MC_CMD_GET_ASSIGNED_PORT_HANDLE 0x1e2
+#undef MC_CMD_0x1e2_PRIVILEGE_CTG
+
+#define MC_CMD_0x1e2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_ASSIGNED_PORT_HANDLE_IN msgrequest */
+#define MC_CMD_GET_ASSIGNED_PORT_HANDLE_IN_LEN 0
+
+/* MC_CMD_GET_ASSIGNED_PORT_HANDLE_OUT msgresponse */
+#define MC_CMD_GET_ASSIGNED_PORT_HANDLE_OUT_LEN 4
+/* Handle for assigned port. */
+#define MC_CMD_GET_ASSIGNED_PORT_HANDLE_OUT_PORT_HANDLE_OFST 0
+#define MC_CMD_GET_ASSIGNED_PORT_HANDLE_OUT_PORT_HANDLE_LEN 4
+
+/* MC_CMD_STAT_ID structuredef */
+#define MC_CMD_STAT_ID_LEN 4
+#define MC_CMD_STAT_ID_SOURCE_ID_OFST 0
+#define MC_CMD_STAT_ID_SOURCE_ID_LEN 2
+/* enum property: index */
+/* enum: Internal markers (generation start and end markers) */
+#define MC_CMD_STAT_ID_MARKER 0x1
+/* enum: Network port MAC statistics. */
+#define MC_CMD_STAT_ID_MAC 0x2
+/* enum: Network port PHY statistics. */
+#define MC_CMD_STAT_ID_PHY 0x3
+#define MC_CMD_STAT_ID_SOURCE_ID_LBN 0
+#define MC_CMD_STAT_ID_SOURCE_ID_WIDTH 16
+#define MC_CMD_STAT_ID_MARKER_STAT_ID_OFST 2
+#define MC_CMD_STAT_ID_MARKER_STAT_ID_LEN 2
+/* enum property: index */
+/* enum: This value is used to mark the start of a generation of statistics for
+ * DMA synchronization. It is incremented whenever a new set of statistics is
+ * transferred. Always the first entry in the DMA buffer.
+ */
+#define MC_CMD_STAT_ID_GENERATION_START 0x1
+/* enum: This value is used to mark the end of a generation of statistics for
+ * DMA synchronization. Always the last entry in the DMA buffer and set to the
+ * same value as GENERATION_START. The host driver must compare the
+ * GENERATION_START and GENERATION_END values to verify that the DMA buffer is
+ * consistent after copying the DMA buffer. If they do not match, it means
+ * that a new DMA transfer has started while the host driver was copying the DMA
+ * buffer. In this case, the host driver must repeat the copy operation.
+ */
+#define MC_CMD_STAT_ID_GENERATION_END 0x2
+#define MC_CMD_STAT_ID_MARKER_STAT_ID_LBN 16
+#define MC_CMD_STAT_ID_MARKER_STAT_ID_WIDTH 16
+#define MC_CMD_STAT_ID_MAC_STAT_ID_OFST 2
+#define MC_CMD_STAT_ID_MAC_STAT_ID_LEN 2
+/* enum property: index */
+/* enum: Total number of packets transmitted (includes pause frames). */
+#define MC_CMD_STAT_ID_TX_PKTS 0x1
+/* enum: Pause frames transmitted. */
+#define MC_CMD_STAT_ID_TX_PAUSE_PKTS 0x2
+/* enum: Control frames transmitted. */
+#define MC_CMD_STAT_ID_TX_CONTROL_PKTS 0x3
+/* enum: Unicast packets transmitted (includes pause frames). */
+#define MC_CMD_STAT_ID_TX_UNICAST_PKTS 0x4
+/* enum: Multicast packets transmitted (includes pause frames). */
+#define MC_CMD_STAT_ID_TX_MULTICAST_PKTS 0x5
+/* enum: Broadcast packets transmitted (includes pause frames). */
+#define MC_CMD_STAT_ID_TX_BROADCAST_PKTS 0x6
+/* enum: Bytes transmitted (includes pause frames). */
+#define MC_CMD_STAT_ID_TX_BYTES 0x7
+/* enum: Bytes transmitted with bad CRC. */
+#define MC_CMD_STAT_ID_TX_BAD_BYTES 0x8
+/* enum: Bytes transmitted with good CRC. */
+#define MC_CMD_STAT_ID_TX_GOOD_BYTES 0x9
+/* enum: Packets transmitted with length less than 64 bytes. */
+#define MC_CMD_STAT_ID_TX_LT64_PKTS 0xa
+/* enum: Packets transmitted with length equal to 64 bytes. */
+#define MC_CMD_STAT_ID_TX_64_PKTS 0xb
+/* enum: Packets transmitted with length between 65 and 127 bytes. */
+#define MC_CMD_STAT_ID_TX_65_TO_127_PKTS 0xc
+/* enum: Packets transmitted with length between 128 and 255 bytes. */
+#define MC_CMD_STAT_ID_TX_128_TO_255_PKTS 0xd
+/* enum: Packets transmitted with length between 256 and 511 bytes. */
+#define MC_CMD_STAT_ID_TX_256_TO_511_PKTS 0xe
+/* enum: Packets transmitted with length between 512 and 1023 bytes. */
+#define MC_CMD_STAT_ID_TX_512_TO_1023_PKTS 0xf
+/* enum: Packets transmitted with length between 1024 and 1518 bytes. */
+#define MC_CMD_STAT_ID_TX_1024_TO_15XX_PKTS 0x10
+/* enum: Packets transmitted with length between 1519 and 9216 bytes. */
+#define MC_CMD_STAT_ID_TX_15XX_TO_JUMBO_PKTS 0x11
+/* enum: Packets transmitted with length greater than 9216 bytes. */
+#define MC_CMD_STAT_ID_TX_GTJUMBO_PKTS 0x12
+/* enum: Packets transmitted with bad FCS. */
+#define MC_CMD_STAT_ID_TX_BAD_FCS_PKTS 0x13
+/* enum: Packets transmitted with good FCS. */
+#define MC_CMD_STAT_ID_TX_GOOD_FCS_PKTS 0x14
+/* enum: Packets received. */
+#define MC_CMD_STAT_ID_RX_PKTS 0x15
+/* enum: Pause frames received. */
+#define MC_CMD_STAT_ID_RX_PAUSE_PKTS 0x16
+/* enum: Total number of good packets received. */
+#define MC_CMD_STAT_ID_RX_GOOD_PKTS 0x17
+/* enum: Total number of BAD packets received. */
+#define MC_CMD_STAT_ID_RX_BAD_PKTS 0x18
+/* enum: Total number of control frames received. */
+#define MC_CMD_STAT_ID_RX_CONTROL_PKTS 0x19
+/* enum: Total number of unicast packets received. */
+#define MC_CMD_STAT_ID_RX_UNICAST_PKTS 0x1a
+/* enum: Total number of multicast packets received. */
+#define MC_CMD_STAT_ID_RX_MULTICAST_PKTS 0x1b
+/* enum: Total number of broadcast packets received. */
+#define MC_CMD_STAT_ID_RX_BROADCAST_PKTS 0x1c
+/* enum: Total number of bytes received. */
+#define MC_CMD_STAT_ID_RX_BYTES 0x1d
+/* enum: Total number of bytes received with bad CRC. */
+#define MC_CMD_STAT_ID_RX_BAD_BYTES 0x1e
+/* enum: Total number of bytes received with GOOD CRC. */
+#define MC_CMD_STAT_ID_RX_GOOD_BYTES 0x1f
+/* enum: Packets received with length equal to 64 bytes. */
+#define MC_CMD_STAT_ID_RX_64_PKTS 0x20
+/* enum: Packets received with length between 65 and 127 bytes. */
+#define MC_CMD_STAT_ID_RX_65_TO_127_PKTS 0x21
+/* enum: Packets received with length between 128 and 255 bytes. */
+#define MC_CMD_STAT_ID_RX_128_TO_255_PKTS 0x22
+/* enum: Packets received with length between 256 and 511 bytes. */
+#define MC_CMD_STAT_ID_RX_256_TO_511_PKTS 0x23
+/* enum: Packets received with length between 512 and 1023 bytes. */
+#define MC_CMD_STAT_ID_RX_512_TO_1023_PKTS 0x24
+/* enum: Packets received with length between 1024 and 1518 bytes. */
+#define MC_CMD_STAT_ID_RX_1024_TO_15XX_PKTS 0x25
+/* enum: Packets received with length between 1519 and 9216 bytes. */
+#define MC_CMD_STAT_ID_RX_15XX_TO_JUMBO_PKTS 0x26
+/* enum: Packets received with length greater than 9216 bytes. */
+#define MC_CMD_STAT_ID_RX_GTJUMBO_PKTS 0x27
+/* enum: Packets received with length less than 64 bytes. */
+#define MC_CMD_STAT_ID_RX_UNDERSIZE_PKTS 0x28
+/* enum: Packets received with bad FCS. */
+#define MC_CMD_STAT_ID_RX_BAD_FCS_PKTS 0x29
+/* enum: Packets received with GOOD FCS. */
+#define MC_CMD_STAT_ID_RX_GOOD_FCS_PKTS 0x2a
+/* enum: Packets received with overflow. */
+#define MC_CMD_STAT_ID_RX_OVERFLOW_PKTS 0x2b
+/* enum: Packets received with symbol error. */
+#define MC_CMD_STAT_ID_RX_SYMBOL_ERROR_PKTS 0x2c
+/* enum: Packets received with alignment error. */
+#define MC_CMD_STAT_ID_RX_ALIGN_ERROR_PKTS 0x2d
+/* enum: Packets received with length error. */
+#define MC_CMD_STAT_ID_RX_LENGTH_ERROR_PKTS 0x2e
+/* enum: Packets received with internal error. */
+#define MC_CMD_STAT_ID_RX_INTERNAL_ERROR_PKTS 0x2f
+/* enum: Packets received with jabber. These packets are larger than the
+ * allowed maximum receive unit (MRU). This indicates that a packet either has
+ * a bad CRC or has an RX error.
+ */
+#define MC_CMD_STAT_ID_RX_JABBER_PKTS 0x30
+/* enum: Packets dropped due to having no descriptor. This is a datapath stat.
+ */
+#define MC_CMD_STAT_ID_RX_NODESC_DROPS 0x31
+/* enum: Packets received with lanes 0 and 1 character error. */
+#define MC_CMD_STAT_ID_RX_LANES01_CHAR_ERR 0x32
+/* enum: Packets received with lanes 2 and 3 character error. */
+#define MC_CMD_STAT_ID_RX_LANES23_CHAR_ERR 0x33
+/* enum: Packets received with lanes 0 and 1 disparity error. */
+#define MC_CMD_STAT_ID_RX_LANES01_DISP_ERR 0x34
+/* enum: Packets received with lanes 2 and 3 disparity error. */
+#define MC_CMD_STAT_ID_RX_LANES23_DISP_ERR 0x35
+/* enum: Packets received with match fault. */
+#define MC_CMD_STAT_ID_RX_MATCH_FAULT 0x36
+#define MC_CMD_STAT_ID_MAC_STAT_ID_LBN 16
+#define MC_CMD_STAT_ID_MAC_STAT_ID_WIDTH 16
+/* Include FEC stats. */
+#define MC_CMD_STAT_ID_PHY_STAT_ID_OFST 2
+#define MC_CMD_STAT_ID_PHY_STAT_ID_LEN 2
+/* enum property: index */
+/* enum: Number of uncorrected FEC codewords on link (RS-FEC only from Medford2
+ * onwards)
+ */
+#define MC_CMD_STAT_ID_FEC_UNCORRECTED_ERRORS 0x1
+/* enum: Number of corrected FEC codewords on link (RS-FEC only from Medford2
+ * onwards)
+ */
+#define MC_CMD_STAT_ID_FEC_CORRECTED_ERRORS 0x2
+/* enum: Number of corrected 10-bit symbol errors, lane 0 (RS-FEC only) */
+#define MC_CMD_STAT_ID_FEC_CORRECTED_SYMBOLS_LANE0 0x3
+/* enum: Number of corrected 10-bit symbol errors, lane 1 (RS-FEC only) */
+#define MC_CMD_STAT_ID_FEC_CORRECTED_SYMBOLS_LANE1 0x4
+/* enum: Number of corrected 10-bit symbol errors, lane 2 (RS-FEC only) */
+#define MC_CMD_STAT_ID_FEC_CORRECTED_SYMBOLS_LANE2 0x5
+/* enum: Number of corrected 10-bit symbol errors, lane 3 (RS-FEC only) */
+#define MC_CMD_STAT_ID_FEC_CORRECTED_SYMBOLS_LANE3 0x6
+#define MC_CMD_STAT_ID_PHY_STAT_ID_LBN 16
+#define MC_CMD_STAT_ID_PHY_STAT_ID_WIDTH 16
+
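Per the LBN/WIDTH fields above, a 32-bit STAT_ID packs the SOURCE_ID in bits 0-15 and the source-specific stat ID in bits 16-31; the generation markers are simply the MARKER source with the START/END IDs. A small illustration in plain C over the defines above:

#include <stdint.h>

/* 32-bit STAT_ID for the TX_PKTS MAC statistic: MAC source in bits 0-15,
 * stat ID in bits 16-31.
 */
uint32_t tx_pkts_id = MC_CMD_STAT_ID_MAC |
		      ((uint32_t)MC_CMD_STAT_ID_TX_PKTS << MC_CMD_STAT_ID_MAC_STAT_ID_LBN);

/* The generation-start marker that occupies the first slot of the DMA buffer. */
uint32_t gen_start_id = MC_CMD_STAT_ID_MARKER |
			((uint32_t)MC_CMD_STAT_ID_GENERATION_START <<
			 MC_CMD_STAT_ID_MARKER_STAT_ID_LBN);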
+/* MC_CMD_STAT_DESC structuredef: Structure describing the layout and size of
+ * the stats DMA buffer descriptor.
+ */
+#define MC_CMD_STAT_DESC_LEN 8
+/* Unique identifier of the statistic. Formatted as MC_CMD_STAT_ID */
+#define MC_CMD_STAT_DESC_STAT_ID_OFST 0
+#define MC_CMD_STAT_DESC_STAT_ID_LEN 4
+#define MC_CMD_STAT_DESC_STAT_ID_LBN 0
+#define MC_CMD_STAT_DESC_STAT_ID_WIDTH 32
+/* See structuredef: MC_CMD_STAT_ID */
+#define MC_CMD_STAT_DESC_STAT_ID_SOURCE_ID_OFST 0
+#define MC_CMD_STAT_DESC_STAT_ID_SOURCE_ID_LEN 2
+#define MC_CMD_STAT_DESC_STAT_ID_SOURCE_ID_LBN 0
+#define MC_CMD_STAT_DESC_STAT_ID_SOURCE_ID_WIDTH 16
+#define MC_CMD_STAT_DESC_STAT_ID_MARKER_STAT_ID_OFST 2
+#define MC_CMD_STAT_DESC_STAT_ID_MARKER_STAT_ID_LEN 2
+#define MC_CMD_STAT_DESC_STAT_ID_MARKER_STAT_ID_LBN 16
+#define MC_CMD_STAT_DESC_STAT_ID_MARKER_STAT_ID_WIDTH 16
+#define MC_CMD_STAT_DESC_STAT_ID_MAC_STAT_ID_OFST 2
+#define MC_CMD_STAT_DESC_STAT_ID_MAC_STAT_ID_LEN 2
+#define MC_CMD_STAT_DESC_STAT_ID_MAC_STAT_ID_LBN 16
+#define MC_CMD_STAT_DESC_STAT_ID_MAC_STAT_ID_WIDTH 16
+#define MC_CMD_STAT_DESC_STAT_ID_PHY_STAT_ID_OFST 2
+#define MC_CMD_STAT_DESC_STAT_ID_PHY_STAT_ID_LEN 2
+#define MC_CMD_STAT_DESC_STAT_ID_PHY_STAT_ID_LBN 16
+#define MC_CMD_STAT_DESC_STAT_ID_PHY_STAT_ID_WIDTH 16
+/* Index of the statistic in the DMA buffer. */
+#define MC_CMD_STAT_DESC_STAT_INDEX_OFST 4
+#define MC_CMD_STAT_DESC_STAT_INDEX_LEN 2
+#define MC_CMD_STAT_DESC_STAT_INDEX_LBN 32
+#define MC_CMD_STAT_DESC_STAT_INDEX_WIDTH 16
+/* Reserved for future extension (e.g. flags field) - currently always 0. */
+#define MC_CMD_STAT_DESC_RESERVED_OFST 6
+#define MC_CMD_STAT_DESC_RESERVED_LEN 2
+#define MC_CMD_STAT_DESC_RESERVED_LBN 48
+#define MC_CMD_STAT_DESC_RESERVED_WIDTH 16
+
+
+/***********************************/
+/* MC_CMD_MAC_STATISTICS_DESCRIPTOR
+ * Get a list of descriptors that describe the layout and size of the stats
+ * buffer required for retrieving statistics for a given port. Each entry in
+ * the list is formatted as MC_CMD_STAT_DESC and provides the ID of each stat
+ * and its location and size in the buffer. It also gives the overall minimum
+ * size of the DMA buffer required when DMA mode is used. Note that the first
+ * and last entries in the list are reserved for the generation start
+ * (MC_CMD_MARKER_STAT_GENERATION_START) and end
+ * (MC_CMD_MARKER_STAT_GENERATION_END) markers respectively, to be used for DMA
+ * synchronisation as described in the documentation for the relevant enum
+ * entries. The entries are present in the buffer even if DMA mode is not used.
+ * Provisions are made (but currently unused) for extending the size of the
+ * descriptors, extending the size of the list beyond the maximum MCDI response
+ * size, as well as dynamic runtime updates of the list. Returns: 0 on
+ * success, ENOENT on non-existent port handle
+ */
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR 0x1e3
+#undef MC_CMD_0x1e3_PRIVILEGE_CTG
+
+#define MC_CMD_0x1e3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_MAC_STATISTICS_DESCRIPTOR_IN msgrequest */
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_IN_LEN 8
+/* Handle of port to get MAC statistics descriptors for. */
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_IN_PORT_HANDLE_LEN 4
+/* Offset of the first entry to return, for cases where not all entries fit in
+ * the MCDI response. Should be set to 0 on initial request, and on subsequent
+ * requests updated by the number of entries already returned, as long as the
+ * MORE_ENTRIES flag is set.
+ */
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_IN_OFFSET_OFST 4
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_IN_OFFSET_LEN 4
+
+/* MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT msgresponse */
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_LENMIN 28
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_LENMAX 252
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_LEN(num) (20+8*(num))
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_NUM(len) (((len)-20)/8)
+/* Generation number of the stats buffer. This is incremented each time the
+ * buffer is updated, and is used to verify the consistency of the buffer
+ * contents. Reserved for future extension (dynamic list updates). Currently
+ * always set to 0.
+ */
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_GENERATION_OFST 0
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_GENERATION_LEN 4
+/* Minimum size, in bytes, of the DMA buffer required to retrieve all
+ * statistics for the port. This is the sum of the sizes of all the
+ * statistics, plus the size of the generation markers. Drivers will typically
+ * round up this value to the granularity of the host DMA allocation units.
+ */
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_DMA_BUFFER_SIZE_OFST 4
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_DMA_BUFFER_SIZE_LEN 4
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_FLAGS_OFST 8
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_FLAGS_LEN 4
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_MORE_ENTRIES_OFST 8
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_MORE_ENTRIES_LBN 0
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_MORE_ENTRIES_WIDTH 1
+/* Size of the individual descriptor entry in the list. Determines the entry
+ * stride in the list. Currently always set to size of MC_CMD_STAT_DESC, larger
+ * values can be used in the future for extending the descriptor, by appending
+ * new data to the end of the existing structure.
+ */
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRY_SIZE_OFST 12
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRY_SIZE_LEN 4
+/* Number of entries returned in the descriptor list. */
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRY_COUNT_OFST 16
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRY_COUNT_LEN 4
+/* List of descriptors. Each entry is formatted as MC_CMD_STAT_DESC and
+ * provides the ID of each stat and its location and size in the buffer. The
+ * first and last entries are reserved for the generation start and end markers
+ * respectively.
+ */
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_OFST 20
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_LEN 8
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_LO_OFST 20
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_LO_LEN 4
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_LO_LBN 160
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_LO_WIDTH 32
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_HI_OFST 24
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_HI_LEN 4
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_HI_LBN 192
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_HI_WIDTH 32
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_MINNUM 1
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_MAXNUM 29
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_MAXNUM_MCDI2 125
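The OFFSET/MORE_ENTRIES pair above implements simple pagination for the case where the descriptor list does not fit into one MCDI response. A sketch of the retrieval loop under that reading, reusing the hypothetical put_le32()/get_le32()/mcdi_rpc() helpers from the earlier sketch:

#include <stdint.h>
#include <stdbool.h>
#include <string.h>

void put_le32(uint8_t *buf, unsigned int ofst, uint32_t v);
uint32_t get_le32(const uint8_t *buf, unsigned int ofst);
int mcdi_rpc(unsigned cmd, const void *in, size_t inlen, void *out, size_t outlen);

int fetch_stat_descriptors(uint32_t port_handle)
{
	uint32_t offset = 0;
	bool more = true;

	while (more) {
		uint8_t inbuf[MC_CMD_MAC_STATISTICS_DESCRIPTOR_IN_LEN];
		uint8_t outbuf[MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_LENMAX];
		uint32_t flags, count;

		memset(inbuf, 0, sizeof(inbuf));
		put_le32(inbuf, MC_CMD_MAC_STATISTICS_DESCRIPTOR_IN_PORT_HANDLE_OFST,
			 port_handle);
		put_le32(inbuf, MC_CMD_MAC_STATISTICS_DESCRIPTOR_IN_OFFSET_OFST, offset);

		if (mcdi_rpc(MC_CMD_MAC_STATISTICS_DESCRIPTOR, inbuf, sizeof(inbuf),
			     outbuf, sizeof(outbuf)))
			return -1;

		flags = get_le32(outbuf, MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_FLAGS_OFST);
		count = get_le32(outbuf, MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRY_COUNT_OFST);
		/* ... parse 'count' MC_CMD_STAT_DESC entries at ENTRIES_OFST here ... */

		offset += count;	/* next request skips entries already returned */
		more = flags & (1u << MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_MORE_ENTRIES_LBN);
	}
	return 0;
}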
+
+
+/***********************************/
+/* MC_CMD_MAC_STATISTICS
+ * Get generic MAC statistics. This call retrieves unified statistics managed
+ * by the MC. The MC will populate and provide all supported statistics in the
+ * format as returned by MC_CMD_MAC_STATISTICS_DESCRIPTOR. Refer to the
+ * aforementioned command for the format and contents of the stats DMA buffer.
+ * To ensure consistent and accurate results, it is essential for the driver to
+ * initialize the DMA buffer with zeros when DMA mode is used. Returns: 0 on
+ * success, ETIME if the DMA buffer is not ready, ENOENT on non-existent port
+ * handle, and EINVAL on invalid parameters (DMA buffer too small)
+ */
+#define MC_CMD_MAC_STATISTICS 0x1e4
+#undef MC_CMD_0x1e4_PRIVILEGE_CTG
+
+#define MC_CMD_0x1e4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_MAC_STATISTICS_IN msgrequest */
+#define MC_CMD_MAC_STATISTICS_IN_LEN 20
+/* Handle of port to get MAC statistics for. */
+#define MC_CMD_MAC_STATISTICS_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_MAC_STATISTICS_IN_PORT_HANDLE_LEN 4
+/* Contains options for querying the MAC statistics. */
+#define MC_CMD_MAC_STATISTICS_IN_CMD_OFST 4
+#define MC_CMD_MAC_STATISTICS_IN_CMD_LEN 4
+#define MC_CMD_MAC_STATISTICS_IN_DMA_OFST 4
+#define MC_CMD_MAC_STATISTICS_IN_DMA_LBN 0
+#define MC_CMD_MAC_STATISTICS_IN_DMA_WIDTH 1
+#define MC_CMD_MAC_STATISTICS_IN_CLEAR_OFST 4
+#define MC_CMD_MAC_STATISTICS_IN_CLEAR_LBN 1
+#define MC_CMD_MAC_STATISTICS_IN_CLEAR_WIDTH 1
+#define MC_CMD_MAC_STATISTICS_IN_PERIODIC_CHANGE_OFST 4
+#define MC_CMD_MAC_STATISTICS_IN_PERIODIC_CHANGE_LBN 2
+#define MC_CMD_MAC_STATISTICS_IN_PERIODIC_CHANGE_WIDTH 1
+#define MC_CMD_MAC_STATISTICS_IN_PERIODIC_ENABLE_OFST 4
+#define MC_CMD_MAC_STATISTICS_IN_PERIODIC_ENABLE_LBN 3
+#define MC_CMD_MAC_STATISTICS_IN_PERIODIC_ENABLE_WIDTH 1
+#define MC_CMD_MAC_STATISTICS_IN_PERIODIC_NOEVENT_OFST 4
+#define MC_CMD_MAC_STATISTICS_IN_PERIODIC_NOEVENT_LBN 4
+#define MC_CMD_MAC_STATISTICS_IN_PERIODIC_NOEVENT_WIDTH 1
+#define MC_CMD_MAC_STATISTICS_IN_PERIOD_MS_OFST 4
+#define MC_CMD_MAC_STATISTICS_IN_PERIOD_MS_LBN 16
+#define MC_CMD_MAC_STATISTICS_IN_PERIOD_MS_WIDTH 16
+/* This is the address of the DMA buffer to use for transfer of the statistics.
+ * Only valid if the DMA flag is set to 1.
+ */
+#define MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_OFST 8
+#define MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_LEN 8
+#define MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_LO_OFST 8
+#define MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_LO_LEN 4
+#define MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_LO_LBN 64
+#define MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_LO_WIDTH 32
+#define MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_HI_OFST 12
+#define MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_HI_LEN 4
+#define MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_HI_LBN 96
+#define MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_HI_WIDTH 32
+/* This is the length of the DMA buffer to use for the transfer of the
+ * statistics. The buffer should be at least DMA_BUFFER_SIZE long, as returned
+ * by MC_CMD_MAC_STATISTICS_DESCRIPTOR. If the supplied buffer is too small,
+ * the command will fail with EINVAL. Only valid if the DMA flag is set to 1.
+ */
+#define MC_CMD_MAC_STATISTICS_IN_DMA_LEN_OFST 16
+#define MC_CMD_MAC_STATISTICS_IN_DMA_LEN_LEN 4
+
+/* MC_CMD_MAC_STATISTICS_OUT msgresponse */
+#define MC_CMD_MAC_STATISTICS_OUT_LENMIN 5
+#define MC_CMD_MAC_STATISTICS_OUT_LENMAX 252
+#define MC_CMD_MAC_STATISTICS_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_MAC_STATISTICS_OUT_LEN(num) (4+1*(num))
+#define MC_CMD_MAC_STATISTICS_OUT_DATA_NUM(len) (((len)-4)/1)
+/* length of the data in bytes */
+#define MC_CMD_MAC_STATISTICS_OUT_DATALEN_OFST 0
+#define MC_CMD_MAC_STATISTICS_OUT_DATALEN_LEN 4
+#define MC_CMD_MAC_STATISTICS_OUT_DATA_OFST 4
+#define MC_CMD_MAC_STATISTICS_OUT_DATA_LEN 1
+#define MC_CMD_MAC_STATISTICS_OUT_DATA_MINNUM 1
+#define MC_CMD_MAC_STATISTICS_OUT_DATA_MAXNUM 248
+#define MC_CMD_MAC_STATISTICS_OUT_DATA_MAXNUM_MCDI2 1016
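+
+/* Illustrative usage sketch (not part of the MCDI definition): one possible
+ * way to assemble an MC_CMD_MAC_STATISTICS request for immediate DMA mode,
+ * using only the offsets defined above and assuming the MCDI request is the
+ * usual little-endian byte buffer. The port handle, DMA address and buffer
+ * length are placeholders supplied by the caller; buf is assumed to be
+ * MC_CMD_MAC_STATISTICS_IN_LEN bytes and zero-initialised.
+ *
+ *   #include <linux/unaligned.h>
+ *
+ *   static void fill_mac_stats_req(u8 *buf, u32 port_handle,
+ *                                  u64 dma_addr, u32 dma_len)
+ *   {
+ *           u32 cmd = 1 << MC_CMD_MAC_STATISTICS_IN_DMA_LBN;
+ *
+ *           put_unaligned_le32(port_handle,
+ *                       buf + MC_CMD_MAC_STATISTICS_IN_PORT_HANDLE_OFST);
+ *           put_unaligned_le32(cmd, buf + MC_CMD_MAC_STATISTICS_IN_CMD_OFST);
+ *           put_unaligned_le64(dma_addr,
+ *                       buf + MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_OFST);
+ *           put_unaligned_le32(dma_len,
+ *                       buf + MC_CMD_MAC_STATISTICS_IN_DMA_LEN_OFST);
+ *   }
+ */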
+
+/* NET_PORT_HANDLE_DESC structuredef: Network port descriptor containing a port
+ * handle and attributes used, for example, in MC_CMD_ENUM_PORTS.
+ */
+#define NET_PORT_HANDLE_DESC_LEN 53
+/* The handle to identify the port */
+#define NET_PORT_HANDLE_DESC_PORT_HANDLE_OFST 0
+#define NET_PORT_HANDLE_DESC_PORT_HANDLE_LEN 4
+#define NET_PORT_HANDLE_DESC_PORT_HANDLE_LBN 0
+#define NET_PORT_HANDLE_DESC_PORT_HANDLE_WIDTH 32
+/* Includes the type of port e.g. physical, virtual or MAE MPORT and other
+ * properties relevant to the port.
+ */
+#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_OFST 4
+#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_LEN 8
+#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_LO_OFST 4
+#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_LO_LEN 4
+#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_LO_LBN 32
+#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_LO_WIDTH 32
+#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_HI_OFST 8
+#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_HI_LEN 4
+#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_HI_LBN 64
+#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_HI_WIDTH 32
+#define NET_PORT_HANDLE_DESC_PORT_TYPE_OFST 4
+#define NET_PORT_HANDLE_DESC_PORT_TYPE_LBN 0
+#define NET_PORT_HANDLE_DESC_PORT_TYPE_WIDTH 3
+#define NET_PORT_HANDLE_DESC_PHYSICAL 0x0 /* enum */
+#define NET_PORT_HANDLE_DESC_VIRTUAL 0x1 /* enum */
+#define NET_PORT_HANDLE_DESC_MPORT 0x2 /* enum */
+#define NET_PORT_HANDLE_DESC_IS_ZOMBIE_OFST 4
+#define NET_PORT_HANDLE_DESC_IS_ZOMBIE_LBN 8
+#define NET_PORT_HANDLE_DESC_IS_ZOMBIE_WIDTH 1
+#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_LBN 32
+#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_WIDTH 64
+/* The dynamic change that led to the port enumeration */
+#define NET_PORT_HANDLE_DESC_ENTRY_SRC_OFST 12
+#define NET_PORT_HANDLE_DESC_ENTRY_SRC_LEN 1
+/* enum: Indicates that the ENTRY_SRC field has not been initialized. */
+#define NET_PORT_HANDLE_DESC_UNKNOWN 0x0
+/* enum: The port was enumerated at start of day. */
+#define NET_PORT_HANDLE_DESC_PRESENT 0x1
+/* enum: The port was dynamically added. */
+#define NET_PORT_HANDLE_DESC_ADDED 0x2
+/* enum: The port was dynamically deleted. */
+#define NET_PORT_HANDLE_DESC_DELETED 0x3
+#define NET_PORT_HANDLE_DESC_ENTRY_SRC_LBN 96
+#define NET_PORT_HANDLE_DESC_ENTRY_SRC_WIDTH 8
+/* This is an opaque 40 byte label exposed to users as a unique identifier of
+ * the port. It is represented as a zero-terminated ASCII string and assigned
+ * by the port administrator which is typically either the firmware for a
+ * physical port or the host software responsible for creating the virtual
+ * port. The label is conveyed to the driver after assignment, which, unlike
+ * the port administrator, does not need to know how to interpret the label.
+ */
+#define NET_PORT_HANDLE_DESC_PORT_LABEL_OFST 13
+#define NET_PORT_HANDLE_DESC_PORT_LABEL_LEN 40
+#define NET_PORT_HANDLE_DESC_PORT_LABEL_LBN 104
+#define NET_PORT_HANDLE_DESC_PORT_LABEL_WIDTH 320
+
+
+/***********************************/
+/* MC_CMD_ENUM_PORTS
+ * This command returns handles for all ports present in the system. The PCIe
+ * function type of each port (either physical or virtual) is also reported.
+ * After a start-of-day port enumeration, firmware keeps track of all available
+ * ports upon creation or deletion and updates the ports if there is a change.
+ * The enumeration state is cleared after a control interface reset (e.g. FLR,
+ * ENTITY_RESET), in which case this command must be called again to
+ * re-enumerate the ports. The command is also clear-on-read, so repeated calls
+ * will drain the buffer.
+ */
+#define MC_CMD_ENUM_PORTS 0x1e5
+#undef MC_CMD_0x1e5_PRIVILEGE_CTG
+
+#define MC_CMD_0x1e5_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_ENUM_PORTS_IN msgrequest */
+#define MC_CMD_ENUM_PORTS_IN_LEN 0
+
+/* MC_CMD_ENUM_PORTS_OUT msgresponse */
+#define MC_CMD_ENUM_PORTS_OUT_LENMIN 12
+#define MC_CMD_ENUM_PORTS_OUT_LENMAX 252
+#define MC_CMD_ENUM_PORTS_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_ENUM_PORTS_OUT_LEN(num) (12+1*(num))
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_NUM(len) (((len)-12)/1)
+/* Any unused flags are reserved and must be ignored. */
+#define MC_CMD_ENUM_PORTS_OUT_FLAGS_OFST 0
+#define MC_CMD_ENUM_PORTS_OUT_FLAGS_LEN 4
+#define MC_CMD_ENUM_PORTS_OUT_MORE_OFST 0
+#define MC_CMD_ENUM_PORTS_OUT_MORE_LBN 0
+#define MC_CMD_ENUM_PORTS_OUT_MORE_WIDTH 1
+/* The number of NET_PORT_HANDLE_DESC structures in PORT_HANDLES. */
+#define MC_CMD_ENUM_PORTS_OUT_PORT_COUNT_OFST 4
+#define MC_CMD_ENUM_PORTS_OUT_PORT_COUNT_LEN 4
+#define MC_CMD_ENUM_PORTS_OUT_SIZEOF_NET_PORT_HANDLE_DESC_OFST 8
+#define MC_CMD_ENUM_PORTS_OUT_SIZEOF_NET_PORT_HANDLE_DESC_LEN 4
+/* Array of NET_PORT_HANDLE_DESC structures. Callers must use the
+ * SIZEOF_NET_PORT_HANDLE_DESC field as the array stride between entries.
+ * This may also allow for tail padding for alignment. Fields beyond
+ * SIZEOF_NET_PORT_HANDLE_DESC are not present.
+ */
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_OFST 12
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_LEN 1
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_MINNUM 0
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_MAXNUM 240
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_MAXNUM_MCDI2 1008
+/* See structuredef: NET_PORT_HANDLE_DESC */
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_HANDLE_OFST 12
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_HANDLE_LEN 4
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_PROPERTIES_OFST 16
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_PROPERTIES_LEN 8
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_PROPERTIES_LO_OFST 16
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_PROPERTIES_LO_LEN 4
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_PROPERTIES_LO_LBN 128
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_PROPERTIES_LO_WIDTH 32
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_PROPERTIES_HI_OFST 20
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_PROPERTIES_HI_LEN 4
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_PROPERTIES_HI_LBN 160
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_PROPERTIES_HI_WIDTH 32
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_TYPE_LBN 128
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_TYPE_WIDTH 3
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_IS_ZOMBIE_LBN 136
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_IS_ZOMBIE_WIDTH 1
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_ENTRY_SRC_OFST 24
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_ENTRY_SRC_LEN 1
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_LABEL_OFST 25
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_LABEL_LEN 40
+
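+/* Illustrative usage sketch (not part of the MCDI definition): walking the
+ * PORT_HANDLES array of an MC_CMD_ENUM_PORTS response. The stride comes from
+ * the SIZEOF_NET_PORT_HANDLE_DESC field rather than NET_PORT_HANDLE_DESC_LEN,
+ * so existing callers keep working if the descriptor grows. The callback is a
+ * placeholder; get_unaligned_le32() is the usual kernel helper.
+ *
+ *   static void for_each_port_handle(const u8 *outbuf,
+ *                                    void (*cb)(u32 port_handle))
+ *   {
+ *           u32 count = get_unaligned_le32(outbuf +
+ *                       MC_CMD_ENUM_PORTS_OUT_PORT_COUNT_OFST);
+ *           u32 stride = get_unaligned_le32(outbuf +
+ *                       MC_CMD_ENUM_PORTS_OUT_SIZEOF_NET_PORT_HANDLE_DESC_OFST);
+ *           const u8 *desc = outbuf + MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_OFST;
+ *           u32 i;
+ *
+ *           for (i = 0; i < count; i++, desc += stride)
+ *                   cb(get_unaligned_le32(desc +
+ *                      NET_PORT_HANDLE_DESC_PORT_HANDLE_OFST));
+ *   }
+ */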
+
+/***********************************/
+/* MC_CMD_GET_TRANSCEIVER_PROPERTIES
+ * Read properties of the transceiver associated with the port. Can be either
+ * for a fixed onboard transceiver or an inserted module. The returned data is
+ * interpreted from the transceiver hardware and may be fixed up by the
+ * firmware. Use MC_CMD_GET_MODULE_DATA to get raw undecoded data.
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES 0x1e6
+#undef MC_CMD_0x1e6_PRIVILEGE_CTG
+
+#define MC_CMD_0x1e6_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_GET_TRANSCEIVER_PROPERTIES_IN msgrequest */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_IN_LEN 4
+/* Handle to port to get transceiver properties from. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_IN_PORT_HANDLE_LEN 4
+
+/* MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT msgresponse */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_LEN 89
+/* Supported technology abilities. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_TECH_ABILITIES_MASK_OFST 0
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_TECH_ABILITIES_MASK_LEN 16
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_ETH_TECH/TECH */
+/* Reserved to accommodate future expansion of Ethernet technologies. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_RESERVED_OFST 16
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_RESERVED_LEN 16
+/* Preferred FEC modes. This is a function of the cable type and length. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_PREFERRED_FEC_MASK_OFST 32
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_PREFERRED_FEC_MASK_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* FEC_TYPE/TYPE */
+/* SFF-8042 code reported by the module. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_CODE_OFST 36
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_CODE_LEN 2
+/* Medium. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_MEDIUM_OFST 38
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_MEDIUM_LEN 1
+/* enum property: value */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_UNKNOWN 0x0 /* enum */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_COPPER 0x1 /* enum */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_OPTICAL 0x2 /* enum */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_BACKPLANE 0x3 /* enum */
+/* Identifies the media subtype technology. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_MEDIA_SUBTYPE_OFST 39
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_MEDIA_SUBTYPE_LEN 1
+/* enum property: value */
+/* MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_UNKNOWN 0x0 */
+/* enum: Ethernet over twisted-pair copper cables for distances up to 100
+ * meters.
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_BASET 0x1
+/* enum: Ethernet over twin-axial, balanced copper cable. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_CR 0x2
+/* enum: Ethernet over backplane for connections on the same board. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_KX 0x3
+/* enum: Ethernet over a single backplane lane for connections between
+ * different boards.
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_KR 0x4
+/* enum: Ethernet over copper backplane. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_KP 0x5
+/* enum: Ethernet over fiber optic. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_BASEX 0x6
+/* enum: Short range ethernet over multimode fiber optic (See IEEE 802.3 Clause
+ * 49 and 52).
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_SR 0x7
+/* enum: Long range, extended range or far reach ethernet used with single mode
+ * fiber optics.
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_LR_ER_FR 0x8
+/* enum: Long reach multimode ethernet over multimode optical fiber. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_LRM 0x9
+/* enum: Very short reach PAM4 ethernet over multimode optical fiber (see IEEE
+ * 802.3db).
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_VR 0xa
+/* enum: BASE-R encoding and PAM4 over single-mode fiber with reach up to at
+ * least 500 meters (IEEE 802.3 Clause 121 and 124).
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_DR 0xb
+/* String of the vendor name as interpreted by NMC firmware. NMC firmware
+ * applies workarounds for known buggy transceivers. The vendor name is
+ * presented as 16 bytes of ASCII characters padded with spaces. It can also be
+ * represented as 16 bytes of zeros if the field is unspecified for the
+ * connected module. See SFF-8472/CMIS specifications for details.
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_VENDOR_NAME_OFST 40
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_VENDOR_NAME_LEN 1
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_VENDOR_NAME_NUM 16
+/* The vendor part number as interpreted by NMC firmware. The field is presented
+ * as 16 bytes of ASCII chars padded with spaces. It can also be 16 bytes of
+ * zeros if the field is unspecified for the connected module.
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_VENDOR_PN_OFST 56
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_VENDOR_PN_LEN 1
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_VENDOR_PN_NUM 16
+/* Serial number of the module presented as 16 bytes of ASCII characters padded
+ * with spaces. It can also be 16 bytes of zeros if the field is unspecified
+ * for the connected module. See SFF-8472/CMIS specifications for details.
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_SERIAL_NUMBER_OFST 72
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_SERIAL_NUMBER_LEN 1
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_SERIAL_NUMBER_NUM 16
+/* This reports the number of module changes detected by the NMC firmware. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_PORT_MODULECHANGE_SEQ_NUM_OFST 88
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_PORT_MODULECHANGE_SEQ_NUM_LEN 1
+
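+/* Illustrative usage sketch (not part of the MCDI definition): converting the
+ * space-padded VENDOR_NAME field of MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT
+ * into a NUL-terminated string; dst is assumed to hold at least 17 bytes. The
+ * same approach works for VENDOR_PN and SERIAL_NUMBER. strim() is the usual
+ * kernel helper for trimming the trailing padding.
+ *
+ *   static void copy_vendor_name(const u8 *outbuf, char *dst)
+ *   {
+ *           memcpy(dst,
+ *                  outbuf + MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_VENDOR_NAME_OFST,
+ *                  MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_VENDOR_NAME_NUM);
+ *           dst[MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_VENDOR_NAME_NUM] = '\0';
+ *           strim(dst);
+ *   }
+ */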
+
+/***********************************/
+/* MC_CMD_GET_FIXED_PORT_PROPERTIES
+ */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES 0x1e7
+#undef MC_CMD_0x1e7_PRIVILEGE_CTG
+
+#define MC_CMD_0x1e7_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_GET_FIXED_PORT_PROPERTIES_IN msgrequest: In this context, the port
+ * consists of the MAC and the PHY, and excludes any modules inserted into the
+ * cage. This information is fixed for a given board but not for a given ASIC.
+ * This command reports properties for the port as it is currently configured,
+ * and not its hardware capabilities, which may exceed the current
+ * configuration.
+ */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_IN_LEN 4
+/* Handle to the port from which to retrieve properties. */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_IN_PORT_HANDLE_LEN 4
+
+/* MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT msgresponse */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_LEN 36
+/* Supported capabilities of the port in its current configuration. */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_ABILITIES_OFST 0
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_ABILITIES_LEN 25
+/* See structuredef: MC_CMD_ETH_AN_FIELDS */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_ABILITIES_TECH_MASK_OFST 0
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_ABILITIES_TECH_MASK_LEN 16
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_ABILITIES_FEC_MASK_OFST 16
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_ABILITIES_FEC_MASK_LEN 4
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_ABILITIES_FEC_REQ_OFST 20
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_ABILITIES_FEC_REQ_LEN 4
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_ABILITIES_PAUSE_MASK_OFST 24
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_ABILITIES_PAUSE_MASK_LEN 1
+/* Number of lanes supported by the port in its current configuration. */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_NUM_LANES_OFST 25
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_NUM_LANES_LEN 1
+/* Bitmask of supported loopback modes. Where the response to this command
+ * includes the LOOPBACK_MODES_MASK_V2 field, that field should be used in
+ * preference to ensure that all available loopback modes are seen.
+ */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_LOOPBACK_MODES_MASK_OFST 26
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_LOOPBACK_MODES_MASK_LEN 1
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_LOOPBACK_V2/MODE */
+/* This field serves as a cage index that uniquely identifies the cage to which
+ * the module is connected. This is useful when splitter cables that have
+ * multiple ports on a single cage are used.
+ */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_MDI_INDEX_OFST 27
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_MDI_INDEX_LEN 1
+/* This bitmask is used to specify the lanes within the cage identified by
+ * MDI_INDEX that are allocated to the port.
+ */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_MDI_LANE_MASK_OFST 28
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_MDI_LANE_MASK_LEN 1
+/* Maximum frame length supported by the port in its current configuration. */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_MAX_FRAME_LEN_OFST 32
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_MAX_FRAME_LEN_LEN 4
+
+/* MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2 msgresponse */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LEN 48
+/* Supported capabilities of the port in its current configuration. */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_ABILITIES_OFST 0
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_ABILITIES_LEN 25
+/* Number of lanes supported by the port in its current configuration. */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_NUM_LANES_OFST 25
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_NUM_LANES_LEN 1
+/* Bitmask of supported loopback modes. Where the response to this command
+ * includes the LOOPBACK_MODES_MASK_V2 field, that field should be used in
+ * preference to ensure that all available loopback modes are seen.
+ */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_OFST 26
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_LEN 1
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_LOOPBACK_V2/MODE */
+/* This field serves as a cage index that uniquely identifies the cage to which
+ * the module is connected. This is useful when splitter cables that have
+ * multiple ports on a single cage are used.
+ */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_MDI_INDEX_OFST 27
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_MDI_INDEX_LEN 1
+/* This bitmask is used to specify the lanes within the cage identified by
+ * MDI_INDEX that are allocated to the port.
+ */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_MDI_LANE_MASK_OFST 28
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_MDI_LANE_MASK_LEN 1
+/* Maximum frame length supported by the port in its current configuration. */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_MAX_FRAME_LEN_OFST 32
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_MAX_FRAME_LEN_LEN 4
+/* Bitmask of supported loopback modes. This field replaces the
+ * LOOPBACK_MODES_MASK field which is defined under version 1 of this command.
+ */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_OFST 40
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_LEN 8
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_LO_OFST 40
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_LO_LEN 4
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_LO_LBN 320
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_LO_WIDTH 32
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_HI_OFST 44
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_HI_LEN 4
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_HI_LBN 352
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_HI_WIDTH 32
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_LOOPBACK_V2/MODE */
+
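+/* Illustrative usage sketch (not part of the MCDI definition): choosing
+ * between the V1 and V2 loopback-mode masks based on how much of the response
+ * the MC actually returned, as recommended by the field comments above.
+ * outlen is the response length reported by the MCDI transport.
+ *
+ *   static u64 port_loopback_modes(const u8 *outbuf, size_t outlen)
+ *   {
+ *           if (outlen >= MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LEN)
+ *                   return get_unaligned_le64(outbuf +
+ *                          MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_OFST);
+ *           return outbuf[MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_LOOPBACK_MODES_MASK_OFST];
+ *   }
+ */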
+
+/***********************************/
+/* MC_CMD_GET_MODULE_DATA
+ * Read media-specific data from the PHY (e.g. SFP/SFP+ module ID information
+ * for SFP+ PHYs). This command returns raw data from the module's EEPROM and
+ * it is not interpreted by the MC. Use MC_CMD_GET_TRANSCEIVER_PROPERTIES to
+ * get interpreted data. Return code: 0, ENOENT
+ */
+#define MC_CMD_GET_MODULE_DATA 0x1e8
+#undef MC_CMD_0x1e8_PRIVILEGE_CTG
+
+#define MC_CMD_0x1e8_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_GET_MODULE_DATA_IN msgrequest */
+#define MC_CMD_GET_MODULE_DATA_IN_LEN 16
+/* Handle to identify the port from which to request module properties. */
+#define MC_CMD_GET_MODULE_DATA_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_GET_MODULE_DATA_IN_PORT_HANDLE_LEN 4
+/* 7 bit I2C address of the device. DEPRECATED: This field is replaced by
+ * MODULE_ADDR in V2. Use V2 of this command for proper alignment and easier
+ * access.
+ */
+#define MC_CMD_GET_MODULE_DATA_IN_DEVADDR_LBN 32
+#define MC_CMD_GET_MODULE_DATA_IN_DEVADDR_WIDTH 7
+/* 0 if the page does not support banked access, non-zero otherwise. Non-zero
+ * BANK is valid if OFFSET is in the range 80h - ffh, i.e. in the Upper Memory
+ * region.
+ */
+#define MC_CMD_GET_MODULE_DATA_IN_BANK_OFST 6
+#define MC_CMD_GET_MODULE_DATA_IN_BANK_LEN 2
+/* 0 if paged access is not supported, non-zero otherwise. Non-zero PAGE is
+ * valid if OFFSET is in the range 80h - ffh.
+ */
+#define MC_CMD_GET_MODULE_DATA_IN_PAGE_OFST 8
+#define MC_CMD_GET_MODULE_DATA_IN_PAGE_LEN 2
+/* Offset in the range 00h - 7fh to access lower memory. Offset in the range
+ * 80h - ffh to access upper memory
+ */
+#define MC_CMD_GET_MODULE_DATA_IN_OFFSET_OFST 10
+#define MC_CMD_GET_MODULE_DATA_IN_OFFSET_LEN 1
+#define MC_CMD_GET_MODULE_DATA_IN_LENGTH_OFST 12
+#define MC_CMD_GET_MODULE_DATA_IN_LENGTH_LEN 4
+
+/* MC_CMD_GET_MODULE_DATA_IN_V2 msgrequest: Updated MC_CMD_GET_MODULE_DATA with
+ * 8-bit wide ADDRESSING field. This new field provides a correctly aligned
+ * container for the 7-bit DEVADDR field from V1, now renamed MODULE_ADDR, to
+ * ensure proper alignment.
+ */
+#define MC_CMD_GET_MODULE_DATA_IN_V2_LEN 16
+/* Handle to identify the port from which to request module properties. */
+#define MC_CMD_GET_MODULE_DATA_IN_V2_PORT_HANDLE_OFST 0
+#define MC_CMD_GET_MODULE_DATA_IN_V2_PORT_HANDLE_LEN 4
+/* 7 bit I2C address of the device. DEPRECATED: This field is replaced by
+ * MODULE_ADDR in V2. Use V2 of this command for proper alignment and easier
+ * access.
+ */
+#define MC_CMD_GET_MODULE_DATA_IN_V2_DEVADDR_LBN 32
+#define MC_CMD_GET_MODULE_DATA_IN_V2_DEVADDR_WIDTH 7
+/* 0 if the page does not support banked access, non-zero otherwise. Non-zero
+ * BANK is valid if OFFSET is in the range 80h - ffh, i.e. in the Upper Memory
+ * region.
+ */
+#define MC_CMD_GET_MODULE_DATA_IN_V2_BANK_OFST 6
+#define MC_CMD_GET_MODULE_DATA_IN_V2_BANK_LEN 2
+/* 0 if paged access is not supported, non-zero otherwise. Non-zero PAGE is
+ * valid if OFFSET is in the range 80h - ffh.
+ */
+#define MC_CMD_GET_MODULE_DATA_IN_V2_PAGE_OFST 8
+#define MC_CMD_GET_MODULE_DATA_IN_V2_PAGE_LEN 2
+/* Offset in the range 00h - 7fh to access lower memory. Offset in the range
+ * 80h - ffh to access upper memory
+ */
+#define MC_CMD_GET_MODULE_DATA_IN_V2_OFFSET_OFST 10
+#define MC_CMD_GET_MODULE_DATA_IN_V2_OFFSET_LEN 1
+#define MC_CMD_GET_MODULE_DATA_IN_V2_LENGTH_OFST 12
+#define MC_CMD_GET_MODULE_DATA_IN_V2_LENGTH_LEN 4
+/* Container for 7 bit I2C addresses. */
+#define MC_CMD_GET_MODULE_DATA_IN_V2_ADDRESSING_OFST 4
+#define MC_CMD_GET_MODULE_DATA_IN_V2_ADDRESSING_LEN 1
+#define MC_CMD_GET_MODULE_DATA_IN_V2_MODULE_ADDR_OFST 4
+#define MC_CMD_GET_MODULE_DATA_IN_V2_MODULE_ADDR_LBN 0
+#define MC_CMD_GET_MODULE_DATA_IN_V2_MODULE_ADDR_WIDTH 7
+
+/* MC_CMD_GET_MODULE_DATA_OUT msgresponse */
+#define MC_CMD_GET_MODULE_DATA_OUT_LENMIN 5
+#define MC_CMD_GET_MODULE_DATA_OUT_LENMAX 252
+#define MC_CMD_GET_MODULE_DATA_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_GET_MODULE_DATA_OUT_LEN(num) (4+1*(num))
+#define MC_CMD_GET_MODULE_DATA_OUT_DATA_NUM(len) (((len)-4)/1)
+/* length of the data in bytes */
+#define MC_CMD_GET_MODULE_DATA_OUT_DATALEN_OFST 0
+#define MC_CMD_GET_MODULE_DATA_OUT_DATALEN_LEN 4
+#define MC_CMD_GET_MODULE_DATA_OUT_DATA_OFST 4
+#define MC_CMD_GET_MODULE_DATA_OUT_DATA_LEN 1
+#define MC_CMD_GET_MODULE_DATA_OUT_DATA_MINNUM 1
+#define MC_CMD_GET_MODULE_DATA_OUT_DATA_MAXNUM 248
+#define MC_CMD_GET_MODULE_DATA_OUT_DATA_MAXNUM_MCDI2 1016
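+
+/* Illustrative usage sketch (not part of the MCDI definition): filling in an
+ * MC_CMD_GET_MODULE_DATA_IN_V2 request to read module EEPROM contents. The
+ * 7-bit I2C address goes in the byte-aligned MODULE_ADDR/ADDRESSING field;
+ * BANK and PAGE are only meaningful for offsets in the upper memory region
+ * (80h - ffh). All parameters are placeholders supplied by the caller and buf
+ * is assumed to be MC_CMD_GET_MODULE_DATA_IN_V2_LEN bytes, zero-initialised.
+ *
+ *   static void fill_module_data_req(u8 *buf, u32 port_handle, u8 i2c_addr,
+ *                                    u16 bank, u16 page, u8 offset, u32 len)
+ *   {
+ *           put_unaligned_le32(port_handle,
+ *                       buf + MC_CMD_GET_MODULE_DATA_IN_V2_PORT_HANDLE_OFST);
+ *           buf[MC_CMD_GET_MODULE_DATA_IN_V2_ADDRESSING_OFST] = i2c_addr & 0x7f;
+ *           put_unaligned_le16(bank, buf + MC_CMD_GET_MODULE_DATA_IN_V2_BANK_OFST);
+ *           put_unaligned_le16(page, buf + MC_CMD_GET_MODULE_DATA_IN_V2_PAGE_OFST);
+ *           buf[MC_CMD_GET_MODULE_DATA_IN_V2_OFFSET_OFST] = offset;
+ *           put_unaligned_le32(len, buf + MC_CMD_GET_MODULE_DATA_IN_V2_LENGTH_OFST);
+ *   }
+ */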
+
+/* EVENT_MASK structuredef */
+#define EVENT_MASK_LEN 4
+#define EVENT_MASK_TYPE_OFST 0
+#define EVENT_MASK_TYPE_LEN 4
+/* enum: PORT_LINKCHANGE event is enabled */
+#define EVENT_MASK_PORT_LINKCHANGE 0x0
+/* enum: PORT_MODULECHANGE event is enabled */
+#define EVENT_MASK_PORT_MODULECHANGE 0x1
+#define EVENT_MASK_TYPE_LBN 0
+#define EVENT_MASK_TYPE_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_SET_NETPORT_EVENTS_MASK
+ */
+#define MC_CMD_SET_NETPORT_EVENTS_MASK 0x1e9
+#undef MC_CMD_0x1e9_PRIVILEGE_CTG
+
+#define MC_CMD_0x1e9_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_SET_NETPORT_EVENTS_MASK_IN msgrequest: Enable or disable delivery of
+ * specified network port events for a given port identified by PORT_HANDLE. At
+ * start of day, or after any control interface reset (FLR, ENTITY_RESET,
+ * etc.), all event delivery is disabled for all ports associated with the
+ * control interface.
+ */
+#define MC_CMD_SET_NETPORT_EVENTS_MASK_IN_LEN 8
+/* Handle to port to set event delivery mask. */
+#define MC_CMD_SET_NETPORT_EVENTS_MASK_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_SET_NETPORT_EVENTS_MASK_IN_PORT_HANDLE_LEN 4
+/* Bitmask of events to enable. Event delivery is enabled when corresponding
+ * bit is 1, disabled when 0.
+ */
+#define MC_CMD_SET_NETPORT_EVENTS_MASK_IN_EVENT_MASK_OFST 4
+#define MC_CMD_SET_NETPORT_EVENTS_MASK_IN_EVENT_MASK_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* EVENT_MASK/TYPE */
+
+/* MC_CMD_SET_NETPORT_EVENTS_MASK_OUT msgresponse */
+#define MC_CMD_SET_NETPORT_EVENTS_MASK_OUT_LEN 0
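+
+/* Illustrative usage sketch (not part of the MCDI definition): the EVENT_MASK
+ * enum values above are bit positions ("enum property: bitshift"), so a
+ * request enabling both link-change and module-change events might be built
+ * as shown. BIT() comes from <linux/bits.h>; port_handle and buf are
+ * placeholders supplied by the caller.
+ *
+ *   u32 mask = BIT(EVENT_MASK_PORT_LINKCHANGE) |
+ *              BIT(EVENT_MASK_PORT_MODULECHANGE);
+ *
+ *   put_unaligned_le32(port_handle,
+ *               buf + MC_CMD_SET_NETPORT_EVENTS_MASK_IN_PORT_HANDLE_OFST);
+ *   put_unaligned_le32(mask,
+ *               buf + MC_CMD_SET_NETPORT_EVENTS_MASK_IN_EVENT_MASK_OFST);
+ */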
+
/***********************************/
-/* MC_CMD_EVENT_CTRL
- * Configure which categories of unsolicited events the driver expects to
- * receive (Riverhead).
- */
-#define MC_CMD_EVENT_CTRL 0x69
-#undef MC_CMD_0x69_PRIVILEGE_CTG
-
-#define MC_CMD_0x69_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_EVENT_CTRL_IN msgrequest */
-#define MC_CMD_EVENT_CTRL_IN_LENMIN 0
-#define MC_CMD_EVENT_CTRL_IN_LENMAX 252
-#define MC_CMD_EVENT_CTRL_IN_LENMAX_MCDI2 1020
-#define MC_CMD_EVENT_CTRL_IN_LEN(num) (0+4*(num))
-#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_NUM(len) (((len)-0)/4)
-/* Array of event categories for which the driver wishes to receive events. */
-#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_OFST 0
-#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_LEN 4
-#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_MINNUM 0
-#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_MAXNUM 63
-#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_MAXNUM_MCDI2 255
-/* enum: Driver wishes to receive LINKCHANGE events. */
-#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_LINKCHANGE 0x0
-/* enum: Driver wishes to receive SENSOR_CHANGE and SENSOR_STATE_CHANGE events.
- */
-#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_SENSOREVT 0x1
-/* enum: Driver wishes to receive receive errors. */
-#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_RX_ERR 0x2
-/* enum: Driver wishes to receive transmit errors. */
-#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_TX_ERR 0x3
-/* enum: Driver wishes to receive firmware alerts. */
-#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_FWALERT 0x4
-/* enum: Driver wishes to receive reboot events. */
-#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_MC_REBOOT 0x5
-
-/* MC_CMD_EVENT_CTRL_OUT msgrequest */
-#define MC_CMD_EVENT_CTRL_OUT_LEN 0
+/* MC_CMD_GET_NETPORT_EVENTS_MASK
+ */
+#define MC_CMD_GET_NETPORT_EVENTS_MASK 0x1ea
+#undef MC_CMD_0x1ea_PRIVILEGE_CTG
+
+#define MC_CMD_0x1ea_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_GET_NETPORT_EVENTS_MASK_IN msgrequest: Get the event delivery mask
+ * for a given port identified by PORT_HANDLE.
+ */
+#define MC_CMD_GET_NETPORT_EVENTS_MASK_IN_LEN 4
+/* Handle to port to get event delivery mask for. */
+#define MC_CMD_GET_NETPORT_EVENTS_MASK_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_GET_NETPORT_EVENTS_MASK_IN_PORT_HANDLE_LEN 4
+
+/* MC_CMD_GET_NETPORT_EVENTS_MASK_OUT msgresponse */
+#define MC_CMD_GET_NETPORT_EVENTS_MASK_OUT_LEN 4
+/* Bitmask of events enabled. Event delivery is enabled when corresponding bit
+ * is 1, disabled when 0.
+ */
+#define MC_CMD_GET_NETPORT_EVENTS_MASK_OUT_EVENT_MASK_OFST 0
+#define MC_CMD_GET_NETPORT_EVENTS_MASK_OUT_EVENT_MASK_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* EVENT_MASK/TYPE */
+
+
+/***********************************/
+/* MC_CMD_GET_SUPPORTED_NETPORT_EVENTS
+ */
+#define MC_CMD_GET_SUPPORTED_NETPORT_EVENTS 0x1eb
+#undef MC_CMD_0x1eb_PRIVILEGE_CTG
+
+#define MC_CMD_0x1eb_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_GET_SUPPORTED_NETPORT_EVENTS_IN msgrequest: Get network port events
+ * supported by the platform. Information returned is fixed for a given NIC
+ * platform.
+ */
+#define MC_CMD_GET_SUPPORTED_NETPORT_EVENTS_IN_LEN 0
+
+/* MC_CMD_GET_SUPPORTED_NETPORT_EVENTS_OUT msgresponse */
+#define MC_CMD_GET_SUPPORTED_NETPORT_EVENTS_OUT_LEN 4
+/* Bitmask of events supported by the platform. A bit set to 1 indicates that
+ * the corresponding event type is supported.
+ */
+#define MC_CMD_GET_SUPPORTED_NETPORT_EVENTS_OUT_EVENT_MASK_OFST 0
+#define MC_CMD_GET_SUPPORTED_NETPORT_EVENTS_OUT_EVENT_MASK_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* EVENT_MASK/TYPE */
+
+
+/***********************************/
+/* MC_CMD_GET_NETPORT_STATISTICS
+ * Get generic MAC statistics. This call retrieves unified statistics managed
+ * by the MC. The MC will populate and provide all supported statistics in the
+ * format as returned by MC_CMD_MAC_STATISTICS_DESCRIPTOR. Refer to the
+ * aforementioned command for the format and contents of the stats DMA buffer.
+ * To ensure consistent and accurate results, it is essential for the driver to
+ * initialize the DMA buffer with zeros when DMA mode is used. Returns: 0 on
+ * success, ETIME if the DMA buffer is not ready, ENOENT on non-existent port
+ * handle, and EINVAL on invalid parameters (DMA buffer too small)
+ */
+#define MC_CMD_GET_NETPORT_STATISTICS 0x1fa
+#undef MC_CMD_0x1fa_PRIVILEGE_CTG
+
+#define MC_CMD_0x1fa_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_NETPORT_STATISTICS_IN msgrequest */
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_LEN 20
+/* Handle of port to get MAC statistics for. */
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PORT_HANDLE_LEN 4
+/* Contains options for querying the MAC statistics. */
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_CMD_OFST 4
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_CMD_LEN 4
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_OFST 4
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_LBN 0
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_WIDTH 1
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_CLEAR_OFST 4
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_CLEAR_LBN 1
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_CLEAR_WIDTH 1
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIODIC_CHANGE_OFST 4
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIODIC_CHANGE_LBN 2
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIODIC_CHANGE_WIDTH 1
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIODIC_ENABLE_OFST 4
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIODIC_ENABLE_LBN 3
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIODIC_ENABLE_WIDTH 1
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIODIC_NOEVENT_OFST 4
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIODIC_NOEVENT_LBN 4
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIODIC_NOEVENT_WIDTH 1
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIOD_MS_OFST 4
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIOD_MS_LBN 15
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIOD_MS_WIDTH 17
+/* Specifies the physical address of the DMA buffer to use for statistics
+ * transfer. This field must contain a valid address under either of these
+ * conditions: 1. DMA flag is set (immediate DMA requested) 2. Both
+ * PERIODIC_CHANGE and PERIODIC_ENABLE are set (periodic DMA configured)
+ */
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_ADDR_OFST 8
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_ADDR_LEN 8
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_ADDR_LO_OFST 8
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_ADDR_LO_LEN 4
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_ADDR_LO_LBN 64
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_ADDR_LO_WIDTH 32
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_ADDR_HI_OFST 12
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_ADDR_HI_LEN 4
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_ADDR_HI_LBN 96
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_ADDR_HI_WIDTH 32
+/* Specifies the length of the DMA buffer in bytes for statistics transfer. The
+ * buffer size must be at least DMA_BUFFER_SIZE bytes (as returned by
+ * MC_CMD_MAC_STATISTICS_DESCRIPTOR). Providing an insufficient buffer size
+ * will result in an EINVAL error. This field must contain a valid length under
+ * either of these conditions: 1. DMA flag is set (immediate DMA requested) 2.
+ * Both PERIODIC_CHANGE and PERIODIC_ENABLE are set (periodic DMA configured)
+ */
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_LEN_OFST 16
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_LEN_LEN 4
+
+/* MC_CMD_GET_NETPORT_STATISTICS_OUT msgresponse */
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_LENMIN 0
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_LENMAX 248
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_LENMAX_MCDI2 1016
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_LEN(num) (0+8*(num))
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_NUM(len) (((len)-0)/8)
+/* Statistics buffer. Zero-length if DMA mode is used. The statistics buffer is
+ * an array of 8-byte counter values, containing the generation start marker,
+ * stats counters, and generation end marker. The index of each counter in the
+ * array is reported by the MAC_STATISTICS_DESCRIPTOR command. The same layout
+ * is used for the DMA buffer for DMA mode stats.
+ */
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_OFST 0
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_LEN 8
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_LO_OFST 0
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_LO_LEN 4
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_LO_LBN 0
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_LO_WIDTH 32
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_HI_OFST 4
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_HI_LEN 4
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_HI_LBN 32
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_HI_WIDTH 32
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_MINNUM 0
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_MAXNUM 31
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_MAXNUM_MCDI2 127
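+
+/* Illustrative usage sketch (not part of the MCDI definition): reading one
+ * counter out of a non-DMA MC_CMD_GET_NETPORT_STATISTICS response. Counter
+ * indices, including the positions of the generation start/end markers, come
+ * from MC_CMD_MAC_STATISTICS_DESCRIPTOR; gen_start_idx, gen_end_idx and
+ * stat_idx are placeholders for values obtained from that descriptor. A
+ * mismatch between the two generation markers suggests a torn snapshot that
+ * should be re-read.
+ *
+ *   static int read_netport_stat(const u8 *outbuf, u32 gen_start_idx,
+ *                                u32 gen_end_idx, u32 stat_idx, u64 *value)
+ *   {
+ *           const u8 *stats = outbuf +
+ *                       MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_OFST;
+ *
+ *           if (get_unaligned_le64(stats + 8 * gen_start_idx) !=
+ *               get_unaligned_le64(stats + 8 * gen_end_idx))
+ *                   return -EAGAIN;
+ *           *value = get_unaligned_le64(stats + 8 * stat_idx);
+ *           return 0;
+ *   }
+ */
+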
/* EVB_PORT_ID structuredef */
#define EVB_PORT_ID_LEN 4
@@ -9706,44 +10573,6 @@
#define EVB_PORT_ID_PORT_ID_LBN 0
#define EVB_PORT_ID_PORT_ID_WIDTH 32
-/* EVB_VLAN_TAG structuredef */
-#define EVB_VLAN_TAG_LEN 2
-/* The VLAN tag value */
-#define EVB_VLAN_TAG_VLAN_ID_LBN 0
-#define EVB_VLAN_TAG_VLAN_ID_WIDTH 12
-#define EVB_VLAN_TAG_MODE_LBN 12
-#define EVB_VLAN_TAG_MODE_WIDTH 4
-/* enum: Insert the VLAN. */
-#define EVB_VLAN_TAG_INSERT 0x0
-/* enum: Replace the VLAN if already present. */
-#define EVB_VLAN_TAG_REPLACE 0x1
-
-/* BUFTBL_ENTRY structuredef */
-#define BUFTBL_ENTRY_LEN 12
-/* the owner ID */
-#define BUFTBL_ENTRY_OID_OFST 0
-#define BUFTBL_ENTRY_OID_LEN 2
-#define BUFTBL_ENTRY_OID_LBN 0
-#define BUFTBL_ENTRY_OID_WIDTH 16
-/* the page parameter as one of ESE_DZ_SMC_PAGE_SIZE_ */
-#define BUFTBL_ENTRY_PGSZ_OFST 2
-#define BUFTBL_ENTRY_PGSZ_LEN 2
-#define BUFTBL_ENTRY_PGSZ_LBN 16
-#define BUFTBL_ENTRY_PGSZ_WIDTH 16
-/* the raw 64-bit address field from the SMC, not adjusted for page size */
-#define BUFTBL_ENTRY_RAWADDR_OFST 4
-#define BUFTBL_ENTRY_RAWADDR_LEN 8
-#define BUFTBL_ENTRY_RAWADDR_LO_OFST 4
-#define BUFTBL_ENTRY_RAWADDR_LO_LEN 4
-#define BUFTBL_ENTRY_RAWADDR_LO_LBN 32
-#define BUFTBL_ENTRY_RAWADDR_LO_WIDTH 32
-#define BUFTBL_ENTRY_RAWADDR_HI_OFST 8
-#define BUFTBL_ENTRY_RAWADDR_HI_LEN 4
-#define BUFTBL_ENTRY_RAWADDR_HI_LBN 64
-#define BUFTBL_ENTRY_RAWADDR_HI_WIDTH 32
-#define BUFTBL_ENTRY_RAWADDR_LBN 32
-#define BUFTBL_ENTRY_RAWADDR_WIDTH 64
-
/* NVRAM_PARTITION_TYPE structuredef */
#define NVRAM_PARTITION_TYPE_LEN 2
#define NVRAM_PARTITION_TYPE_ID_OFST 0
@@ -9787,6 +10616,8 @@
#define NVRAM_PARTITION_TYPE_NMC_LOG 0x700
/* enum: Non-volatile log output of second core on dual-core device */
#define NVRAM_PARTITION_TYPE_LOG_SLAVE 0x701
+/* enum: RAM (volatile) log output partition */
+#define NVRAM_PARTITION_TYPE_RAM_LOG 0x702
/* enum: Device state dump output partition */
#define NVRAM_PARTITION_TYPE_DUMP 0x800
/* enum: Crash log partition for NMC firmware */
@@ -9923,6 +10754,16 @@
#define NVRAM_PARTITION_TYPE_SUC_SOC_CONFIG 0x1f07
/* enum: System-on-Chip update information. */
#define NVRAM_PARTITION_TYPE_SOC_UPDATE 0x2003
+/* enum: Virtual partition. Write-only. Writes will actually be sent to an
+ * appropriate partition (for instance BUNDLE if the data starts with the magic
+ * number for a bundle update), or discarded with an error if not recognised as
+ * a supported type.
+ */
+#define NVRAM_PARTITION_TYPE_AUTO 0x2100
+/* enum: MC/NMC (first stage) bootloader firmware. (For X4, see XN-202072-PS
+ * and XN-202084-SW section 3.1).
+ */
+#define NVRAM_PARTITION_TYPE_BOOTLOADER 0x2200
/* enum: Start of reserved value range (firmware may use for any purpose) */
#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN 0xff00
/* enum: End of reserved value range (firmware may use for any purpose) */
@@ -9981,116 +10822,6 @@
#define LICENSED_APP_ID_ID_LBN 0
#define LICENSED_APP_ID_ID_WIDTH 32
-/* LICENSED_FEATURES structuredef */
-#define LICENSED_FEATURES_LEN 8
-/* Bitmask of licensed firmware features */
-#define LICENSED_FEATURES_MASK_OFST 0
-#define LICENSED_FEATURES_MASK_LEN 8
-#define LICENSED_FEATURES_MASK_LO_OFST 0
-#define LICENSED_FEATURES_MASK_LO_LEN 4
-#define LICENSED_FEATURES_MASK_LO_LBN 0
-#define LICENSED_FEATURES_MASK_LO_WIDTH 32
-#define LICENSED_FEATURES_MASK_HI_OFST 4
-#define LICENSED_FEATURES_MASK_HI_LEN 4
-#define LICENSED_FEATURES_MASK_HI_LBN 32
-#define LICENSED_FEATURES_MASK_HI_WIDTH 32
-#define LICENSED_FEATURES_RX_CUT_THROUGH_OFST 0
-#define LICENSED_FEATURES_RX_CUT_THROUGH_LBN 0
-#define LICENSED_FEATURES_RX_CUT_THROUGH_WIDTH 1
-#define LICENSED_FEATURES_PIO_OFST 0
-#define LICENSED_FEATURES_PIO_LBN 1
-#define LICENSED_FEATURES_PIO_WIDTH 1
-#define LICENSED_FEATURES_EVQ_TIMER_OFST 0
-#define LICENSED_FEATURES_EVQ_TIMER_LBN 2
-#define LICENSED_FEATURES_EVQ_TIMER_WIDTH 1
-#define LICENSED_FEATURES_CLOCK_OFST 0
-#define LICENSED_FEATURES_CLOCK_LBN 3
-#define LICENSED_FEATURES_CLOCK_WIDTH 1
-#define LICENSED_FEATURES_RX_TIMESTAMPS_OFST 0
-#define LICENSED_FEATURES_RX_TIMESTAMPS_LBN 4
-#define LICENSED_FEATURES_RX_TIMESTAMPS_WIDTH 1
-#define LICENSED_FEATURES_TX_TIMESTAMPS_OFST 0
-#define LICENSED_FEATURES_TX_TIMESTAMPS_LBN 5
-#define LICENSED_FEATURES_TX_TIMESTAMPS_WIDTH 1
-#define LICENSED_FEATURES_RX_SNIFF_OFST 0
-#define LICENSED_FEATURES_RX_SNIFF_LBN 6
-#define LICENSED_FEATURES_RX_SNIFF_WIDTH 1
-#define LICENSED_FEATURES_TX_SNIFF_OFST 0
-#define LICENSED_FEATURES_TX_SNIFF_LBN 7
-#define LICENSED_FEATURES_TX_SNIFF_WIDTH 1
-#define LICENSED_FEATURES_PROXY_FILTER_OPS_OFST 0
-#define LICENSED_FEATURES_PROXY_FILTER_OPS_LBN 8
-#define LICENSED_FEATURES_PROXY_FILTER_OPS_WIDTH 1
-#define LICENSED_FEATURES_EVENT_CUT_THROUGH_OFST 0
-#define LICENSED_FEATURES_EVENT_CUT_THROUGH_LBN 9
-#define LICENSED_FEATURES_EVENT_CUT_THROUGH_WIDTH 1
-#define LICENSED_FEATURES_MASK_LBN 0
-#define LICENSED_FEATURES_MASK_WIDTH 64
-
-/* LICENSED_V3_APPS structuredef */
-#define LICENSED_V3_APPS_LEN 8
-/* Bitmask of licensed applications */
-#define LICENSED_V3_APPS_MASK_OFST 0
-#define LICENSED_V3_APPS_MASK_LEN 8
-#define LICENSED_V3_APPS_MASK_LO_OFST 0
-#define LICENSED_V3_APPS_MASK_LO_LEN 4
-#define LICENSED_V3_APPS_MASK_LO_LBN 0
-#define LICENSED_V3_APPS_MASK_LO_WIDTH 32
-#define LICENSED_V3_APPS_MASK_HI_OFST 4
-#define LICENSED_V3_APPS_MASK_HI_LEN 4
-#define LICENSED_V3_APPS_MASK_HI_LBN 32
-#define LICENSED_V3_APPS_MASK_HI_WIDTH 32
-#define LICENSED_V3_APPS_ONLOAD_OFST 0
-#define LICENSED_V3_APPS_ONLOAD_LBN 0
-#define LICENSED_V3_APPS_ONLOAD_WIDTH 1
-#define LICENSED_V3_APPS_PTP_OFST 0
-#define LICENSED_V3_APPS_PTP_LBN 1
-#define LICENSED_V3_APPS_PTP_WIDTH 1
-#define LICENSED_V3_APPS_SOLARCAPTURE_PRO_OFST 0
-#define LICENSED_V3_APPS_SOLARCAPTURE_PRO_LBN 2
-#define LICENSED_V3_APPS_SOLARCAPTURE_PRO_WIDTH 1
-#define LICENSED_V3_APPS_SOLARSECURE_OFST 0
-#define LICENSED_V3_APPS_SOLARSECURE_LBN 3
-#define LICENSED_V3_APPS_SOLARSECURE_WIDTH 1
-#define LICENSED_V3_APPS_PERF_MONITOR_OFST 0
-#define LICENSED_V3_APPS_PERF_MONITOR_LBN 4
-#define LICENSED_V3_APPS_PERF_MONITOR_WIDTH 1
-#define LICENSED_V3_APPS_SOLARCAPTURE_LIVE_OFST 0
-#define LICENSED_V3_APPS_SOLARCAPTURE_LIVE_LBN 5
-#define LICENSED_V3_APPS_SOLARCAPTURE_LIVE_WIDTH 1
-#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_OFST 0
-#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_LBN 6
-#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_WIDTH 1
-#define LICENSED_V3_APPS_NETWORK_ACCESS_CONTROL_OFST 0
-#define LICENSED_V3_APPS_NETWORK_ACCESS_CONTROL_LBN 7
-#define LICENSED_V3_APPS_NETWORK_ACCESS_CONTROL_WIDTH 1
-#define LICENSED_V3_APPS_TCP_DIRECT_OFST 0
-#define LICENSED_V3_APPS_TCP_DIRECT_LBN 8
-#define LICENSED_V3_APPS_TCP_DIRECT_WIDTH 1
-#define LICENSED_V3_APPS_LOW_LATENCY_OFST 0
-#define LICENSED_V3_APPS_LOW_LATENCY_LBN 9
-#define LICENSED_V3_APPS_LOW_LATENCY_WIDTH 1
-#define LICENSED_V3_APPS_SOLARCAPTURE_TAP_OFST 0
-#define LICENSED_V3_APPS_SOLARCAPTURE_TAP_LBN 10
-#define LICENSED_V3_APPS_SOLARCAPTURE_TAP_WIDTH 1
-#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_OFST 0
-#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_LBN 11
-#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_WIDTH 1
-#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_OFST 0
-#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_LBN 12
-#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_WIDTH 1
-#define LICENSED_V3_APPS_SCALEOUT_ONLOAD_OFST 0
-#define LICENSED_V3_APPS_SCALEOUT_ONLOAD_LBN 13
-#define LICENSED_V3_APPS_SCALEOUT_ONLOAD_WIDTH 1
-#define LICENSED_V3_APPS_DSHBRD_OFST 0
-#define LICENSED_V3_APPS_DSHBRD_LBN 14
-#define LICENSED_V3_APPS_DSHBRD_WIDTH 1
-#define LICENSED_V3_APPS_SCATRD_OFST 0
-#define LICENSED_V3_APPS_SCATRD_LBN 15
-#define LICENSED_V3_APPS_SCATRD_WIDTH 1
-#define LICENSED_V3_APPS_MASK_LBN 0
-#define LICENSED_V3_APPS_MASK_WIDTH 64
-
/* LICENSED_V3_FEATURES structuredef */
#define LICENSED_V3_FEATURES_LEN 8
/* Bitmask of licensed firmware features */
@@ -10199,44 +10930,6 @@
#define RSS_MODE_HASH_SELECTOR_LBN 0
#define RSS_MODE_HASH_SELECTOR_WIDTH 8
-/* CTPIO_STATS_MAP structuredef */
-#define CTPIO_STATS_MAP_LEN 4
-/* The (function relative) VI number */
-#define CTPIO_STATS_MAP_VI_OFST 0
-#define CTPIO_STATS_MAP_VI_LEN 2
-#define CTPIO_STATS_MAP_VI_LBN 0
-#define CTPIO_STATS_MAP_VI_WIDTH 16
-/* The target bucket for the VI */
-#define CTPIO_STATS_MAP_BUCKET_OFST 2
-#define CTPIO_STATS_MAP_BUCKET_LEN 2
-#define CTPIO_STATS_MAP_BUCKET_LBN 16
-#define CTPIO_STATS_MAP_BUCKET_WIDTH 16
-
-
-/***********************************/
-/* MC_CMD_READ_REGS
- * Get a dump of the MCPU registers
- */
-#define MC_CMD_READ_REGS 0x50
-#undef MC_CMD_0x50_PRIVILEGE_CTG
-
-#define MC_CMD_0x50_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_READ_REGS_IN msgrequest */
-#define MC_CMD_READ_REGS_IN_LEN 0
-
-/* MC_CMD_READ_REGS_OUT msgresponse */
-#define MC_CMD_READ_REGS_OUT_LEN 308
-/* Whether the corresponding register entry contains a valid value */
-#define MC_CMD_READ_REGS_OUT_MASK_OFST 0
-#define MC_CMD_READ_REGS_OUT_MASK_LEN 16
-/* Same order as MIPS GDB (r0-r31, sr, lo, hi, bad, cause, 32 x float, fsr,
- * fir, fp)
- */
-#define MC_CMD_READ_REGS_OUT_REGS_OFST 16
-#define MC_CMD_READ_REGS_OUT_REGS_LEN 4
-#define MC_CMD_READ_REGS_OUT_REGS_NUM 73
-
/***********************************/
/* MC_CMD_INIT_EVQ
@@ -10640,25 +11333,6 @@
#define MC_CMD_INIT_EVQ_V3_OUT_FLAG_RXQ_FORCE_EV_MERGING_LBN 3
#define MC_CMD_INIT_EVQ_V3_OUT_FLAG_RXQ_FORCE_EV_MERGING_WIDTH 1
-/* QUEUE_CRC_MODE structuredef */
-#define QUEUE_CRC_MODE_LEN 1
-#define QUEUE_CRC_MODE_MODE_LBN 0
-#define QUEUE_CRC_MODE_MODE_WIDTH 4
-/* enum: No CRC. */
-#define QUEUE_CRC_MODE_NONE 0x0
-/* enum: CRC Fiber channel over ethernet. */
-#define QUEUE_CRC_MODE_FCOE 0x1
-/* enum: CRC (digest) iSCSI header only. */
-#define QUEUE_CRC_MODE_ISCSI_HDR 0x2
-/* enum: CRC (digest) iSCSI header and payload. */
-#define QUEUE_CRC_MODE_ISCSI 0x3
-/* enum: CRC Fiber channel over IP over ethernet. */
-#define QUEUE_CRC_MODE_FCOIPOE 0x4
-/* enum: CRC MPA. */
-#define QUEUE_CRC_MODE_MPA 0x5
-#define QUEUE_CRC_MODE_SPARE_LBN 4
-#define QUEUE_CRC_MODE_SPARE_WIDTH 4
-
/***********************************/
/* MC_CMD_INIT_RXQ
@@ -10827,6 +11501,9 @@
#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_NO_CONT_EV_OFST 16
#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_NO_CONT_EV_LBN 20
#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_NO_CONT_EV_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SUPPRESS_RX_EVENTS_OFST 16
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SUPPRESS_RX_EVENTS_LBN 21
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SUPPRESS_RX_EVENTS_WIDTH 1
/* Owner ID to use if in buffer mode (zero if physical) */
#define MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_OFST 20
#define MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_LEN 4
@@ -10933,6 +11610,9 @@
#define MC_CMD_INIT_RXQ_V3_IN_FLAG_NO_CONT_EV_OFST 16
#define MC_CMD_INIT_RXQ_V3_IN_FLAG_NO_CONT_EV_LBN 20
#define MC_CMD_INIT_RXQ_V3_IN_FLAG_NO_CONT_EV_WIDTH 1
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_SUPPRESS_RX_EVENTS_OFST 16
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_SUPPRESS_RX_EVENTS_LBN 21
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_SUPPRESS_RX_EVENTS_WIDTH 1
/* Owner ID to use if in buffer mode (zero if physical) */
#define MC_CMD_INIT_RXQ_V3_IN_OWNER_ID_OFST 20
#define MC_CMD_INIT_RXQ_V3_IN_OWNER_ID_LEN 4
@@ -11068,6 +11748,9 @@
#define MC_CMD_INIT_RXQ_V4_IN_FLAG_NO_CONT_EV_OFST 16
#define MC_CMD_INIT_RXQ_V4_IN_FLAG_NO_CONT_EV_LBN 20
#define MC_CMD_INIT_RXQ_V4_IN_FLAG_NO_CONT_EV_WIDTH 1
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_SUPPRESS_RX_EVENTS_OFST 16
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_SUPPRESS_RX_EVENTS_LBN 21
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_SUPPRESS_RX_EVENTS_WIDTH 1
/* Owner ID to use if in buffer mode (zero if physical) */
#define MC_CMD_INIT_RXQ_V4_IN_OWNER_ID_OFST 20
#define MC_CMD_INIT_RXQ_V4_IN_OWNER_ID_LEN 4
@@ -11216,6 +11899,9 @@
#define MC_CMD_INIT_RXQ_V5_IN_FLAG_NO_CONT_EV_OFST 16
#define MC_CMD_INIT_RXQ_V5_IN_FLAG_NO_CONT_EV_LBN 20
#define MC_CMD_INIT_RXQ_V5_IN_FLAG_NO_CONT_EV_WIDTH 1
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_SUPPRESS_RX_EVENTS_OFST 16
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_SUPPRESS_RX_EVENTS_LBN 21
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_SUPPRESS_RX_EVENTS_WIDTH 1
/* Owner ID to use if in buffer mode (zero if physical) */
#define MC_CMD_INIT_RXQ_V5_IN_OWNER_ID_OFST 20
#define MC_CMD_INIT_RXQ_V5_IN_OWNER_ID_LEN 4
@@ -11610,320 +12296,6 @@
/* MC_CMD_PROXY_CMD_OUT msgresponse */
#define MC_CMD_PROXY_CMD_OUT_LEN 0
-/* MC_PROXY_STATUS_BUFFER structuredef: Host memory status buffer used to
- * manage proxied requests
- */
-#define MC_PROXY_STATUS_BUFFER_LEN 16
-/* Handle allocated by the firmware for this proxy transaction */
-#define MC_PROXY_STATUS_BUFFER_HANDLE_OFST 0
-#define MC_PROXY_STATUS_BUFFER_HANDLE_LEN 4
-/* enum: An invalid handle. */
-#define MC_PROXY_STATUS_BUFFER_HANDLE_INVALID 0x0
-#define MC_PROXY_STATUS_BUFFER_HANDLE_LBN 0
-#define MC_PROXY_STATUS_BUFFER_HANDLE_WIDTH 32
-/* The requesting physical function number */
-#define MC_PROXY_STATUS_BUFFER_PF_OFST 4
-#define MC_PROXY_STATUS_BUFFER_PF_LEN 2
-#define MC_PROXY_STATUS_BUFFER_PF_LBN 32
-#define MC_PROXY_STATUS_BUFFER_PF_WIDTH 16
-/* The requesting virtual function number. Set to VF_NULL if the target is a
- * PF.
- */
-#define MC_PROXY_STATUS_BUFFER_VF_OFST 6
-#define MC_PROXY_STATUS_BUFFER_VF_LEN 2
-#define MC_PROXY_STATUS_BUFFER_VF_LBN 48
-#define MC_PROXY_STATUS_BUFFER_VF_WIDTH 16
-/* The target function RID. */
-#define MC_PROXY_STATUS_BUFFER_RID_OFST 8
-#define MC_PROXY_STATUS_BUFFER_RID_LEN 2
-#define MC_PROXY_STATUS_BUFFER_RID_LBN 64
-#define MC_PROXY_STATUS_BUFFER_RID_WIDTH 16
-/* The status of the proxy as described in MC_CMD_PROXY_COMPLETE. */
-#define MC_PROXY_STATUS_BUFFER_STATUS_OFST 10
-#define MC_PROXY_STATUS_BUFFER_STATUS_LEN 2
-#define MC_PROXY_STATUS_BUFFER_STATUS_LBN 80
-#define MC_PROXY_STATUS_BUFFER_STATUS_WIDTH 16
-/* If a request is authorized rather than carried out by the host, this is the
- * elevated privilege mask granted to the requesting function.
- */
-#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_OFST 12
-#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_LEN 4
-#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_LBN 96
-#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_WIDTH 32
-
-
-/***********************************/
-/* MC_CMD_PROXY_CONFIGURE
- * Enable/disable authorization of MCDI requests from unprivileged functions by
- * a designated admin function
- */
-#define MC_CMD_PROXY_CONFIGURE 0x58
-#undef MC_CMD_0x58_PRIVILEGE_CTG
-
-#define MC_CMD_0x58_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_PROXY_CONFIGURE_IN msgrequest */
-#define MC_CMD_PROXY_CONFIGURE_IN_LEN 108
-#define MC_CMD_PROXY_CONFIGURE_IN_FLAGS_OFST 0
-#define MC_CMD_PROXY_CONFIGURE_IN_FLAGS_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_IN_ENABLE_OFST 0
-#define MC_CMD_PROXY_CONFIGURE_IN_ENABLE_LBN 0
-#define MC_CMD_PROXY_CONFIGURE_IN_ENABLE_WIDTH 1
-/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
- * of blocks, each of the size REQUEST_BLOCK_SIZE.
- */
-#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_OFST 4
-#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LEN 8
-#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LO_OFST 4
-#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LO_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LO_LBN 32
-#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LO_WIDTH 32
-#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_OFST 8
-#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_LBN 64
-#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_WIDTH 32
-/* Must be a power of 2 */
-#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE_OFST 12
-#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE_LEN 4
-/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
- * of blocks, each of the size REPLY_BLOCK_SIZE.
- */
-#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_OFST 16
-#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LEN 8
-#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LO_OFST 16
-#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LO_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LO_LBN 128
-#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LO_WIDTH 32
-#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_OFST 20
-#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_LBN 160
-#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_WIDTH 32
-/* Must be a power of 2 */
-#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BLOCK_SIZE_OFST 24
-#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BLOCK_SIZE_LEN 4
-/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
- * of blocks, each of the size STATUS_BLOCK_SIZE. This buffer is only needed if
- * host intends to complete proxied operations by using MC_CMD_PROXY_CMD.
- */
-#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_OFST 28
-#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LEN 8
-#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LO_OFST 28
-#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LO_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LO_LBN 224
-#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LO_WIDTH 32
-#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_OFST 32
-#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_LBN 256
-#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_WIDTH 32
-/* Must be a power of 2, or zero if this buffer is not provided */
-#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BLOCK_SIZE_OFST 36
-#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BLOCK_SIZE_LEN 4
-/* Applies to all three buffers */
-#define MC_CMD_PROXY_CONFIGURE_IN_NUM_BLOCKS_OFST 40
-#define MC_CMD_PROXY_CONFIGURE_IN_NUM_BLOCKS_LEN 4
-/* A bit mask defining which MCDI operations may be proxied */
-#define MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_OFST 44
-#define MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_LEN 64
-
-/* MC_CMD_PROXY_CONFIGURE_EXT_IN msgrequest */
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_LEN 112
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_FLAGS_OFST 0
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_FLAGS_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_OFST 0
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_LBN 0
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_WIDTH 1
-/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
- * of blocks, each of the size REQUEST_BLOCK_SIZE.
- */
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_OFST 4
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LEN 8
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LO_OFST 4
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LO_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LO_LBN 32
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LO_WIDTH 32
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_HI_OFST 8
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_HI_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_HI_LBN 64
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_HI_WIDTH 32
-/* Must be a power of 2 */
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BLOCK_SIZE_OFST 12
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BLOCK_SIZE_LEN 4
-/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
- * of blocks, each of the size REPLY_BLOCK_SIZE.
- */
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_OFST 16
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LEN 8
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LO_OFST 16
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LO_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LO_LBN 128
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LO_WIDTH 32
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_HI_OFST 20
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_HI_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_HI_LBN 160
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_HI_WIDTH 32
-/* Must be a power of 2 */
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BLOCK_SIZE_OFST 24
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BLOCK_SIZE_LEN 4
-/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
- * of blocks, each of the size STATUS_BLOCK_SIZE. This buffer is only needed if
- * host intends to complete proxied operations by using MC_CMD_PROXY_CMD.
- */
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_OFST 28
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LEN 8
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LO_OFST 28
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LO_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LO_LBN 224
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LO_WIDTH 32
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_HI_OFST 32
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_HI_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_HI_LBN 256
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_HI_WIDTH 32
-/* Must be a power of 2, or zero if this buffer is not provided */
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BLOCK_SIZE_OFST 36
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BLOCK_SIZE_LEN 4
-/* Applies to all three buffers */
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_NUM_BLOCKS_OFST 40
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_NUM_BLOCKS_LEN 4
-/* A bit mask defining which MCDI operations may be proxied */
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ALLOWED_MCDI_MASK_OFST 44
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ALLOWED_MCDI_MASK_LEN 64
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_RESERVED_OFST 108
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_RESERVED_LEN 4
-
-/* MC_CMD_PROXY_CONFIGURE_OUT msgresponse */
-#define MC_CMD_PROXY_CONFIGURE_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_PROXY_COMPLETE
- * Tells FW that a requested proxy operation has either been completed (by
- * using MC_CMD_PROXY_CMD) or authorized/declined. May only be sent by the
- * function that enabled proxying/authorization (by using
- * MC_CMD_PROXY_CONFIGURE).
- */
-#define MC_CMD_PROXY_COMPLETE 0x5f
-#undef MC_CMD_0x5f_PRIVILEGE_CTG
-
-#define MC_CMD_0x5f_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_PROXY_COMPLETE_IN msgrequest */
-#define MC_CMD_PROXY_COMPLETE_IN_LEN 12
-#define MC_CMD_PROXY_COMPLETE_IN_BLOCK_INDEX_OFST 0
-#define MC_CMD_PROXY_COMPLETE_IN_BLOCK_INDEX_LEN 4
-#define MC_CMD_PROXY_COMPLETE_IN_STATUS_OFST 4
-#define MC_CMD_PROXY_COMPLETE_IN_STATUS_LEN 4
-/* enum: The operation has been completed by using MC_CMD_PROXY_CMD, the reply
- * is stored in the REPLY_BUFF.
- */
-#define MC_CMD_PROXY_COMPLETE_IN_COMPLETE 0x0
-/* enum: The operation has been authorized. The originating function may now
- * try again.
- */
-#define MC_CMD_PROXY_COMPLETE_IN_AUTHORIZED 0x1
-/* enum: The operation has been declined. */
-#define MC_CMD_PROXY_COMPLETE_IN_DECLINED 0x2
-/* enum: The authorization failed because the relevant application did not
- * respond in time.
- */
-#define MC_CMD_PROXY_COMPLETE_IN_TIMEDOUT 0x3
-#define MC_CMD_PROXY_COMPLETE_IN_HANDLE_OFST 8
-#define MC_CMD_PROXY_COMPLETE_IN_HANDLE_LEN 4
-
-/* MC_CMD_PROXY_COMPLETE_OUT msgresponse */
-#define MC_CMD_PROXY_COMPLETE_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_ALLOC_BUFTBL_CHUNK
- * Allocate a set of buffer table entries using the specified owner ID. This
- * operation allocates the required buffer table entries (and fails if it
- * cannot do so). The buffer table entries will initially be zeroed.
- */
-#define MC_CMD_ALLOC_BUFTBL_CHUNK 0x87
-#undef MC_CMD_0x87_PRIVILEGE_CTG
-
-#define MC_CMD_0x87_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
-
-/* MC_CMD_ALLOC_BUFTBL_CHUNK_IN msgrequest */
-#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_LEN 8
-/* Owner ID to use */
-#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_OWNER_OFST 0
-#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_OWNER_LEN 4
-/* Size of buffer table pages to use, in bytes (note that only a few values are
- * legal on any specific hardware).
- */
-#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_PAGE_SIZE_OFST 4
-#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_PAGE_SIZE_LEN 4
-
-/* MC_CMD_ALLOC_BUFTBL_CHUNK_OUT msgresponse */
-#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_LEN 12
-#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_HANDLE_OFST 0
-#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_HANDLE_LEN 4
-#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_NUMENTRIES_OFST 4
-#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_NUMENTRIES_LEN 4
-/* Buffer table IDs for use in DMA descriptors. */
-#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_ID_OFST 8
-#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_ID_LEN 4
-
-
-/***********************************/
-/* MC_CMD_PROGRAM_BUFTBL_ENTRIES
- * Reprogram a set of buffer table entries in the specified chunk.
- */
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES 0x88
-#undef MC_CMD_0x88_PRIVILEGE_CTG
-
-#define MC_CMD_0x88_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
-
-/* MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN msgrequest */
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMIN 20
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX 268
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX_MCDI2 268
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LEN(num) (12+8*(num))
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_NUM(len) (((len)-12)/8)
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_OFST 0
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_LEN 4
-/* ID */
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID_OFST 4
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID_LEN 4
-/* Num entries */
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 8
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES_LEN 4
-/* Buffer table entry address */
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_OFST 12
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LEN 8
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LO_OFST 12
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LO_LEN 4
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LO_LBN 96
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LO_WIDTH 32
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_OFST 16
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_LEN 4
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_LBN 128
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_WIDTH 32
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MINNUM 1
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM 32
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM_MCDI2 32
-
-/* MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT msgresponse */
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_FREE_BUFTBL_CHUNK
- */
-#define MC_CMD_FREE_BUFTBL_CHUNK 0x89
-#undef MC_CMD_0x89_PRIVILEGE_CTG
-
-#define MC_CMD_0x89_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
-
-/* MC_CMD_FREE_BUFTBL_CHUNK_IN msgrequest */
-#define MC_CMD_FREE_BUFTBL_CHUNK_IN_LEN 4
-#define MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_OFST 0
-#define MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_LEN 4
-
-/* MC_CMD_FREE_BUFTBL_CHUNK_OUT msgresponse */
-#define MC_CMD_FREE_BUFTBL_CHUNK_OUT_LEN 0
-
/***********************************/
/* MC_CMD_FILTER_OP
@@ -12822,6 +13194,10 @@
#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_VNIC_ENCAP_MATCHES 0x5
/* enum: read the supported encapsulation types for the VNIC */
#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_VNIC_ENCAP_TYPES 0x6
+/* enum: read the supported RX filter matches for low-latency queues (as
+ * allocated by MC_CMD_ALLOC_LL_QUEUES)
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_LL_RX_MATCHES 0x7
/* MC_CMD_GET_PARSER_DISP_INFO_OUT msgresponse */
#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMIN 8
@@ -12860,6 +13236,48 @@
#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_LBN 0
#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_WIDTH 1
+/* MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT msgresponse:
+ * GET_PARSER_DISP_INFO response format for OP_GET_SECURITY_RULE_INFO.
+ * (Medford-only; for use by SolarSecure apps, not directly by drivers. See
+ * SF-114946-SW.) NOTE - this message definition is provisional. It has not yet
+ * been used in any released code and may change during development. This note
+ * will be removed once it is regarded as stable.
+ */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_LEN 36
+/* identifies the type of operation requested */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_OP_OFST 0
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_OP_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
+/* a version number representing the set of rule lookups that are implemented
+ * by the currently running firmware
+ */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_RULES_VERSION_OFST 4
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_RULES_VERSION_LEN 4
+/* enum: implements lookup sequences described in SF-114946-SW draft C */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_RULES_VERSION_SF_114946_SW_C 0x0
+/* the number of nodes in the subnet map */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_MAP_NUM_NODES_OFST 8
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_MAP_NUM_NODES_LEN 4
+/* the number of entries in one subnet map node */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_MAP_NUM_ENTRIES_PER_NODE_OFST 12
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_MAP_NUM_ENTRIES_PER_NODE_LEN 4
+/* minimum valid value for a subnet ID in a subnet map leaf */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_ID_MIN_OFST 16
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_ID_MIN_LEN 4
+/* maximum valid value for a subnet ID in a subnet map leaf */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_ID_MAX_OFST 20
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_ID_MAX_LEN 4
+/* the number of entries in the local and remote port range maps */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_TREE_NUM_ENTRIES_OFST 24
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_TREE_NUM_ENTRIES_LEN 4
+/* minimum valid value for a portrange ID in a port range map leaf */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_ID_MIN_OFST 28
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_ID_MIN_LEN 4
+/* maximum valid value for a portrange ID in a port range map leaf */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_ID_MAX_OFST 32
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_ID_MAX_LEN 4
+
/* MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT msgresponse: This response is
* returned if a MC_CMD_GET_PARSER_DISP_INFO_IN request is sent with OP value
* OP_GET_SUPPORTED_VNIC_ENCAP_MATCHES. It contains information about the
@@ -12914,136 +13332,6 @@
/***********************************/
-/* MC_CMD_PARSER_DISP_RW
- * Direct read/write of parser-dispatcher state (DICPUs and LUE) for debugging.
- * Please note that this interface is only of use to debug tools which have
- * knowledge of firmware and hardware data structures; nothing here is intended
- * for use by normal driver code. Note that although this command is in the
- * Admin privilege group, in tamperproof adapters, only read operations are
- * permitted.
- */
-#define MC_CMD_PARSER_DISP_RW 0xe5
-#undef MC_CMD_0xe5_PRIVILEGE_CTG
-
-#define MC_CMD_0xe5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_PARSER_DISP_RW_IN msgrequest */
-#define MC_CMD_PARSER_DISP_RW_IN_LEN 32
-/* identifies the target of the operation */
-#define MC_CMD_PARSER_DISP_RW_IN_TARGET_OFST 0
-#define MC_CMD_PARSER_DISP_RW_IN_TARGET_LEN 4
-/* enum: RX dispatcher CPU */
-#define MC_CMD_PARSER_DISP_RW_IN_RX_DICPU 0x0
-/* enum: TX dispatcher CPU */
-#define MC_CMD_PARSER_DISP_RW_IN_TX_DICPU 0x1
-/* enum: Lookup engine (with original metadata format). Deprecated; used only
- * by cmdclient as a fallback for very old Huntington firmware, and not
- * supported in firmware beyond v6.4.0.1005. Use LUE_VERSIONED_METADATA
- * instead.
- */
-#define MC_CMD_PARSER_DISP_RW_IN_LUE 0x2
-/* enum: Lookup engine (with requested metadata format) */
-#define MC_CMD_PARSER_DISP_RW_IN_LUE_VERSIONED_METADATA 0x3
-/* enum: RX0 dispatcher CPU (alias for RX_DICPU; Medford has 2 RX DICPUs) */
-#define MC_CMD_PARSER_DISP_RW_IN_RX0_DICPU 0x0
-/* enum: RX1 dispatcher CPU (only valid for Medford) */
-#define MC_CMD_PARSER_DISP_RW_IN_RX1_DICPU 0x4
-/* enum: Miscellaneous other state (only valid for Medford) */
-#define MC_CMD_PARSER_DISP_RW_IN_MISC_STATE 0x5
-/* identifies the type of operation requested */
-#define MC_CMD_PARSER_DISP_RW_IN_OP_OFST 4
-#define MC_CMD_PARSER_DISP_RW_IN_OP_LEN 4
-/* enum: Read a word of DICPU DMEM or a LUE entry */
-#define MC_CMD_PARSER_DISP_RW_IN_READ 0x0
-/* enum: Write a word of DICPU DMEM or a LUE entry. Not permitted on
- * tamperproof adapters.
- */
-#define MC_CMD_PARSER_DISP_RW_IN_WRITE 0x1
-/* enum: Read-modify-write a word of DICPU DMEM (not valid for LUE). Not
- * permitted on tamperproof adapters.
- */
-#define MC_CMD_PARSER_DISP_RW_IN_RMW 0x2
-/* data memory address (DICPU targets) or LUE index (LUE targets) */
-#define MC_CMD_PARSER_DISP_RW_IN_ADDRESS_OFST 8
-#define MC_CMD_PARSER_DISP_RW_IN_ADDRESS_LEN 4
-/* selector (for MISC_STATE target) */
-#define MC_CMD_PARSER_DISP_RW_IN_SELECTOR_OFST 8
-#define MC_CMD_PARSER_DISP_RW_IN_SELECTOR_LEN 4
-/* enum: Port to datapath mapping */
-#define MC_CMD_PARSER_DISP_RW_IN_PORT_DP_MAPPING 0x1
-/* value to write (for DMEM writes) */
-#define MC_CMD_PARSER_DISP_RW_IN_DMEM_WRITE_VALUE_OFST 12
-#define MC_CMD_PARSER_DISP_RW_IN_DMEM_WRITE_VALUE_LEN 4
-/* XOR value (for DMEM read-modify-writes: new = (old & mask) ^ value) */
-#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_XOR_VALUE_OFST 12
-#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_XOR_VALUE_LEN 4
-/* AND mask (for DMEM read-modify-writes: new = (old & mask) ^ value) */
-#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_AND_MASK_OFST 16
-#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_AND_MASK_LEN 4
-/* metadata format (for LUE reads using LUE_VERSIONED_METADATA) */
-#define MC_CMD_PARSER_DISP_RW_IN_LUE_READ_METADATA_VERSION_OFST 12
-#define MC_CMD_PARSER_DISP_RW_IN_LUE_READ_METADATA_VERSION_LEN 4
-/* value to write (for LUE writes) */
-#define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_OFST 12
-#define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_LEN 20
-
-/* MC_CMD_PARSER_DISP_RW_OUT msgresponse */
-#define MC_CMD_PARSER_DISP_RW_OUT_LEN 52
-/* value read (for DMEM reads) */
-#define MC_CMD_PARSER_DISP_RW_OUT_DMEM_READ_VALUE_OFST 0
-#define MC_CMD_PARSER_DISP_RW_OUT_DMEM_READ_VALUE_LEN 4
-/* value read (for LUE reads) */
-#define MC_CMD_PARSER_DISP_RW_OUT_LUE_READ_VALUE_OFST 0
-#define MC_CMD_PARSER_DISP_RW_OUT_LUE_READ_VALUE_LEN 20
-/* up to 8 32-bit words of additional soft state from the LUE manager (the
- * exact content is firmware-dependent and intended only for debug use)
- */
-#define MC_CMD_PARSER_DISP_RW_OUT_LUE_MGR_STATE_OFST 20
-#define MC_CMD_PARSER_DISP_RW_OUT_LUE_MGR_STATE_LEN 32
-/* datapath(s) used for each port (for MISC_STATE PORT_DP_MAPPING selector) */
-#define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_OFST 0
-#define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_LEN 4
-#define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_NUM 4
-#define MC_CMD_PARSER_DISP_RW_OUT_DP0 0x1 /* enum */
-#define MC_CMD_PARSER_DISP_RW_OUT_DP1 0x2 /* enum */
-
-
-/***********************************/
-/* MC_CMD_GET_PF_COUNT
- * Get number of PFs on the device.
- */
-#define MC_CMD_GET_PF_COUNT 0xb6
-#undef MC_CMD_0xb6_PRIVILEGE_CTG
-
-#define MC_CMD_0xb6_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_PF_COUNT_IN msgrequest */
-#define MC_CMD_GET_PF_COUNT_IN_LEN 0
-
-/* MC_CMD_GET_PF_COUNT_OUT msgresponse */
-#define MC_CMD_GET_PF_COUNT_OUT_LEN 1
-/* Identifies the number of PFs on the device. */
-#define MC_CMD_GET_PF_COUNT_OUT_PF_COUNT_OFST 0
-#define MC_CMD_GET_PF_COUNT_OUT_PF_COUNT_LEN 1
-
-
-/***********************************/
-/* MC_CMD_SET_PF_COUNT
- * Set number of PFs on the device.
- */
-#define MC_CMD_SET_PF_COUNT 0xb7
-
-/* MC_CMD_SET_PF_COUNT_IN msgrequest */
-#define MC_CMD_SET_PF_COUNT_IN_LEN 4
-/* New number of PFs on the device. */
-#define MC_CMD_SET_PF_COUNT_IN_PF_COUNT_OFST 0
-#define MC_CMD_SET_PF_COUNT_IN_PF_COUNT_LEN 4
-
-/* MC_CMD_SET_PF_COUNT_OUT msgresponse */
-#define MC_CMD_SET_PF_COUNT_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_GET_PORT_ASSIGNMENT
* Get port assignment for current PCI function.
*/
@@ -13069,25 +13357,6 @@
/***********************************/
-/* MC_CMD_SET_PORT_ASSIGNMENT
- * Set port assignment for current PCI function.
- */
-#define MC_CMD_SET_PORT_ASSIGNMENT 0xb9
-#undef MC_CMD_0xb9_PRIVILEGE_CTG
-
-#define MC_CMD_0xb9_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_SET_PORT_ASSIGNMENT_IN msgrequest */
-#define MC_CMD_SET_PORT_ASSIGNMENT_IN_LEN 4
-/* Identifies the port assignment for this function. */
-#define MC_CMD_SET_PORT_ASSIGNMENT_IN_PORT_OFST 0
-#define MC_CMD_SET_PORT_ASSIGNMENT_IN_PORT_LEN 4
-
-/* MC_CMD_SET_PORT_ASSIGNMENT_OUT msgresponse */
-#define MC_CMD_SET_PORT_ASSIGNMENT_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_ALLOC_VIS
* Allocate VIs for current PCI function.
*/
@@ -13184,263 +13453,6 @@
/***********************************/
-/* MC_CMD_SET_SRIOV_CFG
- * Set SRIOV config for this PF.
- */
-#define MC_CMD_SET_SRIOV_CFG 0xbb
-#undef MC_CMD_0xbb_PRIVILEGE_CTG
-
-#define MC_CMD_0xbb_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_SET_SRIOV_CFG_IN msgrequest */
-#define MC_CMD_SET_SRIOV_CFG_IN_LEN 20
-/* Number of VFs currently enabled. */
-#define MC_CMD_SET_SRIOV_CFG_IN_VF_CURRENT_OFST 0
-#define MC_CMD_SET_SRIOV_CFG_IN_VF_CURRENT_LEN 4
-/* Max number of VFs before sriov stride and offset may need to be changed. */
-#define MC_CMD_SET_SRIOV_CFG_IN_VF_MAX_OFST 4
-#define MC_CMD_SET_SRIOV_CFG_IN_VF_MAX_LEN 4
-#define MC_CMD_SET_SRIOV_CFG_IN_FLAGS_OFST 8
-#define MC_CMD_SET_SRIOV_CFG_IN_FLAGS_LEN 4
-#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_OFST 8
-#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_LBN 0
-#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_WIDTH 1
-/* RID offset of first VF from PF, or 0 for no change, or
- * MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate an offset.
- */
-#define MC_CMD_SET_SRIOV_CFG_IN_VF_OFFSET_OFST 12
-#define MC_CMD_SET_SRIOV_CFG_IN_VF_OFFSET_LEN 4
-/* RID offset of each subsequent VF from the previous, 0 for no change, or
- * MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate a stride.
- */
-#define MC_CMD_SET_SRIOV_CFG_IN_VF_STRIDE_OFST 16
-#define MC_CMD_SET_SRIOV_CFG_IN_VF_STRIDE_LEN 4
-
-/* MC_CMD_SET_SRIOV_CFG_OUT msgresponse */
-#define MC_CMD_SET_SRIOV_CFG_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_GET_VI_ALLOC_INFO
- * Get information about number of VI's and base VI number allocated to this
- * function. This message is not available to dynamic clients created by
- * MC_CMD_CLIENT_ALLOC.
- */
-#define MC_CMD_GET_VI_ALLOC_INFO 0x8d
-#undef MC_CMD_0x8d_PRIVILEGE_CTG
-
-#define MC_CMD_0x8d_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_VI_ALLOC_INFO_IN msgrequest */
-#define MC_CMD_GET_VI_ALLOC_INFO_IN_LEN 0
-
-/* MC_CMD_GET_VI_ALLOC_INFO_OUT msgresponse */
-#define MC_CMD_GET_VI_ALLOC_INFO_OUT_LEN 12
-/* The number of VIs allocated on this function */
-#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_OFST 0
-#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_LEN 4
-/* The base absolute VI number allocated to this function. Required to
- * correctly interpret wakeup events.
- */
-#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_OFST 4
-#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_LEN 4
-/* Function's port vi_shift value (always 0 on Huntington) */
-#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_SHIFT_OFST 8
-#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_SHIFT_LEN 4
-
-
-/***********************************/
-/* MC_CMD_DUMP_VI_STATE
- * For CmdClient use. Dump pertinent information on a specific absolute VI. The
- * VI must be owned by the calling client or one of its ancestors; usership of
- * the VI (as set by MC_CMD_SET_VI_USER) is not sufficient.
- */
-#define MC_CMD_DUMP_VI_STATE 0x8e
-#undef MC_CMD_0x8e_PRIVILEGE_CTG
-
-#define MC_CMD_0x8e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_DUMP_VI_STATE_IN msgrequest */
-#define MC_CMD_DUMP_VI_STATE_IN_LEN 4
-/* The VI number to query. */
-#define MC_CMD_DUMP_VI_STATE_IN_VI_NUMBER_OFST 0
-#define MC_CMD_DUMP_VI_STATE_IN_VI_NUMBER_LEN 4
-
-/* MC_CMD_DUMP_VI_STATE_OUT msgresponse */
-#define MC_CMD_DUMP_VI_STATE_OUT_LEN 100
-/* The PF part of the function owning this VI. */
-#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_PF_OFST 0
-#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_PF_LEN 2
-/* The VF part of the function owning this VI. */
-#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_VF_OFST 2
-#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_VF_LEN 2
-/* Base of VIs allocated to this function. */
-#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_BASE_OFST 4
-#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_BASE_LEN 2
-/* Count of VIs allocated to the owner function. */
-#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_COUNT_OFST 6
-#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_COUNT_LEN 2
-/* Base interrupt vector allocated to this function. */
-#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_BASE_OFST 8
-#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_BASE_LEN 2
-/* Number of interrupt vectors allocated to this function. */
-#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_COUNT_OFST 10
-#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_COUNT_LEN 2
-/* Raw evq ptr table data. */
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_OFST 12
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LEN 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LO_OFST 12
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LO_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LO_LBN 96
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LO_WIDTH 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_HI_OFST 16
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_HI_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_HI_LBN 128
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_HI_WIDTH 32
-/* Raw evq timer table data. */
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_OFST 20
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LEN 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LO_OFST 20
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LO_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LO_LBN 160
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LO_WIDTH 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_OFST 24
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_LBN 192
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_WIDTH 32
-/* Combined metadata field. */
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_OFST 28
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_OFST 28
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_LBN 0
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_WIDTH 16
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_OFST 28
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_LBN 16
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_WIDTH 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_OFST 28
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_LBN 24
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_WIDTH 8
-/* TXDPCPU raw table data for queue. */
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_OFST 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LEN 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LO_OFST 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LO_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LO_LBN 256
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LO_WIDTH 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_HI_OFST 36
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_HI_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_HI_LBN 288
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_HI_WIDTH 32
-/* TXDPCPU raw table data for queue. */
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_OFST 40
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LEN 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LO_OFST 40
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LO_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LO_LBN 320
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LO_WIDTH 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_HI_OFST 44
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_HI_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_HI_LBN 352
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_HI_WIDTH 32
-/* TXDPCPU raw table data for queue. */
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_OFST 48
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LEN 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LO_OFST 48
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LO_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LO_LBN 384
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LO_WIDTH 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_HI_OFST 52
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_HI_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_HI_LBN 416
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_HI_WIDTH 32
-/* Combined metadata field. */
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_OFST 56
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LEN 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LO_OFST 56
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LO_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LO_LBN 448
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LO_WIDTH 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_HI_OFST 60
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_HI_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_HI_LBN 480
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_HI_WIDTH 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_OFST 56
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_LBN 0
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_WIDTH 16
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_OFST 56
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_LBN 16
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_WIDTH 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_OFST 56
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_LBN 24
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_WIDTH 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_OFST 56
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_LBN 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_WIDTH 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_OFST 56
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_LBN 40
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_WIDTH 24
-/* RXDPCPU raw table data for queue. */
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_OFST 64
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LEN 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LO_OFST 64
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LO_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LO_LBN 512
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LO_WIDTH 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_HI_OFST 68
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_HI_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_HI_LBN 544
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_HI_WIDTH 32
-/* RXDPCPU raw table data for queue. */
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_OFST 72
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LEN 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LO_OFST 72
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LO_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LO_LBN 576
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LO_WIDTH 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_HI_OFST 76
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_HI_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_HI_LBN 608
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_HI_WIDTH 32
-/* Reserved, currently 0. */
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_OFST 80
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LEN 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LO_OFST 80
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LO_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LO_LBN 640
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LO_WIDTH 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_HI_OFST 84
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_HI_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_HI_LBN 672
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_HI_WIDTH 32
-/* Combined metadata field. */
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_OFST 88
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LEN 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LO_OFST 88
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LO_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LO_LBN 704
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LO_WIDTH 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_HI_OFST 92
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_HI_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_HI_LBN 736
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_HI_WIDTH 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_OFST 88
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_LBN 0
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_WIDTH 16
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_OFST 88
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_LBN 16
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_WIDTH 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_OFST 88
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_LBN 24
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_WIDTH 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_OFST 88
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_LBN 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_WIDTH 8
-/* Current user, as assigned by MC_CMD_SET_VI_USER. */
-#define MC_CMD_DUMP_VI_STATE_OUT_USER_CLIENT_ID_OFST 96
-#define MC_CMD_DUMP_VI_STATE_OUT_USER_CLIENT_ID_LEN 4
-
-
-/***********************************/
/* MC_CMD_ALLOC_PIOBUF
* Allocate a push I/O buffer for later use with a tx queue.
*/
@@ -13491,354 +13503,102 @@
/* MC_CMD_GET_VI_TLP_PROCESSING_IN msgrequest */
#define MC_CMD_GET_VI_TLP_PROCESSING_IN_LEN 4
-/* VI number to get information for. */
+/* Queue handle, encodes queue type and VI number to get information for. */
#define MC_CMD_GET_VI_TLP_PROCESSING_IN_INSTANCE_OFST 0
#define MC_CMD_GET_VI_TLP_PROCESSING_IN_INSTANCE_LEN 4
-/* MC_CMD_GET_VI_TLP_PROCESSING_OUT msgresponse */
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_LEN 4
-/* Transaction processing steering hint 1 for use with the Rx Queue. */
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG1_RX_OFST 0
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG1_RX_LEN 1
-/* Transaction processing steering hint 2 for use with the Ev Queue. */
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG2_EV_OFST 1
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG2_EV_LEN 1
-/* Use Relaxed ordering model for TLPs on this VI. */
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_RELAXED_ORDERING_LBN 16
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_RELAXED_ORDERING_WIDTH 1
-/* Use ID based ordering for TLPs on this VI. */
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_ID_BASED_ORDERING_LBN 17
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_ID_BASED_ORDERING_WIDTH 1
-/* Set no snoop bit for TLPs on this VI. */
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_NO_SNOOP_LBN 18
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_NO_SNOOP_WIDTH 1
-/* Enable TPH for TLPs on this VI. */
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_LBN 19
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_WIDTH 1
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_DATA_OFST 0
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_DATA_LEN 4
+/* MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT msgresponse: This message has the same
+ * layout as GET_VI_TLP_PROCESSING_OUT, but with corrected field ordering to
+ * simplify use in drivers.
+ */
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_LEN 4
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_DATA_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_DATA_LEN 4
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_TPH_TAG1_RX_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_TPH_TAG1_RX_LBN 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_TPH_TAG1_RX_WIDTH 8
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_TPH_TAG2_EV_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_TPH_TAG2_EV_LBN 8
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_TPH_TAG2_EV_WIDTH 8
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_RELAXED_ORDERING_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_RELAXED_ORDERING_LBN 16
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_RELAXED_ORDERING_WIDTH 1
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_RELAXED_ORDERING_PACKET_DATA_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_RELAXED_ORDERING_PACKET_DATA_LBN 16
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_RELAXED_ORDERING_PACKET_DATA_WIDTH 1
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_ID_BASED_ORDERING_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_ID_BASED_ORDERING_LBN 17
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_ID_BASED_ORDERING_WIDTH 1
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_NO_SNOOP_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_NO_SNOOP_LBN 18
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_NO_SNOOP_WIDTH 1
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_TPH_ON_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_TPH_ON_LBN 19
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_TPH_ON_WIDTH 1
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_RELAXED_ORDERING_SYNC_DATA_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_RELAXED_ORDERING_SYNC_DATA_LBN 20
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_RELAXED_ORDERING_SYNC_DATA_WIDTH 1
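/* Editor's illustration (not part of the upstream diff): the V2_OUT response
 * packs every field into the single DATA dword at offset 0, so a driver can
 * decode any field from the LBN/WIDTH pairs defined above. A minimal sketch,
 * assuming the response dword has already been read out in CPU byte order;
 * the helper name is hypothetical.
 */
static inline unsigned int
mcdi_tlp_v2_get_field(unsigned int data, unsigned int lbn, unsigned int width)
{
	/* extract WIDTH bits starting at bit LBN (valid for width < 32) */
	return (data >> lbn) & ((1u << width) - 1);
}
/* e.g. mcdi_tlp_v2_get_field(data,
 *         MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_TPH_TAG1_RX_LBN,
 *         MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_TPH_TAG1_RX_WIDTH)
 * returns the RX steering tag from the response.
 */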
/***********************************/
/* MC_CMD_SET_VI_TLP_PROCESSING
* Set TLP steering and ordering information for a VI. The caller must have the
* GRP_FUNC_DMA privilege and must be the currently-assigned user of this VI or
- * an ancestor of the current user (see MC_CMD_SET_VI_USER).
+ * an ancestor of the current user (see MC_CMD_SET_VI_USER). Note that LL
+ * queues require this to be called after allocation but before initialisation
+ * of the queue. A queue's TLP options are fixed once the queue is initialised:
+ * they default to the current global values unless overridden beforehand using
+ * this command. At LL queue allocation, all overrides are cleared.
*/
#define MC_CMD_SET_VI_TLP_PROCESSING 0xb1
#undef MC_CMD_0xb1_PRIVILEGE_CTG
#define MC_CMD_0xb1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-/* MC_CMD_SET_VI_TLP_PROCESSING_IN msgrequest */
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_LEN 8
-/* VI number to set information for. */
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_INSTANCE_OFST 0
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_INSTANCE_LEN 4
-/* Transaction processing steering hint 1 for use with the Rx Queue. */
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG1_RX_OFST 4
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG1_RX_LEN 1
-/* Transaction processing steering hint 2 for use with the Ev Queue. */
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG2_EV_OFST 5
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG2_EV_LEN 1
-/* Use Relaxed ordering model for TLPs on this VI. */
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_RELAXED_ORDERING_LBN 48
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_RELAXED_ORDERING_WIDTH 1
-/* Use ID based ordering for TLPs on this VI. */
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_ID_BASED_ORDERING_LBN 49
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_ID_BASED_ORDERING_WIDTH 1
-/* Set the no snoop bit for TLPs on this VI. */
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_NO_SNOOP_LBN 50
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_NO_SNOOP_WIDTH 1
-/* Enable TPH for TLPs on this VI. */
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_ON_LBN 51
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_ON_WIDTH 1
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_DATA_OFST 4
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_DATA_LEN 4
+/* MC_CMD_SET_VI_TLP_PROCESSING_V2_IN msgrequest: This message has the same
+ * layout as SET_VI_TLP_PROCESSING_IN, but with corrected field ordering to
+ * simplify use in drivers.
+ */
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_LEN 8
+/* Queue handle, encodes queue type and VI number to set information for. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_INSTANCE_OFST 0
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_INSTANCE_LEN 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_DATA_OFST 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_DATA_LEN 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_TPH_TAG1_RX_OFST 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_TPH_TAG1_RX_LBN 0
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_TPH_TAG1_RX_WIDTH 8
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_TPH_TAG2_EV_OFST 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_TPH_TAG2_EV_LBN 8
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_TPH_TAG2_EV_WIDTH 8
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_RELAXED_ORDERING_OFST 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_RELAXED_ORDERING_LBN 16
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_RELAXED_ORDERING_WIDTH 1
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_RELAXED_ORDERING_PACKET_DATA_OFST 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_RELAXED_ORDERING_PACKET_DATA_LBN 16
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_RELAXED_ORDERING_PACKET_DATA_WIDTH 1
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_ID_BASED_ORDERING_OFST 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_ID_BASED_ORDERING_LBN 17
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_ID_BASED_ORDERING_WIDTH 1
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_NO_SNOOP_OFST 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_NO_SNOOP_LBN 18
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_NO_SNOOP_WIDTH 1
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_TPH_ON_OFST 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_TPH_ON_LBN 19
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_TPH_ON_WIDTH 1
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_RELAXED_ORDERING_SYNC_DATA_OFST 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_RELAXED_ORDERING_SYNC_DATA_LBN 20
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_RELAXED_ORDERING_SYNC_DATA_WIDTH 1
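/* Editor's illustration (not part of the upstream diff): for LL queues the
 * ordering described above matters - allocate the queue, issue
 * MC_CMD_SET_VI_TLP_PROCESSING with a V2_IN payload, then initialise the
 * queue. A minimal sketch of building the 8-byte request; mcdi_rpc() and
 * queue_handle are hypothetical placeholders for the driver's own MCDI
 * transport and the handle returned at allocation, and put_unaligned_le32()
 * is assumed to come from the kernel's unaligned-access helpers
 * (<linux/unaligned.h>).
 */
static inline void
set_vi_tlp_v2_example(unsigned int queue_handle, unsigned char *buf /* 8 bytes */)
{
	unsigned int data = 0;

	/* request TPH and relaxed ordering for packet data on this queue */
	data |= 1u << MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_TPH_ON_LBN;
	data |= 1u << MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_RELAXED_ORDERING_PACKET_DATA_LBN;

	/* MCDI payloads are little-endian dwords at the _OFST byte offsets */
	put_unaligned_le32(queue_handle,
			   buf + MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_INSTANCE_OFST);
	put_unaligned_le32(data,
			   buf + MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_DATA_OFST);
	/* then: mcdi_rpc(MC_CMD_SET_VI_TLP_PROCESSING, buf,
	 *                MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_LEN);
	 * followed by the queue INIT command.
	 */
}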
/* MC_CMD_SET_VI_TLP_PROCESSING_OUT msgresponse */
#define MC_CMD_SET_VI_TLP_PROCESSING_OUT_LEN 0
/***********************************/
-/* MC_CMD_GET_TLP_PROCESSING_GLOBALS
- * Get global PCIe steering and transaction processing configuration.
- */
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS 0xbc
-#undef MC_CMD_0xbc_PRIVILEGE_CTG
-
-#define MC_CMD_0xbc_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN msgrequest */
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_LEN 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_LEN 4
-/* enum: MISC. */
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_MISC 0x0
-/* enum: IDO. */
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_IDO 0x1
-/* enum: RO. */
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_RO 0x2
-/* enum: TPH Type. */
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_TPH_TYPE 0x3
-
-/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT msgresponse */
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_LEN 8
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_GLOBAL_CATEGORY_OFST 0
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_GLOBAL_CATEGORY_LEN 4
-/* Enum values, see field(s): */
-/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN/TLP_GLOBAL_CATEGORY */
-/* Amalgamated TLP info word. */
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_WORD_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_WORD_LEN 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_LBN 0
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_WIDTH 1
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_LBN 1
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_WIDTH 31
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_DL_EN_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_DL_EN_LBN 0
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_DL_EN_WIDTH 1
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_TX_EN_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_TX_EN_LBN 1
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_TX_EN_WIDTH 1
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_EV_EN_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_EV_EN_LBN 2
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_EV_EN_WIDTH 1
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_RX_EN_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_RX_EN_LBN 3
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_RX_EN_WIDTH 1
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_SPARE_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_SPARE_LBN 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_SPARE_WIDTH 28
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_RXDMA_EN_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_RXDMA_EN_LBN 0
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_RXDMA_EN_WIDTH 1
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_TXDMA_EN_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_TXDMA_EN_LBN 1
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_TXDMA_EN_WIDTH 1
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_DL_EN_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_DL_EN_LBN 2
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_DL_EN_WIDTH 1
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_SPARE_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_SPARE_LBN 3
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_SPARE_WIDTH 29
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_MSIX_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_MSIX_LBN 0
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_MSIX_WIDTH 2
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_DL_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_DL_LBN 2
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_DL_WIDTH 2
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_TX_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_TX_LBN 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_TX_WIDTH 2
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_EV_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_EV_LBN 6
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_EV_WIDTH 2
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_RX_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_RX_LBN 8
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_RX_WIDTH 2
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TLP_TYPE_SPARE_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TLP_TYPE_SPARE_LBN 9
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TLP_TYPE_SPARE_WIDTH 23
-
-
-/***********************************/
-/* MC_CMD_SET_TLP_PROCESSING_GLOBALS
- * Set global PCIe steering and transaction processing configuration.
- */
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS 0xbd
-#undef MC_CMD_0xbd_PRIVILEGE_CTG
-
-#define MC_CMD_0xbd_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN msgrequest */
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_LEN 8
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_LEN 4
-/* Enum values, see field(s): */
-/* MC_CMD_GET_TLP_PROCESSING_GLOBALS/MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN/TLP_GLOBAL_CATEGORY */
-/* Amalgamated TLP info word. */
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_WORD_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_WORD_LEN 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_LBN 0
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_WIDTH 1
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_LBN 0
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_WIDTH 1
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_TX_EN_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_TX_EN_LBN 1
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_TX_EN_WIDTH 1
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_EV_EN_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_EV_EN_LBN 2
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_EV_EN_WIDTH 1
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_RX_EN_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_RX_EN_LBN 3
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_RX_EN_WIDTH 1
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_RXDMA_EN_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_RXDMA_EN_LBN 0
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_RXDMA_EN_WIDTH 1
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_TXDMA_EN_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_TXDMA_EN_LBN 1
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_TXDMA_EN_WIDTH 1
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_DL_EN_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_DL_EN_LBN 2
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_DL_EN_WIDTH 1
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_MSIX_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_MSIX_LBN 0
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_MSIX_WIDTH 2
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_DL_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_DL_LBN 2
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_DL_WIDTH 2
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_TX_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_TX_LBN 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_TX_WIDTH 2
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_EV_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_EV_LBN 6
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_EV_WIDTH 2
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_RX_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_RX_LBN 8
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_RX_WIDTH 2
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_SPARE_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_SPARE_LBN 10
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_SPARE_WIDTH 22
-
-/* MC_CMD_SET_TLP_PROCESSING_GLOBALS_OUT msgresponse */
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_SATELLITE_DOWNLOAD
- * Download a new set of images to the satellite CPUs from the host.
- */
-#define MC_CMD_SATELLITE_DOWNLOAD 0x91
-#undef MC_CMD_0x91_PRIVILEGE_CTG
-
-#define MC_CMD_0x91_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_SATELLITE_DOWNLOAD_IN msgrequest: The reset requirements for the CPUs
- * are subtle, and so downloads must proceed in a number of phases.
- *
- * 1) PHASE_RESET with a target of TARGET_ALL and chunk ID/length of 0.
- *
- * 2) PHASE_IMEMS for each of the IMEM targets (target IDs 0-11). Each download
- * may consist of multiple chunks. The final chunk (with CHUNK_ID_LAST) should
- * be a checksum (a simple 32-bit sum) of the transferred data. An individual
- * download may be aborted using CHUNK_ID_ABORT.
- *
- * 3) PHASE_VECTORS for each of the vector table targets (target IDs 12-15),
- * similar to PHASE_IMEMS.
- *
- * 4) PHASE_READY with a target of TARGET_ALL and chunk ID/length of 0.
- *
- * After any error (a requested abort is not considered to be an error) the
- * sequence must be restarted from PHASE_RESET.
- */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_LENMIN 20
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_LENMAX 252
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_LENMAX_MCDI2 1020
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_LEN(num) (16+4*(num))
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_NUM(len) (((len)-16)/4)
-/* Download phase. (Note: the IDLE phase is used internally and is never valid
- * in a command from the host.)
- */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_OFST 0
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_LEN 4
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IDLE 0x0 /* enum */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_RESET 0x1 /* enum */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IMEMS 0x2 /* enum */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_VECTORS 0x3 /* enum */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_READY 0x4 /* enum */
-/* Target for download. (These match the blob numbers defined in
- * mc_flash_layout.h.)
- */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_OFST 4
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_LEN 4
-/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_TEXT 0x0
-/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_TEXT 0x1
-/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDP_TEXT 0x2
-/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDP_TEXT 0x3
-/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_LUT 0x4
-/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_LUT_CFG 0x5
-/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_LUT 0x6
-/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_LUT_CFG 0x7
-/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_PGM 0x8
-/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_SL_PGM 0x9
-/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_PGM 0xa
-/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_SL_PGM 0xb
-/* enum: Valid in phase 3 (PHASE_VECTORS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_VTBL0 0xc
-/* enum: Valid in phase 3 (PHASE_VECTORS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_VTBL0 0xd
-/* enum: Valid in phase 3 (PHASE_VECTORS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_VTBL1 0xe
-/* enum: Valid in phase 3 (PHASE_VECTORS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_VTBL1 0xf
-/* enum: Valid in phases 1 (PHASE_RESET) and 4 (PHASE_READY) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_ALL 0xffffffff
-/* Chunk ID, or CHUNK_ID_LAST or CHUNK_ID_ABORT */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_OFST 8
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_LEN 4
-/* enum: Last chunk, containing checksum rather than data */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_LAST 0xffffffff
-/* enum: Abort download of this item */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_ABORT 0xfffffffe
-/* Length of this chunk in bytes */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_LEN_OFST 12
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_LEN_LEN 4
-/* Data for this chunk */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_OFST 16
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_LEN 4
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_MINNUM 1
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_MAXNUM 59
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_MAXNUM_MCDI2 251
-
-/* MC_CMD_SATELLITE_DOWNLOAD_OUT msgresponse */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_LEN 8
-/* Same as MC_CMD_ERR field, but included as 0 in success cases */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_RESULT_OFST 0
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_RESULT_LEN 4
-/* Extra status information */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_INFO_OFST 4
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_INFO_LEN 4
-/* enum: Code download OK, completed. */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_COMPLETE 0x0
-/* enum: Code download aborted as requested. */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_ABORTED 0x1
-/* enum: Code download OK so far, send next chunk. */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_NEXT_CHUNK 0x2
-/* enum: Download phases out of sequence */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_PHASE 0x100
-/* enum: Bad target for this phase */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_TARGET 0x101
-/* enum: Chunk ID out of sequence */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHUNK_ID 0x200
-/* enum: Chunk length zero or too large */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHUNK_LEN 0x201
-/* enum: Checksum was incorrect */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHECKSUM 0x300
-
-
-/***********************************/
/* MC_CMD_GET_CAPABILITIES
- * Get device capabilities.
- *
- * This is supplementary to the MC_CMD_GET_BOARD_CFG command, and intended to
- * reference inherent device capabilities as opposed to current NVRAM config.
+ * Get device capabilities. This is supplementary to the MC_CMD_GET_BOARD_CFG
+ * command, and intended to reference inherent device capabilities as opposed
+ * to current NVRAM config.
*/
#define MC_CMD_GET_CAPABILITIES 0xbe
#undef MC_CMD_0xbe_PRIVILEGE_CTG
@@ -14490,7 +14250,10 @@
/* MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED 0xff */
/* enum: PF does not exist. */
/* MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT 0xfe */
-/* Number of VIs available for each external port */
+/* Number of VIs available for external ports 0-3. For devices with more than
+ * four ports, the remainder are in NUM_VIS_PER_PORT2 in
+ * GET_CAPABILITIES_V12_OUT.
+ */
#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_OFST 58
#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_LEN 2
#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_NUM 4
@@ -14900,7 +14663,10 @@
/* MC_CMD_GET_CAPABILITIES_V3_OUT_ACCESS_NOT_PERMITTED 0xff */
/* enum: PF does not exist. */
/* MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_PRESENT 0xfe */
-/* Number of VIs available for each external port */
+/* Number of VIs available for external ports 0-3. For devices with more than
+ * four ports, the remainder are in NUM_VIS_PER_PORT2 in
+ * GET_CAPABILITIES_V12_OUT.
+ */
#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_OFST 58
#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_LEN 2
#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_NUM 4
@@ -15335,7 +15101,10 @@
/* MC_CMD_GET_CAPABILITIES_V4_OUT_ACCESS_NOT_PERMITTED 0xff */
/* enum: PF does not exist. */
/* MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_PRESENT 0xfe */
-/* Number of VIs available for each external port */
+/* Number of VIs available for external ports 0-3. For devices with more than
+ * four ports, the remainder are in NUM_VIS_PER_PORT2 in
+ * GET_CAPABILITIES_V12_OUT.
+ */
#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_OFST 58
#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_LEN 2
#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_NUM 4
@@ -15778,7 +15547,10 @@
/* MC_CMD_GET_CAPABILITIES_V5_OUT_ACCESS_NOT_PERMITTED 0xff */
/* enum: PF does not exist. */
/* MC_CMD_GET_CAPABILITIES_V5_OUT_PF_NOT_PRESENT 0xfe */
-/* Number of VIs available for each external port */
+/* Number of VIs available for external ports 0-3. For devices with more than
+ * four ports, the remainder are in NUM_VIS_PER_PORT2 in
+ * GET_CAPABILITIES_V12_OUT.
+ */
#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VIS_PER_PORT_OFST 58
#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VIS_PER_PORT_LEN 2
#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VIS_PER_PORT_NUM 4
@@ -16226,7 +15998,10 @@
/* MC_CMD_GET_CAPABILITIES_V6_OUT_ACCESS_NOT_PERMITTED 0xff */
/* enum: PF does not exist. */
/* MC_CMD_GET_CAPABILITIES_V6_OUT_PF_NOT_PRESENT 0xfe */
-/* Number of VIs available for each external port */
+/* Number of VIs available for external ports 0-3. For devices with more than
+ * four ports, the remainder are in NUM_VIS_PER_PORT2 in
+ * GET_CAPABILITIES_V12_OUT.
+ */
#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VIS_PER_PORT_OFST 58
#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VIS_PER_PORT_LEN 2
#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VIS_PER_PORT_NUM 4
@@ -16685,7 +16460,10 @@
/* MC_CMD_GET_CAPABILITIES_V7_OUT_ACCESS_NOT_PERMITTED 0xff */
/* enum: PF does not exist. */
/* MC_CMD_GET_CAPABILITIES_V7_OUT_PF_NOT_PRESENT 0xfe */
-/* Number of VIs available for each external port */
+/* Number of VIs available for external ports 0-3. For devices with more than
+ * four ports, the remainder are in NUM_VIS_PER_PORT2 in
+ * GET_CAPABILITIES_V12_OUT.
+ */
#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VIS_PER_PORT_OFST 58
#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VIS_PER_PORT_LEN 2
#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VIS_PER_PORT_NUM 4
@@ -16796,9 +16574,21 @@
#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_STEER_ON_OUTER_SUPPORTED_OFST 148
#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_STEER_ON_OUTER_SUPPORTED_LBN 12
#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_STEER_ON_OUTER_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_V7_OUT_DYNAMIC_MPORT_JOURNAL_OFST 148
#define MC_CMD_GET_CAPABILITIES_V7_OUT_DYNAMIC_MPORT_JOURNAL_LBN 14
#define MC_CMD_GET_CAPABILITIES_V7_OUT_DYNAMIC_MPORT_JOURNAL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_CLIENT_CMD_VF_PROXY_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_CLIENT_CMD_VF_PROXY_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_CLIENT_CMD_VF_PROXY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_CXL_CONFIG_ENABLE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_CXL_CONFIG_ENABLE_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_CXL_CONFIG_ENABLE_WIDTH 1
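
Each capability flag added above is described by an _OFST/_LBN/_WIDTH triplet: the byte offset of the 32-bit flags word in the response, the bit's position within that word, and its width. Below is a minimal sketch of how a caller might test one of the new FLAGS3 bits from a raw little-endian response buffer; the helper names and buffer handling are illustrative assumptions, not the in-tree sfc MCDI accessors.

#include <stddef.h>
#include <stdint.h>

/* Illustrative helpers only; real drivers use their own MCDI accessors. */
static inline uint32_t mcdi_le32(const uint8_t *buf, size_t ofst)
{
	return (uint32_t)buf[ofst] |
	       ((uint32_t)buf[ofst + 1] << 8) |
	       ((uint32_t)buf[ofst + 2] << 16) |
	       ((uint32_t)buf[ofst + 3] << 24);
}

static inline uint32_t mcdi_field(const uint8_t *resp, size_t ofst,
				  unsigned int lbn, unsigned int width)
{
	uint32_t mask = width < 32 ? (1u << width) - 1 : 0xffffffffu;

	return (mcdi_le32(resp, ofst) >> lbn) & mask;
}

/*
 * Example: the new CXL_CONFIG_ENABLE flag lives in FLAGS3 (byte offset 148)
 * at bit 17 with width 1, so mcdi_field(resp, 148, 17, 1) is non-zero when
 * the firmware reports it (assuming the response is long enough for FLAGS3
 * to be present).
 */
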
/* MC_CMD_GET_CAPABILITIES_V8_OUT msgresponse */
#define MC_CMD_GET_CAPABILITIES_V8_OUT_LEN 160
@@ -17189,7 +16979,10 @@
/* MC_CMD_GET_CAPABILITIES_V8_OUT_ACCESS_NOT_PERMITTED 0xff */
/* enum: PF does not exist. */
/* MC_CMD_GET_CAPABILITIES_V8_OUT_PF_NOT_PRESENT 0xfe */
-/* Number of VIs available for each external port */
+/* Number of VIs available for external ports 0-3. For devices with more than
+ * four ports, the remainder are in NUM_VIS_PER_PORT2 in
+ * GET_CAPABILITIES_V12_OUT.
+ */
#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VIS_PER_PORT_OFST 58
#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VIS_PER_PORT_LEN 2
#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VIS_PER_PORT_NUM 4
@@ -17300,9 +17093,21 @@
#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_STEER_ON_OUTER_SUPPORTED_OFST 148
#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_STEER_ON_OUTER_SUPPORTED_LBN 12
#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_STEER_ON_OUTER_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_V8_OUT_DYNAMIC_MPORT_JOURNAL_OFST 148
#define MC_CMD_GET_CAPABILITIES_V8_OUT_DYNAMIC_MPORT_JOURNAL_LBN 14
#define MC_CMD_GET_CAPABILITIES_V8_OUT_DYNAMIC_MPORT_JOURNAL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_CLIENT_CMD_VF_PROXY_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_CLIENT_CMD_VF_PROXY_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_CLIENT_CMD_VF_PROXY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_CXL_CONFIG_ENABLE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_CXL_CONFIG_ENABLE_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_CXL_CONFIG_ENABLE_WIDTH 1
/* These bits are reserved for communicating test-specific capabilities to
* host-side test software. All production drivers should treat this field as
* opaque.
@@ -17707,7 +17512,10 @@
/* MC_CMD_GET_CAPABILITIES_V9_OUT_ACCESS_NOT_PERMITTED 0xff */
/* enum: PF does not exist. */
/* MC_CMD_GET_CAPABILITIES_V9_OUT_PF_NOT_PRESENT 0xfe */
-/* Number of VIs available for each external port */
+/* Number of VIs available for external ports 0-3. For devices with more than
+ * four ports, the remainder are in NUM_VIS_PER_PORT2 in
+ * GET_CAPABILITIES_V12_OUT.
+ */
#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VIS_PER_PORT_OFST 58
#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VIS_PER_PORT_LEN 2
#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VIS_PER_PORT_NUM 4
@@ -17818,9 +17626,21 @@
#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_STEER_ON_OUTER_SUPPORTED_OFST 148
#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_STEER_ON_OUTER_SUPPORTED_LBN 12
#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_STEER_ON_OUTER_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_V9_OUT_DYNAMIC_MPORT_JOURNAL_OFST 148
#define MC_CMD_GET_CAPABILITIES_V9_OUT_DYNAMIC_MPORT_JOURNAL_LBN 14
#define MC_CMD_GET_CAPABILITIES_V9_OUT_DYNAMIC_MPORT_JOURNAL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_CLIENT_CMD_VF_PROXY_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_CLIENT_CMD_VF_PROXY_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_CLIENT_CMD_VF_PROXY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_CXL_CONFIG_ENABLE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_CXL_CONFIG_ENABLE_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_CXL_CONFIG_ENABLE_WIDTH 1
/* These bits are reserved for communicating test-specific capabilities to
* host-side test software. All production drivers should treat this field as
* opaque.
@@ -18260,7 +18080,10 @@
/* MC_CMD_GET_CAPABILITIES_V10_OUT_ACCESS_NOT_PERMITTED 0xff */
/* enum: PF does not exist. */
/* MC_CMD_GET_CAPABILITIES_V10_OUT_PF_NOT_PRESENT 0xfe */
-/* Number of VIs available for each external port */
+/* Number of VIs available for external ports 0-3. For devices with more than
+ * four ports, the remainder are in NUM_VIS_PER_PORT2 in
+ * GET_CAPABILITIES_V12_OUT.
+ */
#define MC_CMD_GET_CAPABILITIES_V10_OUT_NUM_VIS_PER_PORT_OFST 58
#define MC_CMD_GET_CAPABILITIES_V10_OUT_NUM_VIS_PER_PORT_LEN 2
#define MC_CMD_GET_CAPABILITIES_V10_OUT_NUM_VIS_PER_PORT_NUM 4
@@ -18371,9 +18194,21 @@
#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_STEER_ON_OUTER_SUPPORTED_OFST 148
#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_STEER_ON_OUTER_SUPPORTED_LBN 12
#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_STEER_ON_OUTER_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_V10_OUT_DYNAMIC_MPORT_JOURNAL_OFST 148
#define MC_CMD_GET_CAPABILITIES_V10_OUT_DYNAMIC_MPORT_JOURNAL_LBN 14
#define MC_CMD_GET_CAPABILITIES_V10_OUT_DYNAMIC_MPORT_JOURNAL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_CLIENT_CMD_VF_PROXY_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_CLIENT_CMD_VF_PROXY_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_CLIENT_CMD_VF_PROXY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_CXL_CONFIG_ENABLE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_CXL_CONFIG_ENABLE_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_CXL_CONFIG_ENABLE_WIDTH 1
/* These bits are reserved for communicating test-specific capabilities to
* host-side test software. All production drivers should treat this field as
* opaque.
@@ -18438,6 +18273,1182 @@
#define MC_CMD_GET_CAPABILITIES_V10_OUT_GUARANTEED_QUEUE_SIZES_OFST 188
#define MC_CMD_GET_CAPABILITIES_V10_OUT_GUARANTEED_QUEUE_SIZES_LEN 4
+/* MC_CMD_GET_CAPABILITIES_V11_OUT msgresponse */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_LEN 196
+/* First word of flags. */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FLAGS1_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VPORT_RECONFIGURE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VPORT_RECONFIGURE_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_STRIPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_STRIPING_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_STRIPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VADAPTOR_QUERY_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VADAPTOR_QUERY_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VADAPTOR_QUERY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_DRV_ATTACH_PREBOOT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_FORCE_EVENT_MERGING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_SET_MAC_ENHANCED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_SET_MAC_ENHANCED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_MAC_SECURITY_FILTERING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_ADDITIONAL_RSS_MODES_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_QBB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_QBB_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_QBB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_RSS_LIMITED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_RSS_LIMITED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_RSS_LIMITED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PACKED_STREAM_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PACKED_STREAM_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_INCLUDE_FCS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_INCLUDE_FCS_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_VLAN_INSERTION_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_VLAN_INSERTION_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_VLAN_STRIPPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_VLAN_STRIPPING_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PREFIX_LEN_0_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PREFIX_LEN_0_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PREFIX_LEN_14_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PREFIX_LEN_14_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_TIMESTAMP_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_TIMESTAMP_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_BATCHING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_BATCHING_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_BATCHING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MCAST_FILTER_CHAINING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_PM_AND_RXDP_COUNTERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_DISABLE_SCATTER_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_DISABLE_SCATTER_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVB_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VXLAN_NVGRE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VXLAN_NVGRE_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_DPCPU_FW_ID_OFST 4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_RULES_ENGINE 0x5
+/* enum: DPDK RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_DPDK 0x6
+/* enum: BIST RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_TEST_FW_SLOW 0x10c
+/* TxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_DPCPU_FW_ID_OFST 6
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXDP_RULES_ENGINE 0x5
+/* enum: DPDK TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXDP_DPDK 0x6
+/* enum: BIST TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXDP_TEST_FW_CSR 0x103
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_VERSION_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_VERSION_REV_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_VERSION_TYPE_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_VERSION_REV_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_VERSION_TYPE_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_HW_CAPABILITIES_OFST 12
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_LICENSE_CAPABILITIES_OFST 16
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_LICENSE_CAPABILITIES_LEN 4
+/* Second word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FLAGS2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FLAGS2_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V2_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V2_ENCAP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVQ_TIMER_CTRL_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVQ_TIMER_CTRL_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVENT_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVENT_CUT_THROUGH_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_CUT_THROUGH_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_VFIFO_ULL_MODE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_EVQ_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_EVQ_V2_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_EVQ_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_MAC_TIMESTAMPING_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TIMESTAMP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TIMESTAMP_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_SNIFF_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_SNIFF_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MCDI_BACKGROUND_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MCDI_BACKGROUND_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MCDI_BACKGROUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MCDI_DB_RETURN_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MCDI_DB_RETURN_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MCDI_DB_RETURN_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_CTPIO_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_CTPIO_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_CTPIO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TSA_SUPPORT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TSA_SUPPORT_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TSA_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TSA_BOUND_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TSA_BOUND_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TSA_BOUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FILTER_ACTION_FLAG_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FILTER_ACTION_FLAG_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FILTER_ACTION_FLAG_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FILTER_ACTION_MARK_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FILTER_ACTION_MARK_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FILTER_ACTION_MARK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_L3XUDP_SUPPORT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_L3XUDP_SUPPORT_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_L3XUDP_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VI_SPREADING_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VI_SPREADING_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VI_SPREADING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_HLB_IDLE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_HLB_IDLE_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_HLB_IDLE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_RXQ_NO_CONT_EV_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_RXQ_NO_CONT_EV_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_BUNDLE_UPDATE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_BUNDLE_UPDATE_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_BUNDLE_UPDATE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V3_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V3_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V3_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_DYNAMIC_SENSORS_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_DYNAMIC_SENSORS_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_DYNAMIC_SENSORS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1
+/* Number of FATSOv2 contexts per datapath supported by this NIC (when
+ * TX_TSO_V2 == 1). Not present on older firmware (check the length).
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that PF is assigned, but it cannot be expressed
+ * in this field. It is intended for a possible future situation where a more
+ * complex scheme of PFs to ports mapping is being used. The future driver
+ * should look for a new field supporting the new scheme. The current/old
+ * driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NUM_VFS_PER_PF_OFST 42
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NUM_VFS_PER_PF_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/* MC_CMD_GET_CAPABILITIES_V11_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/* MC_CMD_GET_CAPABILITIES_V11_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for external ports 0-3. For devices with more than
+ * four ports, the remainder are in NUM_VIS_PER_PORT2 in
+ * GET_CAPABILITIES_V12_OUT.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NUM_VIS_PER_PORT_OFST 58
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NUM_VIS_PER_PORT_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NUM_VIS_PER_PORT_NUM 4
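
As the comment notes, NUM_VIS_PER_PORT only covers external ports 0-3: four 16-bit little-endian entries starting at byte offset 58. Ports beyond that are reported via NUM_VIS_PER_PORT2 in the V12 response. A hedged sketch of the lookup follows; the helper names are invented and the V12 fallback is left as a stub since that field is defined elsewhere.

#include <stddef.h>
#include <stdint.h>

/* Sketch only: VI count for external ports 0-3 from a V11 response. */
static inline uint16_t mcdi_le16(const uint8_t *buf, size_t ofst)
{
	return (uint16_t)(buf[ofst] | ((uint16_t)buf[ofst + 1] << 8));
}

static inline uint16_t num_vis_for_port(const uint8_t *resp, unsigned int port)
{
	if (port >= 4)	/* ports 4+ come from NUM_VIS_PER_PORT2 (V12 only) */
		return 0;
	return mcdi_le16(resp, 58 + 2 * port);
}
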
+/* Size of RX descriptor cache expressed as a binary logarithm. The actual
+ * size equals (2 ^ RX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as a binary logarithm. The actual
+ * size equals (2 ^ TX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_DESC_CACHE_SIZE_LEN 1
+/* Total number of available PIO buffers */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NUM_PIO_BUFFS_OFST 68
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_SIZE_PIO_BUFF_OFST 70
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_SIZE_PIO_BUFF_LEN 2
+/* On chips later than Medford the amount of address space assigned to each VI
+ * is configurable. This is a global setting that the driver must query to
+ * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available
+ * with 8k VI windows.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VI_WINDOW_MODE_OFST 72
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VI_WINDOW_MODE_LEN 1
+/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
+ * CTPIO is not mapped.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VI_WINDOW_MODE_8K 0x0
+/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VI_WINDOW_MODE_16K 0x1
+/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VI_WINDOW_MODE_64K 0x2
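
VI_WINDOW_MODE fixes how much register space each VI occupies, and therefore where a given VI's PIO and CTPIO apertures sit within the function's BAR. The sketch below spells out the address arithmetic implied by the enum values and comments above (PIO at 4k, CTPIO at 12k, CTPIO unmapped in 8k mode); the struct and function names are assumptions, not sfc driver code.

#include <stdint.h>

/* Per-VI aperture offsets implied by VI_WINDOW_MODE (sketch only). */
struct vi_window {
	uint64_t base;		/* start of this VI's window within the BAR */
	uint64_t pio;		/* PIO aperture, 4k into the window */
	uint64_t ctpio;		/* CTPIO aperture, 12k in; 0 when unmapped */
};

static inline int vi_window_layout(unsigned int mode, unsigned int vi,
				   struct vi_window *w)
{
	uint64_t size;

	switch (mode) {
	case 0x0: size = 8192; break;	/* VI_WINDOW_MODE_8K: no CTPIO */
	case 0x1: size = 16384; break;	/* VI_WINDOW_MODE_16K */
	case 0x2: size = 65536; break;	/* VI_WINDOW_MODE_64K */
	default: return -1;
	}

	w->base = (uint64_t)vi * size;
	w->pio = w->base + 4096;
	w->ctpio = mode ? w->base + 12288 : 0;
	return 0;
}
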
+/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1
+/* Number of buffers per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
+/* Entry count in the MAC stats array, including the final GENERATION_END
+ * entry. For MAC stats DMA, drivers should allocate a buffer large enough to
+ * hold at least this many 64-bit stats values, if they wish to receive all
+ * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the
+ * stats array returned will be truncated.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAC_STATS_NUM_STATS_OFST 76
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAC_STATS_NUM_STATS_LEN 2
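
MAC_STATS_NUM_STATS drives the DMA buffer sizing described in the comment: each stat is a 64-bit value and the count includes the trailing GENERATION_END entry. A one-function sketch of the sizing rule, with the surrounding allocation code assumed:

#include <stddef.h>
#include <stdint.h>

/*
 * Sketch: minimum DMA buffer length needed to receive the full MAC stats
 * array, where num_stats is the MAC_STATS_NUM_STATS value read from the
 * response. Shorter buffers receive a truncated array, per the comment above.
 */
static inline size_t mac_stats_min_buf_len(uint16_t num_stats)
{
	return (size_t)num_stats * sizeof(uint64_t);
}
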
+/* Maximum supported value for MC_CMD_FILTER_OP_V3/MATCH_MARK_VALUE. This field
+ * will only be non-zero if MC_CMD_GET_CAPABILITIES/FILTER_ACTION_MARK is set.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FILTER_ACTION_MARK_MAX_OFST 80
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FILTER_ACTION_MARK_MAX_LEN 4
+/* On devices where the INIT_RXQ_WITH_BUFFER_SIZE flag (in
+ * GET_CAPABILITIES_OUT_V2) is set, drivers have to specify a buffer size when
+ * they create an RX queue. Due to hardware limitations, only a small number of
+ * different buffer sizes may be available concurrently. Nonzero entries in
+ * this array are the sizes of buffers which the system guarantees will be
+ * available for use. If the list is empty, there are no limitations on
+ * concurrent buffer sizes.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_GUARANTEED_RX_BUFFER_SIZES_OFST 84
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_GUARANTEED_RX_BUFFER_SIZES_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_GUARANTEED_RX_BUFFER_SIZES_NUM 16
+/* Third word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FLAGS3_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FLAGS3_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_WOL_ETHERWAKE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_WOL_ETHERWAKE_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_WOL_ETHERWAKE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_EVEN_SPREADING_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_EVEN_SPREADING_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_EVEN_SPREADING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_SELECTABLE_TABLE_SIZE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_SELECTABLE_TABLE_SIZE_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_SELECTABLE_TABLE_SIZE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAE_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAE_SUPPORTED_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VDPA_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VDPA_SUPPORTED_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VDPA_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_UNSOL_EV_CREDIT_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_UNSOL_EV_CREDIT_SUPPORTED_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_UNSOL_EV_CREDIT_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_ENCAPSULATED_MCDI_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_ENCAPSULATED_MCDI_SUPPORTED_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_ENCAPSULATED_MCDI_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EXTERNAL_MAE_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EXTERNAL_MAE_SUPPORTED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EXTERNAL_MAE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_STEER_ON_OUTER_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_STEER_ON_OUTER_SUPPORTED_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_STEER_ON_OUTER_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_DYNAMIC_MPORT_JOURNAL_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_DYNAMIC_MPORT_JOURNAL_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_DYNAMIC_MPORT_JOURNAL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_CLIENT_CMD_VF_PROXY_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_CLIENT_CMD_VF_PROXY_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_CLIENT_CMD_VF_PROXY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_CXL_CONFIG_ENABLE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_CXL_CONFIG_ENABLE_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_CXL_CONFIG_ENABLE_WIDTH 1
+/* These bits are reserved for communicating test-specific capabilities to
+ * host-side test software. All production drivers should treat this field as
+ * opaque.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TEST_RESERVED_OFST 152
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TEST_RESERVED_LEN 8
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TEST_RESERVED_LO_OFST 152
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TEST_RESERVED_LO_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TEST_RESERVED_LO_LBN 1216
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TEST_RESERVED_LO_WIDTH 32
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TEST_RESERVED_HI_OFST 156
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TEST_RESERVED_HI_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TEST_RESERVED_HI_LBN 1248
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TEST_RESERVED_HI_WIDTH 32
+/* The minimum size (in table entries) of indirection table to be allocated
+ * from the pool for an RSS context. Note that the table size used must be a
+ * power of 2.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_MIN_INDIRECTION_TABLE_SIZE_OFST 160
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_MIN_INDIRECTION_TABLE_SIZE_LEN 4
+/* The maximum size (in table entries) of indirection table to be allocated
+ * from the pool for an RSS context. Note that the table size used must be a
+ * power of 2.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_MAX_INDIRECTION_TABLE_SIZE_OFST 164
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_MAX_INDIRECTION_TABLE_SIZE_LEN 4
+/* The maximum number of queues that can be used by an RSS context in exclusive
+ * mode. In exclusive mode the context has a configurable indirection table and
+ * a configurable RSS key.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_MAX_INDIRECTION_QUEUES_OFST 168
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_MAX_INDIRECTION_QUEUES_LEN 4
+/* The maximum number of queues that can be used by an RSS context in even-
+ * spreading mode. In even-spreading mode the context has no indirection table
+ * but it does have a configurable RSS key.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_MAX_EVEN_SPREADING_QUEUES_OFST 172
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_MAX_EVEN_SPREADING_QUEUES_LEN 4
+/* The total number of RSS contexts supported. Note that the number of
+ * available contexts using indirection tables is also limited by the
+ * availability of indirection table space allocated from a common pool.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_NUM_CONTEXTS_OFST 176
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_NUM_CONTEXTS_LEN 4
+/* The total amount of indirection table space that can be shared between RSS
+ * contexts.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_TABLE_POOL_SIZE_OFST 180
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_TABLE_POOL_SIZE_LEN 4
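
The RSS fields above bound the indirection table size a context may allocate from the shared pool, and the comments require the size used to be a power of 2 between the advertised minimum and maximum. A sketch of rounding a requested entry count to something within those constraints; the function name and the assumption that min/max are themselves powers of two are illustrative.

#include <stdint.h>

/*
 * Sketch: round a requested indirection table size up to a power of two and
 * keep it within [min, max] from RSS_MIN/MAX_INDIRECTION_TABLE_SIZE.
 * Returns 0 if the request cannot be satisfied.
 */
static inline uint32_t rss_table_size(uint32_t want, uint32_t min, uint32_t max)
{
	uint32_t size = min ? min : 1;

	while (size < want && size < max)
		size <<= 1;	/* stays a power of two if min is one */
	return size >= want ? size : 0;
}
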
+/* A bitmap of the queue sizes the device can provide, where bit N being set
+ * indicates that 2**N is a valid size. The device may be limited in the number
+ * of different queue sizes that can exist simultaneously, so a bit being set
+ * here does not guarantee that an attempt to create a queue of that size will
+ * succeed.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_SUPPORTED_QUEUE_SIZES_OFST 184
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_SUPPORTED_QUEUE_SIZES_LEN 4
+/* A bitmap of queue sizes that are always available, in the same format as
+ * SUPPORTED_QUEUE_SIZES. Attempting to create a queue with one of these sizes
+ * will never fail due to unavailability of the requested size.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_GUARANTEED_QUEUE_SIZES_OFST 188
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_GUARANTEED_QUEUE_SIZES_LEN 4
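
SUPPORTED_QUEUE_SIZES and GUARANTEED_QUEUE_SIZES are bitmaps in which bit N set means a queue of 2**N entries is valid (supported) or always creatable (guaranteed). A small sketch of the check; the function name is an assumption and __builtin_ctz is a GCC/Clang builtin standing in for an ilog2 helper.

#include <stdbool.h>
#include <stdint.h>

/*
 * Sketch: bit N of the bitmap corresponds to a queue of 2**N entries.
 * "Supported" sizes may still fail to allocate at runtime; "guaranteed"
 * sizes never fail for lack of that size, per the comments above.
 */
static inline bool queue_size_in_bitmap(uint32_t bitmap, unsigned int entries)
{
	/* the requested entry count must itself be a power of two */
	if (!entries || (entries & (entries - 1)))
		return false;
	return bitmap & (1u << __builtin_ctz(entries));
}
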
+/* Number of available indirect memory maps. */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INDIRECT_MAP_INDEX_COUNT_OFST 192
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INDIRECT_MAP_INDEX_COUNT_LEN 4
+
+/* MC_CMD_GET_CAPABILITIES_V12_OUT msgresponse */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_LEN 204
+/* First word of flags. */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FLAGS1_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VPORT_RECONFIGURE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VPORT_RECONFIGURE_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_STRIPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_STRIPING_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_STRIPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VADAPTOR_QUERY_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VADAPTOR_QUERY_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VADAPTOR_QUERY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_DRV_ATTACH_PREBOOT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_FORCE_EVENT_MERGING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_SET_MAC_ENHANCED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_SET_MAC_ENHANCED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_MAC_SECURITY_FILTERING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_ADDITIONAL_RSS_MODES_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_QBB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_QBB_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_QBB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_RSS_LIMITED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_RSS_LIMITED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_RSS_LIMITED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PACKED_STREAM_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PACKED_STREAM_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_INCLUDE_FCS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_INCLUDE_FCS_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_VLAN_INSERTION_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_VLAN_INSERTION_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_VLAN_STRIPPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_VLAN_STRIPPING_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PREFIX_LEN_0_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PREFIX_LEN_0_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PREFIX_LEN_14_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PREFIX_LEN_14_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_TIMESTAMP_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_TIMESTAMP_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_BATCHING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_BATCHING_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_BATCHING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MCAST_FILTER_CHAINING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_PM_AND_RXDP_COUNTERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_DISABLE_SCATTER_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_DISABLE_SCATTER_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVB_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VXLAN_NVGRE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VXLAN_NVGRE_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_DPCPU_FW_ID_OFST 4
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_RULES_ENGINE 0x5
+/* enum: DPDK RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_DPDK 0x6
+/* enum: BIST RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_TEST_FW_SLOW 0x10c
+/* TxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_DPCPU_FW_ID_OFST 6
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXDP_RULES_ENGINE 0x5
+/* enum: DPDK TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXDP_DPDK 0x6
+/* enum: BIST TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXDP_TEST_FW_CSR 0x103
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_VERSION_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_VERSION_REV_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_VERSION_TYPE_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_VERSION_REV_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_VERSION_TYPE_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_DPDK 0xa
+/* enum: TX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_HW_CAPABILITIES_OFST 12
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_LICENSE_CAPABILITIES_OFST 16
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_LICENSE_CAPABILITIES_LEN 4
+/* Second word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FLAGS2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FLAGS2_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V2_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V2_ENCAP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVQ_TIMER_CTRL_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVQ_TIMER_CTRL_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVENT_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVENT_CUT_THROUGH_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_CUT_THROUGH_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_VFIFO_ULL_MODE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_EVQ_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_EVQ_V2_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_EVQ_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_MAC_TIMESTAMPING_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TIMESTAMP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TIMESTAMP_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_SNIFF_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_SNIFF_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MCDI_BACKGROUND_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MCDI_BACKGROUND_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MCDI_BACKGROUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MCDI_DB_RETURN_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MCDI_DB_RETURN_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MCDI_DB_RETURN_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_CTPIO_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_CTPIO_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_CTPIO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TSA_SUPPORT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TSA_SUPPORT_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TSA_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TSA_BOUND_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TSA_BOUND_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TSA_BOUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FILTER_ACTION_FLAG_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FILTER_ACTION_FLAG_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FILTER_ACTION_FLAG_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FILTER_ACTION_MARK_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FILTER_ACTION_MARK_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FILTER_ACTION_MARK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_L3XUDP_SUPPORT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_L3XUDP_SUPPORT_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_L3XUDP_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VI_SPREADING_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VI_SPREADING_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VI_SPREADING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_HLB_IDLE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_HLB_IDLE_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_HLB_IDLE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_RXQ_NO_CONT_EV_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_RXQ_NO_CONT_EV_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_BUNDLE_UPDATE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_BUNDLE_UPDATE_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_BUNDLE_UPDATE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V3_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V3_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V3_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_DYNAMIC_SENSORS_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_DYNAMIC_SENSORS_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_DYNAMIC_SENSORS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1
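/*
 * Illustrative sketch (editorial, hypothetical helpers - not defined by this
 * header): every flag in FLAGS2 above is located by an OFST (byte offset of
 * its 32-bit word), an LBN (bit number within that word) and a WIDTH.
 * Assuming these definitions are in scope and `outbuf` holds a little-endian
 * GET_CAPABILITIES_V12 response, a flag such as TX_TSO_V2 can be tested as
 * below; the later sketches in this section reuse the same includes and
 * mcdi_read_dword().
 */
#include <stdbool.h>
#include <stdint.h>

static inline uint32_t mcdi_read_dword(const uint8_t *outbuf, unsigned int ofst)
{
	/* MCDI response fields are little-endian on the wire. */
	return (uint32_t)outbuf[ofst] |
	       ((uint32_t)outbuf[ofst + 1] << 8) |
	       ((uint32_t)outbuf[ofst + 2] << 16) |
	       ((uint32_t)outbuf[ofst + 3] << 24);
}

static inline bool caps_v12_has_tx_tso_v2(const uint8_t *outbuf)
{
	uint32_t flags2 = mcdi_read_dword(outbuf,
			MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V2_OFST);

	return (flags2 >> MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V2_LBN) & 1;
}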
+/* Number of FATSOv2 contexts per datapath supported by this NIC (when
+ * TX_TSO_V2 == 1). Not present on older firmware (check the length).
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that PF is assigned, but it cannot be expressed
+ * in this field. It is intended for a possible future situation where a more
+ * complex scheme of PFs to ports mapping is being used. The future driver
+ * should look for a new field supporting the new scheme. The current/old
+ * driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
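/*
 * Illustrative sketch (hypothetical helper): interpreting a single
 * PFS_TO_PORTS_ASSIGNMENT byte. Values below 0xfc are external port numbers;
 * the reserved values above are handled explicitly, and
 * INCOMPATIBLE_ASSIGNMENT is folded into "not assigned" as the comment above
 * requires of current drivers.
 */
static inline int caps_v12_pf_external_port(uint8_t assignment)
{
	switch (assignment) {
	case MC_CMD_GET_CAPABILITIES_V12_OUT_ACCESS_NOT_PERMITTED:
	case MC_CMD_GET_CAPABILITIES_V12_OUT_PF_NOT_PRESENT:
		return -1;	/* no usable information for this PF */
	case MC_CMD_GET_CAPABILITIES_V12_OUT_PF_NOT_ASSIGNED:
	case MC_CMD_GET_CAPABILITIES_V12_OUT_INCOMPATIBLE_ASSIGNMENT:
		return -2;	/* PF exists but has no port mapping we can use */
	default:
		return assignment;	/* external port number for this PF */
	}
}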
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_VFS_PER_PF_OFST 42
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_VFS_PER_PF_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/* MC_CMD_GET_CAPABILITIES_V12_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/* MC_CMD_GET_CAPABILITIES_V12_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for external ports 0-3. For devices with more than
+ * four ports, the remainder are in NUM_VIS_PER_PORT2 in
+ * GET_CAPABILITIES_V12_OUT.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_VIS_PER_PORT_OFST 58
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_VIS_PER_PORT_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache expressed as a binary logarithm. The actual
+ * size equals (2 ^ RX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as a binary logarithm. The actual
+ * size equals (2 ^ TX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_DESC_CACHE_SIZE_LEN 1
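/*
 * Illustrative sketch (hypothetical helper): the descriptor cache sizes are
 * reported as base-2 logarithms, so the single byte read from the response
 * is expanded with a shift.
 */
static inline unsigned int caps_v12_rx_desc_cache_entries(const uint8_t *outbuf)
{
	return 1u << outbuf[MC_CMD_GET_CAPABILITIES_V12_OUT_RX_DESC_CACHE_SIZE_OFST];
}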
+/* Total number of available PIO buffers */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_PIO_BUFFS_OFST 68
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_SIZE_PIO_BUFF_OFST 70
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_SIZE_PIO_BUFF_LEN 2
+/* On chips later than Medford the amount of address space assigned to each VI
+ * is configurable. This is a global setting that the driver must query to
+ * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available
+ * with 8k VI windows.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VI_WINDOW_MODE_OFST 72
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VI_WINDOW_MODE_LEN 1
+/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
+ * CTPIO is not mapped.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VI_WINDOW_MODE_8K 0x0
+/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VI_WINDOW_MODE_16K 0x1
+/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VI_WINDOW_MODE_64K 0x2
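/*
 * Illustrative sketch (hypothetical helper): translating VI_WINDOW_MODE into
 * the per-VI BAR stride described by the enum comments above.
 */
static inline unsigned int caps_v12_vi_window_stride(uint8_t mode)
{
	switch (mode) {
	case MC_CMD_GET_CAPABILITIES_V12_OUT_VI_WINDOW_MODE_8K:
		return 8192;	/* PIO at 4k, CTPIO not mapped */
	case MC_CMD_GET_CAPABILITIES_V12_OUT_VI_WINDOW_MODE_16K:
		return 16384;	/* PIO at 4k, CTPIO at 12k */
	case MC_CMD_GET_CAPABILITIES_V12_OUT_VI_WINDOW_MODE_64K:
		return 65536;	/* PIO at 4k, CTPIO at 12k */
	default:
		return 0;	/* unknown mode: do not map VIs */
	}
}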
+/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1
+/* Number of buffers per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
+/* Entry count in the MAC stats array, including the final GENERATION_END
+ * entry. For MAC stats DMA, drivers should allocate a buffer large enough to
+ * hold at least this many 64-bit stats values, if they wish to receive all
+ * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the
+ * stats array returned will be truncated.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAC_STATS_NUM_STATS_OFST 76
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAC_STATS_NUM_STATS_LEN 2
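/*
 * Illustrative sketch (hypothetical helper): sizing a MAC stats DMA buffer
 * from MAC_STATS_NUM_STATS. Each stat is a 64-bit value, so any buffer
 * smaller than the returned length yields a truncated stats array.
 */
static inline unsigned int caps_v12_mac_stats_dma_len(const uint8_t *outbuf)
{
	unsigned int num_stats =
		outbuf[MC_CMD_GET_CAPABILITIES_V12_OUT_MAC_STATS_NUM_STATS_OFST] |
		(outbuf[MC_CMD_GET_CAPABILITIES_V12_OUT_MAC_STATS_NUM_STATS_OFST + 1] << 8);

	return num_stats * 8;	/* 8 bytes per 64-bit statistic */
}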
+/* Maximum supported value for MC_CMD_FILTER_OP_V3/MATCH_MARK_VALUE. This field
+ * will only be non-zero if MC_CMD_GET_CAPABILITIES/FILTER_ACTION_MARK is set.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FILTER_ACTION_MARK_MAX_OFST 80
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FILTER_ACTION_MARK_MAX_LEN 4
+/* On devices where the INIT_RXQ_WITH_BUFFER_SIZE flag (in
+ * GET_CAPABILITIES_OUT_V2) is set, drivers have to specify a buffer size when
+ * they create an RX queue. Due to hardware limitations, only a small number of
+ * different buffer sizes may be available concurrently. Nonzero entries in
+ * this array are the sizes of buffers which the system guarantees will be
+ * available for use. If the list is empty, there are no limitations on
+ * concurrent buffer sizes.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_GUARANTEED_RX_BUFFER_SIZES_OFST 84
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_GUARANTEED_RX_BUFFER_SIZES_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_GUARANTEED_RX_BUFFER_SIZES_NUM 16
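/*
 * Illustrative sketch (hypothetical helper, reusing mcdi_read_dword() from
 * the first sketch above): checking whether an RX buffer size appears in
 * GUARANTEED_RX_BUFFER_SIZES. Zero entries are unused slots; an entirely
 * empty list means any mix of concurrent buffer sizes is acceptable.
 */
static inline bool caps_v12_rx_buffer_size_guaranteed(const uint8_t *outbuf,
						      uint32_t size)
{
	bool any_entry = false;
	unsigned int i;

	for (i = 0; i < MC_CMD_GET_CAPABILITIES_V12_OUT_GUARANTEED_RX_BUFFER_SIZES_NUM; i++) {
		uint32_t entry = mcdi_read_dword(outbuf,
			MC_CMD_GET_CAPABILITIES_V12_OUT_GUARANTEED_RX_BUFFER_SIZES_OFST + 4 * i);

		if (!entry)
			continue;
		any_entry = true;
		if (entry == size)
			return true;
	}
	return !any_entry;	/* empty list: no restriction on sizes */
}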
+/* Third word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FLAGS3_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FLAGS3_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_WOL_ETHERWAKE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_WOL_ETHERWAKE_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_WOL_ETHERWAKE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_EVEN_SPREADING_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_EVEN_SPREADING_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_EVEN_SPREADING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_SELECTABLE_TABLE_SIZE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_SELECTABLE_TABLE_SIZE_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_SELECTABLE_TABLE_SIZE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAE_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAE_SUPPORTED_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VDPA_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VDPA_SUPPORTED_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VDPA_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_UNSOL_EV_CREDIT_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_UNSOL_EV_CREDIT_SUPPORTED_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_UNSOL_EV_CREDIT_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_ENCAPSULATED_MCDI_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_ENCAPSULATED_MCDI_SUPPORTED_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_ENCAPSULATED_MCDI_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EXTERNAL_MAE_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EXTERNAL_MAE_SUPPORTED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EXTERNAL_MAE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_STEER_ON_OUTER_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_STEER_ON_OUTER_SUPPORTED_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_STEER_ON_OUTER_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_DYNAMIC_MPORT_JOURNAL_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_DYNAMIC_MPORT_JOURNAL_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_DYNAMIC_MPORT_JOURNAL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_CLIENT_CMD_VF_PROXY_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_CLIENT_CMD_VF_PROXY_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_CLIENT_CMD_VF_PROXY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_CXL_CONFIG_ENABLE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_CXL_CONFIG_ENABLE_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_CXL_CONFIG_ENABLE_WIDTH 1
+/* These bits are reserved for communicating test-specific capabilities to
+ * host-side test software. All production drivers should treat this field as
+ * opaque.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TEST_RESERVED_OFST 152
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TEST_RESERVED_LEN 8
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TEST_RESERVED_LO_OFST 152
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TEST_RESERVED_LO_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TEST_RESERVED_LO_LBN 1216
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TEST_RESERVED_LO_WIDTH 32
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TEST_RESERVED_HI_OFST 156
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TEST_RESERVED_HI_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TEST_RESERVED_HI_LBN 1248
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TEST_RESERVED_HI_WIDTH 32
+/* The minimum size (in table entries) of indirection table to be allocated
+ * from the pool for an RSS context. Note that the table size used must be a
+ * power of 2.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_MIN_INDIRECTION_TABLE_SIZE_OFST 160
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_MIN_INDIRECTION_TABLE_SIZE_LEN 4
+/* The maximum size (in table entries) of indirection table to be allocated
+ * from the pool for an RSS context. Note that the table size used must be a
+ * power of 2.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_MAX_INDIRECTION_TABLE_SIZE_OFST 164
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_MAX_INDIRECTION_TABLE_SIZE_LEN 4
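/*
 * Illustrative sketch (hypothetical helper): picking an indirection table
 * size for an exclusive RSS context. The size used must be a power of two
 * and must fall between the MIN and MAX values above; this assumes both
 * limits are themselves non-zero powers of two.
 */
static inline uint32_t rss_pick_table_size(uint32_t wanted, uint32_t min, uint32_t max)
{
	uint32_t size = min;

	if (!size)
		return 0;	/* nothing advertised */
	while (size < wanted && size < max)
		size <<= 1;	/* stays a power of two */
	return size;
}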
+/* The maximum number of queues that can be used by an RSS context in exclusive
+ * mode. In exclusive mode the context has a configurable indirection table and
+ * a configurable RSS key.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_MAX_INDIRECTION_QUEUES_OFST 168
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_MAX_INDIRECTION_QUEUES_LEN 4
+/* The maximum number of queues that can be used by an RSS context in even-
+ * spreading mode. In even-spreading mode the context has no indirection table
+ * but it does have a configurable RSS key.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_MAX_EVEN_SPREADING_QUEUES_OFST 172
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_MAX_EVEN_SPREADING_QUEUES_LEN 4
+/* The total number of RSS contexts supported. Note that the number of
+ * available contexts using indirection tables is also limited by the
+ * availability of indirection table space allocated from a common pool.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_NUM_CONTEXTS_OFST 176
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_NUM_CONTEXTS_LEN 4
+/* The total amount of indirection table space that can be shared between RSS
+ * contexts.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_TABLE_POOL_SIZE_OFST 180
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_TABLE_POOL_SIZE_LEN 4
+/* A bitmap of the queue sizes the device can provide, where bit N being set
+ * indicates that 2**N is a valid size. The device may be limited in the number
+ * of different queue sizes that can exist simultaneously, so a bit being set
+ * here does not guarantee that an attempt to create a queue of that size will
+ * succeed.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_SUPPORTED_QUEUE_SIZES_OFST 184
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_SUPPORTED_QUEUE_SIZES_LEN 4
+/* A bitmap of queue sizes that are always available, in the same format as
+ * SUPPORTED_QUEUE_SIZES. Attempting to create a queue with one of these sizes
+ * will never fail due to unavailability of the requested size.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_GUARANTEED_QUEUE_SIZES_OFST 188
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_GUARANTEED_QUEUE_SIZES_LEN 4
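/*
 * Illustrative sketch (hypothetical helper, reusing mcdi_read_dword() from
 * the first sketch above): testing a queue size against
 * SUPPORTED_QUEUE_SIZES, where bit N set means 2**N entries is a valid size.
 * A set bit does not guarantee the allocation succeeds; only
 * GUARANTEED_QUEUE_SIZES promises that.
 */
static inline bool caps_v12_queue_size_supported(const uint8_t *outbuf,
						 unsigned int entries)
{
	uint32_t bitmap = mcdi_read_dword(outbuf,
			MC_CMD_GET_CAPABILITIES_V12_OUT_SUPPORTED_QUEUE_SIZES_OFST);
	unsigned int log2_entries = 0;

	if (entries == 0 || (entries & (entries - 1)))
		return false;	/* only power-of-two sizes are described */
	while ((1u << log2_entries) != entries)
		log2_entries++;
	return (bitmap >> log2_entries) & 1;
}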
+/* Number of available indirect memory maps. */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INDIRECT_MAP_INDEX_COUNT_OFST 192
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INDIRECT_MAP_INDEX_COUNT_LEN 4
+/* Number of VIs available for external ports 4-7. Information for ports 0-3 is
+ * in NUM_VIS_PER_PORT in GET_CAPABILITIES_V2_OUT.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_VIS_PER_PORT2_OFST 196
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_VIS_PER_PORT2_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_VIS_PER_PORT2_NUM 4
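/*
 * Illustrative sketch (hypothetical helper): fetching the VI count for an
 * arbitrary external port, spanning NUM_VIS_PER_PORT (ports 0-3) and
 * NUM_VIS_PER_PORT2 (ports 4-7).
 */
static inline unsigned int caps_v12_num_vis_for_port(const uint8_t *outbuf,
						     unsigned int port)
{
	unsigned int ofst;

	if (port < 4)
		ofst = MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_VIS_PER_PORT_OFST + 2 * port;
	else if (port < 8)
		ofst = MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_VIS_PER_PORT2_OFST + 2 * (port - 4);
	else
		return 0;	/* no such external port described here */

	return outbuf[ofst] | (outbuf[ofst + 1] << 8);
}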
+
/***********************************/
/* MC_CMD_V2_EXTN
@@ -18468,168 +19479,13 @@
* are not defined.
*/
#define MC_CMD_V2_EXTN_IN_MCDI_MESSAGE_TYPE_TSA 0x1
-
-
-/***********************************/
-/* MC_CMD_TCM_BUCKET_ALLOC
- * Allocate a pacer bucket (for qau rp or a snapper test)
+/* enum: MCDI command used for platform management. Typically, these commands
+ * are used for low-level operations directed at the platform as a whole (e.g.
+ * MMIO device enumeration) rather than individual functions and use a
+ * dedicated comms channel (e.g. RPmsg/IPI). May be handled by the same or
+ * different CPU as MCDI_MESSAGE_TYPE_MC.
*/
-#define MC_CMD_TCM_BUCKET_ALLOC 0xb2
-#undef MC_CMD_0xb2_PRIVILEGE_CTG
-
-#define MC_CMD_0xb2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_TCM_BUCKET_ALLOC_IN msgrequest */
-#define MC_CMD_TCM_BUCKET_ALLOC_IN_LEN 0
-
-/* MC_CMD_TCM_BUCKET_ALLOC_OUT msgresponse */
-#define MC_CMD_TCM_BUCKET_ALLOC_OUT_LEN 4
-/* the bucket id */
-#define MC_CMD_TCM_BUCKET_ALLOC_OUT_BUCKET_OFST 0
-#define MC_CMD_TCM_BUCKET_ALLOC_OUT_BUCKET_LEN 4
-
-
-/***********************************/
-/* MC_CMD_TCM_BUCKET_FREE
- * Free a pacer bucket
- */
-#define MC_CMD_TCM_BUCKET_FREE 0xb3
-#undef MC_CMD_0xb3_PRIVILEGE_CTG
-
-#define MC_CMD_0xb3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_TCM_BUCKET_FREE_IN msgrequest */
-#define MC_CMD_TCM_BUCKET_FREE_IN_LEN 4
-/* the bucket id */
-#define MC_CMD_TCM_BUCKET_FREE_IN_BUCKET_OFST 0
-#define MC_CMD_TCM_BUCKET_FREE_IN_BUCKET_LEN 4
-
-/* MC_CMD_TCM_BUCKET_FREE_OUT msgresponse */
-#define MC_CMD_TCM_BUCKET_FREE_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_TCM_BUCKET_INIT
- * Initialise pacer bucket with a given rate
- */
-#define MC_CMD_TCM_BUCKET_INIT 0xb4
-#undef MC_CMD_0xb4_PRIVILEGE_CTG
-
-#define MC_CMD_0xb4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_TCM_BUCKET_INIT_IN msgrequest */
-#define MC_CMD_TCM_BUCKET_INIT_IN_LEN 8
-/* the bucket id */
-#define MC_CMD_TCM_BUCKET_INIT_IN_BUCKET_OFST 0
-#define MC_CMD_TCM_BUCKET_INIT_IN_BUCKET_LEN 4
-/* the rate in mbps */
-#define MC_CMD_TCM_BUCKET_INIT_IN_RATE_OFST 4
-#define MC_CMD_TCM_BUCKET_INIT_IN_RATE_LEN 4
-
-/* MC_CMD_TCM_BUCKET_INIT_EXT_IN msgrequest */
-#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_LEN 12
-/* the bucket id */
-#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_BUCKET_OFST 0
-#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_BUCKET_LEN 4
-/* the rate in mbps */
-#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_RATE_OFST 4
-#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_RATE_LEN 4
-/* the desired maximum fill level */
-#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_MAX_FILL_OFST 8
-#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_MAX_FILL_LEN 4
-
-/* MC_CMD_TCM_BUCKET_INIT_OUT msgresponse */
-#define MC_CMD_TCM_BUCKET_INIT_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_TCM_TXQ_INIT
- * Initialise txq in pacer with given options or set options
- */
-#define MC_CMD_TCM_TXQ_INIT 0xb5
-#undef MC_CMD_0xb5_PRIVILEGE_CTG
-
-#define MC_CMD_0xb5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_TCM_TXQ_INIT_IN msgrequest */
-#define MC_CMD_TCM_TXQ_INIT_IN_LEN 28
-/* the txq id */
-#define MC_CMD_TCM_TXQ_INIT_IN_QID_OFST 0
-#define MC_CMD_TCM_TXQ_INIT_IN_QID_LEN 4
-/* the static priority associated with the txq */
-#define MC_CMD_TCM_TXQ_INIT_IN_LABEL_OFST 4
-#define MC_CMD_TCM_TXQ_INIT_IN_LABEL_LEN 4
-/* bitmask of the priority queues this txq is inserted into when inserted. */
-#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_OFST 8
-#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_LEN 4
-#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_OFST 8
-#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_LBN 0
-#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_WIDTH 1
-#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_OFST 8
-#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_LBN 1
-#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_WIDTH 1
-#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_OFST 8
-#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_LBN 2
-#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_WIDTH 1
-/* the reaction point (RP) bucket */
-#define MC_CMD_TCM_TXQ_INIT_IN_RP_BKT_OFST 12
-#define MC_CMD_TCM_TXQ_INIT_IN_RP_BKT_LEN 4
-/* an already reserved bucket (typically set to bucket associated with outer
- * vswitch)
- */
-#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT1_OFST 16
-#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT1_LEN 4
-/* an already reserved bucket (typically set to bucket associated with inner
- * vswitch)
- */
-#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT2_OFST 20
-#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT2_LEN 4
-/* the min bucket (typically for ETS/minimum bandwidth) */
-#define MC_CMD_TCM_TXQ_INIT_IN_MIN_BKT_OFST 24
-#define MC_CMD_TCM_TXQ_INIT_IN_MIN_BKT_LEN 4
-
-/* MC_CMD_TCM_TXQ_INIT_EXT_IN msgrequest */
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LEN 32
-/* the txq id */
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_QID_OFST 0
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_QID_LEN 4
-/* the static priority associated with the txq */
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_NORMAL_OFST 4
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_NORMAL_LEN 4
-/* bitmask of the priority queues this txq is inserted into when inserted. */
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAGS_OFST 8
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAGS_LEN 4
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_OFST 8
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_LBN 0
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_WIDTH 1
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_OFST 8
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_LBN 1
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_WIDTH 1
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_OFST 8
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_LBN 2
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_WIDTH 1
-/* the reaction point (RP) bucket */
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_RP_BKT_OFST 12
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_RP_BKT_LEN 4
-/* an already reserved bucket (typically set to bucket associated with outer
- * vswitch)
- */
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT1_OFST 16
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT1_LEN 4
-/* an already reserved bucket (typically set to bucket associated with inner
- * vswitch)
- */
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT2_OFST 20
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT2_LEN 4
-/* the min bucket (typically for ETS/minimum bandwidth) */
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MIN_BKT_OFST 24
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MIN_BKT_LEN 4
-/* the static priority associated with the txq */
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_GUARANTEED_OFST 28
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_GUARANTEED_LEN 4
-
-/* MC_CMD_TCM_TXQ_INIT_OUT msgresponse */
-#define MC_CMD_TCM_TXQ_INIT_OUT_LEN 0
+#define MC_CMD_V2_EXTN_IN_MCDI_MESSAGE_TYPE_PLATFORM 0x2
/***********************************/
@@ -18740,27 +19596,6 @@
/***********************************/
-/* MC_CMD_VSWITCH_QUERY
- * read some config of v-switch. For now this command is an empty placeholder.
- * It may be used to check if a v-switch is connected to a given EVB port (if
- * not, then the command returns ENOENT).
- */
-#define MC_CMD_VSWITCH_QUERY 0x63
-#undef MC_CMD_0x63_PRIVILEGE_CTG
-
-#define MC_CMD_0x63_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_VSWITCH_QUERY_IN msgrequest */
-#define MC_CMD_VSWITCH_QUERY_IN_LEN 4
-/* The port to which the v-switch is connected. */
-#define MC_CMD_VSWITCH_QUERY_IN_UPSTREAM_PORT_ID_OFST 0
-#define MC_CMD_VSWITCH_QUERY_IN_UPSTREAM_PORT_ID_LEN 4
-
-/* MC_CMD_VSWITCH_QUERY_OUT msgresponse */
-#define MC_CMD_VSWITCH_QUERY_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_VPORT_ALLOC
* allocate a v-port.
*/
@@ -18936,28 +19771,6 @@
/***********************************/
-/* MC_CMD_VADAPTOR_GET_MAC
- * read the MAC address assigned to a v-adaptor.
- */
-#define MC_CMD_VADAPTOR_GET_MAC 0x5e
-#undef MC_CMD_0x5e_PRIVILEGE_CTG
-
-#define MC_CMD_0x5e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_VADAPTOR_GET_MAC_IN msgrequest */
-#define MC_CMD_VADAPTOR_GET_MAC_IN_LEN 4
-/* The port to which the v-adaptor is connected. */
-#define MC_CMD_VADAPTOR_GET_MAC_IN_UPSTREAM_PORT_ID_OFST 0
-#define MC_CMD_VADAPTOR_GET_MAC_IN_UPSTREAM_PORT_ID_LEN 4
-
-/* MC_CMD_VADAPTOR_GET_MAC_OUT msgresponse */
-#define MC_CMD_VADAPTOR_GET_MAC_OUT_LEN 6
-/* The MAC address assigned to this v-adaptor */
-#define MC_CMD_VADAPTOR_GET_MAC_OUT_MACADDR_OFST 0
-#define MC_CMD_VADAPTOR_GET_MAC_OUT_MACADDR_LEN 6
-
-
-/***********************************/
/* MC_CMD_VADAPTOR_QUERY
* read some config of v-adaptor.
*/
@@ -19014,86 +19827,6 @@
/***********************************/
-/* MC_CMD_RDWR_A64_REGIONS
- * Assign the 64 bit region addresses.
- */
-#define MC_CMD_RDWR_A64_REGIONS 0x9b
-#undef MC_CMD_0x9b_PRIVILEGE_CTG
-
-#define MC_CMD_0x9b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_RDWR_A64_REGIONS_IN msgrequest */
-#define MC_CMD_RDWR_A64_REGIONS_IN_LEN 17
-#define MC_CMD_RDWR_A64_REGIONS_IN_REGION0_OFST 0
-#define MC_CMD_RDWR_A64_REGIONS_IN_REGION0_LEN 4
-#define MC_CMD_RDWR_A64_REGIONS_IN_REGION1_OFST 4
-#define MC_CMD_RDWR_A64_REGIONS_IN_REGION1_LEN 4
-#define MC_CMD_RDWR_A64_REGIONS_IN_REGION2_OFST 8
-#define MC_CMD_RDWR_A64_REGIONS_IN_REGION2_LEN 4
-#define MC_CMD_RDWR_A64_REGIONS_IN_REGION3_OFST 12
-#define MC_CMD_RDWR_A64_REGIONS_IN_REGION3_LEN 4
-/* Write enable bits 0-3, set to write, clear to read. */
-#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_LBN 128
-#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_WIDTH 4
-#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_BYTE_OFST 16
-#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_BYTE_LEN 1
-
-/* MC_CMD_RDWR_A64_REGIONS_OUT msgresponse: This data always included
- * regardless of state of write bits in the request.
- */
-#define MC_CMD_RDWR_A64_REGIONS_OUT_LEN 16
-#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION0_OFST 0
-#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION0_LEN 4
-#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION1_OFST 4
-#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION1_LEN 4
-#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION2_OFST 8
-#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION2_LEN 4
-#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION3_OFST 12
-#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION3_LEN 4
-
-
-/***********************************/
-/* MC_CMD_ONLOAD_STACK_ALLOC
- * Allocate an Onload stack ID.
- */
-#define MC_CMD_ONLOAD_STACK_ALLOC 0x9c
-#undef MC_CMD_0x9c_PRIVILEGE_CTG
-
-#define MC_CMD_0x9c_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
-
-/* MC_CMD_ONLOAD_STACK_ALLOC_IN msgrequest */
-#define MC_CMD_ONLOAD_STACK_ALLOC_IN_LEN 4
-/* The handle of the owning upstream port */
-#define MC_CMD_ONLOAD_STACK_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
-#define MC_CMD_ONLOAD_STACK_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
-
-/* MC_CMD_ONLOAD_STACK_ALLOC_OUT msgresponse */
-#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_LEN 4
-/* The handle of the new Onload stack */
-#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_ONLOAD_STACK_ID_OFST 0
-#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_ONLOAD_STACK_ID_LEN 4
-
-
-/***********************************/
-/* MC_CMD_ONLOAD_STACK_FREE
- * Free an Onload stack ID.
- */
-#define MC_CMD_ONLOAD_STACK_FREE 0x9d
-#undef MC_CMD_0x9d_PRIVILEGE_CTG
-
-#define MC_CMD_0x9d_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
-
-/* MC_CMD_ONLOAD_STACK_FREE_IN msgrequest */
-#define MC_CMD_ONLOAD_STACK_FREE_IN_LEN 4
-/* The handle of the Onload stack */
-#define MC_CMD_ONLOAD_STACK_FREE_IN_ONLOAD_STACK_ID_OFST 0
-#define MC_CMD_ONLOAD_STACK_FREE_IN_ONLOAD_STACK_ID_LEN 4
-
-/* MC_CMD_ONLOAD_STACK_FREE_OUT msgresponse */
-#define MC_CMD_ONLOAD_STACK_FREE_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_RSS_CONTEXT_ALLOC
* Allocate an RSS context.
*/
@@ -19305,93 +20038,6 @@
/***********************************/
-/* MC_CMD_RSS_CONTEXT_WRITE_TABLE
- * Write a portion of a selectable-size indirection table for an RSS context.
- * This command must be used instead of MC_CMD_RSS_CONTEXT_SET_TABLE if the
- * RSS_SELECTABLE_TABLE_SIZE bit is set in MC_CMD_GET_CAPABILITIES.
- */
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE 0x13e
-#undef MC_CMD_0x13e_PRIVILEGE_CTG
-
-#define MC_CMD_0x13e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN msgrequest */
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_LENMIN 8
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_LENMAX 252
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_LENMAX_MCDI2 1020
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_LEN(num) (4+4*(num))
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_NUM(len) (((len)-4)/4)
-/* The handle of the RSS context */
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_RSS_CONTEXT_ID_OFST 0
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_RSS_CONTEXT_ID_LEN 4
-/* An array of index-value pairs to be written to the table. Structure is
- * MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY.
- */
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_OFST 4
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_LEN 4
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_MINNUM 1
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_MAXNUM 62
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_MAXNUM_MCDI2 254
-
-/* MC_CMD_RSS_CONTEXT_WRITE_TABLE_OUT msgresponse */
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_OUT_LEN 0
-
-/* MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY structuredef */
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_LEN 4
-/* The index of the table entry to be written. */
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_INDEX_OFST 0
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_INDEX_LEN 2
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_INDEX_LBN 0
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_INDEX_WIDTH 16
-/* The value to write into the table entry. */
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_VALUE_OFST 2
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_VALUE_LEN 2
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_VALUE_LBN 16
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_VALUE_WIDTH 16
-
-
-/***********************************/
-/* MC_CMD_RSS_CONTEXT_READ_TABLE
- * Read a portion of a selectable-size indirection table for an RSS context.
- * This command must be used instead of MC_CMD_RSS_CONTEXT_GET_TABLE if the
- * RSS_SELECTABLE_TABLE_SIZE bit is set in MC_CMD_GET_CAPABILITIES.
- */
-#define MC_CMD_RSS_CONTEXT_READ_TABLE 0x13f
-#undef MC_CMD_0x13f_PRIVILEGE_CTG
-
-#define MC_CMD_0x13f_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_RSS_CONTEXT_READ_TABLE_IN msgrequest */
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_LENMIN 6
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_LENMAX 252
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_LENMAX_MCDI2 1020
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_LEN(num) (4+2*(num))
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_NUM(len) (((len)-4)/2)
-/* The handle of the RSS context */
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_RSS_CONTEXT_ID_OFST 0
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_RSS_CONTEXT_ID_LEN 4
-/* An array containing the indices of the entries to be read. */
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_OFST 4
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_LEN 2
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_MINNUM 1
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_MAXNUM 124
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_MAXNUM_MCDI2 508
-
-/* MC_CMD_RSS_CONTEXT_READ_TABLE_OUT msgresponse */
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_LENMIN 2
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_LENMAX 252
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_LEN(num) (0+2*(num))
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_NUM(len) (((len)-0)/2)
-/* A buffer containing the requested entries read from the table. */
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_OFST 0
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_LEN 2
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_MINNUM 1
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_MAXNUM 126
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_MAXNUM_MCDI2 510
-
-
-/***********************************/
/* MC_CMD_RSS_CONTEXT_SET_FLAGS
* Set various control flags for an RSS context.
*/
@@ -19525,158 +20171,6 @@
/***********************************/
-/* MC_CMD_DOT1P_MAPPING_ALLOC
- * Allocate a .1p mapping.
- */
-#define MC_CMD_DOT1P_MAPPING_ALLOC 0xa4
-#undef MC_CMD_0xa4_PRIVILEGE_CTG
-
-#define MC_CMD_0xa4_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DOT1P_MAPPING_ALLOC_IN msgrequest */
-#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_LEN 8
-/* The handle of the owning upstream port */
-#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
-#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
-/* Number of queues spanned by this mapping, in the range 1-64; valid fixed
- * offsets in the mapping table will be in the range 0 to NUM_QUEUES-1, and
- * referenced RSS contexts must span no more than this number.
- */
-#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_NUM_QUEUES_OFST 4
-#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_NUM_QUEUES_LEN 4
-
-/* MC_CMD_DOT1P_MAPPING_ALLOC_OUT msgresponse */
-#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_LEN 4
-/* The handle of the new .1p mapping. This should be considered opaque to the
- * host, although a value of 0xFFFFFFFF is guaranteed never to be a valid
- * handle.
- */
-#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_OFST 0
-#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_LEN 4
-/* enum: guaranteed invalid .1p mapping handle value */
-#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_INVALID 0xffffffff
-
-
-/***********************************/
-/* MC_CMD_DOT1P_MAPPING_FREE
- * Free a .1p mapping.
- */
-#define MC_CMD_DOT1P_MAPPING_FREE 0xa5
-#undef MC_CMD_0xa5_PRIVILEGE_CTG
-
-#define MC_CMD_0xa5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DOT1P_MAPPING_FREE_IN msgrequest */
-#define MC_CMD_DOT1P_MAPPING_FREE_IN_LEN 4
-/* The handle of the .1p mapping */
-#define MC_CMD_DOT1P_MAPPING_FREE_IN_DOT1P_MAPPING_ID_OFST 0
-#define MC_CMD_DOT1P_MAPPING_FREE_IN_DOT1P_MAPPING_ID_LEN 4
-
-/* MC_CMD_DOT1P_MAPPING_FREE_OUT msgresponse */
-#define MC_CMD_DOT1P_MAPPING_FREE_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_DOT1P_MAPPING_SET_TABLE
- * Set the mapping table for a .1p mapping.
- */
-#define MC_CMD_DOT1P_MAPPING_SET_TABLE 0xa6
-#undef MC_CMD_0xa6_PRIVILEGE_CTG
-
-#define MC_CMD_0xa6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DOT1P_MAPPING_SET_TABLE_IN msgrequest */
-#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_LEN 36
-/* The handle of the .1p mapping */
-#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_DOT1P_MAPPING_ID_OFST 0
-#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_DOT1P_MAPPING_ID_LEN 4
-/* Per-priority mappings (1 32-bit word per entry - an offset or RSS context
- * handle)
- */
-#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_MAPPING_TABLE_OFST 4
-#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_MAPPING_TABLE_LEN 32
-
-/* MC_CMD_DOT1P_MAPPING_SET_TABLE_OUT msgresponse */
-#define MC_CMD_DOT1P_MAPPING_SET_TABLE_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_DOT1P_MAPPING_GET_TABLE
- * Get the mapping table for a .1p mapping.
- */
-#define MC_CMD_DOT1P_MAPPING_GET_TABLE 0xa7
-#undef MC_CMD_0xa7_PRIVILEGE_CTG
-
-#define MC_CMD_0xa7_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DOT1P_MAPPING_GET_TABLE_IN msgrequest */
-#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_LEN 4
-/* The handle of the .1p mapping */
-#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_DOT1P_MAPPING_ID_OFST 0
-#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_DOT1P_MAPPING_ID_LEN 4
-
-/* MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT msgresponse */
-#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_LEN 36
-/* Per-priority mappings (1 32-bit word per entry - an offset or RSS context
- * handle)
- */
-#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_MAPPING_TABLE_OFST 4
-#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_MAPPING_TABLE_LEN 32
-
-
-/***********************************/
-/* MC_CMD_GET_VECTOR_CFG
- * Get Interrupt Vector config for this PF.
- */
-#define MC_CMD_GET_VECTOR_CFG 0xbf
-#undef MC_CMD_0xbf_PRIVILEGE_CTG
-
-#define MC_CMD_0xbf_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_VECTOR_CFG_IN msgrequest */
-#define MC_CMD_GET_VECTOR_CFG_IN_LEN 0
-
-/* MC_CMD_GET_VECTOR_CFG_OUT msgresponse */
-#define MC_CMD_GET_VECTOR_CFG_OUT_LEN 12
-/* Base absolute interrupt vector number. */
-#define MC_CMD_GET_VECTOR_CFG_OUT_VEC_BASE_OFST 0
-#define MC_CMD_GET_VECTOR_CFG_OUT_VEC_BASE_LEN 4
-/* Number of interrupt vectors allocate to this PF. */
-#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_PF_OFST 4
-#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_PF_LEN 4
-/* Number of interrupt vectors to allocate per VF. */
-#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_VF_OFST 8
-#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_VF_LEN 4
-
-
-/***********************************/
-/* MC_CMD_SET_VECTOR_CFG
- * Set Interrupt Vector config for this PF.
- */
-#define MC_CMD_SET_VECTOR_CFG 0xc0
-#undef MC_CMD_0xc0_PRIVILEGE_CTG
-
-#define MC_CMD_0xc0_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_SET_VECTOR_CFG_IN msgrequest */
-#define MC_CMD_SET_VECTOR_CFG_IN_LEN 12
-/* Base absolute interrupt vector number, or MC_CMD_RESOURCE_INSTANCE_ANY to
- * let the system find a suitable base.
- */
-#define MC_CMD_SET_VECTOR_CFG_IN_VEC_BASE_OFST 0
-#define MC_CMD_SET_VECTOR_CFG_IN_VEC_BASE_LEN 4
-/* Number of interrupt vectors allocate to this PF. */
-#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_PF_OFST 4
-#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_PF_LEN 4
-/* Number of interrupt vectors to allocate per VF. */
-#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_VF_OFST 8
-#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_VF_LEN 4
-
-/* MC_CMD_SET_VECTOR_CFG_OUT msgresponse */
-#define MC_CMD_SET_VECTOR_CFG_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_VPORT_ADD_MAC_ADDRESS
* Add a MAC address to a v-port
*/
@@ -19810,124 +20304,6 @@
/***********************************/
-/* MC_CMD_EVB_PORT_QUERY
- * read some config of v-port.
- */
-#define MC_CMD_EVB_PORT_QUERY 0x62
-#undef MC_CMD_0x62_PRIVILEGE_CTG
-
-#define MC_CMD_0x62_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_EVB_PORT_QUERY_IN msgrequest */
-#define MC_CMD_EVB_PORT_QUERY_IN_LEN 4
-/* The handle of the v-port */
-#define MC_CMD_EVB_PORT_QUERY_IN_PORT_ID_OFST 0
-#define MC_CMD_EVB_PORT_QUERY_IN_PORT_ID_LEN 4
-
-/* MC_CMD_EVB_PORT_QUERY_OUT msgresponse */
-#define MC_CMD_EVB_PORT_QUERY_OUT_LEN 8
-/* The EVB port flags as defined at MC_CMD_VPORT_ALLOC. */
-#define MC_CMD_EVB_PORT_QUERY_OUT_PORT_FLAGS_OFST 0
-#define MC_CMD_EVB_PORT_QUERY_OUT_PORT_FLAGS_LEN 4
-/* The number of VLAN tags that may be used on a v-adaptor connected to this
- * EVB port.
- */
-#define MC_CMD_EVB_PORT_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_OFST 4
-#define MC_CMD_EVB_PORT_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_LEN 4
-
-
-/***********************************/
-/* MC_CMD_DUMP_BUFTBL_ENTRIES
- * Dump buffer table entries, mainly for command client debug use. Dumps
- * absolute entries, and does not use chunk handles. All entries must be in
- * range, and used for q page mapping, Although the latter restriction may be
- * lifted in future.
- */
-#define MC_CMD_DUMP_BUFTBL_ENTRIES 0xab
-#undef MC_CMD_0xab_PRIVILEGE_CTG
-
-#define MC_CMD_0xab_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_DUMP_BUFTBL_ENTRIES_IN msgrequest */
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_LEN 8
-/* Index of the first buffer table entry. */
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_FIRSTID_OFST 0
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_FIRSTID_LEN 4
-/* Number of buffer table entries to dump. */
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 4
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_NUMENTRIES_LEN 4
-
-/* MC_CMD_DUMP_BUFTBL_ENTRIES_OUT msgresponse */
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMIN 12
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMAX 252
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LEN(num) (0+12*(num))
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_NUM(len) (((len)-0)/12)
-/* Raw buffer table entries, layed out as BUFTBL_ENTRY. */
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_OFST 0
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_LEN 12
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MINNUM 1
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MAXNUM 21
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MAXNUM_MCDI2 85
-
-
-/***********************************/
-/* MC_CMD_SET_RXDP_CONFIG
- * Set global RXDP configuration settings
- */
-#define MC_CMD_SET_RXDP_CONFIG 0xc1
-#undef MC_CMD_0xc1_PRIVILEGE_CTG
-
-#define MC_CMD_0xc1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_SET_RXDP_CONFIG_IN msgrequest */
-#define MC_CMD_SET_RXDP_CONFIG_IN_LEN 4
-#define MC_CMD_SET_RXDP_CONFIG_IN_DATA_OFST 0
-#define MC_CMD_SET_RXDP_CONFIG_IN_DATA_LEN 4
-#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_OFST 0
-#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_LBN 0
-#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_WIDTH 1
-#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_OFST 0
-#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_LBN 1
-#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_WIDTH 2
-/* enum: pad to 64 bytes */
-#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_64 0x0
-/* enum: pad to 128 bytes (Medford only) */
-#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_128 0x1
-/* enum: pad to 256 bytes (Medford only) */
-#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_256 0x2
-
-/* MC_CMD_SET_RXDP_CONFIG_OUT msgresponse */
-#define MC_CMD_SET_RXDP_CONFIG_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_GET_RXDP_CONFIG
- * Get global RXDP configuration settings
- */
-#define MC_CMD_GET_RXDP_CONFIG 0xc2
-#undef MC_CMD_0xc2_PRIVILEGE_CTG
-
-#define MC_CMD_0xc2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_RXDP_CONFIG_IN msgrequest */
-#define MC_CMD_GET_RXDP_CONFIG_IN_LEN 0
-
-/* MC_CMD_GET_RXDP_CONFIG_OUT msgresponse */
-#define MC_CMD_GET_RXDP_CONFIG_OUT_LEN 4
-#define MC_CMD_GET_RXDP_CONFIG_OUT_DATA_OFST 0
-#define MC_CMD_GET_RXDP_CONFIG_OUT_DATA_LEN 4
-#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_OFST 0
-#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_LBN 0
-#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_WIDTH 1
-#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_LEN_OFST 0
-#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_LEN_LBN 1
-#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_LEN_WIDTH 2
-/* Enum values, see field(s): */
-/* MC_CMD_SET_RXDP_CONFIG/MC_CMD_SET_RXDP_CONFIG_IN/PAD_HOST_LEN */
-
-
-/***********************************/
/* MC_CMD_GET_CLOCK
* Return the system and PDCPU clock frequencies.
*/
@@ -19950,210 +20326,6 @@
/***********************************/
-/* MC_CMD_SET_CLOCK
- * Control the system and DPCPU clock frequencies. Changes are lost reboot.
- */
-#define MC_CMD_SET_CLOCK 0xad
-#undef MC_CMD_0xad_PRIVILEGE_CTG
-
-#define MC_CMD_0xad_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_SET_CLOCK_IN msgrequest */
-#define MC_CMD_SET_CLOCK_IN_LEN 28
-/* Requested frequency in MHz for system clock domain */
-#define MC_CMD_SET_CLOCK_IN_SYS_FREQ_OFST 0
-#define MC_CMD_SET_CLOCK_IN_SYS_FREQ_LEN 4
-/* enum: Leave the system clock domain frequency unchanged */
-#define MC_CMD_SET_CLOCK_IN_SYS_DOMAIN_DONT_CHANGE 0x0
-/* Requested frequency in MHz for inter-core clock domain */
-#define MC_CMD_SET_CLOCK_IN_ICORE_FREQ_OFST 4
-#define MC_CMD_SET_CLOCK_IN_ICORE_FREQ_LEN 4
-/* enum: Leave the inter-core clock domain frequency unchanged */
-#define MC_CMD_SET_CLOCK_IN_ICORE_DOMAIN_DONT_CHANGE 0x0
-/* Requested frequency in MHz for DPCPU clock domain */
-#define MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_OFST 8
-#define MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_LEN 4
-/* enum: Leave the DPCPU clock domain frequency unchanged */
-#define MC_CMD_SET_CLOCK_IN_DPCPU_DOMAIN_DONT_CHANGE 0x0
-/* Requested frequency in MHz for PCS clock domain */
-#define MC_CMD_SET_CLOCK_IN_PCS_FREQ_OFST 12
-#define MC_CMD_SET_CLOCK_IN_PCS_FREQ_LEN 4
-/* enum: Leave the PCS clock domain frequency unchanged */
-#define MC_CMD_SET_CLOCK_IN_PCS_DOMAIN_DONT_CHANGE 0x0
-/* Requested frequency in MHz for MC clock domain */
-#define MC_CMD_SET_CLOCK_IN_MC_FREQ_OFST 16
-#define MC_CMD_SET_CLOCK_IN_MC_FREQ_LEN 4
-/* enum: Leave the MC clock domain frequency unchanged */
-#define MC_CMD_SET_CLOCK_IN_MC_DOMAIN_DONT_CHANGE 0x0
-/* Requested frequency in MHz for rmon clock domain */
-#define MC_CMD_SET_CLOCK_IN_RMON_FREQ_OFST 20
-#define MC_CMD_SET_CLOCK_IN_RMON_FREQ_LEN 4
-/* enum: Leave the rmon clock domain frequency unchanged */
-#define MC_CMD_SET_CLOCK_IN_RMON_DOMAIN_DONT_CHANGE 0x0
-/* Requested frequency in MHz for vswitch clock domain */
-#define MC_CMD_SET_CLOCK_IN_VSWITCH_FREQ_OFST 24
-#define MC_CMD_SET_CLOCK_IN_VSWITCH_FREQ_LEN 4
-/* enum: Leave the vswitch clock domain frequency unchanged */
-#define MC_CMD_SET_CLOCK_IN_VSWITCH_DOMAIN_DONT_CHANGE 0x0
-
-/* MC_CMD_SET_CLOCK_OUT msgresponse */
-#define MC_CMD_SET_CLOCK_OUT_LEN 28
-/* Resulting system frequency in MHz */
-#define MC_CMD_SET_CLOCK_OUT_SYS_FREQ_OFST 0
-#define MC_CMD_SET_CLOCK_OUT_SYS_FREQ_LEN 4
-/* enum: The system clock domain doesn't exist */
-#define MC_CMD_SET_CLOCK_OUT_SYS_DOMAIN_UNSUPPORTED 0x0
-/* Resulting inter-core frequency in MHz */
-#define MC_CMD_SET_CLOCK_OUT_ICORE_FREQ_OFST 4
-#define MC_CMD_SET_CLOCK_OUT_ICORE_FREQ_LEN 4
-/* enum: The inter-core clock domain doesn't exist / isn't used */
-#define MC_CMD_SET_CLOCK_OUT_ICORE_DOMAIN_UNSUPPORTED 0x0
-/* Resulting DPCPU frequency in MHz */
-#define MC_CMD_SET_CLOCK_OUT_DPCPU_FREQ_OFST 8
-#define MC_CMD_SET_CLOCK_OUT_DPCPU_FREQ_LEN 4
-/* enum: The dpcpu clock domain doesn't exist */
-#define MC_CMD_SET_CLOCK_OUT_DPCPU_DOMAIN_UNSUPPORTED 0x0
-/* Resulting PCS frequency in MHz */
-#define MC_CMD_SET_CLOCK_OUT_PCS_FREQ_OFST 12
-#define MC_CMD_SET_CLOCK_OUT_PCS_FREQ_LEN 4
-/* enum: The PCS clock domain doesn't exist / isn't controlled */
-#define MC_CMD_SET_CLOCK_OUT_PCS_DOMAIN_UNSUPPORTED 0x0
-/* Resulting MC frequency in MHz */
-#define MC_CMD_SET_CLOCK_OUT_MC_FREQ_OFST 16
-#define MC_CMD_SET_CLOCK_OUT_MC_FREQ_LEN 4
-/* enum: The MC clock domain doesn't exist / isn't controlled */
-#define MC_CMD_SET_CLOCK_OUT_MC_DOMAIN_UNSUPPORTED 0x0
-/* Resulting rmon frequency in MHz */
-#define MC_CMD_SET_CLOCK_OUT_RMON_FREQ_OFST 20
-#define MC_CMD_SET_CLOCK_OUT_RMON_FREQ_LEN 4
-/* enum: The rmon clock domain doesn't exist / isn't controlled */
-#define MC_CMD_SET_CLOCK_OUT_RMON_DOMAIN_UNSUPPORTED 0x0
-/* Resulting vswitch frequency in MHz */
-#define MC_CMD_SET_CLOCK_OUT_VSWITCH_FREQ_OFST 24
-#define MC_CMD_SET_CLOCK_OUT_VSWITCH_FREQ_LEN 4
-/* enum: The vswitch clock domain doesn't exist / isn't controlled */
-#define MC_CMD_SET_CLOCK_OUT_VSWITCH_DOMAIN_UNSUPPORTED 0x0
-
-
-/***********************************/
-/* MC_CMD_DPCPU_RPC
- * Send an arbitrary DPCPU message.
- */
-#define MC_CMD_DPCPU_RPC 0xae
-#undef MC_CMD_0xae_PRIVILEGE_CTG
-
-#define MC_CMD_0xae_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_DPCPU_RPC_IN msgrequest */
-#define MC_CMD_DPCPU_RPC_IN_LEN 36
-#define MC_CMD_DPCPU_RPC_IN_CPU_OFST 0
-#define MC_CMD_DPCPU_RPC_IN_CPU_LEN 4
-/* enum: RxDPCPU0 */
-#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX0 0x0
-/* enum: TxDPCPU0 */
-#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX0 0x1
-/* enum: TxDPCPU1 */
-#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX1 0x2
-/* enum: RxDPCPU1 (Medford only) */
-#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX1 0x3
-/* enum: RxDPCPU (will be for the calling function; for now, just an alias of
- * DPCPU_RX0)
- */
-#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX 0x80
-/* enum: TxDPCPU (will be for the calling function; for now, just an alias of
- * DPCPU_TX0)
- */
-#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX 0x81
-/* First 8 bits [39:32] of DATA are consumed by MC-DPCPU protocol and must be
- * initialised to zero
- */
-#define MC_CMD_DPCPU_RPC_IN_DATA_OFST 4
-#define MC_CMD_DPCPU_RPC_IN_DATA_LEN 32
-#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_OFST 4
-#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_LBN 8
-#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_WIDTH 8
-#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_READ 0x6 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_WRITE 0x7 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_SELF_TEST 0xc /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_CSR_ACCESS 0xe /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_READ 0x46 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_WRITE 0x47 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_SELF_TEST 0x4a /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_CSR_ACCESS 0x4c /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_SET_MC_REPLAY_CNTXT 0x4d /* enum */
-#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_OBJID_OFST 4
-#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_OBJID_LBN 16
-#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_OBJID_WIDTH 16
-#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_ADDR_OFST 4
-#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_ADDR_LBN 16
-#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_ADDR_WIDTH 16
-#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_COUNT_OFST 4
-#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_COUNT_LBN 48
-#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_COUNT_WIDTH 16
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_INFO_OFST 4
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_INFO_LBN 16
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_INFO_WIDTH 240
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_OFST 4
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_LBN 16
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_WIDTH 16
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_STOP_RETURN_RESULT 0x0 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_READ 0x1 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_WRITE 0x2 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_WRITE_READ 0x3 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_PIPELINED_READ 0x4 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_OFST 4
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_LBN 48
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_WIDTH 16
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_RPT_COUNT_OFST 4
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_RPT_COUNT_LBN 64
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_RPT_COUNT_WIDTH 16
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_GAP_DELAY_OFST 4
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_GAP_DELAY_LBN 80
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_GAP_DELAY_WIDTH 16
-#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_OFST 4
-#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_LBN 16
-#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_WIDTH 16
-#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_CUT_THROUGH 0x1 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_STORE_FORWARD 0x2 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_STORE_FORWARD_FIRST 0x3 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_CNTXT_OFST 4
-#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_CNTXT_LBN 64
-#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_CNTXT_WIDTH 16
-#define MC_CMD_DPCPU_RPC_IN_WDATA_OFST 12
-#define MC_CMD_DPCPU_RPC_IN_WDATA_LEN 24
-/* Register data to write. Only valid in write/write-read. */
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_DATA_OFST 16
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_DATA_LEN 4
-/* Register address. */
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_ADDRESS_OFST 20
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_ADDRESS_LEN 4
-
-/* MC_CMD_DPCPU_RPC_OUT msgresponse */
-#define MC_CMD_DPCPU_RPC_OUT_LEN 36
-#define MC_CMD_DPCPU_RPC_OUT_RC_OFST 0
-#define MC_CMD_DPCPU_RPC_OUT_RC_LEN 4
-/* DATA */
-#define MC_CMD_DPCPU_RPC_OUT_DATA_OFST 4
-#define MC_CMD_DPCPU_RPC_OUT_DATA_LEN 32
-#define MC_CMD_DPCPU_RPC_OUT_HDR_CMD_RESP_ERRCODE_OFST 4
-#define MC_CMD_DPCPU_RPC_OUT_HDR_CMD_RESP_ERRCODE_LBN 32
-#define MC_CMD_DPCPU_RPC_OUT_HDR_CMD_RESP_ERRCODE_WIDTH 16
-#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_COUNT_OFST 4
-#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_COUNT_LBN 48
-#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_COUNT_WIDTH 16
-#define MC_CMD_DPCPU_RPC_OUT_RDATA_OFST 12
-#define MC_CMD_DPCPU_RPC_OUT_RDATA_LEN 24
-#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_1_OFST 12
-#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_1_LEN 4
-#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_2_OFST 16
-#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_2_LEN 4
-#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_3_OFST 20
-#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_3_LEN 4
-#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_4_OFST 24
-#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_4_LEN 4
-
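The HDR_CMD_* fields above use this header's OFST/LBN/WIDTH convention: OFST is the byte offset of the containing area, and LBN/WIDTH give the bit position and size relative to that offset. A minimal sketch of a generic setter for such fields, assuming a little-endian MCDI buffer; the helper name is hypothetical and is not the in-tree sfc accessor.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper: set a WIDTH-bit field starting LBN bits above byte
 * offset OFST in a little-endian MCDI request buffer. */
static void mcdi_set_bits(uint8_t *buf, unsigned int ofst, unsigned int lbn,
                          unsigned int width, uint64_t value)
{
    unsigned int bit = ofst * 8 + lbn;

    for (unsigned int i = 0; i < width; i++, bit++) {
        unsigned int byte = bit / 8, shift = bit % 8;

        buf[byte] &= ~(1u << shift);
        buf[byte] |= (unsigned int)((value >> i) & 1) << shift;
    }
}

int main(void)
{
    uint8_t req[36] = { 0 };    /* MC_CMD_DPCPU_RPC_IN_LEN */

    /* CPU = TxDPCPU0; CMDNUM = TXDPCPU_CSR_ACCESS (OFST 4, LBN 8, WIDTH 8) */
    mcdi_set_bits(req, 0, 0, 32, 0x1);
    mcdi_set_bits(req, 4, 8, 8, 0xe);
    printf("DATA byte 1 = 0x%02x\n", req[5]);   /* prints 0x0e */
    return 0;
}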
-
-/***********************************/
/* MC_CMD_TRIGGER_INTERRUPT
* Trigger an interrupt by prodding the BIU.
*/
@@ -20173,66 +20345,6 @@
/***********************************/
-/* MC_CMD_SHMBOOT_OP
- * Special operations to support (for now) shmboot.
- */
-#define MC_CMD_SHMBOOT_OP 0xe6
-#undef MC_CMD_0xe6_PRIVILEGE_CTG
-
-#define MC_CMD_0xe6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_SHMBOOT_OP_IN msgrequest */
-#define MC_CMD_SHMBOOT_OP_IN_LEN 4
-/* Identifies the operation to perform */
-#define MC_CMD_SHMBOOT_OP_IN_SHMBOOT_OP_OFST 0
-#define MC_CMD_SHMBOOT_OP_IN_SHMBOOT_OP_LEN 4
-/* enum: Copy slave_data section to the slave core. (Greenport only) */
-#define MC_CMD_SHMBOOT_OP_IN_PUSH_SLAVE_DATA 0x0
-
-/* MC_CMD_SHMBOOT_OP_OUT msgresponse */
-#define MC_CMD_SHMBOOT_OP_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_CAP_BLK_READ
- * Read multiple 64bit words from capture block memory
- */
-#define MC_CMD_CAP_BLK_READ 0xe7
-#undef MC_CMD_0xe7_PRIVILEGE_CTG
-
-#define MC_CMD_0xe7_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_CAP_BLK_READ_IN msgrequest */
-#define MC_CMD_CAP_BLK_READ_IN_LEN 12
-#define MC_CMD_CAP_BLK_READ_IN_CAP_REG_OFST 0
-#define MC_CMD_CAP_BLK_READ_IN_CAP_REG_LEN 4
-#define MC_CMD_CAP_BLK_READ_IN_ADDR_OFST 4
-#define MC_CMD_CAP_BLK_READ_IN_ADDR_LEN 4
-#define MC_CMD_CAP_BLK_READ_IN_COUNT_OFST 8
-#define MC_CMD_CAP_BLK_READ_IN_COUNT_LEN 4
-
-/* MC_CMD_CAP_BLK_READ_OUT msgresponse */
-#define MC_CMD_CAP_BLK_READ_OUT_LENMIN 8
-#define MC_CMD_CAP_BLK_READ_OUT_LENMAX 248
-#define MC_CMD_CAP_BLK_READ_OUT_LENMAX_MCDI2 1016
-#define MC_CMD_CAP_BLK_READ_OUT_LEN(num) (0+8*(num))
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_NUM(len) (((len)-0)/8)
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_OFST 0
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LEN 8
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LO_OFST 0
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LO_LEN 4
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LO_LBN 0
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LO_WIDTH 32
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_HI_OFST 4
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_HI_LEN 4
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_HI_LBN 32
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_HI_WIDTH 32
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_MINNUM 1
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_MAXNUM 31
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_MAXNUM_MCDI2 127
-
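The LENMIN/LENMAX/LEN(num)/BUFFER_NUM(len) macros above describe a variable-length response. A small sketch of the length arithmetic, with the macro bodies copied from the definitions above.

#include <stdio.h>

#define MC_CMD_CAP_BLK_READ_OUT_LEN(num)        (0 + 8 * (num))
#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_NUM(len) (((len) - 0) / 8)

int main(void)
{
    /* Requesting 4 x 64-bit capture words implies a 32-byte response ... */
    printf("expected outlen = %d\n", MC_CMD_CAP_BLK_READ_OUT_LEN(4));
    /* ... and a 32-byte response carries 4 buffer words. */
    printf("words in 32-byte response = %d\n",
           MC_CMD_CAP_BLK_READ_OUT_BUFFER_NUM(32));
    return 0;
}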
-
-/***********************************/
/* MC_CMD_DUMP_DO
* Take a dump of the DUT state
*/
@@ -20380,34 +20492,6 @@
/***********************************/
-/* MC_CMD_SET_PSU
- * Adjusts power supply parameters. This is a warranty-voiding operation.
- * Returns: ENOENT if the parameter or rail specified does not exist, EINVAL if
- * the parameter is out of range.
- */
-#define MC_CMD_SET_PSU 0xea
-#undef MC_CMD_0xea_PRIVILEGE_CTG
-
-#define MC_CMD_0xea_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_SET_PSU_IN msgrequest */
-#define MC_CMD_SET_PSU_IN_LEN 12
-#define MC_CMD_SET_PSU_IN_PARAM_OFST 0
-#define MC_CMD_SET_PSU_IN_PARAM_LEN 4
-#define MC_CMD_SET_PSU_IN_PARAM_SUPPLY_VOLTAGE 0x0 /* enum */
-#define MC_CMD_SET_PSU_IN_RAIL_OFST 4
-#define MC_CMD_SET_PSU_IN_RAIL_LEN 4
-#define MC_CMD_SET_PSU_IN_RAIL_0V9 0x0 /* enum */
-#define MC_CMD_SET_PSU_IN_RAIL_1V2 0x1 /* enum */
-/* desired value, eg voltage in mV */
-#define MC_CMD_SET_PSU_IN_VALUE_OFST 8
-#define MC_CMD_SET_PSU_IN_VALUE_LEN 4
-
-/* MC_CMD_SET_PSU_OUT msgresponse */
-#define MC_CMD_SET_PSU_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_GET_FUNCTION_INFO
* Get function information. PF and VF number.
*/
@@ -20448,7 +20532,7 @@
#define MC_CMD_ENABLE_OFFLINE_BIST 0xed
#undef MC_CMD_0xed_PRIVILEGE_CTG
-#define MC_CMD_0xed_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xed_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_ENABLE_OFFLINE_BIST_IN msgrequest */
#define MC_CMD_ENABLE_OFFLINE_BIST_IN_LEN 0
@@ -20458,137 +20542,13 @@
/***********************************/
-/* MC_CMD_UART_SEND_DATA
- * Send checksummed[sic] block of data over the uart. Response is a placeholder
- * should we wish to make this reliable; currently requests are fire-and-
- * forget.
- */
-#define MC_CMD_UART_SEND_DATA 0xee
-#undef MC_CMD_0xee_PRIVILEGE_CTG
-
-#define MC_CMD_0xee_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_UART_SEND_DATA_OUT msgrequest */
-#define MC_CMD_UART_SEND_DATA_OUT_LENMIN 16
-#define MC_CMD_UART_SEND_DATA_OUT_LENMAX 252
-#define MC_CMD_UART_SEND_DATA_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_UART_SEND_DATA_OUT_LEN(num) (16+1*(num))
-#define MC_CMD_UART_SEND_DATA_OUT_DATA_NUM(len) (((len)-16)/1)
-/* CRC32 over OFFSET, LENGTH, RESERVED, DATA */
-#define MC_CMD_UART_SEND_DATA_OUT_CHECKSUM_OFST 0
-#define MC_CMD_UART_SEND_DATA_OUT_CHECKSUM_LEN 4
-/* Offset at which to write the data */
-#define MC_CMD_UART_SEND_DATA_OUT_OFFSET_OFST 4
-#define MC_CMD_UART_SEND_DATA_OUT_OFFSET_LEN 4
-/* Length of data */
-#define MC_CMD_UART_SEND_DATA_OUT_LENGTH_OFST 8
-#define MC_CMD_UART_SEND_DATA_OUT_LENGTH_LEN 4
-/* Reserved for future use */
-#define MC_CMD_UART_SEND_DATA_OUT_RESERVED_OFST 12
-#define MC_CMD_UART_SEND_DATA_OUT_RESERVED_LEN 4
-#define MC_CMD_UART_SEND_DATA_OUT_DATA_OFST 16
-#define MC_CMD_UART_SEND_DATA_OUT_DATA_LEN 1
-#define MC_CMD_UART_SEND_DATA_OUT_DATA_MINNUM 0
-#define MC_CMD_UART_SEND_DATA_OUT_DATA_MAXNUM 236
-#define MC_CMD_UART_SEND_DATA_OUT_DATA_MAXNUM_MCDI2 1004
-
-/* MC_CMD_UART_SEND_DATA_IN msgresponse */
-#define MC_CMD_UART_SEND_DATA_IN_LEN 0
-
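The CHECKSUM field is documented as a CRC32 over the OFFSET, LENGTH, RESERVED and DATA fields, i.e. everything after the first four bytes. A minimal sketch of filling the request, assuming zlib's standard CRC-32 and a little-endian host; the exact CRC variant the firmware expects is not stated in this header.

#include <stdint.h>
#include <string.h>
#include <zlib.h>

static void uart_send_data_fill(uint8_t *req, size_t req_len,
                                uint32_t offset,
                                const uint8_t *data, uint32_t data_len)
{
    uint32_t crc;

    memcpy(req + 4, &offset, 4);        /* OFFSET (little-endian host assumed) */
    memcpy(req + 8, &data_len, 4);      /* LENGTH */
    memset(req + 12, 0, 4);             /* RESERVED */
    memcpy(req + 16, data, data_len);   /* DATA */

    /* CRC32 over OFFSET, LENGTH, RESERVED, DATA (bytes 4 .. req_len-1) */
    crc = crc32(0L, req + 4, (uInt)(req_len - 4));
    memcpy(req, &crc, 4);               /* CHECKSUM */
}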
-
-/***********************************/
-/* MC_CMD_UART_RECV_DATA
- * Request checksummed[sic] block of data over the uart. Only a placeholder,
- * subject to change and not currently implemented.
- */
-#define MC_CMD_UART_RECV_DATA 0xef
-#undef MC_CMD_0xef_PRIVILEGE_CTG
-
-#define MC_CMD_0xef_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_UART_RECV_DATA_OUT msgrequest */
-#define MC_CMD_UART_RECV_DATA_OUT_LEN 16
-/* CRC32 over OFFSET, LENGTH, RESERVED */
-#define MC_CMD_UART_RECV_DATA_OUT_CHECKSUM_OFST 0
-#define MC_CMD_UART_RECV_DATA_OUT_CHECKSUM_LEN 4
-/* Offset from which to read the data */
-#define MC_CMD_UART_RECV_DATA_OUT_OFFSET_OFST 4
-#define MC_CMD_UART_RECV_DATA_OUT_OFFSET_LEN 4
-/* Length of data */
-#define MC_CMD_UART_RECV_DATA_OUT_LENGTH_OFST 8
-#define MC_CMD_UART_RECV_DATA_OUT_LENGTH_LEN 4
-/* Reserved for future use */
-#define MC_CMD_UART_RECV_DATA_OUT_RESERVED_OFST 12
-#define MC_CMD_UART_RECV_DATA_OUT_RESERVED_LEN 4
-
-/* MC_CMD_UART_RECV_DATA_IN msgresponse */
-#define MC_CMD_UART_RECV_DATA_IN_LENMIN 16
-#define MC_CMD_UART_RECV_DATA_IN_LENMAX 252
-#define MC_CMD_UART_RECV_DATA_IN_LENMAX_MCDI2 1020
-#define MC_CMD_UART_RECV_DATA_IN_LEN(num) (16+1*(num))
-#define MC_CMD_UART_RECV_DATA_IN_DATA_NUM(len) (((len)-16)/1)
-/* CRC32 over RESERVED1, RESERVED2, RESERVED3, DATA */
-#define MC_CMD_UART_RECV_DATA_IN_CHECKSUM_OFST 0
-#define MC_CMD_UART_RECV_DATA_IN_CHECKSUM_LEN 4
-/* Offset at which to write the data */
-#define MC_CMD_UART_RECV_DATA_IN_RESERVED1_OFST 4
-#define MC_CMD_UART_RECV_DATA_IN_RESERVED1_LEN 4
-/* Length of data */
-#define MC_CMD_UART_RECV_DATA_IN_RESERVED2_OFST 8
-#define MC_CMD_UART_RECV_DATA_IN_RESERVED2_LEN 4
-/* Reserved for future use */
-#define MC_CMD_UART_RECV_DATA_IN_RESERVED3_OFST 12
-#define MC_CMD_UART_RECV_DATA_IN_RESERVED3_LEN 4
-#define MC_CMD_UART_RECV_DATA_IN_DATA_OFST 16
-#define MC_CMD_UART_RECV_DATA_IN_DATA_LEN 1
-#define MC_CMD_UART_RECV_DATA_IN_DATA_MINNUM 0
-#define MC_CMD_UART_RECV_DATA_IN_DATA_MAXNUM 236
-#define MC_CMD_UART_RECV_DATA_IN_DATA_MAXNUM_MCDI2 1004
-
-
-/***********************************/
-/* MC_CMD_READ_FUSES
- * Read data programmed into the device One-Time-Programmable (OTP) Fuses
- */
-#define MC_CMD_READ_FUSES 0xf0
-#undef MC_CMD_0xf0_PRIVILEGE_CTG
-
-#define MC_CMD_0xf0_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_READ_FUSES_IN msgrequest */
-#define MC_CMD_READ_FUSES_IN_LEN 8
-/* Offset in OTP to read */
-#define MC_CMD_READ_FUSES_IN_OFFSET_OFST 0
-#define MC_CMD_READ_FUSES_IN_OFFSET_LEN 4
-/* Length of data to read in bytes */
-#define MC_CMD_READ_FUSES_IN_LENGTH_OFST 4
-#define MC_CMD_READ_FUSES_IN_LENGTH_LEN 4
-
-/* MC_CMD_READ_FUSES_OUT msgresponse */
-#define MC_CMD_READ_FUSES_OUT_LENMIN 4
-#define MC_CMD_READ_FUSES_OUT_LENMAX 252
-#define MC_CMD_READ_FUSES_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_READ_FUSES_OUT_LEN(num) (4+1*(num))
-#define MC_CMD_READ_FUSES_OUT_DATA_NUM(len) (((len)-4)/1)
-/* Length of returned OTP data in bytes */
-#define MC_CMD_READ_FUSES_OUT_LENGTH_OFST 0
-#define MC_CMD_READ_FUSES_OUT_LENGTH_LEN 4
-/* Returned data */
-#define MC_CMD_READ_FUSES_OUT_DATA_OFST 4
-#define MC_CMD_READ_FUSES_OUT_DATA_LEN 1
-#define MC_CMD_READ_FUSES_OUT_DATA_MINNUM 0
-#define MC_CMD_READ_FUSES_OUT_DATA_MAXNUM 248
-#define MC_CMD_READ_FUSES_OUT_DATA_MAXNUM_MCDI2 1016
-
-
-/***********************************/
/* MC_CMD_KR_TUNE
* Get or set KR Serdes RXEQ and TX Driver settings
*/
#define MC_CMD_KR_TUNE 0xf1
#undef MC_CMD_0xf1_PRIVILEGE_CTG
-#define MC_CMD_0xf1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xf1_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_KR_TUNE_IN msgrequest */
#define MC_CMD_KR_TUNE_IN_LENMIN 4
@@ -21138,262 +21098,6 @@
/***********************************/
-/* MC_CMD_PCIE_TUNE
- * Get or set PCIE Serdes RXEQ and TX Driver settings
- */
-#define MC_CMD_PCIE_TUNE 0xf2
-#undef MC_CMD_0xf2_PRIVILEGE_CTG
-
-#define MC_CMD_0xf2_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_PCIE_TUNE_IN msgrequest */
-#define MC_CMD_PCIE_TUNE_IN_LENMIN 4
-#define MC_CMD_PCIE_TUNE_IN_LENMAX 252
-#define MC_CMD_PCIE_TUNE_IN_LENMAX_MCDI2 1020
-#define MC_CMD_PCIE_TUNE_IN_LEN(num) (4+4*(num))
-#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_NUM(len) (((len)-4)/4)
-/* Requested operation */
-#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_OP_OFST 0
-#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_OP_LEN 1
-/* enum: Get current RXEQ settings */
-#define MC_CMD_PCIE_TUNE_IN_RXEQ_GET 0x0
-/* enum: Override RXEQ settings */
-#define MC_CMD_PCIE_TUNE_IN_RXEQ_SET 0x1
-/* enum: Get current TX Driver settings */
-#define MC_CMD_PCIE_TUNE_IN_TXEQ_GET 0x2
-/* enum: Override TX Driver settings */
-#define MC_CMD_PCIE_TUNE_IN_TXEQ_SET 0x3
-/* enum: Start PCIe Serdes Eye diagram plot on a given lane. */
-#define MC_CMD_PCIE_TUNE_IN_START_EYE_PLOT 0x5
-/* enum: Poll PCIe Serdes Eye diagram plot. Returns one row of BER data. The
- * caller should call this command repeatedly after starting eye plot, until no
- * more data is returned.
- */
-#define MC_CMD_PCIE_TUNE_IN_POLL_EYE_PLOT 0x6
-/* enum: Enable the SERDES BIST and set it to generate a 200MHz square wave */
-#define MC_CMD_PCIE_TUNE_IN_BIST_SQUARE_WAVE 0x7
-/* Align the arguments to 32 bits */
-#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_OFST 1
-#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_LEN 3
-/* Arguments specific to the operation */
-#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_OFST 4
-#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_LEN 4
-#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_MINNUM 0
-#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_MAXNUM 62
-#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_MAXNUM_MCDI2 254
-
-/* MC_CMD_PCIE_TUNE_OUT msgresponse */
-#define MC_CMD_PCIE_TUNE_OUT_LEN 0
-
-/* MC_CMD_PCIE_TUNE_RXEQ_GET_IN msgrequest */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_LEN 4
-/* Requested operation */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_OP_OFST 0
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_OP_LEN 1
-/* Align the arguments to 32 bits */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_RSVD_OFST 1
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_RSVD_LEN 3
-
-/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT msgresponse */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LENMIN 4
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LENMAX 252
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LEN(num) (0+4*(num))
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_NUM(len) (((len)-0)/4)
-/* RXEQ Parameter */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_OFST 0
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LEN 4
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_MINNUM 1
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM 63
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM_MCDI2 255
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_OFST 0
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN 0
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH 8
-/* enum: Attenuation (0-15) */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_ATT 0x0
-/* enum: CTLE Boost (0-15) */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_BOOST 0x1
-/* enum: DFE Tap1 (0 - max negative, 64 - zero, 127 - max positive) */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP1 0x2
-/* enum: DFE Tap2 (0 - max negative, 32 - zero, 63 - max positive) */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP2 0x3
-/* enum: DFE Tap3 (0 - max negative, 32 - zero, 63 - max positive) */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP3 0x4
-/* enum: DFE Tap4 (0 - max negative, 32 - zero, 63 - max positive) */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP4 0x5
-/* enum: DFE Tap5 (0 - max negative, 32 - zero, 63 - max positive) */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP5 0x6
-/* enum: DFE DLev */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_DLEV 0x7
-/* enum: Figure of Merit */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_FOM 0x8
-/* enum: CTLE EQ Capacitor (HF Gain) */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_CTLE_EQC 0x9
-/* enum: CTLE EQ Resistor (DC Gain) */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_CTLE_EQRES 0xa
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_OFST 0
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 5
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_1 0x1 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_2 0x2 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_3 0x3 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_4 0x4 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_5 0x5 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_6 0x6 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_7 0x7 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_8 0x8 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_9 0x9 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_10 0xa /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_11 0xb /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_12 0xc /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_13 0xd /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_14 0xe /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_15 0xf /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_ALL 0x10 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_OFST 0
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_LBN 13
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_WIDTH 1
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_OFST 0
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_LBN 14
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_WIDTH 10
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_OFST 0
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_LBN 24
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8
-
-/* MC_CMD_PCIE_TUNE_RXEQ_SET_IN msgrequest */
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LENMIN 8
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LENMAX 252
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LENMAX_MCDI2 1020
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LEN(num) (4+4*(num))
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_NUM(len) (((len)-4)/4)
-/* Requested operation */
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_OP_OFST 0
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_OP_LEN 1
-/* Align the arguments to 32 bits */
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_RSVD_OFST 1
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_RSVD_LEN 3
-/* RXEQ Parameter */
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_OFST 4
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LEN 4
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_MINNUM 1
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_MAXNUM 62
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_MAXNUM_MCDI2 254
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_ID_OFST 4
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_ID_LBN 0
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_ID_WIDTH 8
-/* Enum values, see field(s): */
-/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT/PARAM_ID */
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LANE_OFST 4
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LANE_LBN 8
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LANE_WIDTH 5
-/* Enum values, see field(s): */
-/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT/PARAM_LANE */
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_OFST 4
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_LBN 13
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_WIDTH 1
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED_OFST 4
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED_LBN 14
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED_WIDTH 2
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_INITIAL_OFST 4
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_INITIAL_LBN 16
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_INITIAL_WIDTH 8
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED2_OFST 4
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED2_LBN 24
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED2_WIDTH 8
-
-/* MC_CMD_PCIE_TUNE_RXEQ_SET_OUT msgresponse */
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_OUT_LEN 0
-
-/* MC_CMD_PCIE_TUNE_TXEQ_GET_IN msgrequest */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_LEN 4
-/* Requested operation */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_OP_OFST 0
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_OP_LEN 1
-/* Align the arguments to 32 bits */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_RSVD_OFST 1
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_RSVD_LEN 3
-
-/* MC_CMD_PCIE_TUNE_TXEQ_GET_OUT msgresponse */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LENMIN 4
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LENMAX 252
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LEN(num) (0+4*(num))
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_NUM(len) (((len)-0)/4)
-/* RXEQ Parameter */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_OFST 0
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LEN 4
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_MINNUM 1
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM 63
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM_MCDI2 255
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_ID_OFST 0
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_ID_LBN 0
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_ID_WIDTH 8
-/* enum: TxMargin (PIPE) */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_TXMARGIN 0x0
-/* enum: TxSwing (PIPE) */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_TXSWING 0x1
-/* enum: De-emphasis coefficient C(-1) (PIPE) */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_CM1 0x2
-/* enum: De-emphasis coefficient C(0) (PIPE) */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_C0 0x3
-/* enum: De-emphasis coefficient C(+1) (PIPE) */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_CP1 0x4
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_OFST 0
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 4
-/* Enum values, see field(s): */
-/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT/PARAM_LANE */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_RESERVED_OFST 0
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_RESERVED_LBN 12
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_RESERVED_WIDTH 12
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_OFST 0
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_LBN 24
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8
-
-/* MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN msgrequest */
-#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LEN 8
-/* Requested operation */
-#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_OP_OFST 0
-#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_OP_LEN 1
-/* Align the arguments to 32 bits */
-#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_RSVD_OFST 1
-#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_RSVD_LEN 3
-#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LANE_OFST 4
-#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LANE_LEN 4
-
-/* MC_CMD_PCIE_TUNE_START_EYE_PLOT_OUT msgresponse */
-#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_OUT_LEN 0
-
-/* MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN msgrequest */
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_LEN 4
-/* Requested operation */
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_OP_OFST 0
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_OP_LEN 1
-/* Align the arguments to 32 bits */
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_RSVD_OFST 1
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_RSVD_LEN 3
-
-/* MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT msgresponse */
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LENMIN 0
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LENMAX 252
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LEN(num) (0+2*(num))
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_NUM(len) (((len)-0)/2)
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_OFST 0
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_LEN 2
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MINNUM 0
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM 126
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM_MCDI2 510
-
-/* MC_CMD_PCIE_TUNE_BIST_SQUARE_WAVE_IN msgrequest */
-#define MC_CMD_PCIE_TUNE_BIST_SQUARE_WAVE_IN_LEN 0
-
-/* MC_CMD_PCIE_TUNE_BIST_SQUARE_WAVE_OUT msgrequest */
-#define MC_CMD_PCIE_TUNE_BIST_SQUARE_WAVE_OUT_LEN 0
-
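Each RXEQ PARAM word packs several sub-fields using the LBN/WIDTH layout above (ID in bits 7:0, LANE in bits 12:8, AUTOCAL in bit 13, CURRENT in bits 31:24). A short sketch of unpacking one such word from a response.

#include <stdint.h>
#include <stdio.h>

static void decode_rxeq_param(uint32_t param)
{
    unsigned int id      = param & 0xff;         /* PARAM_ID, bits 7:0 */
    unsigned int lane    = (param >> 8) & 0x1f;  /* PARAM_LANE, bits 12:8 */
    unsigned int autocal = (param >> 13) & 0x1;  /* PARAM_AUTOCAL, bit 13 */
    unsigned int current = (param >> 24) & 0xff; /* PARAM_CURRENT, bits 31:24 */

    printf("param id %u lane %u autocal %u value %u\n",
           id, lane, autocal, current);
}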
-
-/***********************************/
/* MC_CMD_LICENSING
* Operations on the NVRAM_PARTITION_TYPE_LICENSE application license partition
* - not used for V3 licensing
@@ -21532,56 +21236,6 @@
/***********************************/
-/* MC_CMD_LICENSING_GET_ID_V3
- * Get ID and type from the NVRAM_PARTITION_TYPE_LICENSE application license
- * partition - V3 licensing (Medford)
- */
-#define MC_CMD_LICENSING_GET_ID_V3 0xd1
-#undef MC_CMD_0xd1_PRIVILEGE_CTG
-
-#define MC_CMD_0xd1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_LICENSING_GET_ID_V3_IN msgrequest */
-#define MC_CMD_LICENSING_GET_ID_V3_IN_LEN 0
-
-/* MC_CMD_LICENSING_GET_ID_V3_OUT msgresponse */
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LENMIN 8
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LENMAX 252
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LEN(num) (8+1*(num))
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_NUM(len) (((len)-8)/1)
-/* type of license (eg 3) */
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_TYPE_OFST 0
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_TYPE_LEN 4
-/* length of the license ID (in bytes) */
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH_OFST 4
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH_LEN 4
-/* the unique license ID of the adapter */
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_OFST 8
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LEN 1
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_MINNUM 0
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_MAXNUM 244
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_MAXNUM_MCDI2 1012
-
-
-/***********************************/
-/* MC_CMD_MC2MC_PROXY
- * Execute an arbitrary MCDI command on the slave MC of a dual-core device.
- * This will fail on a single-core system.
- */
-#define MC_CMD_MC2MC_PROXY 0xf4
-#undef MC_CMD_0xf4_PRIVILEGE_CTG
-
-#define MC_CMD_0xf4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_MC2MC_PROXY_IN msgrequest */
-#define MC_CMD_MC2MC_PROXY_IN_LEN 0
-
-/* MC_CMD_MC2MC_PROXY_OUT msgresponse */
-#define MC_CMD_MC2MC_PROXY_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_GET_LICENSED_APP_STATE
* Query the state of an individual licensed application. (Note that the actual
* state may be invalidated by the MC_CMD_LICENSING OP_UPDATE_LICENSE operation
@@ -21610,424 +21264,6 @@
/***********************************/
-/* MC_CMD_GET_LICENSED_V3_APP_STATE
- * Query the state of an individual licensed application. (Note that the actual
- * state may be invalidated by the MC_CMD_LICENSING_V3 OP_UPDATE_LICENSE
- * operation or a reboot of the MC.) Used for V3 licensing (Medford)
- */
-#define MC_CMD_GET_LICENSED_V3_APP_STATE 0xd2
-#undef MC_CMD_0xd2_PRIVILEGE_CTG
-
-#define MC_CMD_0xd2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_LICENSED_V3_APP_STATE_IN msgrequest */
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_LEN 8
-/* application ID to query (LICENSED_V3_APPS_xxx) expressed as a single bit
- * mask
- */
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_OFST 0
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LEN 8
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LO_OFST 0
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LO_LEN 4
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LO_LBN 0
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LO_WIDTH 32
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_HI_OFST 4
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_HI_LEN 4
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_HI_LBN 32
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_HI_WIDTH 32
-
-/* MC_CMD_GET_LICENSED_V3_APP_STATE_OUT msgresponse */
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN 4
-/* state of this application */
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_STATE_OFST 0
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_STATE_LEN 4
-/* enum: no (or invalid) license is present for the application */
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_NOT_LICENSED 0x0
-/* enum: a valid license is present for the application */
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LICENSED 0x1
-
-
-/***********************************/
-/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES
- * Query the state of one or more licensed features. (Note that the actual
- * state may be invalidated by the MC_CMD_LICENSING_V3 OP_UPDATE_LICENSE
- * operation or a reboot of the MC.) Used for V3 licensing (Medford)
- */
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES 0xd3
-#undef MC_CMD_0xd3_PRIVILEGE_CTG
-
-#define MC_CMD_0xd3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN msgrequest */
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_LEN 8
-/* features to query (LICENSED_V3_FEATURES_xxx) expressed as a mask with one or
- * more bits set
- */
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_OFST 0
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LEN 8
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LO_OFST 0
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LO_LEN 4
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LO_LBN 0
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LO_WIDTH 32
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_HI_OFST 4
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_HI_LEN 4
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_HI_LBN 32
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_HI_WIDTH 32
-
-/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT msgresponse */
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_LEN 8
-/* states of these features - bit set for licensed, clear for not licensed */
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_OFST 0
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LEN 8
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LO_OFST 0
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LO_LEN 4
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LO_LBN 0
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LO_WIDTH 32
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_HI_OFST 4
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_HI_LEN 4
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_HI_LBN 32
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_HI_WIDTH 32
-
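The 64-bit FEATURES and STATES masks are described as LO/HI pairs of 32-bit halves (LO at the base offset, HI four bytes later). A small sketch of building the input mask and recombining the output, assuming a little-endian host.

#include <stdint.h>
#include <string.h>

static void put_features_mask(uint8_t req[8], uint64_t features)
{
    uint32_t lo = (uint32_t)(features & 0xffffffffu);
    uint32_t hi = (uint32_t)(features >> 32);

    memcpy(req + 0, &lo, 4);    /* FEATURES_LO (little-endian host assumed) */
    memcpy(req + 4, &hi, 4);    /* FEATURES_HI */
}

static uint64_t get_states_mask(const uint8_t out[8])
{
    uint32_t lo, hi;

    memcpy(&lo, out + 0, 4);    /* STATES_LO */
    memcpy(&hi, out + 4, 4);    /* STATES_HI */
    return (uint64_t)hi << 32 | lo;
}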
-
-/***********************************/
-/* MC_CMD_LICENSED_APP_OP
- * Perform an action for an individual licensed application - not used for V3
- * licensing.
- */
-#define MC_CMD_LICENSED_APP_OP 0xf6
-#undef MC_CMD_0xf6_PRIVILEGE_CTG
-
-#define MC_CMD_0xf6_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_LICENSED_APP_OP_IN msgrequest */
-#define MC_CMD_LICENSED_APP_OP_IN_LENMIN 8
-#define MC_CMD_LICENSED_APP_OP_IN_LENMAX 252
-#define MC_CMD_LICENSED_APP_OP_IN_LENMAX_MCDI2 1020
-#define MC_CMD_LICENSED_APP_OP_IN_LEN(num) (8+4*(num))
-#define MC_CMD_LICENSED_APP_OP_IN_ARGS_NUM(len) (((len)-8)/4)
-/* application ID */
-#define MC_CMD_LICENSED_APP_OP_IN_APP_ID_OFST 0
-#define MC_CMD_LICENSED_APP_OP_IN_APP_ID_LEN 4
-/* the type of operation requested */
-#define MC_CMD_LICENSED_APP_OP_IN_OP_OFST 4
-#define MC_CMD_LICENSED_APP_OP_IN_OP_LEN 4
-/* enum: validate application */
-#define MC_CMD_LICENSED_APP_OP_IN_OP_VALIDATE 0x0
-/* enum: mask application */
-#define MC_CMD_LICENSED_APP_OP_IN_OP_MASK 0x1
-/* arguments specific to this particular operation */
-#define MC_CMD_LICENSED_APP_OP_IN_ARGS_OFST 8
-#define MC_CMD_LICENSED_APP_OP_IN_ARGS_LEN 4
-#define MC_CMD_LICENSED_APP_OP_IN_ARGS_MINNUM 0
-#define MC_CMD_LICENSED_APP_OP_IN_ARGS_MAXNUM 61
-#define MC_CMD_LICENSED_APP_OP_IN_ARGS_MAXNUM_MCDI2 253
-
-/* MC_CMD_LICENSED_APP_OP_OUT msgresponse */
-#define MC_CMD_LICENSED_APP_OP_OUT_LENMIN 0
-#define MC_CMD_LICENSED_APP_OP_OUT_LENMAX 252
-#define MC_CMD_LICENSED_APP_OP_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_LICENSED_APP_OP_OUT_LEN(num) (0+4*(num))
-#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_NUM(len) (((len)-0)/4)
-/* result specific to this particular operation */
-#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_OFST 0
-#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_LEN 4
-#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_MINNUM 0
-#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_MAXNUM 63
-#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_MAXNUM_MCDI2 255
-
-/* MC_CMD_LICENSED_APP_OP_VALIDATE_IN msgrequest */
-#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_LEN 72
-/* application ID */
-#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_APP_ID_OFST 0
-#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_APP_ID_LEN 4
-/* the type of operation requested */
-#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_OP_OFST 4
-#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_OP_LEN 4
-/* validation challenge */
-#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_OFST 8
-#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_LEN 64
-
-/* MC_CMD_LICENSED_APP_OP_VALIDATE_OUT msgresponse */
-#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_LEN 68
-/* feature expiry (time_t) */
-#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_EXPIRY_OFST 0
-#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_EXPIRY_LEN 4
-/* validation response */
-#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_OFST 4
-#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_LEN 64
-
-/* MC_CMD_LICENSED_APP_OP_MASK_IN msgrequest */
-#define MC_CMD_LICENSED_APP_OP_MASK_IN_LEN 12
-/* application ID */
-#define MC_CMD_LICENSED_APP_OP_MASK_IN_APP_ID_OFST 0
-#define MC_CMD_LICENSED_APP_OP_MASK_IN_APP_ID_LEN 4
-/* the type of operation requested */
-#define MC_CMD_LICENSED_APP_OP_MASK_IN_OP_OFST 4
-#define MC_CMD_LICENSED_APP_OP_MASK_IN_OP_LEN 4
-/* flag */
-#define MC_CMD_LICENSED_APP_OP_MASK_IN_FLAG_OFST 8
-#define MC_CMD_LICENSED_APP_OP_MASK_IN_FLAG_LEN 4
-
-/* MC_CMD_LICENSED_APP_OP_MASK_OUT msgresponse */
-#define MC_CMD_LICENSED_APP_OP_MASK_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_LICENSED_V3_VALIDATE_APP
- * Perform validation for an individual licensed application - V3 licensing
- * (Medford)
- */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP 0xd4
-#undef MC_CMD_0xd4_PRIVILEGE_CTG
-
-#define MC_CMD_0xd4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_LICENSED_V3_VALIDATE_APP_IN msgrequest */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_LEN 56
-/* challenge for validation (384 bits) */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_CHALLENGE_OFST 0
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_CHALLENGE_LEN 48
-/* application ID expressed as a single bit mask */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_OFST 48
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LEN 8
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LO_OFST 48
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LO_LEN 4
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LO_LBN 384
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LO_WIDTH 32
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_HI_OFST 52
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_HI_LEN 4
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_HI_LBN 416
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_HI_WIDTH 32
-
-/* MC_CMD_LICENSED_V3_VALIDATE_APP_OUT msgresponse */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_LEN 116
-/* validation response to challenge in the form of ECDSA signature consisting
- * of two 384-bit integers, r and s, in big-endian order. The signature signs a
- * SHA-384 digest of a message constructed from the concatenation of the input
- * message and the remaining fields of this output message, e.g. challenge[48
- * bytes] ... expiry_time[4 bytes] ...
- */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_RESPONSE_OFST 0
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_RESPONSE_LEN 96
-/* application expiry time */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_TIME_OFST 96
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_TIME_LEN 4
-/* application expiry units */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNITS_OFST 100
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNITS_LEN 4
-/* enum: expiry units are accounting units */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_ACC 0x0
-/* enum: expiry units are calendar days */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_DAYS 0x1
-/* base MAC address of the NIC stored in NVRAM (note that this is a constant
- * value for a given NIC regardless which function is calling, effectively this
- * is PF0 base MAC address)
- */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_BASE_MACADDR_OFST 104
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_BASE_MACADDR_LEN 6
-/* MAC address of v-adaptor associated with the client. If no such v-adaptor
- * exists, then the field is filled with 0xFF.
- */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_VADAPTOR_MACADDR_OFST 110
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_VADAPTOR_MACADDR_LEN 6
-
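The 96-byte RESPONSE field is documented as two big-endian 384-bit integers. A minimal sketch of splitting it out, assuming r precedes s as the comment's ordering suggests; verifying the signature against the licensing public key is beyond the scope of this header.

#include <stdint.h>
#include <string.h>

struct ecdsa_sig_384 {
    uint8_t r[48];  /* big-endian 384-bit integer */
    uint8_t s[48];  /* big-endian 384-bit integer */
};

static void split_v3_validate_response(const uint8_t *out /* 116 bytes */,
                                       struct ecdsa_sig_384 *sig)
{
    /* RESPONSE occupies bytes 0..95 of the output message. */
    memcpy(sig->r, out + 0, 48);
    memcpy(sig->s, out + 48, 48);
}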
-
-/***********************************/
-/* MC_CMD_LICENSED_V3_MASK_FEATURES
- * Mask features - V3 licensing (Medford)
- */
-#define MC_CMD_LICENSED_V3_MASK_FEATURES 0xd5
-#undef MC_CMD_0xd5_PRIVILEGE_CTG
-
-#define MC_CMD_0xd5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_LICENSED_V3_MASK_FEATURES_IN msgrequest */
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_LEN 12
-/* mask to be applied to features to be changed */
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_OFST 0
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LEN 8
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LO_OFST 0
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LO_LEN 4
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LO_LBN 0
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LO_WIDTH 32
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_HI_OFST 4
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_HI_LEN 4
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_HI_LBN 32
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_HI_WIDTH 32
-/* whether to turn on or turn off the masked features */
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_FLAG_OFST 8
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_FLAG_LEN 4
-/* enum: turn the features off */
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_OFF 0x0
-/* enum: turn the features back on */
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_ON 0x1
-
-/* MC_CMD_LICENSED_V3_MASK_FEATURES_OUT msgresponse */
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_LICENSING_V3_TEMPORARY
- * Perform operations to support installation of a single temporary license in
- * the adapter, in addition to those found in the licensing partition. See
- * SF-116124-SW for an overview of how this could be used. The license is
- * stored in MC persistent data and so will survive an MC reboot, but will be
- * erased when the adapter is power cycled
- */
-#define MC_CMD_LICENSING_V3_TEMPORARY 0xd6
-#undef MC_CMD_0xd6_PRIVILEGE_CTG
-
-#define MC_CMD_0xd6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_LICENSING_V3_TEMPORARY_IN msgrequest */
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_LEN 4
-/* operation code */
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_OP_OFST 0
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_OP_LEN 4
-/* enum: install a new license, overwriting any existing temporary license.
- * This is an asynchronous operation owing to the time taken to validate an
- * ECDSA license
- */
-#define MC_CMD_LICENSING_V3_TEMPORARY_SET 0x0
-/* enum: clear the license immediately rather than waiting for the next power
- * cycle
- */
-#define MC_CMD_LICENSING_V3_TEMPORARY_CLEAR 0x1
-/* enum: get the status of the asynchronous MC_CMD_LICENSING_V3_TEMPORARY_SET
- * operation
- */
-#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS 0x2
-
-/* MC_CMD_LICENSING_V3_TEMPORARY_IN_SET msgrequest */
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LEN 164
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_OP_OFST 0
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_OP_LEN 4
-/* ECDSA license and signature */
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LICENSE_OFST 4
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LICENSE_LEN 160
-
-/* MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR msgrequest */
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_LEN 4
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_OP_OFST 0
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_OP_LEN 4
-
-/* MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS msgrequest */
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_LEN 4
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_OP_OFST 0
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_OP_LEN 4
-
-/* MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS msgresponse */
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LEN 12
-/* status code */
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_STATUS_OFST 0
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_STATUS_LEN 4
-/* enum: finished validating and installing license */
-#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_OK 0x0
-/* enum: license validation and installation in progress */
-#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_IN_PROGRESS 0x1
-/* enum: licensing error. More specific error messages are not provided to
- * avoid exposing details of the licensing system to the client
- */
-#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_ERROR 0x2
-/* bitmask of licensed features */
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_OFST 4
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LEN 8
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LO_OFST 4
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LO_LEN 4
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LO_LBN 32
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LO_WIDTH 32
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_HI_OFST 8
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_HI_LEN 4
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_HI_LBN 64
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_HI_WIDTH 32
-
-
-/***********************************/
-/* MC_CMD_SET_PORT_SNIFF_CONFIG
- * Configure RX port sniffing for the physical port associated with the calling
- * function. Only a privileged function may change the port sniffing
- * configuration. A copy of all traffic delivered to the host (non-promiscuous
- * mode) or all traffic arriving at the port (promiscuous mode) may be
- * delivered to a specific queue, or a set of queues with RSS.
- */
-#define MC_CMD_SET_PORT_SNIFF_CONFIG 0xf7
-#undef MC_CMD_0xf7_PRIVILEGE_CTG
-
-#define MC_CMD_0xf7_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_SET_PORT_SNIFF_CONFIG_IN msgrequest */
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_LEN 16
-/* configuration flags */
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_FLAGS_LEN 4
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_OFST 0
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_OFST 0
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_LBN 1
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_WIDTH 1
-/* receive queue handle (for RSS mode, this is the base queue) */
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE_LEN 4
-/* receive mode */
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_LEN 4
-/* enum: receive to just the specified queue */
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE 0x0
-/* enum: receive to multiple queues using RSS context */
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_RSS 0x1
-/* RSS context (for RX_MODE_RSS) as returned by MC_CMD_RSS_CONTEXT_ALLOC. Note
- * that these handles should be considered opaque to the host, although a value
- * of 0xFFFFFFFF is guaranteed never to be a valid handle.
- */
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_LEN 4
-
-/* MC_CMD_SET_PORT_SNIFF_CONFIG_OUT msgresponse */
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_OUT_LEN 0
-
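A minimal sketch of populating the 16-byte MC_CMD_SET_PORT_SNIFF_CONFIG_IN request for non-promiscuous sniffing to a single queue, assuming a little-endian host; using 0xFFFFFFFF for the unused RSS context is only an assumed placeholder, chosen because it is documented as never being a valid handle.

#include <stdint.h>
#include <string.h>

static void build_port_sniff_req(uint8_t req[16], uint32_t rx_queue)
{
    uint32_t flags = 1u << 0;           /* ENABLE = 1, PROMISCUOUS = 0 */
    uint32_t rx_mode = 0x0;             /* RX_MODE_SIMPLE */
    uint32_t rx_context = 0xffffffff;   /* unused in SIMPLE mode (assumed) */

    memset(req, 0, 16);
    memcpy(req + 0,  &flags, 4);        /* FLAGS */
    memcpy(req + 4,  &rx_queue, 4);     /* RX_QUEUE */
    memcpy(req + 8,  &rx_mode, 4);      /* RX_MODE */
    memcpy(req + 12, &rx_context, 4);   /* RX_CONTEXT */
}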
-
-/***********************************/
-/* MC_CMD_GET_PORT_SNIFF_CONFIG
- * Obtain the current RX port sniffing configuration for the physical port
- * associated with the calling function. Only a privileged function may read
- * the configuration.
- */
-#define MC_CMD_GET_PORT_SNIFF_CONFIG 0xf8
-#undef MC_CMD_0xf8_PRIVILEGE_CTG
-
-#define MC_CMD_0xf8_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_PORT_SNIFF_CONFIG_IN msgrequest */
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_IN_LEN 0
-
-/* MC_CMD_GET_PORT_SNIFF_CONFIG_OUT msgresponse */
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_LEN 16
-/* configuration flags */
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_FLAGS_LEN 4
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_OFST 0
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_OFST 0
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_LBN 1
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_WIDTH 1
-/* receiving queue handle (for RSS mode, this is the base queue) */
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_LEN 4
-/* receive mode */
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_LEN 4
-/* enum: receiving to just the specified queue */
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE 0x0
-/* enum: receiving to multiple queues using RSS context */
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS 0x1
-/* RSS context (for RX_MODE_RSS) */
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_LEN 4
-
-
-/***********************************/
/* MC_CMD_SET_PARSER_DISP_CONFIG
* Change configuration related to the parser-dispatcher subsystem.
*/
@@ -22073,305 +21309,6 @@
/***********************************/
-/* MC_CMD_GET_PARSER_DISP_CONFIG
- * Read configuration related to the parser-dispatcher subsystem.
- */
-#define MC_CMD_GET_PARSER_DISP_CONFIG 0xfa
-#undef MC_CMD_0xfa_PRIVILEGE_CTG
-
-#define MC_CMD_0xfa_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_PARSER_DISP_CONFIG_IN msgrequest */
-#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_LEN 8
-/* the type of configuration setting to read */
-#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0
-#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_TYPE_LEN 4
-/* Enum values, see field(s): */
-/* MC_CMD_SET_PARSER_DISP_CONFIG/MC_CMD_SET_PARSER_DISP_CONFIG_IN/TYPE */
-/* handle for the entity to query: queue handle, EVB port ID, etc. depending on
- * the type of configuration setting being read
- */
-#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4
-#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_ENTITY_LEN 4
-
-/* MC_CMD_GET_PARSER_DISP_CONFIG_OUT msgresponse */
-#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMIN 4
-#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMAX 252
-#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LEN(num) (0+4*(num))
-#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_NUM(len) (((len)-0)/4)
-/* current value: the details depend on the type of configuration setting being
- * read
- */
-#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_OFST 0
-#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_LEN 4
-#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MINNUM 1
-#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MAXNUM 63
-#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MAXNUM_MCDI2 255
-
-
-/***********************************/
-/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG
- * Configure TX port sniffing for the physical port associated with the calling
- * function. Only a privileged function may change the port sniffing
- * configuration. A copy of all traffic transmitted through the port may be
- * delivered to a specific queue, or a set of queues with RSS. Note that these
- * packets are delivered with transmit timestamps in the packet prefix, not
- * receive timestamps, so it is likely that the queue(s) will need to be
- * dedicated as TX sniff receivers.
- */
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG 0xfb
-#undef MC_CMD_0xfb_PRIVILEGE_CTG
-
-#define MC_CMD_0xfb_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN msgrequest */
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_LEN 16
-/* configuration flags */
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_FLAGS_LEN 4
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_OFST 0
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1
-/* receive queue handle (for RSS mode, this is the base queue) */
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_QUEUE_LEN 4
-/* receive mode */
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_LEN 4
-/* enum: receive to just the specified queue */
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE 0x0
-/* enum: receive to multiple queues using RSS context */
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_RSS 0x1
-/* RSS context (for RX_MODE_RSS) as returned by MC_CMD_RSS_CONTEXT_ALLOC. Note
- * that these handles should be considered opaque to the host, although a value
- * of 0xFFFFFFFF is guaranteed never to be a valid handle.
- */
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_LEN 4
-
-/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT msgresponse */
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG
- * Obtain the current TX port sniffing configuration for the physical port
- * associated with the calling function. Only a privileged function may read
- * the configuration.
- */
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG 0xfc
-#undef MC_CMD_0xfc_PRIVILEGE_CTG
-
-#define MC_CMD_0xfc_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG_IN msgrequest */
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_IN_LEN 0
-
-/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT msgresponse */
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_LEN 16
-/* configuration flags */
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_FLAGS_LEN 4
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_OFST 0
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1
-/* receiving queue handle (for RSS mode, this is the base queue) */
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_LEN 4
-/* receive mode */
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_LEN 4
-/* enum: receiving to just the specified queue */
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE 0x0
-/* enum: receiving to multiple queues using RSS context */
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS 0x1
-/* RSS context (for RX_MODE_RSS) */
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_LEN 4
-
-
-/***********************************/
-/* MC_CMD_RMON_STATS_RX_ERRORS
- * Per queue rx error stats.
- */
-#define MC_CMD_RMON_STATS_RX_ERRORS 0xfe
-#undef MC_CMD_0xfe_PRIVILEGE_CTG
-
-#define MC_CMD_0xfe_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_RMON_STATS_RX_ERRORS_IN msgrequest */
-#define MC_CMD_RMON_STATS_RX_ERRORS_IN_LEN 8
-/* The rx queue to get stats for. */
-#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RX_QUEUE_OFST 0
-#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RX_QUEUE_LEN 4
-#define MC_CMD_RMON_STATS_RX_ERRORS_IN_FLAGS_OFST 4
-#define MC_CMD_RMON_STATS_RX_ERRORS_IN_FLAGS_LEN 4
-#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_OFST 4
-#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_LBN 0
-#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_STATS_RX_ERRORS_OUT msgresponse */
-#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_LEN 16
-#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_CRC_ERRORS_OFST 0
-#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_CRC_ERRORS_LEN 4
-#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_TRUNC_ERRORS_OFST 4
-#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_TRUNC_ERRORS_LEN 4
-#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_NO_DESC_DROPS_OFST 8
-#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_NO_DESC_DROPS_LEN 4
-#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_ABORT_OFST 12
-#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_ABORT_LEN 4
-
-
-/***********************************/
-/* MC_CMD_GET_PCIE_RESOURCE_INFO
- * Find out about available PCIE resources
- */
-#define MC_CMD_GET_PCIE_RESOURCE_INFO 0xfd
-#undef MC_CMD_0xfd_PRIVILEGE_CTG
-
-#define MC_CMD_0xfd_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_PCIE_RESOURCE_INFO_IN msgrequest */
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_IN_LEN 0
-
-/* MC_CMD_GET_PCIE_RESOURCE_INFO_OUT msgresponse */
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_LEN 28
-/* The maximum number of PFs the device can expose */
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PFS_OFST 0
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PFS_LEN 4
-/* The maximum number of VFs the device can expose in total */
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VFS_OFST 4
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VFS_LEN 4
-/* The maximum number of MSI-X vectors the device can provide in total */
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VECTORS_OFST 8
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VECTORS_LEN 4
-/* the number of MSI-X vectors the device will allocate by default to each PF
- */
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_PF_VECTORS_OFST 12
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_PF_VECTORS_LEN 4
-/* the number of MSI-X vectors the device will allocate by default to each VF
- */
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_VF_VECTORS_OFST 16
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_VF_VECTORS_LEN 4
-/* the maximum number of MSI-X vectors the device can allocate to any one PF */
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PF_VECTORS_OFST 20
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PF_VECTORS_LEN 4
-/* the maximum number of MSI-X vectors the device can allocate to any one VF */
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VF_VECTORS_OFST 24
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VF_VECTORS_LEN 4
-
-
-/***********************************/
-/* MC_CMD_GET_PORT_MODES
- * Find out about available port modes
- */
-#define MC_CMD_GET_PORT_MODES 0xff
-#undef MC_CMD_0xff_PRIVILEGE_CTG
-
-#define MC_CMD_0xff_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_PORT_MODES_IN msgrequest */
-#define MC_CMD_GET_PORT_MODES_IN_LEN 0
-
-/* MC_CMD_GET_PORT_MODES_OUT msgresponse */
-#define MC_CMD_GET_PORT_MODES_OUT_LEN 12
-/* Bitmask of port modes available on the board (indexed by TLV_PORT_MODE_*)
- * that are supported for customer use in production firmware.
- */
-#define MC_CMD_GET_PORT_MODES_OUT_MODES_OFST 0
-#define MC_CMD_GET_PORT_MODES_OUT_MODES_LEN 4
-/* Default (canonical) board mode */
-#define MC_CMD_GET_PORT_MODES_OUT_DEFAULT_MODE_OFST 4
-#define MC_CMD_GET_PORT_MODES_OUT_DEFAULT_MODE_LEN 4
-/* Current board mode */
-#define MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST 8
-#define MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_LEN 4
-
-/* MC_CMD_GET_PORT_MODES_OUT_V2 msgresponse */
-#define MC_CMD_GET_PORT_MODES_OUT_V2_LEN 16
-/* Bitmask of port modes available on the board (indexed by TLV_PORT_MODE_*)
- * that are supported for customer use in production firmware.
- */
-#define MC_CMD_GET_PORT_MODES_OUT_V2_MODES_OFST 0
-#define MC_CMD_GET_PORT_MODES_OUT_V2_MODES_LEN 4
-/* Default (canonical) board mode */
-#define MC_CMD_GET_PORT_MODES_OUT_V2_DEFAULT_MODE_OFST 4
-#define MC_CMD_GET_PORT_MODES_OUT_V2_DEFAULT_MODE_LEN 4
-/* Current board mode */
-#define MC_CMD_GET_PORT_MODES_OUT_V2_CURRENT_MODE_OFST 8
-#define MC_CMD_GET_PORT_MODES_OUT_V2_CURRENT_MODE_LEN 4
-/* Bitmask of engineering port modes available on the board (indexed by
- * TLV_PORT_MODE_*). A superset of MC_CMD_GET_PORT_MODES_OUT/MODES that
- * contains all modes implemented in firmware for a particular board. Modes
- * listed in MODES are considered production modes and should be exposed in
- * userland tools. Modes listed in ENGINEERING_MODES, but not in MODES,
- * should be considered hidden (not to be exposed in userland tools) and for
- * engineering use only. There are no other semantic differences and any mode
- * listed in either MODES or ENGINEERING_MODES can be set on the board.
- */
-#define MC_CMD_GET_PORT_MODES_OUT_V2_ENGINEERING_MODES_OFST 12
-#define MC_CMD_GET_PORT_MODES_OUT_V2_ENGINEERING_MODES_LEN 4
-
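A short sketch of decoding the 16-byte V2 response and separating production modes from engineering-only modes, assuming a little-endian host.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void decode_port_modes_v2(const uint8_t out[16])
{
    uint32_t modes, def, cur, eng;

    memcpy(&modes, out + 0, 4);     /* MODES */
    memcpy(&def,   out + 4, 4);     /* DEFAULT_MODE */
    memcpy(&cur,   out + 8, 4);     /* CURRENT_MODE */
    memcpy(&eng,   out + 12, 4);    /* ENGINEERING_MODES */

    printf("default mode %u, current mode %u\n", def, cur);
    for (unsigned int bit = 0; bit < 32; bit++) {
        if (modes & (1u << bit))
            printf("mode %u: production\n", bit);
        else if (eng & (1u << bit))
            printf("mode %u: engineering only\n", bit);
    }
}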
-
-/***********************************/
-/* MC_CMD_OVERRIDE_PORT_MODE
- * Override flash config port mode for subsequent MC reboot(s). Override data
- * is stored in the persistent data section of DMEM and activated on next MC
- * warm reboot. A cold reboot resets the override. It is assumed that a
- * sufficient number of PFs are available and that port mapping is valid for
- * the new port mode, as the override does not affect PF configuration.
- */
-#define MC_CMD_OVERRIDE_PORT_MODE 0x137
-#undef MC_CMD_0x137_PRIVILEGE_CTG
-
-#define MC_CMD_0x137_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_OVERRIDE_PORT_MODE_IN msgrequest */
-#define MC_CMD_OVERRIDE_PORT_MODE_IN_LEN 8
-#define MC_CMD_OVERRIDE_PORT_MODE_IN_FLAGS_OFST 0
-#define MC_CMD_OVERRIDE_PORT_MODE_IN_FLAGS_LEN 4
-#define MC_CMD_OVERRIDE_PORT_MODE_IN_ENABLE_OFST 0
-#define MC_CMD_OVERRIDE_PORT_MODE_IN_ENABLE_LBN 0
-#define MC_CMD_OVERRIDE_PORT_MODE_IN_ENABLE_WIDTH 1
-/* New mode (TLV_PORT_MODE_*) to set, if override enabled */
-#define MC_CMD_OVERRIDE_PORT_MODE_IN_MODE_OFST 4
-#define MC_CMD_OVERRIDE_PORT_MODE_IN_MODE_LEN 4
-
-/* MC_CMD_OVERRIDE_PORT_MODE_OUT msgresponse */
-#define MC_CMD_OVERRIDE_PORT_MODE_OUT_LEN 0
-
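A hedged sketch of the override flow, under the same assumptions as the earlier example (sfc MCDI helpers, hypothetical wrapper name): set ENABLE (bit 0 of FLAGS) and the desired TLV_PORT_MODE_* value, then warm-reboot the MC for it to take effect.

static int example_override_port_mode(struct efx_nic *efx, u32 tlv_port_mode)
{
        MCDI_DECLARE_BUF(inbuf, MC_CMD_OVERRIDE_PORT_MODE_IN_LEN);

        /* ENABLE is bit 0 of FLAGS; the override is applied on the next
         * warm reboot and cleared again by a cold reboot.
         */
        MCDI_SET_DWORD(inbuf, OVERRIDE_PORT_MODE_IN_FLAGS, 1);
        MCDI_SET_DWORD(inbuf, OVERRIDE_PORT_MODE_IN_MODE, tlv_port_mode);

        return efx_mcdi_rpc(efx, MC_CMD_OVERRIDE_PORT_MODE, inbuf,
                            sizeof(inbuf), NULL, 0, NULL);
}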
-
-/***********************************/
-/* MC_CMD_READ_ATB
- * Sample voltages on the ATB
- */
-#define MC_CMD_READ_ATB 0x100
-#undef MC_CMD_0x100_PRIVILEGE_CTG
-
-#define MC_CMD_0x100_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_READ_ATB_IN msgrequest */
-#define MC_CMD_READ_ATB_IN_LEN 16
-#define MC_CMD_READ_ATB_IN_SIGNAL_BUS_OFST 0
-#define MC_CMD_READ_ATB_IN_SIGNAL_BUS_LEN 4
-#define MC_CMD_READ_ATB_IN_BUS_CCOM 0x0 /* enum */
-#define MC_CMD_READ_ATB_IN_BUS_CKR 0x1 /* enum */
-#define MC_CMD_READ_ATB_IN_BUS_CPCIE 0x8 /* enum */
-#define MC_CMD_READ_ATB_IN_SIGNAL_EN_BITNO_OFST 4
-#define MC_CMD_READ_ATB_IN_SIGNAL_EN_BITNO_LEN 4
-#define MC_CMD_READ_ATB_IN_SIGNAL_SEL_OFST 8
-#define MC_CMD_READ_ATB_IN_SIGNAL_SEL_LEN 4
-#define MC_CMD_READ_ATB_IN_SETTLING_TIME_US_OFST 12
-#define MC_CMD_READ_ATB_IN_SETTLING_TIME_US_LEN 4
-
-/* MC_CMD_READ_ATB_OUT msgresponse */
-#define MC_CMD_READ_ATB_OUT_LEN 4
-#define MC_CMD_READ_ATB_OUT_SAMPLE_MV_OFST 0
-#define MC_CMD_READ_ATB_OUT_SAMPLE_MV_LEN 4
-
-
-/***********************************/
/* MC_CMD_GET_WORKAROUNDS
* Read the list of all implemented and all currently enabled workarounds. The
* enums here must correspond with those in MC_CMD_WORKAROUND.
@@ -22538,447 +21475,6 @@
#define MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_OFST 0
#define MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_LEN 4
-
-/***********************************/
-/* MC_CMD_GET_SNAPSHOT_LENGTH
- * Obtain the current range of allowable values for the SNAPSHOT_LENGTH
- * parameter to MC_CMD_INIT_RXQ.
- */
-#define MC_CMD_GET_SNAPSHOT_LENGTH 0x101
-#undef MC_CMD_0x101_PRIVILEGE_CTG
-
-#define MC_CMD_0x101_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_SNAPSHOT_LENGTH_IN msgrequest */
-#define MC_CMD_GET_SNAPSHOT_LENGTH_IN_LEN 0
-
-/* MC_CMD_GET_SNAPSHOT_LENGTH_OUT msgresponse */
-#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_LEN 8
-/* Minimum acceptable snapshot length. */
-#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MIN_OFST 0
-#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MIN_LEN 4
-/* Maximum acceptable snapshot length. */
-#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MAX_OFST 4
-#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MAX_LEN 4
-
-
-/***********************************/
-/* MC_CMD_FUSE_DIAGS
- * Additional fuse diagnostics
- */
-#define MC_CMD_FUSE_DIAGS 0x102
-#undef MC_CMD_0x102_PRIVILEGE_CTG
-
-#define MC_CMD_0x102_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_FUSE_DIAGS_IN msgrequest */
-#define MC_CMD_FUSE_DIAGS_IN_LEN 0
-
-/* MC_CMD_FUSE_DIAGS_OUT msgresponse */
-#define MC_CMD_FUSE_DIAGS_OUT_LEN 48
-/* Total number of mismatched bits between pairs in area 0 */
-#define MC_CMD_FUSE_DIAGS_OUT_AREA0_MISMATCH_BITS_OFST 0
-#define MC_CMD_FUSE_DIAGS_OUT_AREA0_MISMATCH_BITS_LEN 4
-/* Total number of unexpectedly clear (set in B but not A) bits in area 0 */
-#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_A_BAD_BITS_OFST 4
-#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_A_BAD_BITS_LEN 4
-/* Total number of unexpectedly clear (set in A but not B) bits in area 0 */
-#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_B_BAD_BITS_OFST 8
-#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_B_BAD_BITS_LEN 4
-/* Checksum of data after logical OR of pairs in area 0 */
-#define MC_CMD_FUSE_DIAGS_OUT_AREA0_CHECKSUM_OFST 12
-#define MC_CMD_FUSE_DIAGS_OUT_AREA0_CHECKSUM_LEN 4
-/* Total number of mismatched bits between pairs in area 1 */
-#define MC_CMD_FUSE_DIAGS_OUT_AREA1_MISMATCH_BITS_OFST 16
-#define MC_CMD_FUSE_DIAGS_OUT_AREA1_MISMATCH_BITS_LEN 4
-/* Total number of unexpectedly clear (set in B but not A) bits in area 1 */
-#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_A_BAD_BITS_OFST 20
-#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_A_BAD_BITS_LEN 4
-/* Total number of unexpectedly clear (set in A but not B) bits in area 1 */
-#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_B_BAD_BITS_OFST 24
-#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_B_BAD_BITS_LEN 4
-/* Checksum of data after logical OR of pairs in area 1 */
-#define MC_CMD_FUSE_DIAGS_OUT_AREA1_CHECKSUM_OFST 28
-#define MC_CMD_FUSE_DIAGS_OUT_AREA1_CHECKSUM_LEN 4
-/* Total number of mismatched bits between pairs in area 2 */
-#define MC_CMD_FUSE_DIAGS_OUT_AREA2_MISMATCH_BITS_OFST 32
-#define MC_CMD_FUSE_DIAGS_OUT_AREA2_MISMATCH_BITS_LEN 4
-/* Total number of unexpectedly clear (set in B but not A) bits in area 2 */
-#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_A_BAD_BITS_OFST 36
-#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_A_BAD_BITS_LEN 4
-/* Total number of unexpectedly clear (set in A but not B) bits in area 2 */
-#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_B_BAD_BITS_OFST 40
-#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_B_BAD_BITS_LEN 4
-/* Checksum of data after logical OR of pairs in area 2 */
-#define MC_CMD_FUSE_DIAGS_OUT_AREA2_CHECKSUM_OFST 44
-#define MC_CMD_FUSE_DIAGS_OUT_AREA2_CHECKSUM_LEN 4
-
-
-/***********************************/
-/* MC_CMD_PRIVILEGE_MODIFY
- * Modify the privileges of a set of PCIe functions. Note that this operation
- * only affects non-admin functions unless the admin privilege itself is
- * included in one of the masks provided.
- */
-#define MC_CMD_PRIVILEGE_MODIFY 0x60
-#undef MC_CMD_0x60_PRIVILEGE_CTG
-
-#define MC_CMD_0x60_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_PRIVILEGE_MODIFY_IN msgrequest */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_LEN 16
-/* The groups of functions to have their privilege masks modified. */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_FN_GROUP_OFST 0
-#define MC_CMD_PRIVILEGE_MODIFY_IN_FN_GROUP_LEN 4
-#define MC_CMD_PRIVILEGE_MODIFY_IN_NONE 0x0 /* enum */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_ALL 0x1 /* enum */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_PFS_ONLY 0x2 /* enum */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_VFS_ONLY 0x3 /* enum */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_VFS_OF_PF 0x4 /* enum */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_ONE 0x5 /* enum */
-/* For VFS_OF_PF specify the PF, for ONE specify the target function */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_OFST 4
-#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_LEN 4
-#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_OFST 4
-#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_LBN 0
-#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_WIDTH 16
-#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_OFST 4
-#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_LBN 16
-#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_WIDTH 16
-/* Privileges to be added to the target functions. For privilege definitions
- * refer to the command MC_CMD_PRIVILEGE_MASK
- */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_ADD_MASK_OFST 8
-#define MC_CMD_PRIVILEGE_MODIFY_IN_ADD_MASK_LEN 4
-/* Privileges to be removed from the target functions. For privilege
- * definitions refer to the command MC_CMD_PRIVILEGE_MASK
- */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_REMOVE_MASK_OFST 12
-#define MC_CMD_PRIVILEGE_MODIFY_IN_REMOVE_MASK_LEN 4
-
-/* MC_CMD_PRIVILEGE_MODIFY_OUT msgresponse */
-#define MC_CMD_PRIVILEGE_MODIFY_OUT_LEN 0
-
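A sketch of modifying the privileges of all VFs of one PF, same assumptions as the earlier sketches; the add/remove masks would use the bit definitions from MC_CMD_PRIVILEGE_MASK, which this sketch simply takes from the caller.

static int example_privilege_modify_vfs_of_pf(struct efx_nic *efx,
                                              unsigned int pf,
                                              u32 add_mask, u32 remove_mask)
{
        MCDI_DECLARE_BUF(inbuf, MC_CMD_PRIVILEGE_MODIFY_IN_LEN);

        MCDI_SET_DWORD(inbuf, PRIVILEGE_MODIFY_IN_FN_GROUP,
                       MC_CMD_PRIVILEGE_MODIFY_IN_VFS_OF_PF);
        /* For VFS_OF_PF the PF index goes in the low 16 bits of FUNCTION */
        MCDI_SET_DWORD(inbuf, PRIVILEGE_MODIFY_IN_FUNCTION, pf);
        /* Masks use the bit definitions from MC_CMD_PRIVILEGE_MASK */
        MCDI_SET_DWORD(inbuf, PRIVILEGE_MODIFY_IN_ADD_MASK, add_mask);
        MCDI_SET_DWORD(inbuf, PRIVILEGE_MODIFY_IN_REMOVE_MASK, remove_mask);

        return efx_mcdi_rpc(efx, MC_CMD_PRIVILEGE_MODIFY, inbuf,
                            sizeof(inbuf), NULL, 0, NULL);
}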
-
-/***********************************/
-/* MC_CMD_XPM_READ_BYTES
- * Read XPM memory
- */
-#define MC_CMD_XPM_READ_BYTES 0x103
-#undef MC_CMD_0x103_PRIVILEGE_CTG
-
-#define MC_CMD_0x103_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_XPM_READ_BYTES_IN msgrequest */
-#define MC_CMD_XPM_READ_BYTES_IN_LEN 8
-/* Start address (byte) */
-#define MC_CMD_XPM_READ_BYTES_IN_ADDR_OFST 0
-#define MC_CMD_XPM_READ_BYTES_IN_ADDR_LEN 4
-/* Count (bytes) */
-#define MC_CMD_XPM_READ_BYTES_IN_COUNT_OFST 4
-#define MC_CMD_XPM_READ_BYTES_IN_COUNT_LEN 4
-
-/* MC_CMD_XPM_READ_BYTES_OUT msgresponse */
-#define MC_CMD_XPM_READ_BYTES_OUT_LENMIN 0
-#define MC_CMD_XPM_READ_BYTES_OUT_LENMAX 252
-#define MC_CMD_XPM_READ_BYTES_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_XPM_READ_BYTES_OUT_LEN(num) (0+1*(num))
-#define MC_CMD_XPM_READ_BYTES_OUT_DATA_NUM(len) (((len)-0)/1)
-/* Data */
-#define MC_CMD_XPM_READ_BYTES_OUT_DATA_OFST 0
-#define MC_CMD_XPM_READ_BYTES_OUT_DATA_LEN 1
-#define MC_CMD_XPM_READ_BYTES_OUT_DATA_MINNUM 0
-#define MC_CMD_XPM_READ_BYTES_OUT_DATA_MAXNUM 252
-#define MC_CMD_XPM_READ_BYTES_OUT_DATA_MAXNUM_MCDI2 1020
-
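A sketch of a bounded XPM read, same assumptions as the earlier sketches. The response is a variable-length byte array, so the count is capped at the single-request maximum and the actual response length is checked before copying.

static int example_xpm_read_bytes(struct efx_nic *efx, u32 addr,
                                  void *buf, size_t count)
{
        MCDI_DECLARE_BUF(inbuf, MC_CMD_XPM_READ_BYTES_IN_LEN);
        MCDI_DECLARE_BUF(outbuf, MC_CMD_XPM_READ_BYTES_OUT_LENMAX);
        size_t outlen;
        int rc;

        if (count > MC_CMD_XPM_READ_BYTES_OUT_DATA_MAXNUM)
                return -EINVAL;

        MCDI_SET_DWORD(inbuf, XPM_READ_BYTES_IN_ADDR, addr);
        MCDI_SET_DWORD(inbuf, XPM_READ_BYTES_IN_COUNT, count);

        rc = efx_mcdi_rpc(efx, MC_CMD_XPM_READ_BYTES, inbuf, sizeof(inbuf),
                          outbuf, sizeof(outbuf), &outlen);
        if (rc)
                return rc;
        if (outlen < count)
                return -EIO;

        memcpy(buf, MCDI_PTR(outbuf, XPM_READ_BYTES_OUT_DATA), count);
        return 0;
}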
-
-/***********************************/
-/* MC_CMD_XPM_WRITE_BYTES
- * Write XPM memory
- */
-#define MC_CMD_XPM_WRITE_BYTES 0x104
-#undef MC_CMD_0x104_PRIVILEGE_CTG
-
-#define MC_CMD_0x104_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_XPM_WRITE_BYTES_IN msgrequest */
-#define MC_CMD_XPM_WRITE_BYTES_IN_LENMIN 8
-#define MC_CMD_XPM_WRITE_BYTES_IN_LENMAX 252
-#define MC_CMD_XPM_WRITE_BYTES_IN_LENMAX_MCDI2 1020
-#define MC_CMD_XPM_WRITE_BYTES_IN_LEN(num) (8+1*(num))
-#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_NUM(len) (((len)-8)/1)
-/* Start address (byte) */
-#define MC_CMD_XPM_WRITE_BYTES_IN_ADDR_OFST 0
-#define MC_CMD_XPM_WRITE_BYTES_IN_ADDR_LEN 4
-/* Count (bytes) */
-#define MC_CMD_XPM_WRITE_BYTES_IN_COUNT_OFST 4
-#define MC_CMD_XPM_WRITE_BYTES_IN_COUNT_LEN 4
-/* Data */
-#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_OFST 8
-#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_LEN 1
-#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_MINNUM 0
-#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_MAXNUM 244
-#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_MAXNUM_MCDI2 1012
-
-/* MC_CMD_XPM_WRITE_BYTES_OUT msgresponse */
-#define MC_CMD_XPM_WRITE_BYTES_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_XPM_READ_SECTOR
- * Read XPM sector
- */
-#define MC_CMD_XPM_READ_SECTOR 0x105
-#undef MC_CMD_0x105_PRIVILEGE_CTG
-
-#define MC_CMD_0x105_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_XPM_READ_SECTOR_IN msgrequest */
-#define MC_CMD_XPM_READ_SECTOR_IN_LEN 8
-/* Sector index */
-#define MC_CMD_XPM_READ_SECTOR_IN_INDEX_OFST 0
-#define MC_CMD_XPM_READ_SECTOR_IN_INDEX_LEN 4
-/* Sector size */
-#define MC_CMD_XPM_READ_SECTOR_IN_SIZE_OFST 4
-#define MC_CMD_XPM_READ_SECTOR_IN_SIZE_LEN 4
-
-/* MC_CMD_XPM_READ_SECTOR_OUT msgresponse */
-#define MC_CMD_XPM_READ_SECTOR_OUT_LENMIN 4
-#define MC_CMD_XPM_READ_SECTOR_OUT_LENMAX 36
-#define MC_CMD_XPM_READ_SECTOR_OUT_LENMAX_MCDI2 36
-#define MC_CMD_XPM_READ_SECTOR_OUT_LEN(num) (4+1*(num))
-#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_NUM(len) (((len)-4)/1)
-/* Sector type */
-#define MC_CMD_XPM_READ_SECTOR_OUT_TYPE_OFST 0
-#define MC_CMD_XPM_READ_SECTOR_OUT_TYPE_LEN 4
-#define MC_CMD_XPM_READ_SECTOR_OUT_BLANK 0x0 /* enum */
-#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_128 0x1 /* enum */
-#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_256 0x2 /* enum */
-#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_DATA 0x3 /* enum */
-#define MC_CMD_XPM_READ_SECTOR_OUT_INVALID 0xff /* enum */
-/* Sector data */
-#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_OFST 4
-#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_LEN 1
-#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_MINNUM 0
-#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_MAXNUM 32
-#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_MAXNUM_MCDI2 32
-
-
-/***********************************/
-/* MC_CMD_XPM_WRITE_SECTOR
- * Write XPM sector
- */
-#define MC_CMD_XPM_WRITE_SECTOR 0x106
-#undef MC_CMD_0x106_PRIVILEGE_CTG
-
-#define MC_CMD_0x106_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_XPM_WRITE_SECTOR_IN msgrequest */
-#define MC_CMD_XPM_WRITE_SECTOR_IN_LENMIN 12
-#define MC_CMD_XPM_WRITE_SECTOR_IN_LENMAX 44
-#define MC_CMD_XPM_WRITE_SECTOR_IN_LENMAX_MCDI2 44
-#define MC_CMD_XPM_WRITE_SECTOR_IN_LEN(num) (12+1*(num))
-#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_NUM(len) (((len)-12)/1)
-/* If writing fails due to an uncorrectable error, try up to RETRIES following
- * sectors (or until no more space is available). If 0, only one write attempt
- * is made. Note that uncorrectable errors are unlikely, thanks to the XPM
- * self-repair mechanism.
- */
-#define MC_CMD_XPM_WRITE_SECTOR_IN_RETRIES_OFST 0
-#define MC_CMD_XPM_WRITE_SECTOR_IN_RETRIES_LEN 1
-#define MC_CMD_XPM_WRITE_SECTOR_IN_RESERVED_OFST 1
-#define MC_CMD_XPM_WRITE_SECTOR_IN_RESERVED_LEN 3
-/* Sector type */
-#define MC_CMD_XPM_WRITE_SECTOR_IN_TYPE_OFST 4
-#define MC_CMD_XPM_WRITE_SECTOR_IN_TYPE_LEN 4
-/* Enum values, see field(s): */
-/* MC_CMD_XPM_READ_SECTOR/MC_CMD_XPM_READ_SECTOR_OUT/TYPE */
-/* Sector size */
-#define MC_CMD_XPM_WRITE_SECTOR_IN_SIZE_OFST 8
-#define MC_CMD_XPM_WRITE_SECTOR_IN_SIZE_LEN 4
-/* Sector data */
-#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_OFST 12
-#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_LEN 1
-#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_MINNUM 0
-#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_MAXNUM 32
-#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_MAXNUM_MCDI2 32
-
-/* MC_CMD_XPM_WRITE_SECTOR_OUT msgresponse */
-#define MC_CMD_XPM_WRITE_SECTOR_OUT_LEN 4
-/* New sector index */
-#define MC_CMD_XPM_WRITE_SECTOR_OUT_INDEX_OFST 0
-#define MC_CMD_XPM_WRITE_SECTOR_OUT_INDEX_LEN 4
-
-
-/***********************************/
-/* MC_CMD_XPM_INVALIDATE_SECTOR
- * Invalidate XPM sector
- */
-#define MC_CMD_XPM_INVALIDATE_SECTOR 0x107
-#undef MC_CMD_0x107_PRIVILEGE_CTG
-
-#define MC_CMD_0x107_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_XPM_INVALIDATE_SECTOR_IN msgrequest */
-#define MC_CMD_XPM_INVALIDATE_SECTOR_IN_LEN 4
-/* Sector index */
-#define MC_CMD_XPM_INVALIDATE_SECTOR_IN_INDEX_OFST 0
-#define MC_CMD_XPM_INVALIDATE_SECTOR_IN_INDEX_LEN 4
-
-/* MC_CMD_XPM_INVALIDATE_SECTOR_OUT msgresponse */
-#define MC_CMD_XPM_INVALIDATE_SECTOR_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_XPM_BLANK_CHECK
- * Blank-check XPM memory and report bad locations
- */
-#define MC_CMD_XPM_BLANK_CHECK 0x108
-#undef MC_CMD_0x108_PRIVILEGE_CTG
-
-#define MC_CMD_0x108_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_XPM_BLANK_CHECK_IN msgrequest */
-#define MC_CMD_XPM_BLANK_CHECK_IN_LEN 8
-/* Start address (byte) */
-#define MC_CMD_XPM_BLANK_CHECK_IN_ADDR_OFST 0
-#define MC_CMD_XPM_BLANK_CHECK_IN_ADDR_LEN 4
-/* Count (bytes) */
-#define MC_CMD_XPM_BLANK_CHECK_IN_COUNT_OFST 4
-#define MC_CMD_XPM_BLANK_CHECK_IN_COUNT_LEN 4
-
-/* MC_CMD_XPM_BLANK_CHECK_OUT msgresponse */
-#define MC_CMD_XPM_BLANK_CHECK_OUT_LENMIN 4
-#define MC_CMD_XPM_BLANK_CHECK_OUT_LENMAX 252
-#define MC_CMD_XPM_BLANK_CHECK_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_XPM_BLANK_CHECK_OUT_LEN(num) (4+2*(num))
-#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_NUM(len) (((len)-4)/2)
-/* Total number of bad (non-blank) locations */
-#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_COUNT_OFST 0
-#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_COUNT_LEN 4
-/* Addresses of bad locations (may be fewer than BAD_COUNT if they cannot all
- * fit into the MCDI response)
- */
-#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_OFST 4
-#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_LEN 2
-#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_MINNUM 0
-#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_MAXNUM 124
-#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_MAXNUM_MCDI2 508
-
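A sketch showing how the variable-length response could be parsed, same assumptions as the earlier sketches plus get_unaligned_le16() from <asm/unaligned.h>. Because the reported address list may be truncated, the number of entries is derived from the response length with the _NUM() helper rather than from BAD_COUNT.

static int example_xpm_blank_check(struct efx_nic *efx, u32 addr, u32 count,
                                   u32 *bad_count)
{
        MCDI_DECLARE_BUF(inbuf, MC_CMD_XPM_BLANK_CHECK_IN_LEN);
        MCDI_DECLARE_BUF(outbuf, MC_CMD_XPM_BLANK_CHECK_OUT_LENMAX);
        unsigned int i, nreported;
        const u8 *bad_addr;
        size_t outlen;
        int rc;

        MCDI_SET_DWORD(inbuf, XPM_BLANK_CHECK_IN_ADDR, addr);
        MCDI_SET_DWORD(inbuf, XPM_BLANK_CHECK_IN_COUNT, count);

        rc = efx_mcdi_rpc(efx, MC_CMD_XPM_BLANK_CHECK, inbuf, sizeof(inbuf),
                          outbuf, sizeof(outbuf), &outlen);
        if (rc)
                return rc;
        if (outlen < MC_CMD_XPM_BLANK_CHECK_OUT_LENMIN)
                return -EIO;

        *bad_count = MCDI_DWORD(outbuf, XPM_BLANK_CHECK_OUT_BAD_COUNT);

        /* The response may carry fewer addresses than BAD_COUNT if they do
         * not all fit; derive the reported number from the response length.
         */
        nreported = MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_NUM(outlen);
        bad_addr = MCDI_PTR(outbuf, XPM_BLANK_CHECK_OUT_BAD_ADDR);
        for (i = 0; i < nreported; i++)
                pr_debug("bad XPM location at 0x%x\n",
                         get_unaligned_le16(bad_addr + 2 * i));
        return 0;
}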
-
-/***********************************/
-/* MC_CMD_XPM_REPAIR
- * Blank-check and repair XPM memory
- */
-#define MC_CMD_XPM_REPAIR 0x109
-#undef MC_CMD_0x109_PRIVILEGE_CTG
-
-#define MC_CMD_0x109_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_XPM_REPAIR_IN msgrequest */
-#define MC_CMD_XPM_REPAIR_IN_LEN 8
-/* Start address (byte) */
-#define MC_CMD_XPM_REPAIR_IN_ADDR_OFST 0
-#define MC_CMD_XPM_REPAIR_IN_ADDR_LEN 4
-/* Count (bytes) */
-#define MC_CMD_XPM_REPAIR_IN_COUNT_OFST 4
-#define MC_CMD_XPM_REPAIR_IN_COUNT_LEN 4
-
-/* MC_CMD_XPM_REPAIR_OUT msgresponse */
-#define MC_CMD_XPM_REPAIR_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_XPM_DECODER_TEST
- * Test XPM memory address decoders for gross manufacturing defects. Can only
- * be performed on an unprogrammed part.
- */
-#define MC_CMD_XPM_DECODER_TEST 0x10a
-#undef MC_CMD_0x10a_PRIVILEGE_CTG
-
-#define MC_CMD_0x10a_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_XPM_DECODER_TEST_IN msgrequest */
-#define MC_CMD_XPM_DECODER_TEST_IN_LEN 0
-
-/* MC_CMD_XPM_DECODER_TEST_OUT msgresponse */
-#define MC_CMD_XPM_DECODER_TEST_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_XPM_WRITE_TEST
- * XPM memory write test. Test XPM write logic for gross manufacturing defects
- * by writing to a dedicated test row. There are 16 locations in the test row
- * and the test can only be performed on locations that have not been
- * previously used (i.e. can be run at most 16 times). The test will pick the
- * first available location to use, or fail with ENOSPC if none left.
- */
-#define MC_CMD_XPM_WRITE_TEST 0x10b
-#undef MC_CMD_0x10b_PRIVILEGE_CTG
-
-#define MC_CMD_0x10b_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_XPM_WRITE_TEST_IN msgrequest */
-#define MC_CMD_XPM_WRITE_TEST_IN_LEN 0
-
-/* MC_CMD_XPM_WRITE_TEST_OUT msgresponse */
-#define MC_CMD_XPM_WRITE_TEST_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_EXEC_SIGNED
- * Check the CMAC of the contents of IMEM and DMEM against the value supplied
- * and if correct begin execution from the start of IMEM. The caller supplies a
- * key ID, the length of IMEM and DMEM to validate and the expected CMAC. CMAC
- * computation runs from the start of IMEM, and from the start of DMEM + 16k,
- * to match flash booting. The command will respond with EINVAL if the CMAC
- * does not match, otherwise it will respond with success before it jumps to
- * IMEM.
- */
-#define MC_CMD_EXEC_SIGNED 0x10c
-#undef MC_CMD_0x10c_PRIVILEGE_CTG
-
-#define MC_CMD_0x10c_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_EXEC_SIGNED_IN msgrequest */
-#define MC_CMD_EXEC_SIGNED_IN_LEN 28
-/* the length of code to include in the CMAC */
-#define MC_CMD_EXEC_SIGNED_IN_CODELEN_OFST 0
-#define MC_CMD_EXEC_SIGNED_IN_CODELEN_LEN 4
-/* the length of data to include in the CMAC */
-#define MC_CMD_EXEC_SIGNED_IN_DATALEN_OFST 4
-#define MC_CMD_EXEC_SIGNED_IN_DATALEN_LEN 4
-/* the XPM sector containing the key to use */
-#define MC_CMD_EXEC_SIGNED_IN_KEYSECTOR_OFST 8
-#define MC_CMD_EXEC_SIGNED_IN_KEYSECTOR_LEN 4
-/* the expected CMAC value */
-#define MC_CMD_EXEC_SIGNED_IN_CMAC_OFST 12
-#define MC_CMD_EXEC_SIGNED_IN_CMAC_LEN 16
-
-/* MC_CMD_EXEC_SIGNED_OUT msgresponse */
-#define MC_CMD_EXEC_SIGNED_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_PREPARE_SIGNED
- * Prepare to upload a signed image. This will scrub the specified length of
- * the data region, which must be at least as large as the DATALEN supplied to
- * MC_CMD_EXEC_SIGNED.
- */
-#define MC_CMD_PREPARE_SIGNED 0x10d
-#undef MC_CMD_0x10d_PRIVILEGE_CTG
-
-#define MC_CMD_0x10d_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_PREPARE_SIGNED_IN msgrequest */
-#define MC_CMD_PREPARE_SIGNED_IN_LEN 4
-/* the length of data area to clear */
-#define MC_CMD_PREPARE_SIGNED_IN_DATALEN_OFST 0
-#define MC_CMD_PREPARE_SIGNED_IN_DATALEN_LEN 4
-
-/* MC_CMD_PREPARE_SIGNED_OUT msgresponse */
-#define MC_CMD_PREPARE_SIGNED_OUT_LEN 0
-
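A sketch of the intended prepare-then-execute sequence, same assumptions as the earlier sketches: scrub at least DATALEN bytes with MC_CMD_PREPARE_SIGNED, upload the image (not shown), then pass the lengths, key sector and expected CMAC to MC_CMD_EXEC_SIGNED.

static int example_exec_signed(struct efx_nic *efx, u32 codelen, u32 datalen,
                               u32 keysector, const u8 cmac[16])
{
        MCDI_DECLARE_BUF(prep_inbuf, MC_CMD_PREPARE_SIGNED_IN_LEN);
        MCDI_DECLARE_BUF(exec_inbuf, MC_CMD_EXEC_SIGNED_IN_LEN);
        int rc;

        /* Scrub at least DATALEN bytes of the data region first */
        MCDI_SET_DWORD(prep_inbuf, PREPARE_SIGNED_IN_DATALEN, datalen);
        rc = efx_mcdi_rpc(efx, MC_CMD_PREPARE_SIGNED, prep_inbuf,
                          sizeof(prep_inbuf), NULL, 0, NULL);
        if (rc)
                return rc;

        /* ... the caller uploads the image to IMEM/DMEM here ... */

        MCDI_SET_DWORD(exec_inbuf, EXEC_SIGNED_IN_CODELEN, codelen);
        MCDI_SET_DWORD(exec_inbuf, EXEC_SIGNED_IN_DATALEN, datalen);
        MCDI_SET_DWORD(exec_inbuf, EXEC_SIGNED_IN_KEYSECTOR, keysector);
        memcpy(MCDI_PTR(exec_inbuf, EXEC_SIGNED_IN_CMAC), cmac,
               MC_CMD_EXEC_SIGNED_IN_CMAC_LEN);

        return efx_mcdi_rpc(efx, MC_CMD_EXEC_SIGNED, exec_inbuf,
                            sizeof(exec_inbuf), NULL, 0, NULL);
}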
-
/* TUNNEL_ENCAP_UDP_PORT_ENTRY structuredef */
#define TUNNEL_ENCAP_UDP_PORT_ENTRY_LEN 4
/* UDP port (the standard ports are named below but any port may be used) */
@@ -23049,110 +21545,6 @@
/***********************************/
-/* MC_CMD_RX_BALANCING
- * Configure a port upconverter to distribute the packets on both RX engines.
- * Packets are distributed based on a table with the destination vFIFO. The
- * table index is a hash of the IPv4 source and destination addresses and the
- * VLAN priority.
- */
-#define MC_CMD_RX_BALANCING 0x118
-#undef MC_CMD_0x118_PRIVILEGE_CTG
-
-#define MC_CMD_0x118_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_RX_BALANCING_IN msgrequest */
-#define MC_CMD_RX_BALANCING_IN_LEN 16
-/* The RX port whose upconverter table will be modified */
-#define MC_CMD_RX_BALANCING_IN_PORT_OFST 0
-#define MC_CMD_RX_BALANCING_IN_PORT_LEN 4
-/* The VLAN priority associated to the table index and vFIFO */
-#define MC_CMD_RX_BALANCING_IN_PRIORITY_OFST 4
-#define MC_CMD_RX_BALANCING_IN_PRIORITY_LEN 4
-/* The resulting bit of SRC^DST for indexing the table */
-#define MC_CMD_RX_BALANCING_IN_SRC_DST_OFST 8
-#define MC_CMD_RX_BALANCING_IN_SRC_DST_LEN 4
-/* The RX engine to which the vFIFO in the table entry will point */
-#define MC_CMD_RX_BALANCING_IN_ENG_OFST 12
-#define MC_CMD_RX_BALANCING_IN_ENG_LEN 4
-
-/* MC_CMD_RX_BALANCING_OUT msgresponse */
-#define MC_CMD_RX_BALANCING_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_NVRAM_PRIVATE_APPEND
- * Append a single TLV to the MC_USAGE_TLV partition. Returns MC_CMD_ERR_EEXIST
- * if the tag is already present.
- */
-#define MC_CMD_NVRAM_PRIVATE_APPEND 0x11c
-#undef MC_CMD_0x11c_PRIVILEGE_CTG
-
-#define MC_CMD_0x11c_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_NVRAM_PRIVATE_APPEND_IN msgrequest */
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENMIN 9
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENMAX 252
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENMAX_MCDI2 1020
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LEN(num) (8+1*(num))
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_NUM(len) (((len)-8)/1)
-/* The tag to be appended */
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_TAG_OFST 0
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_TAG_LEN 4
-/* The length of the data */
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENGTH_OFST 4
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENGTH_LEN 4
-/* The data to be contained in the TLV structure */
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_OFST 8
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_LEN 1
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_MINNUM 1
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_MAXNUM 244
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_MAXNUM_MCDI2 1012
-
-/* MC_CMD_NVRAM_PRIVATE_APPEND_OUT msgresponse */
-#define MC_CMD_NVRAM_PRIVATE_APPEND_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_XPM_VERIFY_CONTENTS
- * Verify that the contents of the XPM memory are correct (Medford only). This
- * is used during manufacture to check that the XPM memory has been programmed
- * correctly at ATE.
- */
-#define MC_CMD_XPM_VERIFY_CONTENTS 0x11b
-#undef MC_CMD_0x11b_PRIVILEGE_CTG
-
-#define MC_CMD_0x11b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_XPM_VERIFY_CONTENTS_IN msgrequest */
-#define MC_CMD_XPM_VERIFY_CONTENTS_IN_LEN 4
-/* Data type to be checked */
-#define MC_CMD_XPM_VERIFY_CONTENTS_IN_DATA_TYPE_OFST 0
-#define MC_CMD_XPM_VERIFY_CONTENTS_IN_DATA_TYPE_LEN 4
-
-/* MC_CMD_XPM_VERIFY_CONTENTS_OUT msgresponse */
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_LENMIN 12
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_LENMAX 252
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_LEN(num) (12+1*(num))
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_NUM(len) (((len)-12)/1)
-/* Number of sectors found (test builds only) */
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_SECTORS_OFST 0
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_SECTORS_LEN 4
-/* Number of bytes found (test builds only) */
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_BYTES_OFST 4
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_BYTES_LEN 4
-/* Length of signature */
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIG_LENGTH_OFST 8
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIG_LENGTH_LEN 4
-/* Signature */
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_OFST 12
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_LEN 1
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_MINNUM 0
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_MAXNUM 240
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_MAXNUM_MCDI2 1008
-
-
-/***********************************/
/* MC_CMD_SET_EVQ_TMR
* Update the timer load, timer reload and timer mode values for a given EVQ.
* The requested timer values (in TMR_LOAD_REQ_NS and TMR_RELOAD_REQ_NS) will
@@ -23262,576 +21654,6 @@
#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_STEP_OFST 32
#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_STEP_LEN 4
-
-/***********************************/
-/* MC_CMD_ALLOCATE_TX_VFIFO_CP
- * When we use the TX_vFIFO_ULL mode, we can allocate common pools using the
- * unused switch buffers.
- */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP 0x11d
-#undef MC_CMD_0x11d_PRIVILEGE_CTG
-
-#define MC_CMD_0x11d_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_ALLOCATE_TX_VFIFO_CP_IN msgrequest */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_LEN 20
-/* Desired instance. Must be set to a specific instance, which is a function
- * local queue index. The calling client must be the currently-assigned user of
- * this VI (see MC_CMD_SET_VI_USER).
- */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INSTANCE_OFST 0
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INSTANCE_LEN 4
-/* Will the common pool be used as TX_vFIFO_ULL (1) */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_MODE_OFST 4
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_MODE_LEN 4
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_ENABLED 0x1 /* enum */
-/* enum: Using this interface without TX_vFIFO_ULL is not supported for now */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_DISABLED 0x0
-/* Number of buffers to reserve for the common pool */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_SIZE_OFST 8
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_SIZE_LEN 4
-/* TX datapath to which the Common Pool is connected. */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INGRESS_OFST 12
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INGRESS_LEN 4
-/* enum: Extracts information from function */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_USE_FUNCTION_VALUE -0x1
-/* Network port or RX Engine to which the common pool connects. */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_EGRESS_OFST 16
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_EGRESS_LEN 4
-/* enum: Extracts information from function */
-/* MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_USE_FUNCTION_VALUE -0x1 */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT0 0x0 /* enum */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT1 0x1 /* enum */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT2 0x2 /* enum */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT3 0x3 /* enum */
-/* enum: To enable Switch loopback with Rx engine 0 */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_RX_ENGINE0 0x4
-/* enum: To enable Switch loopback with Rx engine 1 */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_RX_ENGINE1 0x5
-
-/* MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT msgresponse */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_LEN 4
-/* ID of the common pool allocated */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_CP_ID_OFST 0
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_CP_ID_LEN 4
-
-
-/***********************************/
-/* MC_CMD_ALLOCATE_TX_VFIFO_VFIFO
- * When we use the TX_vFIFO_ULL mode, we can allocate vFIFOs using the
- * previously allocated common pools.
- */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO 0x11e
-#undef MC_CMD_0x11e_PRIVILEGE_CTG
-
-#define MC_CMD_0x11e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN msgrequest */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_LEN 20
-/* Previously allocated common pool with which the new vFIFO will be
- * associated.
- */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_CP_OFST 0
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_CP_LEN 4
-/* Port or RX engine to associate the vFIFO egress */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_EGRESS_OFST 4
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_EGRESS_LEN 4
-/* enum: Extracts information from common pool */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_USE_CP_VALUE -0x1
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT0 0x0 /* enum */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT1 0x1 /* enum */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT2 0x2 /* enum */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT3 0x3 /* enum */
-/* enum: To enable Switch loopback with Rx engine 0 */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_RX_ENGINE0 0x4
-/* enum: To enable Switch loopback with Rx engine 1 */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_RX_ENGINE1 0x5
-/* Minimum number of buffers that the pool must have */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_SIZE_OFST 8
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_SIZE_LEN 4
-/* enum: Do not check the space available */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_NO_MINIMUM 0x0
-/* Will the vFIFO be used as TX_vFIFO_ULL */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_MODE_OFST 12
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_MODE_LEN 4
-/* Network priority of the vFIFO, if applicable */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PRIORITY_OFST 16
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PRIORITY_LEN 4
-/* enum: Search for the lowest unused priority */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_LOWEST_AVAILABLE -0x1
-
-/* MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT msgresponse */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_LEN 8
-/* Short vFIFO ID */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_VID_OFST 0
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_VID_LEN 4
-/* Network priority of the vFIFO */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_PRIORITY_OFST 4
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_PRIORITY_LEN 4
-
-
-/***********************************/
-/* MC_CMD_TEARDOWN_TX_VFIFO_VF
- * This interface clears the configuration of the given vFIFO and leaves it
- * ready to be re-used.
- */
-#define MC_CMD_TEARDOWN_TX_VFIFO_VF 0x11f
-#undef MC_CMD_0x11f_PRIVILEGE_CTG
-
-#define MC_CMD_0x11f_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_TEARDOWN_TX_VFIFO_VF_IN msgrequest */
-#define MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_LEN 4
-/* Short vFIFO ID */
-#define MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_VFIFO_OFST 0
-#define MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_VFIFO_LEN 4
-
-/* MC_CMD_TEARDOWN_TX_VFIFO_VF_OUT msgresponse */
-#define MC_CMD_TEARDOWN_TX_VFIFO_VF_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_DEALLOCATE_TX_VFIFO_CP
- * This interface clears the configuration of the given common pool and leaves
- * it ready to be re-used.
- */
-#define MC_CMD_DEALLOCATE_TX_VFIFO_CP 0x121
-#undef MC_CMD_0x121_PRIVILEGE_CTG
-
-#define MC_CMD_0x121_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN msgrequest */
-#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_LEN 4
-/* Common pool ID given when pool allocated */
-#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_POOL_ID_OFST 0
-#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_POOL_ID_LEN 4
-
-/* MC_CMD_DEALLOCATE_TX_VFIFO_CP_OUT msgresponse */
-#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS
- * This interface allows the host to find out how many common pool buffers are
- * not yet assigned.
- */
-#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS 0x124
-#undef MC_CMD_0x124_PRIVILEGE_CTG
-
-#define MC_CMD_0x124_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_IN msgrequest */
-#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_IN_LEN 0
-
-/* MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT msgresponse */
-#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_LEN 8
-/* Available buffers for the ENG to NET vFIFOs. */
-#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_NET_OFST 0
-#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_NET_LEN 4
-/* Available buffers for the ENG to ENG and NET to ENG vFIFOs. */
-#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_ENG_OFST 4
-#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_ENG_LEN 4
-
-
-/***********************************/
-/* MC_CMD_SUC_VERSION
- * Get the version of the SUC
- */
-#define MC_CMD_SUC_VERSION 0x134
-#undef MC_CMD_0x134_PRIVILEGE_CTG
-
-#define MC_CMD_0x134_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_SUC_VERSION_IN msgrequest */
-#define MC_CMD_SUC_VERSION_IN_LEN 0
-
-/* MC_CMD_SUC_VERSION_OUT msgresponse */
-#define MC_CMD_SUC_VERSION_OUT_LEN 24
-/* The SUC firmware version as four numbers - a.b.c.d */
-#define MC_CMD_SUC_VERSION_OUT_VERSION_OFST 0
-#define MC_CMD_SUC_VERSION_OUT_VERSION_LEN 4
-#define MC_CMD_SUC_VERSION_OUT_VERSION_NUM 4
-/* The date, in seconds since the Unix epoch, when the firmware image was
- * built.
- */
-#define MC_CMD_SUC_VERSION_OUT_BUILD_DATE_OFST 16
-#define MC_CMD_SUC_VERSION_OUT_BUILD_DATE_LEN 4
-/* The ID of the SUC chip. This is specific to the platform but typically
- * indicates family, memory sizes etc. See SF-116728-SW for further details.
- */
-#define MC_CMD_SUC_VERSION_OUT_CHIP_ID_OFST 20
-#define MC_CMD_SUC_VERSION_OUT_CHIP_ID_LEN 4
-
-/* MC_CMD_SUC_BOOT_VERSION_IN msgrequest: Get the version of the SUC boot
- * loader.
- */
-#define MC_CMD_SUC_BOOT_VERSION_IN_LEN 4
-#define MC_CMD_SUC_BOOT_VERSION_IN_MAGIC_OFST 0
-#define MC_CMD_SUC_BOOT_VERSION_IN_MAGIC_LEN 4
-/* enum: Requests the SUC boot version. */
-#define MC_CMD_SUC_VERSION_GET_BOOT_VERSION 0xb007700b
-
-/* MC_CMD_SUC_BOOT_VERSION_OUT msgresponse */
-#define MC_CMD_SUC_BOOT_VERSION_OUT_LEN 4
-/* The SUC boot version */
-#define MC_CMD_SUC_BOOT_VERSION_OUT_VERSION_OFST 0
-#define MC_CMD_SUC_BOOT_VERSION_OUT_VERSION_LEN 4
-
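A sketch of decoding the version response, same assumptions as the earlier sketches plus get_unaligned_le32(); VERSION is four consecutive 32-bit words giving a.b.c.d.

static int example_suc_version(struct efx_nic *efx)
{
        MCDI_DECLARE_BUF(outbuf, MC_CMD_SUC_VERSION_OUT_LEN);
        const u8 *ver;
        size_t outlen;
        int rc;

        rc = efx_mcdi_rpc(efx, MC_CMD_SUC_VERSION, NULL, 0,
                          outbuf, sizeof(outbuf), &outlen);
        if (rc)
                return rc;
        if (outlen < MC_CMD_SUC_VERSION_OUT_LEN)
                return -EIO;

        /* VERSION is an array of four 32-bit words giving a.b.c.d */
        ver = MCDI_PTR(outbuf, SUC_VERSION_OUT_VERSION);
        pr_info("SUC firmware %u.%u.%u.%u (chip 0x%x, built %u)\n",
                get_unaligned_le32(ver), get_unaligned_le32(ver + 4),
                get_unaligned_le32(ver + 8), get_unaligned_le32(ver + 12),
                MCDI_DWORD(outbuf, SUC_VERSION_OUT_CHIP_ID),
                MCDI_DWORD(outbuf, SUC_VERSION_OUT_BUILD_DATE));
        return 0;
}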
-
-/***********************************/
-/* MC_CMD_GET_RX_PREFIX_ID
- * This command is part of the mechanism for configuring the format of the RX
- * packet prefix. It takes as input a bitmask of the fields the host would like
- * to be in the prefix. If the hardware supports RX prefixes with that
- * combination of fields, then this command returns a list of prefix-ids,
- * opaque identifiers suitable for use in the RX_PREFIX_ID field of a
- * MC_CMD_INIT_RXQ_V5_IN message. If the combination of fields is not
- * supported, returns ENOTSUP. If the firmware can't create any new prefix-ids
- * due to resource constraints, returns ENOSPC.
- */
-#define MC_CMD_GET_RX_PREFIX_ID 0x13b
-#undef MC_CMD_0x13b_PRIVILEGE_CTG
-
-#define MC_CMD_0x13b_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_RX_PREFIX_ID_IN msgrequest */
-#define MC_CMD_GET_RX_PREFIX_ID_IN_LEN 8
-/* Field bitmask. */
-#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_LEN 8
-#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_LO_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_LO_LEN 4
-#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_LO_LBN 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_LO_WIDTH 32
-#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_HI_OFST 4
-#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_HI_LEN 4
-#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_HI_LBN 32
-#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_HI_WIDTH 32
-#define MC_CMD_GET_RX_PREFIX_ID_IN_LENGTH_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_LENGTH_LBN 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_LENGTH_WIDTH 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_VALID_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_VALID_LBN 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_VALID_WIDTH 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_USER_FLAG_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_USER_FLAG_LBN 2
-#define MC_CMD_GET_RX_PREFIX_ID_IN_USER_FLAG_WIDTH 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_CLASS_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_CLASS_LBN 3
-#define MC_CMD_GET_RX_PREFIX_ID_IN_CLASS_WIDTH 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_PARTIAL_TSTAMP_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_PARTIAL_TSTAMP_LBN 4
-#define MC_CMD_GET_RX_PREFIX_ID_IN_PARTIAL_TSTAMP_WIDTH 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_LBN 5
-#define MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_WIDTH 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_USER_MARK_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_USER_MARK_LBN 6
-#define MC_CMD_GET_RX_PREFIX_ID_IN_USER_MARK_WIDTH 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_MPORT_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_MPORT_LBN 7
-#define MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_MPORT_WIDTH 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_VPORT_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_VPORT_LBN 7
-#define MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_VPORT_WIDTH 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_CSUM_FRAME_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_CSUM_FRAME_LBN 8
-#define MC_CMD_GET_RX_PREFIX_ID_IN_CSUM_FRAME_WIDTH 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIP_TCI_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIP_TCI_LBN 9
-#define MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIP_TCI_WIDTH 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIPPED_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIPPED_LBN 10
-#define MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIPPED_WIDTH 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_VSWITCH_STATUS_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_VSWITCH_STATUS_LBN 11
-#define MC_CMD_GET_RX_PREFIX_ID_IN_VSWITCH_STATUS_WIDTH 1
-
-/* MC_CMD_GET_RX_PREFIX_ID_OUT msgresponse */
-#define MC_CMD_GET_RX_PREFIX_ID_OUT_LENMIN 8
-#define MC_CMD_GET_RX_PREFIX_ID_OUT_LENMAX 252
-#define MC_CMD_GET_RX_PREFIX_ID_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_GET_RX_PREFIX_ID_OUT_LEN(num) (4+4*(num))
-#define MC_CMD_GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID_NUM(len) (((len)-4)/4)
-/* Number of prefix-ids returned */
-#define MC_CMD_GET_RX_PREFIX_ID_OUT_NUM_RX_PREFIX_IDS_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_OUT_NUM_RX_PREFIX_IDS_LEN 4
-/* Opaque prefix identifiers which can be passed into MC_CMD_INIT_RXQ_V5 or
- * MC_CMD_QUERY_PREFIX_ID
- */
-#define MC_CMD_GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID_OFST 4
-#define MC_CMD_GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID_LEN 4
-#define MC_CMD_GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID_MINNUM 1
-#define MC_CMD_GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID_MAXNUM 62
-#define MC_CMD_GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID_MAXNUM_MCDI2 254
-
-/* RX_PREFIX_FIELD_INFO structuredef: Information about a single RX prefix
- * field
- */
-#define RX_PREFIX_FIELD_INFO_LEN 4
-/* The offset of the field from the start of the prefix, in bits */
-#define RX_PREFIX_FIELD_INFO_OFFSET_BITS_OFST 0
-#define RX_PREFIX_FIELD_INFO_OFFSET_BITS_LEN 2
-#define RX_PREFIX_FIELD_INFO_OFFSET_BITS_LBN 0
-#define RX_PREFIX_FIELD_INFO_OFFSET_BITS_WIDTH 16
-/* The width of the field, in bits */
-#define RX_PREFIX_FIELD_INFO_WIDTH_BITS_OFST 2
-#define RX_PREFIX_FIELD_INFO_WIDTH_BITS_LEN 1
-#define RX_PREFIX_FIELD_INFO_WIDTH_BITS_LBN 16
-#define RX_PREFIX_FIELD_INFO_WIDTH_BITS_WIDTH 8
-/* The type of the field. These enum values are in the same order as the fields
- * in the MC_CMD_GET_RX_PREFIX_ID_IN bitmask
- */
-#define RX_PREFIX_FIELD_INFO_TYPE_OFST 3
-#define RX_PREFIX_FIELD_INFO_TYPE_LEN 1
-#define RX_PREFIX_FIELD_INFO_LENGTH 0x0 /* enum */
-#define RX_PREFIX_FIELD_INFO_RSS_HASH_VALID 0x1 /* enum */
-#define RX_PREFIX_FIELD_INFO_USER_FLAG 0x2 /* enum */
-#define RX_PREFIX_FIELD_INFO_CLASS 0x3 /* enum */
-#define RX_PREFIX_FIELD_INFO_PARTIAL_TSTAMP 0x4 /* enum */
-#define RX_PREFIX_FIELD_INFO_RSS_HASH 0x5 /* enum */
-#define RX_PREFIX_FIELD_INFO_USER_MARK 0x6 /* enum */
-#define RX_PREFIX_FIELD_INFO_INGRESS_MPORT 0x7 /* enum */
-#define RX_PREFIX_FIELD_INFO_INGRESS_VPORT 0x7 /* enum */
-#define RX_PREFIX_FIELD_INFO_CSUM_FRAME 0x8 /* enum */
-#define RX_PREFIX_FIELD_INFO_VLAN_STRIP_TCI 0x9 /* enum */
-#define RX_PREFIX_FIELD_INFO_VLAN_STRIPPED 0xa /* enum */
-#define RX_PREFIX_FIELD_INFO_VSWITCH_STATUS 0xb /* enum */
-#define RX_PREFIX_FIELD_INFO_TYPE_LBN 24
-#define RX_PREFIX_FIELD_INFO_TYPE_WIDTH 8
-
-/* RX_PREFIX_FIXED_RESPONSE structuredef: Information about an RX prefix in
- * which every field has a fixed offset and width
- */
-#define RX_PREFIX_FIXED_RESPONSE_LENMIN 4
-#define RX_PREFIX_FIXED_RESPONSE_LENMAX 252
-#define RX_PREFIX_FIXED_RESPONSE_LENMAX_MCDI2 1020
-#define RX_PREFIX_FIXED_RESPONSE_LEN(num) (4+4*(num))
-#define RX_PREFIX_FIXED_RESPONSE_FIELDS_NUM(len) (((len)-4)/4)
-/* Length of the RX prefix in bytes */
-#define RX_PREFIX_FIXED_RESPONSE_PREFIX_LENGTH_BYTES_OFST 0
-#define RX_PREFIX_FIXED_RESPONSE_PREFIX_LENGTH_BYTES_LEN 1
-#define RX_PREFIX_FIXED_RESPONSE_PREFIX_LENGTH_BYTES_LBN 0
-#define RX_PREFIX_FIXED_RESPONSE_PREFIX_LENGTH_BYTES_WIDTH 8
-/* Number of fields present in the prefix */
-#define RX_PREFIX_FIXED_RESPONSE_FIELD_COUNT_OFST 1
-#define RX_PREFIX_FIXED_RESPONSE_FIELD_COUNT_LEN 1
-#define RX_PREFIX_FIXED_RESPONSE_FIELD_COUNT_LBN 8
-#define RX_PREFIX_FIXED_RESPONSE_FIELD_COUNT_WIDTH 8
-#define RX_PREFIX_FIXED_RESPONSE_RESERVED_OFST 2
-#define RX_PREFIX_FIXED_RESPONSE_RESERVED_LEN 2
-#define RX_PREFIX_FIXED_RESPONSE_RESERVED_LBN 16
-#define RX_PREFIX_FIXED_RESPONSE_RESERVED_WIDTH 16
-/* Array of RX_PREFIX_FIELD_INFO structures, of length FIELD_COUNT */
-#define RX_PREFIX_FIXED_RESPONSE_FIELDS_OFST 4
-#define RX_PREFIX_FIXED_RESPONSE_FIELDS_LEN 4
-#define RX_PREFIX_FIXED_RESPONSE_FIELDS_MINNUM 0
-#define RX_PREFIX_FIXED_RESPONSE_FIELDS_MAXNUM 62
-#define RX_PREFIX_FIXED_RESPONSE_FIELDS_MAXNUM_MCDI2 254
-#define RX_PREFIX_FIXED_RESPONSE_FIELDS_LBN 32
-#define RX_PREFIX_FIXED_RESPONSE_FIELDS_WIDTH 32
-
-
-/***********************************/
-/* MC_CMD_QUERY_RX_PREFIX_ID
- * This command takes an RX prefix id (obtained from MC_CMD_GET_RX_PREFIX_ID)
- * and returns a description of the RX prefix of packets delivered to an RXQ
- * created with that prefix id
- */
-#define MC_CMD_QUERY_RX_PREFIX_ID 0x13c
-#undef MC_CMD_0x13c_PRIVILEGE_CTG
-
-#define MC_CMD_0x13c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_QUERY_RX_PREFIX_ID_IN msgrequest */
-#define MC_CMD_QUERY_RX_PREFIX_ID_IN_LEN 4
-/* Prefix id to query */
-#define MC_CMD_QUERY_RX_PREFIX_ID_IN_RX_PREFIX_ID_OFST 0
-#define MC_CMD_QUERY_RX_PREFIX_ID_IN_RX_PREFIX_ID_LEN 4
-
-/* MC_CMD_QUERY_RX_PREFIX_ID_OUT msgresponse */
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_LENMIN 4
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_LENMAX 252
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_LEN(num) (4+1*(num))
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_NUM(len) (((len)-4)/1)
-/* An enum describing the structure of this response. */
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_TYPE_OFST 0
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_TYPE_LEN 1
-/* enum: The response is of format RX_PREFIX_FIXED_RESPONSE */
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_TYPE_FIXED 0x0
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESERVED_OFST 1
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESERVED_LEN 3
-/* The response. Its format is as defined by the RESPONSE_TYPE value */
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_OFST 4
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_LEN 1
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_MINNUM 0
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_MAXNUM 248
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_MAXNUM_MCDI2 1016
-
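A sketch of the first half of the mechanism described above, same assumptions as the earlier sketches: request prefix-ids for a LENGTH + RSS_HASH prefix and keep the first one returned. The id could then be passed to MC_CMD_INIT_RXQ_V5 or described field-by-field with MC_CMD_QUERY_RX_PREFIX_ID.

static int example_get_rx_prefix_id(struct efx_nic *efx, u32 *prefix_id)
{
        MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_RX_PREFIX_ID_IN_LEN);
        MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_RX_PREFIX_ID_OUT_LENMAX);
        size_t outlen;
        int rc;

        /* Ask for a prefix containing LENGTH (bit 0) and RSS_HASH (bit 5);
         * the 64-bit FIELDS mask is written as its LO/HI dwords.
         */
        MCDI_SET_DWORD(inbuf, GET_RX_PREFIX_ID_IN_FIELDS_LO,
                       BIT(MC_CMD_GET_RX_PREFIX_ID_IN_LENGTH_LBN) |
                       BIT(MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_LBN));
        MCDI_SET_DWORD(inbuf, GET_RX_PREFIX_ID_IN_FIELDS_HI, 0);

        rc = efx_mcdi_rpc(efx, MC_CMD_GET_RX_PREFIX_ID, inbuf, sizeof(inbuf),
                          outbuf, sizeof(outbuf), &outlen);
        if (rc)
                return rc;      /* unsupported combination or no free ids */
        if (outlen < MC_CMD_GET_RX_PREFIX_ID_OUT_LENMIN)
                return -EIO;
        if (MCDI_DWORD(outbuf, GET_RX_PREFIX_ID_OUT_NUM_RX_PREFIX_IDS) < 1)
                return -ENOENT;

        /* Take the first id returned */
        *prefix_id = MCDI_DWORD(outbuf, GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID);
        return 0;
}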
-
-/***********************************/
-/* MC_CMD_BUNDLE
- * A command to perform various bundle-related operations on insecure cards.
- */
-#define MC_CMD_BUNDLE 0x13d
-#undef MC_CMD_0x13d_PRIVILEGE_CTG
-
-#define MC_CMD_0x13d_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_BUNDLE_IN msgrequest */
-#define MC_CMD_BUNDLE_IN_LEN 4
-/* Sub-command code */
-#define MC_CMD_BUNDLE_IN_OP_OFST 0
-#define MC_CMD_BUNDLE_IN_OP_LEN 4
-/* enum: Get the current host access mode set on component partitions. */
-#define MC_CMD_BUNDLE_IN_OP_COMPONENT_ACCESS_GET 0x0
-/* enum: Set the host access mode set on component partitions. */
-#define MC_CMD_BUNDLE_IN_OP_COMPONENT_ACCESS_SET 0x1
-
-/* MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_IN msgrequest: Retrieve the current
- * access mode on component partitions such as MC_FIRMWARE, SUC_FIRMWARE and
- * EXPANSION_UEFI. This command only works on engineering (insecure) cards. On
- * secure adapters, this command returns MC_CMD_ERR_EPERM.
- */
-#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_IN_LEN 4
-/* Sub-command code. Must be OP_COMPONENT_ACCESS_GET. */
-#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_IN_OP_OFST 0
-#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_IN_OP_LEN 4
-
-/* MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_OUT msgresponse: Returns the access
- * control mode.
- */
-#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_OUT_LEN 4
-/* Access mode of component partitions. */
-#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_OUT_ACCESS_MODE_OFST 0
-#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_OUT_ACCESS_MODE_LEN 4
-/* enum: Component partitions are read-only from the host. */
-#define MC_CMD_BUNDLE_COMPONENTS_READ_ONLY 0x0
-/* enum: Component partitions can be read from and written to by the host. */
-#define MC_CMD_BUNDLE_COMPONENTS_READ_WRITE 0x1
-
-/* MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_IN msgrequest: The component
- * partitions such as MC_FIRMWARE, SUC_FIRMWARE, EXPANSION_UEFI are set as
- * read-only on firmware built with bundle support. This command marks these
- * partitions as read/writeable. The access status set by this command does not
- * persist across MC reboots. This command only works on engineering (insecure)
- * cards. On secure adapters, this command returns MC_CMD_ERR_EPERM.
- */
-#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_IN_LEN 8
-/* Sub-command code. Must be OP_COMPONENT_ACCESS_SET. */
-#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_IN_OP_OFST 0
-#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_IN_OP_LEN 4
-/* Access mode of component partitions. */
-#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_IN_ACCESS_MODE_OFST 4
-#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_IN_ACCESS_MODE_LEN 4
-/* Enum values, see field(s): */
-/* MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_OUT/ACCESS_MODE */
-
-/* MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_OUT msgresponse */
-#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_GET_VPD
- * Read all VPD starting from a given address
- */
-#define MC_CMD_GET_VPD 0x165
-#undef MC_CMD_0x165_PRIVILEGE_CTG
-
-#define MC_CMD_0x165_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_VPD_IN msgrequest */
-#define MC_CMD_GET_VPD_IN_LEN 4
-/* VPD address to start from. In case VPD is longer than MCDI buffer
- * (unlikely), user can make multiple calls with different starting addresses.
- */
-#define MC_CMD_GET_VPD_IN_ADDR_OFST 0
-#define MC_CMD_GET_VPD_IN_ADDR_LEN 4
-
-/* MC_CMD_GET_VPD_OUT msgresponse */
-#define MC_CMD_GET_VPD_OUT_LENMIN 0
-#define MC_CMD_GET_VPD_OUT_LENMAX 252
-#define MC_CMD_GET_VPD_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_GET_VPD_OUT_LEN(num) (0+1*(num))
-#define MC_CMD_GET_VPD_OUT_DATA_NUM(len) (((len)-0)/1)
-/* VPD data returned. */
-#define MC_CMD_GET_VPD_OUT_DATA_OFST 0
-#define MC_CMD_GET_VPD_OUT_DATA_LEN 1
-#define MC_CMD_GET_VPD_OUT_DATA_MINNUM 0
-#define MC_CMD_GET_VPD_OUT_DATA_MAXNUM 252
-#define MC_CMD_GET_VPD_OUT_DATA_MAXNUM_MCDI2 1020
-
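A sketch of the chunked read the comment above describes, same assumptions as the earlier sketches: restart from the next address whenever a full MCDI buffer comes back (MCDIv1 sizing assumed for simplicity).

static int example_read_all_vpd(struct efx_nic *efx, u8 *buf, size_t bufsize,
                                size_t *vpd_len)
{
        MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_VPD_IN_LEN);
        MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_VPD_OUT_LENMAX);
        size_t outlen, copied = 0;
        int rc;

        /* Each call returns up to LENMAX bytes; continue from the next
         * address until the device returns a short (or empty) chunk.
         */
        do {
                MCDI_SET_DWORD(inbuf, GET_VPD_IN_ADDR, copied);
                rc = efx_mcdi_rpc(efx, MC_CMD_GET_VPD, inbuf, sizeof(inbuf),
                                  outbuf, sizeof(outbuf), &outlen);
                if (rc)
                        return rc;
                if (outlen > bufsize - copied)
                        return -ENOSPC;
                memcpy(buf + copied, MCDI_PTR(outbuf, GET_VPD_OUT_DATA),
                       outlen);
                copied += outlen;
        } while (outlen == MC_CMD_GET_VPD_OUT_LENMAX);

        *vpd_len = copied;
        return 0;
}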
-
-/***********************************/
-/* MC_CMD_GET_NCSI_INFO
- * Provide information about the NC-SI stack
- */
-#define MC_CMD_GET_NCSI_INFO 0x167
-#undef MC_CMD_0x167_PRIVILEGE_CTG
-
-#define MC_CMD_0x167_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_NCSI_INFO_IN msgrequest */
-#define MC_CMD_GET_NCSI_INFO_IN_LEN 8
-/* Operation to be performed */
-#define MC_CMD_GET_NCSI_INFO_IN_OP_OFST 0
-#define MC_CMD_GET_NCSI_INFO_IN_OP_LEN 4
-/* enum: Information on the link settings. */
-#define MC_CMD_GET_NCSI_INFO_IN_OP_LINK 0x0
-/* enum: Statistics associated with the channel */
-#define MC_CMD_GET_NCSI_INFO_IN_OP_STATISTICS 0x1
-/* The NC-SI channel on which the operation is to be performed */
-#define MC_CMD_GET_NCSI_INFO_IN_CHANNEL_OFST 4
-#define MC_CMD_GET_NCSI_INFO_IN_CHANNEL_LEN 4
-
-/* MC_CMD_GET_NCSI_INFO_LINK_OUT msgresponse */
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_LEN 12
-/* Settings as received from BMC. */
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_SETTINGS_OFST 0
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_SETTINGS_LEN 4
-/* Advertised capabilities applied to channel. */
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ADV_CAP_OFST 4
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ADV_CAP_LEN 4
-/* General status */
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_STATUS_OFST 8
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_STATUS_LEN 4
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_STATE_OFST 8
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_STATE_LBN 0
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_STATE_WIDTH 2
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ENABLE_OFST 8
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ENABLE_LBN 2
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ENABLE_WIDTH 1
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_NETWORK_TX_OFST 8
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_NETWORK_TX_LBN 3
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_NETWORK_TX_WIDTH 1
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ATTACHED_OFST 8
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ATTACHED_LBN 4
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ATTACHED_WIDTH 1
-
-/* MC_CMD_GET_NCSI_INFO_STATISTICS_OUT msgresponse */
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_LEN 28
-/* The number of NC-SI commands received. */
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_CMDS_RX_OFST 0
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_CMDS_RX_LEN 4
-/* The number of NC-SI commands dropped. */
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_PKTS_DROPPED_OFST 4
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_PKTS_DROPPED_LEN 4
-/* The number of invalid NC-SI commands received. */
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_CMD_TYPE_ERRS_OFST 8
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_CMD_TYPE_ERRS_LEN 4
-/* The number of checksum errors seen. */
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_CMD_CSUM_ERRS_OFST 12
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_CMD_CSUM_ERRS_LEN 4
-/* The number of NC-SI requests received. */
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_RX_PKTS_OFST 16
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_RX_PKTS_LEN 4
-/* The number of NC-SI responses sent (includes AENs) */
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_TX_PKTS_OFST 20
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_TX_PKTS_LEN 4
-/* The number of NC-SI AENs sent */
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_AENS_SENT_OFST 24
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_AENS_SENT_LEN 4
-
/* CLIENT_HANDLE structuredef: A client is an abstract entity that can make
* requests of the device and that can own resources managed by the device.
* Examples of clients include PCIe functions and dynamic clients. A client
@@ -23848,59 +21670,10 @@
#define CLIENT_HANDLE_OPAQUE_LBN 0
#define CLIENT_HANDLE_OPAQUE_WIDTH 32
-/* CLOCK_INFO structuredef: Information about a single hardware clock */
-#define CLOCK_INFO_LEN 28
-/* Enumeration that uniquely identifies the clock */
-#define CLOCK_INFO_CLOCK_ID_OFST 0
-#define CLOCK_INFO_CLOCK_ID_LEN 2
-/* enum: The Riverhead CMC (card MC) */
-#define CLOCK_INFO_CLOCK_CMC 0x0
-/* enum: The Riverhead NMC (network MC) */
-#define CLOCK_INFO_CLOCK_NMC 0x1
-/* enum: The Riverhead SDNET slice main logic */
-#define CLOCK_INFO_CLOCK_SDNET 0x2
-/* enum: The Riverhead SDNET LUT */
-#define CLOCK_INFO_CLOCK_SDNET_LUT 0x3
-/* enum: The Riverhead SDNET control logic */
-#define CLOCK_INFO_CLOCK_SDNET_CTRL 0x4
-/* enum: The Riverhead Streaming SubSystem */
-#define CLOCK_INFO_CLOCK_SSS 0x5
-/* enum: The Riverhead network MAC and associated CSR registers */
-#define CLOCK_INFO_CLOCK_MAC 0x6
-#define CLOCK_INFO_CLOCK_ID_LBN 0
-#define CLOCK_INFO_CLOCK_ID_WIDTH 16
-/* Assorted flags */
-#define CLOCK_INFO_FLAGS_OFST 2
-#define CLOCK_INFO_FLAGS_LEN 2
-#define CLOCK_INFO_SETTABLE_OFST 2
-#define CLOCK_INFO_SETTABLE_LBN 0
-#define CLOCK_INFO_SETTABLE_WIDTH 1
-#define CLOCK_INFO_FLAGS_LBN 16
-#define CLOCK_INFO_FLAGS_WIDTH 16
-/* The frequency in HZ */
-#define CLOCK_INFO_FREQUENCY_OFST 4
-#define CLOCK_INFO_FREQUENCY_LEN 8
-#define CLOCK_INFO_FREQUENCY_LO_OFST 4
-#define CLOCK_INFO_FREQUENCY_LO_LEN 4
-#define CLOCK_INFO_FREQUENCY_LO_LBN 32
-#define CLOCK_INFO_FREQUENCY_LO_WIDTH 32
-#define CLOCK_INFO_FREQUENCY_HI_OFST 8
-#define CLOCK_INFO_FREQUENCY_HI_LEN 4
-#define CLOCK_INFO_FREQUENCY_HI_LBN 64
-#define CLOCK_INFO_FREQUENCY_HI_WIDTH 32
-#define CLOCK_INFO_FREQUENCY_LBN 32
-#define CLOCK_INFO_FREQUENCY_WIDTH 64
-/* Human-readable ASCII name for clock, with NUL termination */
-#define CLOCK_INFO_NAME_OFST 12
-#define CLOCK_INFO_NAME_LEN 1
-#define CLOCK_INFO_NAME_NUM 16
-#define CLOCK_INFO_NAME_LBN 96
-#define CLOCK_INFO_NAME_WIDTH 8
-
/* SCHED_CREDIT_CHECK_RESULT structuredef */
#define SCHED_CREDIT_CHECK_RESULT_LEN 16
-/* The instance of the scheduler. Refer to XN-200389-AW for the location of
- * these schedulers in the hardware.
+/* The instance of the scheduler. Refer to XN-200389-AW (snic/hnic) and
+ * XN-200425-TC (cdx) for the location of these schedulers in the hardware.
*/
#define SCHED_CREDIT_CHECK_RESULT_SCHED_INSTANCE_OFST 0
#define SCHED_CREDIT_CHECK_RESULT_SCHED_INSTANCE_LEN 1
@@ -23914,6 +21687,16 @@
#define SCHED_CREDIT_CHECK_RESULT_DMAC_H2C 0x7 /* enum */
#define SCHED_CREDIT_CHECK_RESULT_HUB_NET_B 0x8 /* enum */
#define SCHED_CREDIT_CHECK_RESULT_HUB_NET_REPLAY 0x9 /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_ADAPTER_C2H_C 0xa /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_A2_H2C_C 0xb /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_A3_SOFT_ADAPTOR_C 0xc /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_A4_DPU_WRITE_C 0xd /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_JRC_RRU 0xe /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_CDM_SINK 0xf /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_PCIE_SINK 0x10 /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_UPORT_SINK 0x11 /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_PSX_SINK 0x12 /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_A5_DPU_READ_C 0x13 /* enum */
#define SCHED_CREDIT_CHECK_RESULT_SCHED_INSTANCE_LBN 0
#define SCHED_CREDIT_CHECK_RESULT_SCHED_INSTANCE_WIDTH 8
/* The type of node that this result refers to. */
@@ -23923,6 +21706,10 @@
#define SCHED_CREDIT_CHECK_RESULT_DEST 0x0
/* enum: Source node */
#define SCHED_CREDIT_CHECK_RESULT_SOURCE 0x1
+/* enum: Destination node credit type 1 (new to the Keystone schedulers, see
+ * SF-120268-TC)
+ */
+#define SCHED_CREDIT_CHECK_RESULT_DEST_CREDIT1 0x2
#define SCHED_CREDIT_CHECK_RESULT_NODE_TYPE_LBN 8
#define SCHED_CREDIT_CHECK_RESULT_NODE_TYPE_WIDTH 8
/* Level of node in scheduler hierarchy (level 0 is the bottom of the
@@ -23950,592 +21737,6 @@
/***********************************/
-/* MC_CMD_GET_CLOCKS_INFO
- * Get information about the device clocks
- */
-#define MC_CMD_GET_CLOCKS_INFO 0x166
-#undef MC_CMD_0x166_PRIVILEGE_CTG
-
-#define MC_CMD_0x166_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_CLOCKS_INFO_IN msgrequest */
-#define MC_CMD_GET_CLOCKS_INFO_IN_LEN 0
-
-/* MC_CMD_GET_CLOCKS_INFO_OUT msgresponse */
-#define MC_CMD_GET_CLOCKS_INFO_OUT_LENMIN 0
-#define MC_CMD_GET_CLOCKS_INFO_OUT_LENMAX 252
-#define MC_CMD_GET_CLOCKS_INFO_OUT_LENMAX_MCDI2 1008
-#define MC_CMD_GET_CLOCKS_INFO_OUT_LEN(num) (0+28*(num))
-#define MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_NUM(len) (((len)-0)/28)
-/* An array of CLOCK_INFO structures. */
-#define MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_OFST 0
-#define MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_LEN 28
-#define MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_MINNUM 0
-#define MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_MAXNUM 9
-#define MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_MAXNUM_MCDI2 36
-
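A sketch of walking the CLOCK_INFO array, same assumptions as the earlier sketches plus get_unaligned_le16()/get_unaligned_le64(); each entry is CLOCK_INFO_LEN bytes, with the 64-bit frequency carried as LO/HI dwords and a NUL-terminated name at offset 12.

static int example_dump_clocks(struct efx_nic *efx)
{
        MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CLOCKS_INFO_OUT_LENMAX);
        unsigned int i, nclocks;
        size_t outlen;
        int rc;

        rc = efx_mcdi_rpc(efx, MC_CMD_GET_CLOCKS_INFO, NULL, 0,
                          outbuf, sizeof(outbuf), &outlen);
        if (rc)
                return rc;

        nclocks = MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_NUM(outlen);
        for (i = 0; i < nclocks; i++) {
                const u8 *info = MCDI_PTR(outbuf, GET_CLOCKS_INFO_OUT_INFOS) +
                                 i * CLOCK_INFO_LEN;

                pr_info("clock id %u (%.16s): %llu Hz\n",
                        get_unaligned_le16(info + CLOCK_INFO_CLOCK_ID_OFST),
                        (const char *)(info + CLOCK_INFO_NAME_OFST),
                        (unsigned long long)
                        get_unaligned_le64(info + CLOCK_INFO_FREQUENCY_OFST));
        }
        return 0;
}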
-
-/***********************************/
-/* MC_CMD_VNIC_ENCAP_RULE_ADD
- * Add a rule for detecting encapsulations in the VNIC stage. Currently this
- * only affects checksum validation in VNIC RX - on TX the send descriptor
- * explicitly specifies encapsulation. These rules are per-VNIC, i.e. only
- * apply to the current driver. If a rule matches, then the packet is
- * considered to have the corresponding encapsulation type, and the inner
- * packet is parsed. It is up to the driver to ensure that overlapping rules
- * are not inserted. (If a packet would match multiple rules, a random one of
- * them will be used.) A rule with the exact same match criteria may not be
- * inserted twice (EALREADY). Only a limited number of MATCH_FLAGS values are
- * supported, use MC_CMD_GET_PARSER_DISP_INFO with OP
- * OP_GET_SUPPORTED_VNIC_ENCAP_RULE_MATCHES to get a list of supported
- * combinations. Each driver may only have a limited set of active rules -
- * returns ENOSPC if the caller's table is full.
- */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD 0x16d
-#undef MC_CMD_0x16d_PRIVILEGE_CTG
-
-#define MC_CMD_0x16d_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_VNIC_ENCAP_RULE_ADD_IN msgrequest */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_LEN 36
-/* Set to MAE_MPORT_SELECTOR_ASSIGNED. In the future this may be relaxed. */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MPORT_SELECTOR_OFST 0
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MPORT_SELECTOR_LEN 4
-/* Any non-zero bits other than the ones named below or an unsupported
- * combination will cause the NIC to return EOPNOTSUPP. In the future more
- * flags may be added.
- */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_FLAGS_OFST 4
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_FLAGS_LEN 4
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_ETHER_TYPE_OFST 4
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_ETHER_TYPE_LBN 0
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_ETHER_TYPE_WIDTH 1
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_OUTER_VLAN_OFST 4
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_OUTER_VLAN_LBN 1
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_OUTER_VLAN_WIDTH 1
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_IP_OFST 4
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_IP_LBN 2
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_IP_WIDTH 1
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_IP_PROTO_OFST 4
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_IP_PROTO_LBN 3
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_IP_PROTO_WIDTH 1
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_PORT_OFST 4
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_PORT_LBN 4
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_PORT_WIDTH 1
-/* Only if MATCH_ETHER_TYPE is set. Ethertype value as bytes in network order.
- * Currently only IPv4 (0x0800) and IPv6 (0x86DD) ethertypes may be used.
- */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ETHER_TYPE_OFST 8
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ETHER_TYPE_LEN 2
-/* Only if MATCH_OUTER_VLAN is set. VID value as bytes in network order.
- * (Deprecated)
- */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_LBN 80
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_WIDTH 12
-/* Only if MATCH_OUTER_VLAN is set. Aligned wrapper for OUTER_VLAN_VID. */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_WORD_OFST 10
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_WORD_LEN 2
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_VID_OFST 10
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_VID_LBN 0
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_VID_WIDTH 12
-/* Only if MATCH_DST_IP is set. IP address as bytes in network order. In the
- * case of IPv4, the IP should be in the first 4 bytes and all other bytes
- * should be zero.
- */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_IP_OFST 12
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_IP_LEN 16
-/* Only if MATCH_IP_PROTO is set. Currently only UDP proto (17) may be used. */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_IP_PROTO_OFST 28
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_IP_PROTO_LEN 1
-/* Actions that should be applied to packets that match the rule. */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ACTION_FLAGS_OFST 29
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ACTION_FLAGS_LEN 1
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STRIP_OUTER_VLAN_OFST 29
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STRIP_OUTER_VLAN_LBN 0
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STRIP_OUTER_VLAN_WIDTH 1
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_RSS_ON_OUTER_OFST 29
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_RSS_ON_OUTER_LBN 1
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_RSS_ON_OUTER_WIDTH 1
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STEER_ON_OUTER_OFST 29
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STEER_ON_OUTER_LBN 2
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STEER_ON_OUTER_WIDTH 1
-/* Only if MATCH_DST_PORT is set. Port number as bytes in network order. */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_PORT_OFST 30
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_PORT_LEN 2
-/* Resulting encapsulation type, as per MAE_MCDI_ENCAP_TYPE enumeration. */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ENCAP_TYPE_OFST 32
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ENCAP_TYPE_LEN 4
-
-/* MC_CMD_VNIC_ENCAP_RULE_ADD_OUT msgresponse */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_OUT_LEN 4
-/* Handle to inserted rule. Used for removing the rule. */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_OUT_HANDLE_OFST 0
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_OUT_HANDLE_LEN 4
-
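/* A minimal, hypothetical sketch of how a caller might pack an
 * MC_CMD_VNIC_ENCAP_RULE_ADD request from the constants above, matching IP
 * protocol UDP plus a destination port. The byte-buffer packing, the
 * mcdi_put_le32() helper and the example_* function name are illustrative
 * assumptions only; a real driver would go through its own MCDI request
 * machinery.
 */
#include <stdint.h>
#include <string.h>

/* Little-endian store helper assumed by these sketches; MCDI fields are
 * little-endian dwords on the wire.
 */
static inline void mcdi_put_le32(uint8_t *p, uint32_t v)
{
	p[0] = v & 0xff;
	p[1] = (v >> 8) & 0xff;
	p[2] = (v >> 16) & 0xff;
	p[3] = (v >> 24) & 0xff;
}

static void example_fill_vnic_encap_rule_add(uint8_t *buf, uint32_t mport_selector,
					     uint16_t dst_port, uint32_t encap_type)
{
	uint32_t match_flags = 0;

	memset(buf, 0, MC_CMD_VNIC_ENCAP_RULE_ADD_IN_LEN);
	mcdi_put_le32(buf + MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MPORT_SELECTOR_OFST,
		      mport_selector);
	/* Match on IP protocol and destination port only. */
	match_flags |= 1u << MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_IP_PROTO_LBN;
	match_flags |= 1u << MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_PORT_LBN;
	mcdi_put_le32(buf + MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_FLAGS_OFST,
		      match_flags);
	buf[MC_CMD_VNIC_ENCAP_RULE_ADD_IN_IP_PROTO_OFST] = 17;	/* UDP */
	/* DST_PORT is carried as bytes in network order. */
	buf[MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_PORT_OFST] = dst_port >> 8;
	buf[MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_PORT_OFST + 1] = dst_port & 0xff;
	mcdi_put_le32(buf + MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ENCAP_TYPE_OFST,
		      encap_type);
}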
-
-/***********************************/
-/* MC_CMD_VNIC_ENCAP_RULE_REMOVE
- * Remove a VNIC encapsulation rule. Packets which would have previously
- * matched the rule will then be considered as unencapsulated. Returns EALREADY
- * if the input HANDLE doesn't correspond to an existing rule.
- */
-#define MC_CMD_VNIC_ENCAP_RULE_REMOVE 0x16e
-#undef MC_CMD_0x16e_PRIVILEGE_CTG
-
-#define MC_CMD_0x16e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_VNIC_ENCAP_RULE_REMOVE_IN msgrequest */
-#define MC_CMD_VNIC_ENCAP_RULE_REMOVE_IN_LEN 4
-/* Handle which was returned by MC_CMD_VNIC_ENCAP_RULE_ADD. */
-#define MC_CMD_VNIC_ENCAP_RULE_REMOVE_IN_HANDLE_OFST 0
-#define MC_CMD_VNIC_ENCAP_RULE_REMOVE_IN_HANDLE_LEN 4
-
-/* MC_CMD_VNIC_ENCAP_RULE_REMOVE_OUT msgresponse */
-#define MC_CMD_VNIC_ENCAP_RULE_REMOVE_OUT_LEN 0
-
-/* UUID structuredef: An RFC4122 standard UUID. The values here are stored in
- * the endianness specified by the RFC; users should ignore the broken-out
- * fields and instead do straight memory copies to ensure correct ordering.
- */
-#define UUID_LEN 16
-#define UUID_TIME_LOW_OFST 0
-#define UUID_TIME_LOW_LEN 4
-#define UUID_TIME_LOW_LBN 0
-#define UUID_TIME_LOW_WIDTH 32
-#define UUID_TIME_MID_OFST 4
-#define UUID_TIME_MID_LEN 2
-#define UUID_TIME_MID_LBN 32
-#define UUID_TIME_MID_WIDTH 16
-#define UUID_TIME_HI_LBN 52
-#define UUID_TIME_HI_WIDTH 12
-#define UUID_VERSION_LBN 48
-#define UUID_VERSION_WIDTH 4
-#define UUID_RESERVED_LBN 64
-#define UUID_RESERVED_WIDTH 2
-#define UUID_CLK_SEQ_LBN 66
-#define UUID_CLK_SEQ_WIDTH 14
-#define UUID_NODE_OFST 10
-#define UUID_NODE_LEN 6
-#define UUID_NODE_LBN 80
-#define UUID_NODE_WIDTH 48
-
-
-/***********************************/
-/* MC_CMD_PLUGIN_ALLOC
- * Create a handle to a datapath plugin's extension. This involves finding a
- * currently-loaded plugin offering the given functionality (as identified by
- * the UUID) and allocating a handle to track the usage of it. Plugin
- * functionality is identified by 'extension' rather than any other identifier
- * so that a single plugin bitfile may offer more than one piece of independent
- * functionality. If two bitfiles are loaded which both offer the same
- * extension, then the metadata is interrogated further to determine which is
- * the newest and that is the one opened. See SF-123625-SW for architectural
- * detail on datapath plugins.
- */
-#define MC_CMD_PLUGIN_ALLOC 0x1ad
-#undef MC_CMD_0x1ad_PRIVILEGE_CTG
-
-#define MC_CMD_0x1ad_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_PLUGIN_ALLOC_IN msgrequest */
-#define MC_CMD_PLUGIN_ALLOC_IN_LEN 24
-/* The functionality requested of the plugin, as a UUID structure */
-#define MC_CMD_PLUGIN_ALLOC_IN_UUID_OFST 0
-#define MC_CMD_PLUGIN_ALLOC_IN_UUID_LEN 16
-/* Additional options for opening the handle */
-#define MC_CMD_PLUGIN_ALLOC_IN_FLAGS_OFST 16
-#define MC_CMD_PLUGIN_ALLOC_IN_FLAGS_LEN 4
-#define MC_CMD_PLUGIN_ALLOC_IN_FLAG_INFO_ONLY_OFST 16
-#define MC_CMD_PLUGIN_ALLOC_IN_FLAG_INFO_ONLY_LBN 0
-#define MC_CMD_PLUGIN_ALLOC_IN_FLAG_INFO_ONLY_WIDTH 1
-#define MC_CMD_PLUGIN_ALLOC_IN_FLAG_ALLOW_DISABLED_OFST 16
-#define MC_CMD_PLUGIN_ALLOC_IN_FLAG_ALLOW_DISABLED_LBN 1
-#define MC_CMD_PLUGIN_ALLOC_IN_FLAG_ALLOW_DISABLED_WIDTH 1
-/* Load the extension only if it is in the specified administrative group.
- * Specify ANY to load the extension wherever it is found (if there are
- * multiple choices then the extension with the highest MINOR_VER/PATCH_VER
- * will be loaded). See MC_CMD_PLUGIN_GET_META_GLOBAL for a description of
- * administrative groups.
- */
-#define MC_CMD_PLUGIN_ALLOC_IN_ADMIN_GROUP_OFST 20
-#define MC_CMD_PLUGIN_ALLOC_IN_ADMIN_GROUP_LEN 2
-/* enum: Load the extension from any ADMIN_GROUP. */
-#define MC_CMD_PLUGIN_ALLOC_IN_ANY 0xffff
-/* Reserved */
-#define MC_CMD_PLUGIN_ALLOC_IN_RESERVED_OFST 22
-#define MC_CMD_PLUGIN_ALLOC_IN_RESERVED_LEN 2
-
-/* MC_CMD_PLUGIN_ALLOC_OUT msgresponse */
-#define MC_CMD_PLUGIN_ALLOC_OUT_LEN 4
-/* Unique identifier of this usage */
-#define MC_CMD_PLUGIN_ALLOC_OUT_HANDLE_OFST 0
-#define MC_CMD_PLUGIN_ALLOC_OUT_HANDLE_LEN 4
-
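/* A minimal sketch of packing MC_CMD_PLUGIN_ALLOC_IN, assuming the same
 * plain byte buffer as the earlier sketch. The example_* name is a
 * hypothetical illustration, not part of any driver.
 */
static void example_fill_plugin_alloc(uint8_t *buf, const uint8_t uuid[16])
{
	memset(buf, 0, MC_CMD_PLUGIN_ALLOC_IN_LEN);
	/* Per the UUID structuredef note above, copy the UUID verbatim rather
	 * than assembling it from the broken-out fields.
	 */
	memcpy(buf + MC_CMD_PLUGIN_ALLOC_IN_UUID_OFST, uuid,
	       MC_CMD_PLUGIN_ALLOC_IN_UUID_LEN);
	/* FLAGS left at zero; accept the extension from any administrative
	 * group (ADMIN_GROUP = ANY = 0xffff, stored little-endian).
	 */
	buf[MC_CMD_PLUGIN_ALLOC_IN_ADMIN_GROUP_OFST] = MC_CMD_PLUGIN_ALLOC_IN_ANY & 0xff;
	buf[MC_CMD_PLUGIN_ALLOC_IN_ADMIN_GROUP_OFST + 1] = MC_CMD_PLUGIN_ALLOC_IN_ANY >> 8;
}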
-
-/***********************************/
-/* MC_CMD_PLUGIN_FREE
- * Delete a handle to a plugin's extension.
- */
-#define MC_CMD_PLUGIN_FREE 0x1ae
-#undef MC_CMD_0x1ae_PRIVILEGE_CTG
-
-#define MC_CMD_0x1ae_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_PLUGIN_FREE_IN msgrequest */
-#define MC_CMD_PLUGIN_FREE_IN_LEN 4
-/* Handle returned by MC_CMD_PLUGIN_ALLOC_OUT */
-#define MC_CMD_PLUGIN_FREE_IN_HANDLE_OFST 0
-#define MC_CMD_PLUGIN_FREE_IN_HANDLE_LEN 4
-
-/* MC_CMD_PLUGIN_FREE_OUT msgresponse */
-#define MC_CMD_PLUGIN_FREE_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_PLUGIN_GET_META_GLOBAL
- * Returns the global metadata applying to the whole plugin extension. See the
- * other metadata calls for subtypes of data.
- */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL 0x1af
-#undef MC_CMD_0x1af_PRIVILEGE_CTG
-
-#define MC_CMD_0x1af_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_PLUGIN_GET_META_GLOBAL_IN msgrequest */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_IN_LEN 4
-/* Handle returned by MC_CMD_PLUGIN_ALLOC_OUT */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_IN_HANDLE_OFST 0
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_IN_HANDLE_LEN 4
-
-/* MC_CMD_PLUGIN_GET_META_GLOBAL_OUT msgresponse */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_LEN 36
-/* Unique identifier of this plugin extension. This is identical to the value
- * which was requested when the handle was allocated.
- */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_UUID_OFST 0
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_UUID_LEN 16
-/* semver sub-version of this plugin extension */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MINOR_VER_OFST 16
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MINOR_VER_LEN 2
-/* semver micro-version of this plugin extension */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_PATCH_VER_OFST 18
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_PATCH_VER_LEN 2
-/* Number of different messages which can be sent to this extension */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_NUM_MSGS_OFST 20
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_NUM_MSGS_LEN 4
-/* Byte offset within the VI window of the plugin's mapped CSR window. */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_OFFSET_OFST 24
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_OFFSET_LEN 2
-/* Number of bytes mapped through to the plugin's CSRs. 0 if that feature was
- * not requested by the plugin (in which case MAPPED_CSR_OFFSET and
- * MAPPED_CSR_FLAGS are ignored).
- */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_SIZE_OFST 26
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_SIZE_LEN 2
-/* Flags indicating how to perform the CSR window mapping. */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAGS_OFST 28
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAGS_LEN 4
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAG_READ_OFST 28
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAG_READ_LBN 0
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAG_READ_WIDTH 1
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAG_WRITE_OFST 28
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAG_WRITE_LBN 1
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAG_WRITE_WIDTH 1
-/* Identifier of the set of extensions which all change state together.
- * Extensions having the same ADMIN_GROUP will always load and unload at the
- * same time. ADMIN_GROUP values themselves are arbitrary (but they contain a
- * generation number as an implementation detail to ensure that they're not
- * reused rapidly).
- */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_ADMIN_GROUP_OFST 32
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_ADMIN_GROUP_LEN 1
-/* Bitshift in MC_CMD_DEVEL_CLIENT_PRIVILEGE_MODIFY's MASK parameters
- * corresponding to this extension, i.e. set the bit 1<<PRIVILEGE_BIT to permit
- * access to this extension.
- */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_PRIVILEGE_BIT_OFST 33
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_PRIVILEGE_BIT_LEN 1
-/* Reserved */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_RESERVED_OFST 34
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_RESERVED_LEN 2
-
-
-/***********************************/
-/* MC_CMD_PLUGIN_GET_META_PUBLISHER
- * Returns metadata supplied by the plugin author which describes this
- * extension in a human-readable way. Contrast with
- * MC_CMD_PLUGIN_GET_META_GLOBAL, which returns information needed for software
- * to operate.
- */
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER 0x1b0
-#undef MC_CMD_0x1b0_PRIVILEGE_CTG
-
-#define MC_CMD_0x1b0_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_PLUGIN_GET_META_PUBLISHER_IN msgrequest */
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_LEN 12
-/* Handle returned by MC_CMD_PLUGIN_ALLOC_OUT */
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_HANDLE_OFST 0
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_HANDLE_LEN 4
-/* Category of data to return */
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_SUBTYPE_OFST 4
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_SUBTYPE_LEN 4
-/* enum: Top-level information about the extension. The returned data is an
- * array of key/value pairs using the keys in RFC5013 (Dublin Core) to describe
- * the extension. The data is a back-to-back list of zero-terminated strings;
- * the even-numbered fields (0,2,4,...) are keys and their following odd-
- * numbered fields are the corresponding values. Both keys and values are
- * nominally UTF-8. Per RFC5013, the same key may be repeated any number of
- * times. Note that all information (including the key/value structure itself
- * and the UTF-8 encoding) may have been provided by the plugin author, so
- * callers must be cautious about parsing it. Callers should parse only the
- * top-level structure to separate out the keys and values; the contents of the
- * values are not expected to be machine-readable.
- */
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_EXTENSION_KVS 0x0
-/* Byte position of the data to be returned within the full data block of the
- * given SUBTYPE.
- */
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_OFFSET_OFST 8
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_OFFSET_LEN 4
-
-/* MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT msgresponse */
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_LENMIN 4
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_LENMAX 252
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_LEN(num) (4+1*(num))
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_DATA_NUM(len) (((len)-4)/1)
-/* Full length of the data block of the requested SUBTYPE, in bytes. */
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_TOTAL_SIZE_OFST 0
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_TOTAL_SIZE_LEN 4
-/* The information requested by SUBTYPE. */
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_DATA_OFST 4
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_DATA_LEN 1
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_DATA_MINNUM 0
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_DATA_MAXNUM 248
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_DATA_MAXNUM_MCDI2 1016
-
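/* A minimal sketch of walking the EXTENSION_KVS key/value data described
 * above, assuming the caller has already assembled all TOTAL_SIZE bytes by
 * issuing the command repeatedly with increasing OFFSET. The example_* name
 * is a hypothetical illustration; the memchr() guard reflects the note that
 * the data is author-supplied and untrusted.
 */
#include <stdio.h>

static void example_walk_extension_kvs(const char *data, size_t total_size)
{
	size_t pos = 0;
	unsigned int field = 0;

	while (pos < total_size) {
		/* Guard against a missing terminator in untrusted data. */
		const char *end = memchr(data + pos, '\0', total_size - pos);

		if (!end)
			break;
		printf("%s: %s\n", (field++ & 1) ? "value" : "key", data + pos);
		pos = (size_t)(end - data) + 1;
	}
}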
-
-/***********************************/
-/* MC_CMD_PLUGIN_GET_META_MSG
- * Returns the simple metadata for a specific plugin request message. This
- * supplies information necessary for the host to know how to build an
- * MC_CMD_PLUGIN_REQ request.
- */
-#define MC_CMD_PLUGIN_GET_META_MSG 0x1b1
-#undef MC_CMD_0x1b1_PRIVILEGE_CTG
-
-#define MC_CMD_0x1b1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_PLUGIN_GET_META_MSG_IN msgrequest */
-#define MC_CMD_PLUGIN_GET_META_MSG_IN_LEN 8
-/* Handle returned by MC_CMD_PLUGIN_ALLOC_OUT */
-#define MC_CMD_PLUGIN_GET_META_MSG_IN_HANDLE_OFST 0
-#define MC_CMD_PLUGIN_GET_META_MSG_IN_HANDLE_LEN 4
-/* Unique message ID to obtain */
-#define MC_CMD_PLUGIN_GET_META_MSG_IN_ID_OFST 4
-#define MC_CMD_PLUGIN_GET_META_MSG_IN_ID_LEN 4
-
-/* MC_CMD_PLUGIN_GET_META_MSG_OUT msgresponse */
-#define MC_CMD_PLUGIN_GET_META_MSG_OUT_LEN 44
-/* Unique message ID. This is the same value as the input parameter; it exists
- * to allow future MCDI extensions which enumerate all messages.
- */
-#define MC_CMD_PLUGIN_GET_META_MSG_OUT_ID_OFST 0
-#define MC_CMD_PLUGIN_GET_META_MSG_OUT_ID_LEN 4
-/* Packed index number of this message, assigned by the MC to give each message
- * a unique ID in an array to allow for more efficient storage/management.
- */
-#define MC_CMD_PLUGIN_GET_META_MSG_OUT_INDEX_OFST 4
-#define MC_CMD_PLUGIN_GET_META_MSG_OUT_INDEX_LEN 4
-/* Short human-readable codename for this message. This is conventionally
- * formatted as a C identifier in the basic ASCII character set with any spare
- * bytes at the end set to 0; however, this convention is not enforced by the MC
- * so consumers must check for all potential malformations before using it for
- * a trusted purpose.
- */
-#define MC_CMD_PLUGIN_GET_META_MSG_OUT_NAME_OFST 8
-#define MC_CMD_PLUGIN_GET_META_MSG_OUT_NAME_LEN 32
-/* Number of bytes of data which must be passed from the host kernel to the MC
- * for this message's payload, and which are passed back again in the response.
- * The MC's plugin metadata loader will have validated that the number of bytes
- * specified here will fit into MC_CMD_PLUGIN_REQ_IN_DATA in a single MCDI
- * message.
- */
-#define MC_CMD_PLUGIN_GET_META_MSG_OUT_DATA_SIZE_OFST 40
-#define MC_CMD_PLUGIN_GET_META_MSG_OUT_DATA_SIZE_LEN 4
-
-/* PLUGIN_EXTENSION structuredef: Used within MC_CMD_PLUGIN_GET_ALL to describe
- * an individual extension.
- */
-#define PLUGIN_EXTENSION_LEN 20
-#define PLUGIN_EXTENSION_UUID_OFST 0
-#define PLUGIN_EXTENSION_UUID_LEN 16
-#define PLUGIN_EXTENSION_UUID_LBN 0
-#define PLUGIN_EXTENSION_UUID_WIDTH 128
-#define PLUGIN_EXTENSION_ADMIN_GROUP_OFST 16
-#define PLUGIN_EXTENSION_ADMIN_GROUP_LEN 1
-#define PLUGIN_EXTENSION_ADMIN_GROUP_LBN 128
-#define PLUGIN_EXTENSION_ADMIN_GROUP_WIDTH 8
-#define PLUGIN_EXTENSION_FLAG_ENABLED_LBN 136
-#define PLUGIN_EXTENSION_FLAG_ENABLED_WIDTH 1
-#define PLUGIN_EXTENSION_RESERVED_LBN 137
-#define PLUGIN_EXTENSION_RESERVED_WIDTH 23
-
-
-/***********************************/
-/* MC_CMD_PLUGIN_GET_ALL
- * Returns a list of all plugin extensions currently loaded and available. The
- * UUIDs returned can be passed to MC_CMD_PLUGIN_ALLOC in order to obtain more
- * detailed metadata via the MC_CMD_PLUGIN_GET_META_* family of requests. The
- * ADMIN_GROUP field indicates how extensions are grouped into units which are
- * loaded/unloaded together; extensions with the same value are in the same
- * group.
- */
-#define MC_CMD_PLUGIN_GET_ALL 0x1b2
-#undef MC_CMD_0x1b2_PRIVILEGE_CTG
-
-#define MC_CMD_0x1b2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_PLUGIN_GET_ALL_IN msgrequest */
-#define MC_CMD_PLUGIN_GET_ALL_IN_LEN 4
-/* Additional options for querying. Note that if neither FLAG_INCLUDE_ENABLED
- * nor FLAG_INCLUDE_DISABLED are specified then the result set will be empty.
- */
-#define MC_CMD_PLUGIN_GET_ALL_IN_FLAGS_OFST 0
-#define MC_CMD_PLUGIN_GET_ALL_IN_FLAGS_LEN 4
-#define MC_CMD_PLUGIN_GET_ALL_IN_FLAG_INCLUDE_ENABLED_OFST 0
-#define MC_CMD_PLUGIN_GET_ALL_IN_FLAG_INCLUDE_ENABLED_LBN 0
-#define MC_CMD_PLUGIN_GET_ALL_IN_FLAG_INCLUDE_ENABLED_WIDTH 1
-#define MC_CMD_PLUGIN_GET_ALL_IN_FLAG_INCLUDE_DISABLED_OFST 0
-#define MC_CMD_PLUGIN_GET_ALL_IN_FLAG_INCLUDE_DISABLED_LBN 1
-#define MC_CMD_PLUGIN_GET_ALL_IN_FLAG_INCLUDE_DISABLED_WIDTH 1
-
-/* MC_CMD_PLUGIN_GET_ALL_OUT msgresponse */
-#define MC_CMD_PLUGIN_GET_ALL_OUT_LENMIN 0
-#define MC_CMD_PLUGIN_GET_ALL_OUT_LENMAX 240
-#define MC_CMD_PLUGIN_GET_ALL_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_PLUGIN_GET_ALL_OUT_LEN(num) (0+20*(num))
-#define MC_CMD_PLUGIN_GET_ALL_OUT_EXTENSIONS_NUM(len) (((len)-0)/20)
-/* The list of available plugin extensions, as an array of PLUGIN_EXTENSION
- * structs.
- */
-#define MC_CMD_PLUGIN_GET_ALL_OUT_EXTENSIONS_OFST 0
-#define MC_CMD_PLUGIN_GET_ALL_OUT_EXTENSIONS_LEN 20
-#define MC_CMD_PLUGIN_GET_ALL_OUT_EXTENSIONS_MINNUM 0
-#define MC_CMD_PLUGIN_GET_ALL_OUT_EXTENSIONS_MAXNUM 12
-#define MC_CMD_PLUGIN_GET_ALL_OUT_EXTENSIONS_MAXNUM_MCDI2 51
-
-
-/***********************************/
-/* MC_CMD_PLUGIN_REQ
- * Send a command to a plugin. A plugin may define an arbitrary number of
- * 'messages' which it allows applications on the host system to send, each
- * identified by a 32-bit ID.
- */
-#define MC_CMD_PLUGIN_REQ 0x1b3
-#undef MC_CMD_0x1b3_PRIVILEGE_CTG
-
-#define MC_CMD_0x1b3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_PLUGIN_REQ_IN msgrequest */
-#define MC_CMD_PLUGIN_REQ_IN_LENMIN 8
-#define MC_CMD_PLUGIN_REQ_IN_LENMAX 252
-#define MC_CMD_PLUGIN_REQ_IN_LENMAX_MCDI2 1020
-#define MC_CMD_PLUGIN_REQ_IN_LEN(num) (8+1*(num))
-#define MC_CMD_PLUGIN_REQ_IN_DATA_NUM(len) (((len)-8)/1)
-/* Handle returned by MC_CMD_PLUGIN_ALLOC_OUT */
-#define MC_CMD_PLUGIN_REQ_IN_HANDLE_OFST 0
-#define MC_CMD_PLUGIN_REQ_IN_HANDLE_LEN 4
-/* Message ID defined by the plugin author */
-#define MC_CMD_PLUGIN_REQ_IN_ID_OFST 4
-#define MC_CMD_PLUGIN_REQ_IN_ID_LEN 4
-/* Data blob being the parameter to the message. This must be of the length
- * specified by MC_CMD_PLUGIN_GET_META_MSG_OUT_DATA_SIZE.
- */
-#define MC_CMD_PLUGIN_REQ_IN_DATA_OFST 8
-#define MC_CMD_PLUGIN_REQ_IN_DATA_LEN 1
-#define MC_CMD_PLUGIN_REQ_IN_DATA_MINNUM 0
-#define MC_CMD_PLUGIN_REQ_IN_DATA_MAXNUM 244
-#define MC_CMD_PLUGIN_REQ_IN_DATA_MAXNUM_MCDI2 1012
-
-/* MC_CMD_PLUGIN_REQ_OUT msgresponse */
-#define MC_CMD_PLUGIN_REQ_OUT_LENMIN 0
-#define MC_CMD_PLUGIN_REQ_OUT_LENMAX 252
-#define MC_CMD_PLUGIN_REQ_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_PLUGIN_REQ_OUT_LEN(num) (0+1*(num))
-#define MC_CMD_PLUGIN_REQ_OUT_DATA_NUM(len) (((len)-0)/1)
-/* The input data, as transformed and/or updated by the plugin's eBPF. Will be
- * the same size as the input DATA parameter.
- */
-#define MC_CMD_PLUGIN_REQ_OUT_DATA_OFST 0
-#define MC_CMD_PLUGIN_REQ_OUT_DATA_LEN 1
-#define MC_CMD_PLUGIN_REQ_OUT_DATA_MINNUM 0
-#define MC_CMD_PLUGIN_REQ_OUT_DATA_MAXNUM 252
-#define MC_CMD_PLUGIN_REQ_OUT_DATA_MAXNUM_MCDI2 1020
-
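/* A minimal sketch of packing MC_CMD_PLUGIN_REQ_IN, reusing the hypothetical
 * mcdi_put_le32() helper from the earlier sketch. The example_* name and the
 * calling convention are assumptions for illustration; the returned value is
 * the total request length to submit.
 */
static size_t example_fill_plugin_req(uint8_t *buf, uint32_t handle, uint32_t msg_id,
				      const uint8_t *payload, size_t data_size)
{
	mcdi_put_le32(buf + MC_CMD_PLUGIN_REQ_IN_HANDLE_OFST, handle);
	mcdi_put_le32(buf + MC_CMD_PLUGIN_REQ_IN_ID_OFST, msg_id);
	/* data_size must be exactly the DATA_SIZE advertised by
	 * MC_CMD_PLUGIN_GET_META_MSG for this message ID.
	 */
	memcpy(buf + MC_CMD_PLUGIN_REQ_IN_DATA_OFST, payload, data_size);
	return MC_CMD_PLUGIN_REQ_IN_LEN(data_size);
}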
-/* DESC_ADDR_REGION structuredef: Describes a contiguous region of DESC_ADDR
- * space that maps to a contiguous region of TRGT_ADDR space. Addresses
- * DESC_ADDR in the range [DESC_ADDR_BASE:DESC_ADDR_BASE + 1 <<
- * WINDOW_SIZE_LOG2) map to TRGT_ADDR = DESC_ADDR - DESC_ADDR_BASE +
- * TRGT_ADDR_BASE.
- */
-#define DESC_ADDR_REGION_LEN 32
-/* The start of the region in DESC_ADDR space. */
-#define DESC_ADDR_REGION_DESC_ADDR_BASE_OFST 0
-#define DESC_ADDR_REGION_DESC_ADDR_BASE_LEN 8
-#define DESC_ADDR_REGION_DESC_ADDR_BASE_LO_OFST 0
-#define DESC_ADDR_REGION_DESC_ADDR_BASE_LO_LEN 4
-#define DESC_ADDR_REGION_DESC_ADDR_BASE_LO_LBN 0
-#define DESC_ADDR_REGION_DESC_ADDR_BASE_LO_WIDTH 32
-#define DESC_ADDR_REGION_DESC_ADDR_BASE_HI_OFST 4
-#define DESC_ADDR_REGION_DESC_ADDR_BASE_HI_LEN 4
-#define DESC_ADDR_REGION_DESC_ADDR_BASE_HI_LBN 32
-#define DESC_ADDR_REGION_DESC_ADDR_BASE_HI_WIDTH 32
-#define DESC_ADDR_REGION_DESC_ADDR_BASE_LBN 0
-#define DESC_ADDR_REGION_DESC_ADDR_BASE_WIDTH 64
-/* The start of the region in TRGT_ADDR space. Drivers can set this via
- * MC_CMD_SET_DESC_ADDR_REGIONS.
- */
-#define DESC_ADDR_REGION_TRGT_ADDR_BASE_OFST 8
-#define DESC_ADDR_REGION_TRGT_ADDR_BASE_LEN 8
-#define DESC_ADDR_REGION_TRGT_ADDR_BASE_LO_OFST 8
-#define DESC_ADDR_REGION_TRGT_ADDR_BASE_LO_LEN 4
-#define DESC_ADDR_REGION_TRGT_ADDR_BASE_LO_LBN 64
-#define DESC_ADDR_REGION_TRGT_ADDR_BASE_LO_WIDTH 32
-#define DESC_ADDR_REGION_TRGT_ADDR_BASE_HI_OFST 12
-#define DESC_ADDR_REGION_TRGT_ADDR_BASE_HI_LEN 4
-#define DESC_ADDR_REGION_TRGT_ADDR_BASE_HI_LBN 96
-#define DESC_ADDR_REGION_TRGT_ADDR_BASE_HI_WIDTH 32
-#define DESC_ADDR_REGION_TRGT_ADDR_BASE_LBN 64
-#define DESC_ADDR_REGION_TRGT_ADDR_BASE_WIDTH 64
-/* The size of the region. */
-#define DESC_ADDR_REGION_WINDOW_SIZE_LOG2_OFST 16
-#define DESC_ADDR_REGION_WINDOW_SIZE_LOG2_LEN 4
-#define DESC_ADDR_REGION_WINDOW_SIZE_LOG2_LBN 128
-#define DESC_ADDR_REGION_WINDOW_SIZE_LOG2_WIDTH 32
-/* The alignment restriction on TRGT_ADDR. TRGT_ADDR values set by the driver
- * must be a multiple of 1 << TRGT_ADDR_ALIGN_LOG2.
- */
-#define DESC_ADDR_REGION_TRGT_ADDR_ALIGN_LOG2_OFST 20
-#define DESC_ADDR_REGION_TRGT_ADDR_ALIGN_LOG2_LEN 4
-#define DESC_ADDR_REGION_TRGT_ADDR_ALIGN_LOG2_LBN 160
-#define DESC_ADDR_REGION_TRGT_ADDR_ALIGN_LOG2_WIDTH 32
-#define DESC_ADDR_REGION_RSVD_OFST 24
-#define DESC_ADDR_REGION_RSVD_LEN 8
-#define DESC_ADDR_REGION_RSVD_LO_OFST 24
-#define DESC_ADDR_REGION_RSVD_LO_LEN 4
-#define DESC_ADDR_REGION_RSVD_LO_LBN 192
-#define DESC_ADDR_REGION_RSVD_LO_WIDTH 32
-#define DESC_ADDR_REGION_RSVD_HI_OFST 28
-#define DESC_ADDR_REGION_RSVD_HI_LEN 4
-#define DESC_ADDR_REGION_RSVD_HI_LBN 224
-#define DESC_ADDR_REGION_RSVD_HI_WIDTH 32
-#define DESC_ADDR_REGION_RSVD_LBN 192
-#define DESC_ADDR_REGION_RSVD_WIDTH 64
-
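/* A worked version of the DESC_ADDR_REGION mapping described above: an
 * address falls in the region when it lies in [DESC_ADDR_BASE, DESC_ADDR_BASE
 * + (1 << WINDOW_SIZE_LOG2)), and then TRGT_ADDR = DESC_ADDR - DESC_ADDR_BASE
 * + TRGT_ADDR_BASE. The example_* name is a hypothetical illustration.
 */
static int example_desc_to_trgt(uint64_t desc_addr, uint64_t desc_addr_base,
				uint64_t trgt_addr_base, uint32_t window_size_log2,
				uint64_t *trgt_addr)
{
	uint64_t window = 1ULL << window_size_log2;

	if (desc_addr < desc_addr_base || desc_addr - desc_addr_base >= window)
		return -1;	/* address is outside this region */
	*trgt_addr = desc_addr - desc_addr_base + trgt_addr_base;
	return 0;
}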
-
-/***********************************/
/* MC_CMD_GET_DESC_ADDR_INFO
* Returns a description of the mapping from DESC_ADDR to TRGT_ADDR for the calling function's address space.
*/
@@ -24836,122 +22037,6 @@
/***********************************/
-/* MC_CMD_GET_BOARD_ATTR
- * Retrieve physical build-level board attributes as configured at the
- * manufacturing stage. Fields originate from EEPROM and per-platform constants
- * in firmware. Fields are used in development to identify/differentiate
- * boards based on build levels/parameters, and also in manufacturing to cross-
- * check that "what was programmed in manufacturing" is the same as "what firmware
- * thinks has been programmed" as there are two layers to translation within
- * firmware before the attributes reach this MCDI handler. Some parameters are
- * retrieved as part of other commands and therefore not replicated here. See
- * GET_VERSION_OUT.
- */
-#define MC_CMD_GET_BOARD_ATTR 0x1c6
-#undef MC_CMD_0x1c6_PRIVILEGE_CTG
-
-#define MC_CMD_0x1c6_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_BOARD_ATTR_IN msgrequest */
-#define MC_CMD_GET_BOARD_ATTR_IN_LEN 0
-
-/* MC_CMD_GET_BOARD_ATTR_OUT msgresponse */
-#define MC_CMD_GET_BOARD_ATTR_OUT_LEN 16
-/* Defines board capabilities and validity of attributes returned in this
- * response-message.
- */
-#define MC_CMD_GET_BOARD_ATTR_OUT_FLAGS_OFST 0
-#define MC_CMD_GET_BOARD_ATTR_OUT_FLAGS_LEN 4
-#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_FAN_OFST 0
-#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_FAN_LBN 0
-#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_FAN_WIDTH 1
-#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_SOC_OFST 0
-#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_SOC_LBN 1
-#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_SOC_WIDTH 1
-#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_AUX_POWER_OFST 0
-#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_AUX_POWER_LBN 2
-#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_AUX_POWER_WIDTH 1
-#define MC_CMD_GET_BOARD_ATTR_OUT_ATTRIBUTES_OFST 4
-#define MC_CMD_GET_BOARD_ATTR_OUT_ATTRIBUTES_LEN 4
-#define MC_CMD_GET_BOARD_ATTR_OUT_SOC_EE_OFST 4
-#define MC_CMD_GET_BOARD_ATTR_OUT_SOC_EE_LBN 0
-#define MC_CMD_GET_BOARD_ATTR_OUT_SOC_EE_WIDTH 1
-#define MC_CMD_GET_BOARD_ATTR_OUT_SUC_EE_OFST 4
-#define MC_CMD_GET_BOARD_ATTR_OUT_SUC_EE_LBN 1
-#define MC_CMD_GET_BOARD_ATTR_OUT_SUC_EE_WIDTH 1
-#define MC_CMD_GET_BOARD_ATTR_OUT_FPGA_VOLTAGES_SUPPORTED_OFST 4
-#define MC_CMD_GET_BOARD_ATTR_OUT_FPGA_VOLTAGES_SUPPORTED_LBN 16
-#define MC_CMD_GET_BOARD_ATTR_OUT_FPGA_VOLTAGES_SUPPORTED_WIDTH 8
-/* enum: The FPGA voltage on the adapter can be set to low */
-#define MC_CMD_FPGA_VOLTAGE_LOW 0x0
-/* enum: The FPGA voltage on the adapter can be set to regular */
-#define MC_CMD_FPGA_VOLTAGE_REG 0x1
-/* enum: The FPGA voltage on the adapter can be set to high */
-#define MC_CMD_FPGA_VOLTAGE_HIGH 0x2
-#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_COUNT_OFST 4
-#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_COUNT_LBN 24
-#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_COUNT_WIDTH 8
-/* An array of cage types on the board */
-#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_TYPE_OFST 8
-#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_TYPE_LEN 1
-#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_TYPE_NUM 8
-/* enum: The cages are not known */
-#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_TYPE_UNKNOWN 0x0
-/* enum: The cages are SFP/SFP+ */
-#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_TYPE_SFP 0x1
-/* enum: The cages are QSFP/QSFP+ */
-#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_TYPE_QSFP 0x2
-
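/* A minimal sketch of decoding the cage information from an
 * MC_CMD_GET_BOARD_ATTR response buffer. The example_* name is a hypothetical
 * illustration; it relies only on the offsets above and on MCDI dwords being
 * little-endian (so the 8-bit CAGE_COUNT at LBN 24 is the fourth byte of the
 * ATTRIBUTES dword).
 */
static void example_print_cages(const uint8_t *resp)
{
	unsigned int count = resp[MC_CMD_GET_BOARD_ATTR_OUT_ATTRIBUTES_OFST +
				  MC_CMD_GET_BOARD_ATTR_OUT_CAGE_COUNT_LBN / 8];
	unsigned int i;

	if (count > MC_CMD_GET_BOARD_ATTR_OUT_CAGE_TYPE_NUM)
		count = MC_CMD_GET_BOARD_ATTR_OUT_CAGE_TYPE_NUM;
	for (i = 0; i < count; i++) {
		uint8_t type = resp[MC_CMD_GET_BOARD_ATTR_OUT_CAGE_TYPE_OFST + i];

		printf("cage %u: %s\n", i,
		       type == MC_CMD_GET_BOARD_ATTR_OUT_CAGE_TYPE_SFP ? "SFP/SFP+" :
		       type == MC_CMD_GET_BOARD_ATTR_OUT_CAGE_TYPE_QSFP ? "QSFP/QSFP+" :
		       "unknown");
	}
}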
-
-/***********************************/
-/* MC_CMD_GET_SOC_STATE
- * Retrieve current state of the System-on-Chip. This command is valid when
- * MC_CMD_GET_BOARD_ATTR:HAS_SOC is set.
- */
-#define MC_CMD_GET_SOC_STATE 0x1c7
-#undef MC_CMD_0x1c7_PRIVILEGE_CTG
-
-#define MC_CMD_0x1c7_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_SOC_STATE_IN msgrequest */
-#define MC_CMD_GET_SOC_STATE_IN_LEN 0
-
-/* MC_CMD_GET_SOC_STATE_OUT msgresponse */
-#define MC_CMD_GET_SOC_STATE_OUT_LEN 12
-/* Status flags for the SoC */
-#define MC_CMD_GET_SOC_STATE_OUT_FLAGS_OFST 0
-#define MC_CMD_GET_SOC_STATE_OUT_FLAGS_LEN 4
-#define MC_CMD_GET_SOC_STATE_OUT_SHOULD_THROTTLE_OFST 0
-#define MC_CMD_GET_SOC_STATE_OUT_SHOULD_THROTTLE_LBN 0
-#define MC_CMD_GET_SOC_STATE_OUT_SHOULD_THROTTLE_WIDTH 1
-#define MC_CMD_GET_SOC_STATE_OUT_OS_RECOVERY_REQUIRED_OFST 0
-#define MC_CMD_GET_SOC_STATE_OUT_OS_RECOVERY_REQUIRED_LBN 1
-#define MC_CMD_GET_SOC_STATE_OUT_OS_RECOVERY_REQUIRED_WIDTH 1
-#define MC_CMD_GET_SOC_STATE_OUT_WDT_FIRED_OFST 0
-#define MC_CMD_GET_SOC_STATE_OUT_WDT_FIRED_LBN 2
-#define MC_CMD_GET_SOC_STATE_OUT_WDT_FIRED_WIDTH 1
-/* Status fields for the SoC */
-#define MC_CMD_GET_SOC_STATE_OUT_ATTRIBUTES_OFST 4
-#define MC_CMD_GET_SOC_STATE_OUT_ATTRIBUTES_LEN 4
-#define MC_CMD_GET_SOC_STATE_OUT_RUN_STATE_OFST 4
-#define MC_CMD_GET_SOC_STATE_OUT_RUN_STATE_LBN 0
-#define MC_CMD_GET_SOC_STATE_OUT_RUN_STATE_WIDTH 8
-/* enum: Power on (set by SUC on power up) */
-#define MC_CMD_GET_SOC_STATE_OUT_SOC_BOOT 0x0
-/* enum: Running bootloader */
-#define MC_CMD_GET_SOC_STATE_OUT_SOC_BOOTLOADER 0x1
-/* enum: Bootloader has started OS. OS is booting */
-#define MC_CMD_GET_SOC_STATE_OUT_SOC_OS_START 0x2
-/* enum: OS is running */
-#define MC_CMD_GET_SOC_STATE_OUT_SOC_OS_RUNNING 0x3
-/* enum: Maintenance OS is running */
-#define MC_CMD_GET_SOC_STATE_OUT_SOC_OS_MAINTENANCE 0x4
-/* Number of SoC resets since power on */
-#define MC_CMD_GET_SOC_STATE_OUT_RESET_COUNT_OFST 8
-#define MC_CMD_GET_SOC_STATE_OUT_RESET_COUNT_LEN 4
-
-
-/***********************************/
/* MC_CMD_CHECK_SCHEDULER_CREDITS
* For debugging purposes. For each source and destination node in the hardware
* schedulers, check whether the number of credits is as it should be. This
@@ -25010,76 +22095,6 @@
/***********************************/
-/* MC_CMD_TXQ_STATS
- * Query per-TXQ statistics.
- */
-#define MC_CMD_TXQ_STATS 0x1d5
-#undef MC_CMD_0x1d5_PRIVILEGE_CTG
-
-#define MC_CMD_0x1d5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_TXQ_STATS_IN msgrequest */
-#define MC_CMD_TXQ_STATS_IN_LEN 8
-/* Instance of TXQ to retrieve statistics for */
-#define MC_CMD_TXQ_STATS_IN_INSTANCE_OFST 0
-#define MC_CMD_TXQ_STATS_IN_INSTANCE_LEN 4
-/* Flags for the request */
-#define MC_CMD_TXQ_STATS_IN_FLAGS_OFST 4
-#define MC_CMD_TXQ_STATS_IN_FLAGS_LEN 4
-#define MC_CMD_TXQ_STATS_IN_CLEAR_OFST 4
-#define MC_CMD_TXQ_STATS_IN_CLEAR_LBN 0
-#define MC_CMD_TXQ_STATS_IN_CLEAR_WIDTH 1
-
-/* MC_CMD_TXQ_STATS_OUT msgresponse */
-#define MC_CMD_TXQ_STATS_OUT_LENMIN 0
-#define MC_CMD_TXQ_STATS_OUT_LENMAX 248
-#define MC_CMD_TXQ_STATS_OUT_LENMAX_MCDI2 1016
-#define MC_CMD_TXQ_STATS_OUT_LEN(num) (0+8*(num))
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_NUM(len) (((len)-0)/8)
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_OFST 0
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_LEN 8
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_LO_OFST 0
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_LO_LEN 4
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_LO_LBN 0
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_LO_WIDTH 32
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_HI_OFST 4
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_HI_LEN 4
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_HI_LBN 32
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_HI_WIDTH 32
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_MINNUM 0
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_MAXNUM 31
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_MAXNUM_MCDI2 127
-#define MC_CMD_TXQ_STATS_CTPIO_MAX_FILL 0x0 /* enum */
-
-/* FUNCTION_PERSONALITY structuredef: The meanings of the personalities are
- * defined in SF-120734-TC with more information in SF-122717-TC.
- */
-#define FUNCTION_PERSONALITY_LEN 4
-#define FUNCTION_PERSONALITY_ID_OFST 0
-#define FUNCTION_PERSONALITY_ID_LEN 4
-/* enum: Function has no assigned personality */
-#define FUNCTION_PERSONALITY_NULL 0x0
-/* enum: Function has an EF100-style function control window and VI windows
- * with both EF100 and vDPA doorbells.
- */
-#define FUNCTION_PERSONALITY_EF100 0x1
-/* enum: Function has virtio net device configuration registers and doorbells
- * for virtio queue pairs.
- */
-#define FUNCTION_PERSONALITY_VIRTIO_NET 0x2
-/* enum: Function has virtio block device configuration registers and a
- * doorbell for a single virtqueue.
- */
-#define FUNCTION_PERSONALITY_VIRTIO_BLK 0x3
-/* enum: Function is a Xilinx acceleration device - management function */
-#define FUNCTION_PERSONALITY_ACCEL_MGMT 0x4
-/* enum: Function is a Xilinx acceleration device - user function */
-#define FUNCTION_PERSONALITY_ACCEL_USR 0x5
-#define FUNCTION_PERSONALITY_ID_LBN 0
-#define FUNCTION_PERSONALITY_ID_WIDTH 32
-
-
-/***********************************/
/* MC_CMD_VIRTIO_GET_FEATURES
* Get a list of the virtio features supported by the device.
*/
@@ -25162,37 +22177,6 @@
/***********************************/
-/* MC_CMD_VIRTIO_GET_CAPABILITIES
- * Get virtio capabilities supported by the device. Returns general virtio
- * capabilities and limitations of the hardware / firmware implementation
- * (hardware device as a whole), rather than those of individual configured
- * virtio devices. At present, only the absolute maximum number of queues
- * allowed on multi-queue devices is returned. Response is expected to be
- * extended as necessary in the future.
- */
-#define MC_CMD_VIRTIO_GET_CAPABILITIES 0x1d3
-#undef MC_CMD_0x1d3_PRIVILEGE_CTG
-
-#define MC_CMD_0x1d3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_VIRTIO_GET_CAPABILITIES_IN msgrequest */
-#define MC_CMD_VIRTIO_GET_CAPABILITIES_IN_LEN 4
-/* Type of device to get capabilities for. Matches the device id as defined by
- * the virtio spec.
- */
-#define MC_CMD_VIRTIO_GET_CAPABILITIES_IN_DEVICE_ID_OFST 0
-#define MC_CMD_VIRTIO_GET_CAPABILITIES_IN_DEVICE_ID_LEN 4
-/* Enum values, see field(s): */
-/* MC_CMD_VIRTIO_GET_FEATURES/MC_CMD_VIRTIO_GET_FEATURES_IN/DEVICE_ID */
-
-/* MC_CMD_VIRTIO_GET_CAPABILITIES_OUT msgresponse */
-#define MC_CMD_VIRTIO_GET_CAPABILITIES_OUT_LEN 4
-/* Maximum number of queues supported for a single device instance */
-#define MC_CMD_VIRTIO_GET_CAPABILITIES_OUT_MAX_QUEUES_OFST 0
-#define MC_CMD_VIRTIO_GET_CAPABILITIES_OUT_MAX_QUEUES_LEN 4
-
-
-/***********************************/
/* MC_CMD_VIRTIO_INIT_QUEUE
* Create a virtio virtqueue. Fails with EALREADY if the queue already exists.
* Fails with ENOSUP if a feature is requested that isn't supported. Fails with
@@ -25474,866 +22458,6 @@
#define PCIE_FUNCTION_INTF_LBN 32
#define PCIE_FUNCTION_INTF_WIDTH 32
-/* QUEUE_ID structuredef: Structure representing an absolute queue identifier
- * (absolute VI number + VI relative queue number). On Keystone, a VI can
- * contain multiple queues (at present, up to 2), each with separate controls
- * for direction. This structure is required to uniquely identify the absolute
- * source queue for descriptor proxy functions.
- */
-#define QUEUE_ID_LEN 4
-/* Absolute VI number */
-#define QUEUE_ID_ABS_VI_OFST 0
-#define QUEUE_ID_ABS_VI_LEN 2
-#define QUEUE_ID_ABS_VI_LBN 0
-#define QUEUE_ID_ABS_VI_WIDTH 16
-/* Relative queue number within the VI */
-#define QUEUE_ID_REL_QUEUE_LBN 16
-#define QUEUE_ID_REL_QUEUE_WIDTH 1
-#define QUEUE_ID_RESERVED_LBN 17
-#define QUEUE_ID_RESERVED_WIDTH 15
-
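/* A minimal sketch of composing a QUEUE_ID value from the bitfields above:
 * the absolute VI number in bits 0..15 and the VI-relative queue number in
 * bit 16. The example_* name is a hypothetical illustration.
 */
static uint32_t example_queue_id(uint16_t abs_vi, unsigned int rel_queue)
{
	return ((uint32_t)abs_vi << QUEUE_ID_ABS_VI_LBN) |
	       ((uint32_t)(rel_queue & 1) << QUEUE_ID_REL_QUEUE_LBN);
}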
-
-/***********************************/
-/* MC_CMD_DESC_PROXY_FUNC_CREATE
- * Descriptor proxy functions are abstract devices that forward all requests
- * submitted to the host PCIe function (descriptors submitted to Virtio or
- * EF100 queues) to be handled on another function (most commonly on the
- * embedded Application Processor), via EF100 descriptor proxy, memory-to-
- * memory and descriptor-to-completion mechanisms. Primary user is Virtio-blk
- * subsystem, see SF-122927-TC. This function allocates a new descriptor proxy
- * function on the host and assigns a user-defined label. The actual function
- * configuration is not persisted until the caller configures it with
- * MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN and commits with
- * MC_CMD_DESC_PROXY_FUNC_COMMIT_IN.
- */
-#define MC_CMD_DESC_PROXY_FUNC_CREATE 0x172
-#undef MC_CMD_0x172_PRIVILEGE_CTG
-
-#define MC_CMD_0x172_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DESC_PROXY_FUNC_CREATE_IN msgrequest */
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_LEN 52
-/* PCIe Function ID to allocate (as struct PCIE_FUNCTION). Set to
- * {PF_ANY,VF_ANY,interface} for "any available function". Set to
- * {PF_ANY,VF_NULL,interface} for "any available PF".
- */
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_LEN 8
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_LO_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_LO_LEN 4
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_LO_LBN 0
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_LO_WIDTH 32
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_HI_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_HI_LEN 4
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_HI_LBN 32
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_HI_WIDTH 32
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_PF_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_PF_LEN 2
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_VF_OFST 2
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_VF_LEN 2
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_INTF_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_INTF_LEN 4
-/* The personality to set. The meanings of the personalities are defined in
- * SF-120734-TC with more information in SF-122717-TC. At present, we only
- * support proxying for VIRTIO_BLK
- */
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_PERSONALITY_OFST 8
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_PERSONALITY_LEN 4
-/* Enum values, see field(s): */
-/* FUNCTION_PERSONALITY/ID */
-/* User-defined label (zero-terminated ASCII string) to uniquely identify the
- * function
- */
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_LABEL_OFST 12
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_LABEL_LEN 40
-
-/* MC_CMD_DESC_PROXY_FUNC_CREATE_OUT msgresponse */
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_LEN 12
-/* Handle to the descriptor proxy function */
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_HANDLE_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_HANDLE_LEN 4
-/* Allocated function ID (as struct PCIE_FUNCTION) */
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_LEN 8
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_LO_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_LO_LEN 4
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_LO_LBN 32
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_LO_WIDTH 32
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_HI_OFST 8
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_HI_LEN 4
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_HI_LBN 64
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_HI_WIDTH 32
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_PF_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_PF_LEN 2
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_VF_OFST 6
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_VF_LEN 2
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_INTF_OFST 8
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_INTF_LEN 4
-
-
-/***********************************/
-/* MC_CMD_DESC_PROXY_FUNC_DESTROY
- * Remove an existing descriptor proxy function. Underlying function
- * personality and configuration revert to the factory default. Function
- * configuration is committed immediately to the specified store and any function
- * ownership is released.
- */
-#define MC_CMD_DESC_PROXY_FUNC_DESTROY 0x173
-#undef MC_CMD_0x173_PRIVILEGE_CTG
-
-#define MC_CMD_0x173_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DESC_PROXY_FUNC_DESTROY_IN msgrequest */
-#define MC_CMD_DESC_PROXY_FUNC_DESTROY_IN_LEN 44
-/* User-defined label (zero-terminated ASCII string) to uniquely identify the
- * function
- */
-#define MC_CMD_DESC_PROXY_FUNC_DESTROY_IN_LABEL_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_DESTROY_IN_LABEL_LEN 40
-/* Store from which to remove function configuration */
-#define MC_CMD_DESC_PROXY_FUNC_DESTROY_IN_STORE_OFST 40
-#define MC_CMD_DESC_PROXY_FUNC_DESTROY_IN_STORE_LEN 4
-/* Enum values, see field(s): */
-/* MC_CMD_DESC_PROXY_FUNC_COMMIT/MC_CMD_DESC_PROXY_FUNC_COMMIT_IN/STORE */
-
-/* MC_CMD_DESC_PROXY_FUNC_DESTROY_OUT msgresponse */
-#define MC_CMD_DESC_PROXY_FUNC_DESTROY_OUT_LEN 0
-
-/* VIRTIO_BLK_CONFIG structuredef: Virtio block device configuration. See
- * Virtio specification v1.1, Sections 5.2.3 and 6 for definition of feature
- * bits. See Virtio specification v1.1, Section 5.2.4 (struct
- * virtio_blk_config) for definition of remaining configuration fields
- */
-#define VIRTIO_BLK_CONFIG_LEN 68
-/* Virtio block device features to advertise, per Virtio 1.1, 5.2.3 and 6 */
-#define VIRTIO_BLK_CONFIG_FEATURES_OFST 0
-#define VIRTIO_BLK_CONFIG_FEATURES_LEN 8
-#define VIRTIO_BLK_CONFIG_FEATURES_LO_OFST 0
-#define VIRTIO_BLK_CONFIG_FEATURES_LO_LEN 4
-#define VIRTIO_BLK_CONFIG_FEATURES_LO_LBN 0
-#define VIRTIO_BLK_CONFIG_FEATURES_LO_WIDTH 32
-#define VIRTIO_BLK_CONFIG_FEATURES_HI_OFST 4
-#define VIRTIO_BLK_CONFIG_FEATURES_HI_LEN 4
-#define VIRTIO_BLK_CONFIG_FEATURES_HI_LBN 32
-#define VIRTIO_BLK_CONFIG_FEATURES_HI_WIDTH 32
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_BARRIER_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_BARRIER_LBN 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_BARRIER_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SIZE_MAX_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SIZE_MAX_LBN 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SIZE_MAX_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SEG_MAX_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SEG_MAX_LBN 2
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SEG_MAX_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_GEOMETRY_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_GEOMETRY_LBN 4
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_GEOMETRY_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_RO_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_RO_LBN 5
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_RO_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_BLK_SIZE_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_BLK_SIZE_LBN 6
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_BLK_SIZE_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SCSI_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SCSI_LBN 7
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SCSI_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_FLUSH_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_FLUSH_LBN 9
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_FLUSH_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_TOPOLOGY_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_TOPOLOGY_LBN 10
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_TOPOLOGY_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_CONFIG_WCE_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_CONFIG_WCE_LBN 11
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_CONFIG_WCE_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_MQ_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_MQ_LBN 12
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_MQ_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_DISCARD_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_DISCARD_LBN 13
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_DISCARD_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_WRITE_ZEROES_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_WRITE_ZEROES_LBN 14
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_WRITE_ZEROES_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_RING_INDIRECT_DESC_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_RING_INDIRECT_DESC_LBN 28
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_RING_INDIRECT_DESC_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_RING_EVENT_IDX_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_RING_EVENT_IDX_LBN 29
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_RING_EVENT_IDX_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_VERSION_1_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_VERSION_1_LBN 32
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_VERSION_1_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_ACCESS_PLATFORM_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_ACCESS_PLATFORM_LBN 33
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_ACCESS_PLATFORM_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_RING_PACKED_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_RING_PACKED_LBN 34
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_RING_PACKED_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_IN_ORDER_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_IN_ORDER_LBN 35
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_IN_ORDER_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_ORDER_PLATFORM_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_ORDER_PLATFORM_LBN 36
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_ORDER_PLATFORM_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_SR_IOV_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_SR_IOV_LBN 37
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_SR_IOV_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_NOTIFICATION_DATA_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_NOTIFICATION_DATA_LBN 38
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_NOTIFICATION_DATA_WIDTH 1
-#define VIRTIO_BLK_CONFIG_FEATURES_LBN 0
-#define VIRTIO_BLK_CONFIG_FEATURES_WIDTH 64
-/* The capacity of the device (expressed in 512-byte sectors) */
-#define VIRTIO_BLK_CONFIG_CAPACITY_OFST 8
-#define VIRTIO_BLK_CONFIG_CAPACITY_LEN 8
-#define VIRTIO_BLK_CONFIG_CAPACITY_LO_OFST 8
-#define VIRTIO_BLK_CONFIG_CAPACITY_LO_LEN 4
-#define VIRTIO_BLK_CONFIG_CAPACITY_LO_LBN 64
-#define VIRTIO_BLK_CONFIG_CAPACITY_LO_WIDTH 32
-#define VIRTIO_BLK_CONFIG_CAPACITY_HI_OFST 12
-#define VIRTIO_BLK_CONFIG_CAPACITY_HI_LEN 4
-#define VIRTIO_BLK_CONFIG_CAPACITY_HI_LBN 96
-#define VIRTIO_BLK_CONFIG_CAPACITY_HI_WIDTH 32
-#define VIRTIO_BLK_CONFIG_CAPACITY_LBN 64
-#define VIRTIO_BLK_CONFIG_CAPACITY_WIDTH 64
-/* Maximum size of any single segment. Only valid when VIRTIO_BLK_F_SIZE_MAX is
- * set.
- */
-#define VIRTIO_BLK_CONFIG_SIZE_MAX_OFST 16
-#define VIRTIO_BLK_CONFIG_SIZE_MAX_LEN 4
-#define VIRTIO_BLK_CONFIG_SIZE_MAX_LBN 128
-#define VIRTIO_BLK_CONFIG_SIZE_MAX_WIDTH 32
-/* Maximum number of segments in a request. Only valid when
- * VIRTIO_BLK_F_SEG_MAX is set.
- */
-#define VIRTIO_BLK_CONFIG_SEG_MAX_OFST 20
-#define VIRTIO_BLK_CONFIG_SEG_MAX_LEN 4
-#define VIRTIO_BLK_CONFIG_SEG_MAX_LBN 160
-#define VIRTIO_BLK_CONFIG_SEG_MAX_WIDTH 32
-/* Disk-style geometry - cylinders. Only valid when VIRTIO_BLK_F_GEOMETRY is
- * set.
- */
-#define VIRTIO_BLK_CONFIG_CYLINDERS_OFST 24
-#define VIRTIO_BLK_CONFIG_CYLINDERS_LEN 2
-#define VIRTIO_BLK_CONFIG_CYLINDERS_LBN 192
-#define VIRTIO_BLK_CONFIG_CYLINDERS_WIDTH 16
-/* Disk-style geometry - heads. Only valid when VIRTIO_BLK_F_GEOMETRY is set.
- */
-#define VIRTIO_BLK_CONFIG_HEADS_OFST 26
-#define VIRTIO_BLK_CONFIG_HEADS_LEN 1
-#define VIRTIO_BLK_CONFIG_HEADS_LBN 208
-#define VIRTIO_BLK_CONFIG_HEADS_WIDTH 8
-/* Disk-style geometry - sectors. Only valid when VIRTIO_BLK_F_GEOMETRY is set.
- */
-#define VIRTIO_BLK_CONFIG_SECTORS_OFST 27
-#define VIRTIO_BLK_CONFIG_SECTORS_LEN 1
-#define VIRTIO_BLK_CONFIG_SECTORS_LBN 216
-#define VIRTIO_BLK_CONFIG_SECTORS_WIDTH 8
-/* Block size of disk. Only valid when VIRTIO_BLK_F_BLK_SIZE is set. */
-#define VIRTIO_BLK_CONFIG_BLK_SIZE_OFST 28
-#define VIRTIO_BLK_CONFIG_BLK_SIZE_LEN 4
-#define VIRTIO_BLK_CONFIG_BLK_SIZE_LBN 224
-#define VIRTIO_BLK_CONFIG_BLK_SIZE_WIDTH 32
-/* Block topology - number of logical blocks per physical block (log2). Only
- * valid when VIRTIO_BLK_F_TOPOLOGY is set.
- */
-#define VIRTIO_BLK_CONFIG_PHYSICAL_BLOCK_EXP_OFST 32
-#define VIRTIO_BLK_CONFIG_PHYSICAL_BLOCK_EXP_LEN 1
-#define VIRTIO_BLK_CONFIG_PHYSICAL_BLOCK_EXP_LBN 256
-#define VIRTIO_BLK_CONFIG_PHYSICAL_BLOCK_EXP_WIDTH 8
-/* Block topology - offset of first aligned logical block. Only valid when
- * VIRTIO_BLK_F_TOPOLOGY is set.
- */
-#define VIRTIO_BLK_CONFIG_ALIGNMENT_OFFSET_OFST 33
-#define VIRTIO_BLK_CONFIG_ALIGNMENT_OFFSET_LEN 1
-#define VIRTIO_BLK_CONFIG_ALIGNMENT_OFFSET_LBN 264
-#define VIRTIO_BLK_CONFIG_ALIGNMENT_OFFSET_WIDTH 8
-/* Block topology - suggested minimum I/O size in blocks. Only valid when
- * VIRTIO_BLK_F_TOPOLOGY is set.
- */
-#define VIRTIO_BLK_CONFIG_MIN_IO_SIZE_OFST 34
-#define VIRTIO_BLK_CONFIG_MIN_IO_SIZE_LEN 2
-#define VIRTIO_BLK_CONFIG_MIN_IO_SIZE_LBN 272
-#define VIRTIO_BLK_CONFIG_MIN_IO_SIZE_WIDTH 16
-/* Block topology - optimal (suggested maximum) I/O size in blocks. Only valid
- * when VIRTIO_BLK_F_TOPOLOGY is set.
- */
-#define VIRTIO_BLK_CONFIG_OPT_IO_SIZE_OFST 36
-#define VIRTIO_BLK_CONFIG_OPT_IO_SIZE_LEN 4
-#define VIRTIO_BLK_CONFIG_OPT_IO_SIZE_LBN 288
-#define VIRTIO_BLK_CONFIG_OPT_IO_SIZE_WIDTH 32
-/* Unused, set to zero. Note that virtio_blk_config.writeback is volatile and
- * not carried in config data.
- */
-#define VIRTIO_BLK_CONFIG_UNUSED0_OFST 40
-#define VIRTIO_BLK_CONFIG_UNUSED0_LEN 2
-#define VIRTIO_BLK_CONFIG_UNUSED0_LBN 320
-#define VIRTIO_BLK_CONFIG_UNUSED0_WIDTH 16
-/* Number of queues. Only valid if the VIRTIO_BLK_F_MQ feature is negotiated.
- */
-#define VIRTIO_BLK_CONFIG_NUM_QUEUES_OFST 42
-#define VIRTIO_BLK_CONFIG_NUM_QUEUES_LEN 2
-#define VIRTIO_BLK_CONFIG_NUM_QUEUES_LBN 336
-#define VIRTIO_BLK_CONFIG_NUM_QUEUES_WIDTH 16
-/* Maximum discard sectors size, in 512-byte units. Only valid if
- * VIRTIO_BLK_F_DISCARD is set.
- */
-#define VIRTIO_BLK_CONFIG_MAX_DISCARD_SECTORS_OFST 44
-#define VIRTIO_BLK_CONFIG_MAX_DISCARD_SECTORS_LEN 4
-#define VIRTIO_BLK_CONFIG_MAX_DISCARD_SECTORS_LBN 352
-#define VIRTIO_BLK_CONFIG_MAX_DISCARD_SECTORS_WIDTH 32
-/* Maximum discard segment number. Only valid if VIRTIO_BLK_F_DISCARD is set.
- */
-#define VIRTIO_BLK_CONFIG_MAX_DISCARD_SEG_OFST 48
-#define VIRTIO_BLK_CONFIG_MAX_DISCARD_SEG_LEN 4
-#define VIRTIO_BLK_CONFIG_MAX_DISCARD_SEG_LBN 384
-#define VIRTIO_BLK_CONFIG_MAX_DISCARD_SEG_WIDTH 32
-/* Discard sector alignment, in 512-byte units. Only valid if
- * VIRTIO_BLK_F_DISCARD is set.
- */
-#define VIRTIO_BLK_CONFIG_DISCARD_SECTOR_ALIGNMENT_OFST 52
-#define VIRTIO_BLK_CONFIG_DISCARD_SECTOR_ALIGNMENT_LEN 4
-#define VIRTIO_BLK_CONFIG_DISCARD_SECTOR_ALIGNMENT_LBN 416
-#define VIRTIO_BLK_CONFIG_DISCARD_SECTOR_ALIGNMENT_WIDTH 32
-/* Maximum write zeroes sectors size, in 512-byte units. Only valid if
- * VIRTIO_BLK_F_WRITE_ZEROES is set.
- */
-#define VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SECTORS_OFST 56
-#define VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SECTORS_LEN 4
-#define VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SECTORS_LBN 448
-#define VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SECTORS_WIDTH 32
-/* Maximum write zeroes segment number. Only valid if VIRTIO_BLK_F_WRITE_ZEROES
- * is set.
- */
-#define VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SEG_OFST 60
-#define VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SEG_LEN 4
-#define VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SEG_LBN 480
-#define VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SEG_WIDTH 32
-/* Write zeroes request can result in deallocating one or more sectors. Only
- * valid if VIRTIO_BLK_F_WRITE_ZEROES is set.
- */
-#define VIRTIO_BLK_CONFIG_WRITE_ZEROES_MAY_UNMAP_OFST 64
-#define VIRTIO_BLK_CONFIG_WRITE_ZEROES_MAY_UNMAP_LEN 1
-#define VIRTIO_BLK_CONFIG_WRITE_ZEROES_MAY_UNMAP_LBN 512
-#define VIRTIO_BLK_CONFIG_WRITE_ZEROES_MAY_UNMAP_WIDTH 8
-/* Unused, set to zero. */
-#define VIRTIO_BLK_CONFIG_UNUSED1_OFST 65
-#define VIRTIO_BLK_CONFIG_UNUSED1_LEN 3
-#define VIRTIO_BLK_CONFIG_UNUSED1_LBN 520
-#define VIRTIO_BLK_CONFIG_UNUSED1_WIDTH 24
-
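/* A minimal sketch of filling a VIRTIO_BLK_CONFIG blob (e.g. as the CONFIG
 * payload of MC_CMD_DESC_PROXY_FUNC_CONFIG_SET), advertising only
 * VIRTIO_F_VERSION_1 and VIRTIO_BLK_F_BLK_SIZE. It reuses the hypothetical
 * mcdi_put_le32() helper from the earlier sketch; the example_* name and the
 * feature selection are assumptions for illustration.
 */
static void example_fill_virtio_blk_config(uint8_t *cfg, uint64_t size_bytes,
					   uint32_t blk_size)
{
	uint64_t features = (1ULL << VIRTIO_BLK_CONFIG_VIRTIO_F_VERSION_1_LBN) |
			    (1ULL << VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_BLK_SIZE_LBN);
	/* CAPACITY is expressed in 512-byte sectors. */
	uint64_t capacity = size_bytes / 512;

	memset(cfg, 0, VIRTIO_BLK_CONFIG_LEN);
	mcdi_put_le32(cfg + VIRTIO_BLK_CONFIG_FEATURES_LO_OFST, (uint32_t)features);
	mcdi_put_le32(cfg + VIRTIO_BLK_CONFIG_FEATURES_HI_OFST, (uint32_t)(features >> 32));
	mcdi_put_le32(cfg + VIRTIO_BLK_CONFIG_CAPACITY_LO_OFST, (uint32_t)capacity);
	mcdi_put_le32(cfg + VIRTIO_BLK_CONFIG_CAPACITY_HI_OFST, (uint32_t)(capacity >> 32));
	mcdi_put_le32(cfg + VIRTIO_BLK_CONFIG_BLK_SIZE_OFST, blk_size);
}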
-
-/***********************************/
-/* MC_CMD_DESC_PROXY_FUNC_CONFIG_SET
- * Set configuration for an existing descriptor proxy function. Configuration
- * data must match function personality. The actual function configuration is
- * not persisted until the caller commits with MC_CMD_DESC_PROXY_FUNC_COMMIT_IN.
- */
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET 0x174
-#undef MC_CMD_0x174_PRIVILEGE_CTG
-
-#define MC_CMD_0x174_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN msgrequest */
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_LENMIN 20
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_LENMAX 252
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_LENMAX_MCDI2 1020
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_LEN(num) (20+1*(num))
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_CONFIG_NUM(len) (((len)-20)/1)
-/* Handle to descriptor proxy function (as returned by
- * MC_CMD_DESC_PROXY_FUNC_OPEN)
- */
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_HANDLE_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_HANDLE_LEN 4
-/* Reserved for future extension, set to zero. */
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_RESERVED_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_RESERVED_LEN 16
-/* Configuration data. Format of configuration data is determined implicitly
- * from function personality referred to by HANDLE. Currently, only supported
- * format is VIRTIO_BLK_CONFIG.
- */
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_CONFIG_OFST 20
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_CONFIG_LEN 1
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_CONFIG_MINNUM 0
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_CONFIG_MAXNUM 232
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_CONFIG_MAXNUM_MCDI2 1000
-
-/* MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_OUT msgresponse */
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_DESC_PROXY_FUNC_COMMIT
- * Commit function configuration to non-volatile or volatile store. Once
- * configuration is applied to hardware (which may happen immediately or on the
- * next function/device reset) a DESC_PROXY_FUNC_CONFIG_SET MCDI event will be
- * delivered to the caller's MCDI event queue.
- */
-#define MC_CMD_DESC_PROXY_FUNC_COMMIT 0x175
-#undef MC_CMD_0x175_PRIVILEGE_CTG
-
-#define MC_CMD_0x175_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DESC_PROXY_FUNC_COMMIT_IN msgrequest */
-#define MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_LEN 8
-/* Handle to descriptor proxy function (as returned by
- * MC_CMD_DESC_PROXY_FUNC_OPEN)
- */
-#define MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_HANDLE_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_HANDLE_LEN 4
-#define MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_STORE_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_STORE_LEN 4
-/* enum: Store into non-volatile (dynamic) config */
-#define MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_NON_VOLATILE 0x0
-/* enum: Store into volatile (ephemeral) config */
-#define MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_VOLATILE 0x1
-
-/* MC_CMD_DESC_PROXY_FUNC_COMMIT_OUT msgresponse */
-#define MC_CMD_DESC_PROXY_FUNC_COMMIT_OUT_LEN 4
-/* Generation count to be delivered in an event once configuration becomes live
- */
-#define MC_CMD_DESC_PROXY_FUNC_COMMIT_OUT_CONFIG_GENERATION_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_COMMIT_OUT_CONFIG_GENERATION_LEN 4
-
-
-/***********************************/
-/* MC_CMD_DESC_PROXY_FUNC_OPEN
- * Retrieve a handle for an existing descriptor proxy function. Returns an
- * integer handle, valid until the function is deallocated, the MC rebooted or a
- * power-cycle occurs. Returns ENODEV if no function with the given label exists.
- */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN 0x176
-#undef MC_CMD_0x176_PRIVILEGE_CTG
-
-#define MC_CMD_0x176_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DESC_PROXY_FUNC_OPEN_IN msgrequest */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_IN_LEN 40
-/* User-defined label (zero-terminated ASCII string) to uniquely identify the
- * function
- */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_IN_LABEL_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_IN_LABEL_LEN 40
-
-/* MC_CMD_DESC_PROXY_FUNC_OPEN_OUT msgresponse */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_LENMIN 40
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_LENMAX 252
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_LEN(num) (40+1*(num))
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_NUM(len) (((len)-40)/1)
-/* Handle to the descriptor proxy function */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_HANDLE_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_HANDLE_LEN 4
-/* PCIe Function ID (as struct PCIE_FUNCTION) */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_LEN 8
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_LO_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_LO_LEN 4
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_LO_LBN 32
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_LO_WIDTH 32
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_HI_OFST 8
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_HI_LEN 4
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_HI_LBN 64
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_HI_WIDTH 32
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_PF_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_PF_LEN 2
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_VF_OFST 6
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_VF_LEN 2
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_INTF_OFST 8
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_INTF_LEN 4
-/* Function personality */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_PERSONALITY_OFST 12
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_PERSONALITY_LEN 4
-/* Enum values, see field(s): */
-/* FUNCTION_PERSONALITY/ID */
-/* Function configuration state */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_STATUS_OFST 16
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_STATUS_LEN 4
-/* enum: Function configuration is visible to the host (live) */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_LIVE 0x0
-/* enum: Function configuration is pending reset */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_PENDING 0x1
-/* enum: Function configuration is missing (created, but no configuration
- * committed)
- */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_UNCONFIGURED 0x2
-/* Generation count to be delivered in an event once the configuration becomes
- * live (if status is "pending")
- */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_GENERATION_OFST 20
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_GENERATION_LEN 4
-/* Reserved for future extension, set to zero. */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_RESERVED_OFST 24
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_RESERVED_LEN 16
-/* Configuration data corresponding to function personality. Currently, only
- * supported format is VIRTIO_BLK_CONFIG. Not valid if status is UNCONFIGURED.
- */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_OFST 40
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_LEN 1
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_MINNUM 0
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_MAXNUM 212
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_MAXNUM_MCDI2 980
-
-
-/***********************************/
-/* MC_CMD_DESC_PROXY_FUNC_CLOSE
- * Releases a handle for an open descriptor proxy function. If proxying was
- * enabled on the device, the caller is expected to gracefully stop it using
- * MC_CMD_DESC_PROXY_FUNC_DISABLE prior to calling this function. Closing an
- * active device without disabling proxying will result in forced close, which
- * will put the device into a failed state and signal the host driver of the
- * error (for virtio, DEVICE_NEEDS_RESET flag would be set on the host side)
- */
-#define MC_CMD_DESC_PROXY_FUNC_CLOSE 0x1a1
-#undef MC_CMD_0x1a1_PRIVILEGE_CTG
-
-#define MC_CMD_0x1a1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DESC_PROXY_FUNC_CLOSE_IN msgrequest */
-#define MC_CMD_DESC_PROXY_FUNC_CLOSE_IN_LEN 4
-/* Handle to the descriptor proxy function */
-#define MC_CMD_DESC_PROXY_FUNC_CLOSE_IN_HANDLE_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_CLOSE_IN_HANDLE_LEN 4
-
-/* MC_CMD_DESC_PROXY_FUNC_CLOSE_OUT msgresponse */
-#define MC_CMD_DESC_PROXY_FUNC_CLOSE_OUT_LEN 0
-
-/* DESC_PROXY_FUNC_MAP structuredef */
-#define DESC_PROXY_FUNC_MAP_LEN 52
-/* PCIe function ID (as struct PCIE_FUNCTION) */
-#define DESC_PROXY_FUNC_MAP_FUNC_OFST 0
-#define DESC_PROXY_FUNC_MAP_FUNC_LEN 8
-#define DESC_PROXY_FUNC_MAP_FUNC_LO_OFST 0
-#define DESC_PROXY_FUNC_MAP_FUNC_LO_LEN 4
-#define DESC_PROXY_FUNC_MAP_FUNC_LO_LBN 0
-#define DESC_PROXY_FUNC_MAP_FUNC_LO_WIDTH 32
-#define DESC_PROXY_FUNC_MAP_FUNC_HI_OFST 4
-#define DESC_PROXY_FUNC_MAP_FUNC_HI_LEN 4
-#define DESC_PROXY_FUNC_MAP_FUNC_HI_LBN 32
-#define DESC_PROXY_FUNC_MAP_FUNC_HI_WIDTH 32
-#define DESC_PROXY_FUNC_MAP_FUNC_LBN 0
-#define DESC_PROXY_FUNC_MAP_FUNC_WIDTH 64
-#define DESC_PROXY_FUNC_MAP_FUNC_PF_OFST 0
-#define DESC_PROXY_FUNC_MAP_FUNC_PF_LEN 2
-#define DESC_PROXY_FUNC_MAP_FUNC_PF_LBN 0
-#define DESC_PROXY_FUNC_MAP_FUNC_PF_WIDTH 16
-#define DESC_PROXY_FUNC_MAP_FUNC_VF_OFST 2
-#define DESC_PROXY_FUNC_MAP_FUNC_VF_LEN 2
-#define DESC_PROXY_FUNC_MAP_FUNC_VF_LBN 16
-#define DESC_PROXY_FUNC_MAP_FUNC_VF_WIDTH 16
-#define DESC_PROXY_FUNC_MAP_FUNC_INTF_OFST 4
-#define DESC_PROXY_FUNC_MAP_FUNC_INTF_LEN 4
-#define DESC_PROXY_FUNC_MAP_FUNC_INTF_LBN 32
-#define DESC_PROXY_FUNC_MAP_FUNC_INTF_WIDTH 32
-/* Function personality */
-#define DESC_PROXY_FUNC_MAP_PERSONALITY_OFST 8
-#define DESC_PROXY_FUNC_MAP_PERSONALITY_LEN 4
-/* Enum values, see field(s): */
-/* FUNCTION_PERSONALITY/ID */
-#define DESC_PROXY_FUNC_MAP_PERSONALITY_LBN 64
-#define DESC_PROXY_FUNC_MAP_PERSONALITY_WIDTH 32
-/* User-defined label (zero-terminated ASCII string) to uniquely identify the
- * function
- */
-#define DESC_PROXY_FUNC_MAP_LABEL_OFST 12
-#define DESC_PROXY_FUNC_MAP_LABEL_LEN 40
-#define DESC_PROXY_FUNC_MAP_LABEL_LBN 96
-#define DESC_PROXY_FUNC_MAP_LABEL_WIDTH 320
-
-
-/***********************************/
-/* MC_CMD_DESC_PROXY_FUNC_ENUM
- * Enumerate existing descriptor proxy functions
- */
-#define MC_CMD_DESC_PROXY_FUNC_ENUM 0x177
-#undef MC_CMD_0x177_PRIVILEGE_CTG
-
-#define MC_CMD_0x177_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DESC_PROXY_FUNC_ENUM_IN msgrequest */
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_IN_LEN 4
-/* Starting index, set to 0 on first request. See
- * MC_CMD_DESC_PROXY_FUNC_ENUM_OUT/FLAGS.
- */
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_IN_START_IDX_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_IN_START_IDX_LEN 4
-
-/* MC_CMD_DESC_PROXY_FUNC_ENUM_OUT msgresponse */
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_LENMIN 4
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_LENMAX 212
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_LENMAX_MCDI2 992
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_LEN(num) (4+52*(num))
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_NUM(len) (((len)-4)/52)
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FLAGS_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FLAGS_LEN 4
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_MORE_DATA_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_MORE_DATA_LBN 0
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_MORE_DATA_WIDTH 1
-/* Function map, as array of DESC_PROXY_FUNC_MAP */
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_LEN 52
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_MINNUM 0
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_MAXNUM 4
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_MAXNUM_MCDI2 19
-
-
-/***********************************/
-/* MC_CMD_DESC_PROXY_FUNC_ENABLE
- * Enable descriptor proxying for function into target event queue. Returns VI
- * allocation info for the proxy source function, so that the caller can map
- * absolute VI IDs from descriptor proxy events back to the originating
- * function. This is a legacy function that only supports single queue proxy
- * devices. It is also limited in that it can only be called after host driver
- * attach (once VI allocation is known) and will return MC_CMD_ERR_ENOTCONN
- * otherwise. For new code, see MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE which
- * supports multi-queue devices and has no dependency on host driver attach.
- */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE 0x178
-#undef MC_CMD_0x178_PRIVILEGE_CTG
-
-#define MC_CMD_0x178_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DESC_PROXY_FUNC_ENABLE_IN msgrequest */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_IN_LEN 8
-/* Handle to descriptor proxy function (as returned by
- * MC_CMD_DESC_PROXY_FUNC_OPEN)
- */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_IN_HANDLE_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_IN_HANDLE_LEN 4
-/* Descriptor proxy sink queue (caller function relative). Must be extended
- * width event queue
- */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_IN_TARGET_EVQ_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_IN_TARGET_EVQ_LEN 4
-
-/* MC_CMD_DESC_PROXY_FUNC_ENABLE_OUT msgresponse */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_OUT_LEN 8
-/* The number of VIs allocated on the function */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_OUT_VI_COUNT_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_OUT_VI_COUNT_LEN 4
-/* The base absolute VI number allocated to the function. */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_OUT_VI_BASE_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_OUT_VI_BASE_LEN 4
-
-
-/***********************************/
-/* MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE
- * Enable descriptor proxying for a source queue on a host function into target
- * event queue. Source queue number is a relative virtqueue number on the
- * source function (0 to max_virtqueues-1). For a multi-queue device, the
- * caller must enable all source queues individually. To retrieve absolute VI
- * information for the source function (so that VI IDs from descriptor proxy
- * events can be mapped back to source function / queue) see
- * MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO
- */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE 0x1d0
-#undef MC_CMD_0x1d0_PRIVILEGE_CTG
-
-#define MC_CMD_0x1d0_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN msgrequest */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_LEN 12
-/* Handle to descriptor proxy function (as returned by
- * MC_CMD_DESC_PROXY_FUNC_OPEN)
- */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_HANDLE_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_HANDLE_LEN 4
-/* Source relative queue number to enable proxying on */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_SOURCE_QUEUE_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_SOURCE_QUEUE_LEN 4
-/* Descriptor proxy sink queue (caller function relative). Must be extended
- * width event queue
- */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_TARGET_EVQ_OFST 8
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_TARGET_EVQ_LEN 4
-
-/* MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_OUT msgresponse */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_DESC_PROXY_FUNC_DISABLE
- * Disable descriptor proxying for function. For multi-queue functions,
- * disables all queues.
- */
-#define MC_CMD_DESC_PROXY_FUNC_DISABLE 0x179
-#undef MC_CMD_0x179_PRIVILEGE_CTG
-
-#define MC_CMD_0x179_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DESC_PROXY_FUNC_DISABLE_IN msgrequest */
-#define MC_CMD_DESC_PROXY_FUNC_DISABLE_IN_LEN 4
-/* Handle to descriptor proxy function (as returned by
- * MC_CMD_DESC_PROXY_FUNC_OPEN)
- */
-#define MC_CMD_DESC_PROXY_FUNC_DISABLE_IN_HANDLE_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_DISABLE_IN_HANDLE_LEN 4
-
-/* MC_CMD_DESC_PROXY_FUNC_DISABLE_OUT msgresponse */
-#define MC_CMD_DESC_PROXY_FUNC_DISABLE_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE
- * Disable descriptor proxying for a specific source queue on a function.
- */
-#define MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE 0x1d1
-#undef MC_CMD_0x1d1_PRIVILEGE_CTG
-
-#define MC_CMD_0x1d1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_IN msgrequest */
-#define MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_IN_LEN 8
-/* Handle to descriptor proxy function (as returned by
- * MC_CMD_DESC_PROXY_FUNC_OPEN)
- */
-#define MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_IN_HANDLE_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_IN_HANDLE_LEN 4
-/* Source relative queue number to disable proxying on */
-#define MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_IN_SOURCE_QUEUE_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_IN_SOURCE_QUEUE_LEN 4
-
-/* MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_OUT msgresponse */
-#define MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_DESC_PROXY_GET_VI_INFO
- * Returns absolute VI allocation information for the descriptor proxy source
- * function referenced by HANDLE, so that the caller can map absolute VI IDs
- * from descriptor proxy events back to the originating function and queue. The
- * call is only valid after the host driver for the source function has
- * attached (after receiving a driver attach event for the descriptor proxy
- * function) and will fail with ENOTCONN otherwise.
- */
-#define MC_CMD_DESC_PROXY_GET_VI_INFO 0x1d2
-#undef MC_CMD_0x1d2_PRIVILEGE_CTG
-
-#define MC_CMD_0x1d2_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DESC_PROXY_GET_VI_INFO_IN msgrequest */
-#define MC_CMD_DESC_PROXY_GET_VI_INFO_IN_LEN 4
-/* Handle to descriptor proxy function (as returned by
- * MC_CMD_DESC_PROXY_FUNC_OPEN)
- */
-#define MC_CMD_DESC_PROXY_GET_VI_INFO_IN_HANDLE_OFST 0
-#define MC_CMD_DESC_PROXY_GET_VI_INFO_IN_HANDLE_LEN 4
-
-/* MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT msgresponse */
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_LENMIN 0
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_LENMAX 252
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_LEN(num) (0+4*(num))
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_NUM(len) (((len)-0)/4)
-/* VI information (VI ID + VI relative queue number) for each of the source
- * queues (in order from 0 to max_virtqueues-1), as array of QUEUE_ID
- * structures.
- */
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_LEN 4
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_MINNUM 0
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_MAXNUM 63
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_MAXNUM_MCDI2 255
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_ABS_VI_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_ABS_VI_LEN 2
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_REL_QUEUE_LBN 16
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_REL_QUEUE_WIDTH 1
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_RESERVED_LBN 17
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_RESERVED_WIDTH 15
-
-
-/***********************************/
-/* MC_CMD_GET_ADDR_SPC_ID
- * Get Address space identifier for use in mem2mem descriptors for a given
- * target. See SF-120734-TC for details on ADDR_SPC_IDs and mem2mem
- * descriptors.
- */
-#define MC_CMD_GET_ADDR_SPC_ID 0x1a0
-#undef MC_CMD_0x1a0_PRIVILEGE_CTG
-
-#define MC_CMD_0x1a0_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_GET_ADDR_SPC_ID_IN msgrequest */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_LEN 16
-/* Resource type to get ADDR_SPC_ID for */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_TYPE_OFST 0
-#define MC_CMD_GET_ADDR_SPC_ID_IN_TYPE_LEN 4
-/* enum: Address space ID for host/AP memory DMA over the same interface this
- * MCDI was called on
- */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_SELF 0x0
-/* enum: Address space ID for host/AP memory DMA via PCI interface and function
- * specified by FUNC
- */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_PCI_FUNC 0x1
-/* enum: Address space ID for host/AP memory DMA via PCI interface and function
- * specified by FUNC with PASID value specified by PASID
- */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_PCI_FUNC_PASID 0x2
-/* enum: Address space ID for host/AP memory DMA via PCI interface and function
- * specified by FUNC with PASID value of relative VI specified by VI
- */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_REL_VI 0x3
-/* enum: Address space ID for host/AP memory DMA via PCI interface, function
- * and PASID value of absolute VI specified by VI
- */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_ABS_VI 0x4
-/* enum: Address space ID for host memory DMA via PCI interface and function of
- * descriptor proxy function specified by HANDLE
- */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_DESC_PROXY_HANDLE 0x5
-/* enum: Address space ID for DMA to/from MC memory */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_MC_MEM 0x6
-/* enum: Address space ID for DMA to/from other SmartNIC memory (on-chip, DDR)
- */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_NIC_MEM 0x7
-/* PCIe Function ID (as struct PCIE_FUNCTION). Only valid if TYPE is PCI_FUNC,
- * PCI_FUNC_PASID or REL_VI.
- */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_OFST 4
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_LEN 8
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_LO_OFST 4
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_LO_LEN 4
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_LO_LBN 32
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_LO_WIDTH 32
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_HI_OFST 8
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_HI_LEN 4
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_HI_LBN 64
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_HI_WIDTH 32
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_PF_OFST 4
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_PF_LEN 2
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_VF_OFST 6
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_VF_LEN 2
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_INTF_OFST 8
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_INTF_LEN 4
-/* PASID value. Only valid if TYPE is PCI_FUNC_PASID. */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_PASID_OFST 12
-#define MC_CMD_GET_ADDR_SPC_ID_IN_PASID_LEN 4
-/* Relative or absolute VI number. Only valid if TYPE is REL_VI or ABS_VI */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_VI_OFST 12
-#define MC_CMD_GET_ADDR_SPC_ID_IN_VI_LEN 4
-/* Descriptor proxy function handle. Only valid if TYPE is DESC_PROXY_HANDLE.
- */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_HANDLE_OFST 4
-#define MC_CMD_GET_ADDR_SPC_ID_IN_HANDLE_LEN 4
-
-/* MC_CMD_GET_ADDR_SPC_ID_OUT msgresponse */
-#define MC_CMD_GET_ADDR_SPC_ID_OUT_LEN 8
-/* Address Space ID for the requested target. Only the lower 36 bits are valid
- * in the current SmartNIC implementation.
- */
-#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_OFST 0
-#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_LEN 8
-#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_LO_OFST 0
-#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_LO_LEN 4
-#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_LO_LBN 0
-#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_LO_WIDTH 32
-#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_HI_OFST 4
-#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_HI_LEN 4
-#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_HI_LBN 32
-#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_HI_WIDTH 32
-
/***********************************/
/* MC_CMD_GET_CLIENT_HANDLE
@@ -26359,7 +22483,8 @@
* INTF=CALLER, PF=PF_NULL, VF=... to refer to a VF child of the calling PF or
* a sibling VF of the calling VF. - INTF=CALLER, PF=..., VF=VF_NULL to refer
* to a PF on the calling interface - INTF=CALLER, PF=..., VF=... to refer to a
- * VF on the calling interface - INTF=..., PF=..., VF=VF_NULL to refer to a PF
+ * VF on the calling interface - INTF=..., PF=PF_NULL, VF=VF_NULL to refer to
+ * the named interface itself - INTF=..., PF=..., VF=VF_NULL to refer to a PF
* on a named interface - INTF=..., PF=..., VF=... to refer to a VF on a named
* interface where ... refers to a small integer for the VF/PF fields, and to
 * values from the PCIE_INTERFACE enum for the INTF field. It's only
@@ -26380,6 +22505,7 @@
* backwards compatibility only, callers should use PCIE_INTERFACE_CALLER.
*/
#define MC_CMD_GET_CLIENT_HANDLE_IN_PCIE_FUNCTION_INTF_NULL 0xffffffff
+/* See structuredef: PCIE_FUNCTION */
#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_PF_OFST 4
#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_PF_LEN 2
#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_VF_OFST 6
@@ -27350,7 +23476,7 @@
/* MAE_MPORT_SELECTOR structuredef: MPORTS are identified by an opaque unsigned
* integer value (mport_id) that is guaranteed to be representable within
 * 32-bits or within any NIC interface field that needs to store the value
- * (whichever is narrowers). This selector structure provides a stable way to
+ * (whichever is narrower). This selector structure provides a stable way to
* refer to m-ports.
*/
#define MAE_MPORT_SELECTOR_LEN 4
@@ -27425,10 +23551,22 @@
#define MAE_MPORT_SELECTOR_FLAT_WIDTH 32
/* MAE_LINK_ENDPOINT_SELECTOR structuredef: Structure that identifies a real or
- * virtual network port by MAE port and link end
+ * virtual network port by MAE port and link end. Intended to be used by
+ * network port MCDI commands. Setting FLAT to MAE_LINK_ENDPOINT_COMPAT is
+ * equivalent to using the previous version of the command. Not all possible
+ * combinations of MPORT_END and MPORT_SELECTOR in MAE_LINK_ENDPOINT_SELECTOR
+ * will work in all circumstances. 1. Some will always work (e.g. a VF can
+ * always address its logical MAC using MPORT_SELECTOR=ASSIGNED,LINK_END=VNIC),
+ * 2. Some are not meaningful and will always fail with EINVAL (e.g. attempting
+ * to address the VNIC end of a link to a physical port), 3. Some are
+ * meaningful but require the MCDI client to have the required permission and
+ * fail with EPERM otherwise (e.g. trying to set the MAC on a VF the caller
+ * cannot administer), and 4. Some could be implementation-specific and fail
+ * with ENOTSUP if not available (no examples exist right now). See
+ * SF-123581-TC section 4.3 for more details.
*/
#define MAE_LINK_ENDPOINT_SELECTOR_LEN 8
-/* The MAE MPORT of interest */
+/* Identifier for the MAE MPORT of interest */
#define MAE_LINK_ENDPOINT_SELECTOR_MPORT_SELECTOR_OFST 0
#define MAE_LINK_ENDPOINT_SELECTOR_MPORT_SELECTOR_LEN 4
#define MAE_LINK_ENDPOINT_SELECTOR_MPORT_SELECTOR_LBN 0
@@ -27829,6 +23967,8 @@
#define MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_MAXNUM_MCDI2 253
/* enum: A counter ID that is guaranteed never to represent a real counter */
#define MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_NULL 0xffffffff
+/* Other enum values, see field(s): */
+/* MAE_COUNTER_ID */
/***********************************/
@@ -28266,6 +24406,24 @@
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_SUPPRESS_SELF_DELIVERY_OFST 0
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_SUPPRESS_SELF_DELIVERY_LBN 14
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_SUPPRESS_SELF_DELIVERY_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_REPLACE_RDP_C_PL_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_REPLACE_RDP_C_PL_LBN 15
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_REPLACE_RDP_C_PL_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_REPLACE_RDP_D_PL_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_REPLACE_RDP_D_PL_LBN 16
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_REPLACE_RDP_D_PL_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_REPLACE_RDP_OUT_HOST_CHAN_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_REPLACE_RDP_OUT_HOST_CHAN_LBN 17
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_REPLACE_RDP_OUT_HOST_CHAN_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_SET_NET_CHAN_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_SET_NET_CHAN_LBN 18
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_SET_NET_CHAN_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_LACP_PLUGIN_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_LACP_PLUGIN_LBN 19
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_LACP_PLUGIN_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_LACP_INC_L4_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_LACP_INC_L4_LBN 20
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_LACP_INC_L4_WIDTH 1
/* If VLAN_PUSH >= 1, TCI value to be inserted as outermost VLAN. */
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_VLAN0_TCI_BE_OFST 4
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_VLAN0_TCI_BE_LEN 2
@@ -28291,19 +24449,23 @@
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DELIVER_OFST 20
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DELIVER_LEN 4
/* Allows an action set to trigger several counter updates. Set to
- * COUNTER_LIST_ID_NULL to request no counter action.
+ * MAE_COUNTER_ID_NULL to request no counter action.
*/
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_COUNTER_LIST_ID_OFST 24
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_COUNTER_LIST_ID_LEN 4
+/* Enum values, see field(s): */
+/* MAE_COUNTER_ID */
/* If a driver only wished to update one counter within this action set, then
* it can supply a COUNTER_ID instead of allocating a single-element counter
* list. The ID must have been allocated with COUNTER_TYPE=AR. This field
- * should be set to COUNTER_ID_NULL if this behaviour is not required. It is
- * not valid to supply a non-NULL value for both COUNTER_LIST_ID and
+ * should be set to MAE_COUNTER_ID_NULL if this behaviour is not required. It
+ * is not valid to supply a non-NULL value for both COUNTER_LIST_ID and
* COUNTER_ID.
*/
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_COUNTER_ID_OFST 28
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_COUNTER_ID_LEN 4
+/* Enum values, see field(s): */
+/* MAE_COUNTER_ID */
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_MARK_VALUE_OFST 32
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_MARK_VALUE_LEN 4
/* Set to MAC_ID_NULL to request no source MAC replacement. */
@@ -28347,6 +24509,24 @@
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_SUPPRESS_SELF_DELIVERY_OFST 0
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_SUPPRESS_SELF_DELIVERY_LBN 14
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_SUPPRESS_SELF_DELIVERY_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_RDP_C_PL_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_RDP_C_PL_LBN 15
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_RDP_C_PL_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_RDP_D_PL_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_RDP_D_PL_LBN 16
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_RDP_D_PL_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_RDP_OUT_HOST_CHAN_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_RDP_OUT_HOST_CHAN_LBN 17
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_RDP_OUT_HOST_CHAN_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_SET_NET_CHAN_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_SET_NET_CHAN_LBN 18
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_SET_NET_CHAN_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_LACP_PLUGIN_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_LACP_PLUGIN_LBN 19
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_LACP_PLUGIN_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_LACP_INC_L4_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_LACP_INC_L4_LBN 20
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_LACP_INC_L4_WIDTH 1
/* If VLAN_PUSH >= 1, TCI value to be inserted as outermost VLAN. */
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_VLAN0_TCI_BE_OFST 4
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_VLAN0_TCI_BE_LEN 2
@@ -28372,19 +24552,23 @@
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DELIVER_OFST 20
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DELIVER_LEN 4
/* Allows an action set to trigger several counter updates. Set to
- * COUNTER_LIST_ID_NULL to request no counter action.
+ * MAE_COUNTER_ID_NULL to request no counter action.
*/
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_COUNTER_LIST_ID_OFST 24
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_COUNTER_LIST_ID_LEN 4
+/* Enum values, see field(s): */
+/* MAE_COUNTER_ID */
/* If a driver only wished to update one counter within this action set, then
* it can supply a COUNTER_ID instead of allocating a single-element counter
* list. The ID must have been allocated with COUNTER_TYPE=AR. This field
- * should be set to COUNTER_ID_NULL if this behaviour is not required. It is
- * not valid to supply a non-NULL value for both COUNTER_LIST_ID and
+ * should be set to MAE_COUNTER_ID_NULL if this behaviour is not required. It
+ * is not valid to supply a non-NULL value for both COUNTER_LIST_ID and
* COUNTER_ID.
*/
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_COUNTER_ID_OFST 28
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_COUNTER_ID_LEN 4
+/* Enum values, see field(s): */
+/* MAE_COUNTER_ID */
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_MARK_VALUE_OFST 32
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_MARK_VALUE_LEN 4
/* Set to MAC_ID_NULL to request no source MAC replacement. */
@@ -28437,6 +24621,172 @@
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_ECN_ECT_1_TO_CE_LBN 6
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_ECN_ECT_1_TO_CE_WIDTH 1
+/* MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN msgrequest: Only supported if
+ * MAE_ACTION_SET_ALLOC_V3_SUPPORTED is advertised in
+ * MC_CMD_GET_CAPABILITIES_V10_OUT.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_LEN 53
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_FLAGS_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_FLAGS_LEN 4
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN_PUSH_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN_PUSH_LBN 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN_PUSH_WIDTH 2
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN_POP_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN_POP_LBN 4
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN_POP_WIDTH 2
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DECAP_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DECAP_LBN 8
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DECAP_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_MARK_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_MARK_LBN 9
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_MARK_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_FLAG_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_FLAG_LBN 10
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_FLAG_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_NAT_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_NAT_LBN 11
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_NAT_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_DECR_IP_TTL_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_DECR_IP_TTL_LBN 12
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_DECR_IP_TTL_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_SET_SRC_MPORT_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_SET_SRC_MPORT_LBN 13
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_SET_SRC_MPORT_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_SUPPRESS_SELF_DELIVERY_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_SUPPRESS_SELF_DELIVERY_LBN 14
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_SUPPRESS_SELF_DELIVERY_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_RDP_C_PL_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_RDP_C_PL_LBN 15
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_RDP_C_PL_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_RDP_D_PL_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_RDP_D_PL_LBN 16
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_RDP_D_PL_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_RDP_OUT_HOST_CHAN_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_RDP_OUT_HOST_CHAN_LBN 17
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_RDP_OUT_HOST_CHAN_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_SET_NET_CHAN_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_SET_NET_CHAN_LBN 18
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_SET_NET_CHAN_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_LACP_PLUGIN_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_LACP_PLUGIN_LBN 19
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_LACP_PLUGIN_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_LACP_INC_L4_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_LACP_INC_L4_LBN 20
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_LACP_INC_L4_WIDTH 1
+/* If VLAN_PUSH >= 1, TCI value to be inserted as outermost VLAN. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN0_TCI_BE_OFST 4
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN0_TCI_BE_LEN 2
+/* If VLAN_PUSH >= 1, TPID value to be inserted as outermost VLAN. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN0_PROTO_BE_OFST 6
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN0_PROTO_BE_LEN 2
+/* If VLAN_PUSH == 2, inner TCI value to be inserted. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN1_TCI_BE_OFST 8
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN1_TCI_BE_LEN 2
+/* If VLAN_PUSH == 2, inner TPID value to be inserted. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN1_PROTO_BE_OFST 10
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN1_PROTO_BE_LEN 2
+/* Reserved. Ignored by firmware. Should be set to zero or 0xffffffff. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RSVD_OFST 12
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RSVD_LEN 4
+/* Set to ENCAP_HEADER_ID_NULL to request no encap action */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ENCAP_HEADER_ID_OFST 16
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ENCAP_HEADER_ID_LEN 4
+/* An m-port selector identifying the m-port that the modified packet should be
+ * delivered to. Set to MPORT_SELECTOR_NULL to request no delivery of the
+ * packet.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DELIVER_OFST 20
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DELIVER_LEN 4
+/* Allows an action set to trigger several counter updates. Set to
+ * MAE_COUNTER_ID_NULL to request no counter action.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_COUNTER_LIST_ID_OFST 24
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_COUNTER_LIST_ID_LEN 4
+/* Enum values, see field(s): */
+/* MAE_COUNTER_ID */
+/* If a driver only wished to update one counter within this action set, then
+ * it can supply a COUNTER_ID instead of allocating a single-element counter
+ * list. The ID must have been allocated with COUNTER_TYPE=AR. This field
+ * should be set to MAE_COUNTER_ID_NULL if this behaviour is not required. It
+ * is not valid to supply a non-NULL value for both COUNTER_LIST_ID and
+ * COUNTER_ID.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_COUNTER_ID_OFST 28
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_COUNTER_ID_LEN 4
+/* Enum values, see field(s): */
+/* MAE_COUNTER_ID */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_MARK_VALUE_OFST 32
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_MARK_VALUE_LEN 4
+/* Set to MAC_ID_NULL to request no source MAC replacement. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_SRC_MAC_ID_OFST 36
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_SRC_MAC_ID_LEN 4
+/* Set to MAC_ID_NULL to request no destination MAC replacement. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DST_MAC_ID_OFST 40
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DST_MAC_ID_LEN 4
+/* Source m-port ID to be reported for DO_SET_SRC_MPORT action. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_REPORTED_SRC_MPORT_OFST 44
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_REPORTED_SRC_MPORT_LEN 4
+/* Actions for modifying the Differentiated Services Code-Point (DSCP) bits
+ * within IPv4 and IPv6 headers.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DSCP_CONTROL_OFST 48
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DSCP_CONTROL_LEN 2
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_DSCP_ENCAP_COPY_OFST 48
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_DSCP_ENCAP_COPY_LBN 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_DSCP_ENCAP_COPY_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_DSCP_DECAP_COPY_OFST 48
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_DSCP_DECAP_COPY_LBN 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_DSCP_DECAP_COPY_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_DSCP_OFST 48
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_DSCP_LBN 2
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_DSCP_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DSCP_VALUE_OFST 48
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DSCP_VALUE_LBN 3
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DSCP_VALUE_WIDTH 6
+/* Actions for modifying the Explicit Congestion Notification (ECN) bits within
+ * IPv4 and IPv6 headers.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_CONTROL_OFST 50
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_CONTROL_LEN 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_ECN_ENCAP_COPY_OFST 50
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_ECN_ENCAP_COPY_LBN 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_ECN_ENCAP_COPY_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_ECN_DECAP_COPY_OFST 50
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_ECN_DECAP_COPY_LBN 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_ECN_DECAP_COPY_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_ECN_OFST 50
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_ECN_LBN 2
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_ECN_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_VALUE_OFST 50
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_VALUE_LBN 3
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_VALUE_WIDTH 2
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_ECT_0_TO_CE_OFST 50
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_ECT_0_TO_CE_LBN 5
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_ECT_0_TO_CE_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_ECT_1_TO_CE_OFST 50
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_ECT_1_TO_CE_LBN 6
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_ECT_1_TO_CE_WIDTH 1
+/* Actions for overwriting CH_ROUTE subfields. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_OVERWRITE_OFST 51
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_OVERWRITE_LEN 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_C_PL_OFST 51
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_C_PL_LBN 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_C_PL_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_D_PL_OFST 51
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_D_PL_LBN 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_D_PL_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_PL_CHAN_OFST 51
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_PL_CHAN_LBN 2
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_PL_CHAN_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_OUT_HOST_CHAN_OFST 51
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_OUT_HOST_CHAN_LBN 3
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_OUT_HOST_CHAN_WIDTH 1
+/* Override outgoing CH_VC to network port for DO_SET_NET_CHAN action. Cannot
+ * be used in conjunction with DO_SET_SRC_MPORT action.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_NET_CHAN_OFST 52
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_NET_CHAN_LEN 1
+
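/*
 * Illustrative sketch, not part of this patch: a minimal V3 action-set
 * allocation request built with the generic MCDI helpers that appear in
 * the mcdi_port.c hunks later in this patch (MCDI_DECLARE_BUF,
 * MCDI_SET_DWORD, MCDI_DWORD, efx_mcdi_rpc). The function name is
 * hypothetical; the command code MC_CMD_MAE_ACTION_SET_ALLOC, the
 * OUT_AS_ID field and the 0xffffffff "NULL" encodings for the optional
 * ID fields are assumed from the surrounding comments rather than shown
 * in this hunk, and the request relies on MCDI_DECLARE_BUF zero-filling
 * the fields that are not set explicitly.
 */
static int example_mae_action_set_alloc_v3(struct efx_nic *efx,
					   u32 deliver_mport, u32 *as_id)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_SET_ALLOC_OUT_LEN);
	size_t outlen;
	int rc;

	/* Decap only: no VLAN push/pop, NAT, mark, flag or DSCP/ECN actions */
	MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_V3_IN_FLAGS,
		       1U << MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DECAP_LBN);
	/* Deliver the resulting packet to the chosen m-port */
	MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_V3_IN_DELIVER, deliver_mport);
	/* Decline the optional ID-based actions (assumed NULL encoding) */
	MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_V3_IN_ENCAP_HEADER_ID, 0xffffffff);
	MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_V3_IN_COUNTER_LIST_ID, 0xffffffff);
	MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_V3_IN_COUNTER_ID, 0xffffffff);
	MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_V3_IN_SRC_MAC_ID, 0xffffffff);
	MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_V3_IN_DST_MAC_ID, 0xffffffff);

	rc = efx_mcdi_rpc(efx, MC_CMD_MAE_ACTION_SET_ALLOC, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_MAE_ACTION_SET_ALLOC_OUT_LEN)
		return -EIO;
	*as_id = MCDI_DWORD(outbuf, MAE_ACTION_SET_ALLOC_OUT_AS_ID);
	return 0;
}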
/* MC_CMD_MAE_ACTION_SET_ALLOC_OUT msgresponse */
#define MC_CMD_MAE_ACTION_SET_ALLOC_OUT_LEN 4
/* The MSB of the AS_ID is guaranteed to be clear if the ID is not
@@ -28680,58 +25030,6 @@
#define MC_CMD_MAE_OUTER_RULE_REMOVE_OUT_REMOVED_OR_ID_MAXNUM 32
#define MC_CMD_MAE_OUTER_RULE_REMOVE_OUT_REMOVED_OR_ID_MAXNUM_MCDI2 32
-
-/***********************************/
-/* MC_CMD_MAE_OUTER_RULE_UPDATE
- * Atomically change the response of an Outer Rule.
- */
-#define MC_CMD_MAE_OUTER_RULE_UPDATE 0x17d
-#undef MC_CMD_0x17d_PRIVILEGE_CTG
-
-#define MC_CMD_0x17d_PRIVILEGE_CTG SRIOV_CTG_MAE
-
-/* MC_CMD_MAE_OUTER_RULE_UPDATE_IN msgrequest */
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_LEN 16
-/* ID of outer rule to update */
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_OR_ID_OFST 0
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_OR_ID_LEN 4
-/* Packets matching the rule will be parsed with this encapsulation. */
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_ENCAP_TYPE_OFST 4
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_ENCAP_TYPE_LEN 4
-/* Enum values, see field(s): */
-/* MAE_MCDI_ENCAP_TYPE */
-/* This field controls the actions that are performed when a rule is hit. */
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_ACTION_CONTROL_OFST 8
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_ACTION_CONTROL_LEN 4
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_DO_CT_OFST 8
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_DO_CT_LBN 0
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_DO_CT_WIDTH 1
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_VNI_MODE_OFST 8
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_VNI_MODE_LBN 1
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_VNI_MODE_WIDTH 2
-/* Enum values, see field(s): */
-/* MAE_CT_VNI_MODE */
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_DO_COUNT_OFST 8
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_DO_COUNT_LBN 3
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_DO_COUNT_WIDTH 1
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_TCP_FLAGS_INHIBIT_OFST 8
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_TCP_FLAGS_INHIBIT_LBN 4
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_TCP_FLAGS_INHIBIT_WIDTH 1
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_RECIRC_ID_OFST 8
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_RECIRC_ID_LBN 8
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_RECIRC_ID_WIDTH 8
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_DOMAIN_OFST 8
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_DOMAIN_LBN 16
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_DOMAIN_WIDTH 16
-/* ID of counter to increment when the rule is hit. Only used if the DO_COUNT
- * flag is set. The ID must have been allocated with COUNTER_TYPE=OR.
- */
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_COUNTER_ID_OFST 12
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_COUNTER_ID_LEN 4
-
-/* MC_CMD_MAE_OUTER_RULE_UPDATE_OUT msgresponse */
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_OUT_LEN 0
-
/* MAE_ACTION_RULE_RESPONSE structuredef */
#define MAE_ACTION_RULE_RESPONSE_LEN 16
#define MAE_ACTION_RULE_RESPONSE_ASL_ID_OFST 0
@@ -29122,142 +25420,6 @@
#define MAE_MPORT_DESC_VNIC_PLUGIN_TBD_LBN 352
#define MAE_MPORT_DESC_VNIC_PLUGIN_TBD_WIDTH 32
-/* MAE_MPORT_DESC_V2 structuredef */
-#define MAE_MPORT_DESC_V2_LEN 56
-#define MAE_MPORT_DESC_V2_MPORT_ID_OFST 0
-#define MAE_MPORT_DESC_V2_MPORT_ID_LEN 4
-#define MAE_MPORT_DESC_V2_MPORT_ID_LBN 0
-#define MAE_MPORT_DESC_V2_MPORT_ID_WIDTH 32
-/* Reserved for future purposes, contains information independent of caller */
-#define MAE_MPORT_DESC_V2_FLAGS_OFST 4
-#define MAE_MPORT_DESC_V2_FLAGS_LEN 4
-#define MAE_MPORT_DESC_V2_FLAGS_LBN 32
-#define MAE_MPORT_DESC_V2_FLAGS_WIDTH 32
-#define MAE_MPORT_DESC_V2_CALLER_FLAGS_OFST 8
-#define MAE_MPORT_DESC_V2_CALLER_FLAGS_LEN 4
-#define MAE_MPORT_DESC_V2_CAN_RECEIVE_ON_OFST 8
-#define MAE_MPORT_DESC_V2_CAN_RECEIVE_ON_LBN 0
-#define MAE_MPORT_DESC_V2_CAN_RECEIVE_ON_WIDTH 1
-#define MAE_MPORT_DESC_V2_CAN_DELIVER_TO_OFST 8
-#define MAE_MPORT_DESC_V2_CAN_DELIVER_TO_LBN 1
-#define MAE_MPORT_DESC_V2_CAN_DELIVER_TO_WIDTH 1
-#define MAE_MPORT_DESC_V2_CAN_DELETE_OFST 8
-#define MAE_MPORT_DESC_V2_CAN_DELETE_LBN 2
-#define MAE_MPORT_DESC_V2_CAN_DELETE_WIDTH 1
-#define MAE_MPORT_DESC_V2_IS_ZOMBIE_OFST 8
-#define MAE_MPORT_DESC_V2_IS_ZOMBIE_LBN 3
-#define MAE_MPORT_DESC_V2_IS_ZOMBIE_WIDTH 1
-#define MAE_MPORT_DESC_V2_CALLER_FLAGS_LBN 64
-#define MAE_MPORT_DESC_V2_CALLER_FLAGS_WIDTH 32
-/* Not the ideal name; it's really the type of thing connected to the m-port */
-#define MAE_MPORT_DESC_V2_MPORT_TYPE_OFST 12
-#define MAE_MPORT_DESC_V2_MPORT_TYPE_LEN 4
-/* enum: Connected to a MAC... */
-#define MAE_MPORT_DESC_V2_MPORT_TYPE_NET_PORT 0x0
-/* enum: Adds metadata and delivers to another m-port */
-#define MAE_MPORT_DESC_V2_MPORT_TYPE_ALIAS 0x1
-/* enum: Connected to a VNIC. */
-#define MAE_MPORT_DESC_V2_MPORT_TYPE_VNIC 0x2
-#define MAE_MPORT_DESC_V2_MPORT_TYPE_LBN 96
-#define MAE_MPORT_DESC_V2_MPORT_TYPE_WIDTH 32
-/* 128-bit value available to drivers for m-port identification. */
-#define MAE_MPORT_DESC_V2_UUID_OFST 16
-#define MAE_MPORT_DESC_V2_UUID_LEN 16
-#define MAE_MPORT_DESC_V2_UUID_LBN 128
-#define MAE_MPORT_DESC_V2_UUID_WIDTH 128
-/* Big wadge of space reserved for other common properties */
-#define MAE_MPORT_DESC_V2_RESERVED_OFST 32
-#define MAE_MPORT_DESC_V2_RESERVED_LEN 8
-#define MAE_MPORT_DESC_V2_RESERVED_LO_OFST 32
-#define MAE_MPORT_DESC_V2_RESERVED_LO_LEN 4
-#define MAE_MPORT_DESC_V2_RESERVED_LO_LBN 256
-#define MAE_MPORT_DESC_V2_RESERVED_LO_WIDTH 32
-#define MAE_MPORT_DESC_V2_RESERVED_HI_OFST 36
-#define MAE_MPORT_DESC_V2_RESERVED_HI_LEN 4
-#define MAE_MPORT_DESC_V2_RESERVED_HI_LBN 288
-#define MAE_MPORT_DESC_V2_RESERVED_HI_WIDTH 32
-#define MAE_MPORT_DESC_V2_RESERVED_LBN 256
-#define MAE_MPORT_DESC_V2_RESERVED_WIDTH 64
-/* Logical port index. Only valid when type NET Port. */
-#define MAE_MPORT_DESC_V2_NET_PORT_IDX_OFST 40
-#define MAE_MPORT_DESC_V2_NET_PORT_IDX_LEN 4
-#define MAE_MPORT_DESC_V2_NET_PORT_IDX_LBN 320
-#define MAE_MPORT_DESC_V2_NET_PORT_IDX_WIDTH 32
-/* The m-port delivered to */
-#define MAE_MPORT_DESC_V2_ALIAS_DELIVER_MPORT_ID_OFST 40
-#define MAE_MPORT_DESC_V2_ALIAS_DELIVER_MPORT_ID_LEN 4
-#define MAE_MPORT_DESC_V2_ALIAS_DELIVER_MPORT_ID_LBN 320
-#define MAE_MPORT_DESC_V2_ALIAS_DELIVER_MPORT_ID_WIDTH 32
-/* The type of thing that owns the VNIC */
-#define MAE_MPORT_DESC_V2_VNIC_CLIENT_TYPE_OFST 40
-#define MAE_MPORT_DESC_V2_VNIC_CLIENT_TYPE_LEN 4
-#define MAE_MPORT_DESC_V2_VNIC_CLIENT_TYPE_FUNCTION 0x1 /* enum */
-#define MAE_MPORT_DESC_V2_VNIC_CLIENT_TYPE_PLUGIN 0x2 /* enum */
-#define MAE_MPORT_DESC_V2_VNIC_CLIENT_TYPE_LBN 320
-#define MAE_MPORT_DESC_V2_VNIC_CLIENT_TYPE_WIDTH 32
-/* The PCIe interface on which the function lives. CJK: We need an enumeration
- * of interfaces that we extend as new interface (types) appear. This belongs
- * elsewhere and should be referenced from here
- */
-#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_INTERFACE_OFST 44
-#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_INTERFACE_LEN 4
-#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_INTERFACE_LBN 352
-#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_INTERFACE_WIDTH 32
-#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_PF_IDX_OFST 48
-#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_PF_IDX_LEN 2
-#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_PF_IDX_LBN 384
-#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_PF_IDX_WIDTH 16
-#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_VF_IDX_OFST 50
-#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_VF_IDX_LEN 2
-/* enum: Indicates that the function is a PF */
-#define MAE_MPORT_DESC_V2_VF_IDX_NULL 0xffff
-#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_VF_IDX_LBN 400
-#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_VF_IDX_WIDTH 16
-/* Reserved. Should be ignored for now. */
-#define MAE_MPORT_DESC_V2_VNIC_PLUGIN_TBD_OFST 44
-#define MAE_MPORT_DESC_V2_VNIC_PLUGIN_TBD_LEN 4
-#define MAE_MPORT_DESC_V2_VNIC_PLUGIN_TBD_LBN 352
-#define MAE_MPORT_DESC_V2_VNIC_PLUGIN_TBD_WIDTH 32
-/* A client handle for the VNIC's owner. Only valid for type VNIC. */
-#define MAE_MPORT_DESC_V2_VNIC_CLIENT_HANDLE_OFST 52
-#define MAE_MPORT_DESC_V2_VNIC_CLIENT_HANDLE_LEN 4
-#define MAE_MPORT_DESC_V2_VNIC_CLIENT_HANDLE_LBN 416
-#define MAE_MPORT_DESC_V2_VNIC_CLIENT_HANDLE_WIDTH 32
-
-
-/***********************************/
-/* MC_CMD_MAE_MPORT_ENUMERATE
- * Deprecated in favour of MAE_MPORT_READ_JOURNAL. Support for this command
- * will be removed at some future point.
- */
-#define MC_CMD_MAE_MPORT_ENUMERATE 0x17c
-#undef MC_CMD_0x17c_PRIVILEGE_CTG
-
-#define MC_CMD_0x17c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_MAE_MPORT_ENUMERATE_IN msgrequest */
-#define MC_CMD_MAE_MPORT_ENUMERATE_IN_LEN 0
-
-/* MC_CMD_MAE_MPORT_ENUMERATE_OUT msgresponse */
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_LENMIN 8
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_LENMAX 252
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_LEN(num) (8+1*(num))
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_DATA_NUM(len) (((len)-8)/1)
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_COUNT_OFST 0
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_COUNT_LEN 4
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_SIZEOF_MPORT_DESC_OFST 4
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_SIZEOF_MPORT_DESC_LEN 4
-/* Any array of MAE_MPORT_DESC structures. The MAE_MPORT_DESC structure may
- * grow in future version of this command. Drivers should use a stride of
- * SIZEOF_MPORT_DESC. Fields beyond SIZEOF_MPORT_DESC are not present.
- */
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_DATA_OFST 8
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_DATA_LEN 1
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_DATA_MINNUM 0
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_DATA_MAXNUM 244
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_DATA_MAXNUM_MCDI2 1012
-
/***********************************/
/* MC_CMD_MAE_MPORT_READ_JOURNAL
@@ -29570,73 +25732,6 @@
/***********************************/
-/* MC_CMD_TABLE_UPDATE
- * Update an existing entry in a table with a new response value. May return
- * EINVAL for unknown table ID or other bad request parameters, ENOENT if the
- * entry does not already exist, or EPERM if the operation is not permitted. In
- * case of an error, the additional MCDI error argument field returns the raw
- * error code from the underlying CAM driver.
- */
-#define MC_CMD_TABLE_UPDATE 0x1ce
-#undef MC_CMD_0x1ce_PRIVILEGE_CTG
-
-#define MC_CMD_0x1ce_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_TABLE_UPDATE_IN msgrequest */
-#define MC_CMD_TABLE_UPDATE_IN_LENMIN 16
-#define MC_CMD_TABLE_UPDATE_IN_LENMAX 252
-#define MC_CMD_TABLE_UPDATE_IN_LENMAX_MCDI2 1020
-#define MC_CMD_TABLE_UPDATE_IN_LEN(num) (12+4*(num))
-#define MC_CMD_TABLE_UPDATE_IN_DATA_NUM(len) (((len)-12)/4)
-/* Table identifier. */
-#define MC_CMD_TABLE_UPDATE_IN_TABLE_ID_OFST 0
-#define MC_CMD_TABLE_UPDATE_IN_TABLE_ID_LEN 4
-/* Enum values, see field(s): */
-/* TABLE_ID */
-/* Width in bits of supplied key data (must match table properties). */
-#define MC_CMD_TABLE_UPDATE_IN_KEY_WIDTH_OFST 4
-#define MC_CMD_TABLE_UPDATE_IN_KEY_WIDTH_LEN 2
-/* Width in bits of supplied mask data (0 for direct/BCAM tables, or for STCAM
- * when allocated MASK_ID is used instead).
- */
-#define MC_CMD_TABLE_UPDATE_IN_MASK_WIDTH_OFST 6
-#define MC_CMD_TABLE_UPDATE_IN_MASK_WIDTH_LEN 2
-/* Width in bits of supplied response data (for INSERT and UPDATE operations
- * this must match the table properties; for DELETE operations, no response
- * data is required and this must be 0).
- */
-#define MC_CMD_TABLE_UPDATE_IN_RESP_WIDTH_OFST 8
-#define MC_CMD_TABLE_UPDATE_IN_RESP_WIDTH_LEN 2
-/* Mask ID for STCAM table - used instead of mask data if the table descriptor
- * reports ALLOC_MASKS==1. Otherwise set to 0.
- */
-#define MC_CMD_TABLE_UPDATE_IN_MASK_ID_OFST 6
-#define MC_CMD_TABLE_UPDATE_IN_MASK_ID_LEN 2
-/* Priority for TCAM or STCAM, in range 0..N_PRIORITIES-1, otherwise 0. */
-#define MC_CMD_TABLE_UPDATE_IN_PRIORITY_OFST 8
-#define MC_CMD_TABLE_UPDATE_IN_PRIORITY_LEN 2
-/* (32-bit alignment padding - set to 0) */
-#define MC_CMD_TABLE_UPDATE_IN_RESERVED_OFST 10
-#define MC_CMD_TABLE_UPDATE_IN_RESERVED_LEN 2
-/* Sequence of key, mask (if MASK_WIDTH > 0), and response (if RESP_WIDTH > 0)
- * data values. Each of these items is logically treated as a single wide N-bit
- * value, in which the individual fields have been placed within that value per
- * the LBN and WIDTH information from the table field descriptors. The wide
- * N-bit value is padded with 0 bits at the MSB end if necessary to make a
- * multiple of 32 bits. The value is then packed into this command as a
- * sequence of 32-bit words, bits [31:0] first, then bits [63:32], etc.
- */
-#define MC_CMD_TABLE_UPDATE_IN_DATA_OFST 12
-#define MC_CMD_TABLE_UPDATE_IN_DATA_LEN 4
-#define MC_CMD_TABLE_UPDATE_IN_DATA_MINNUM 1
-#define MC_CMD_TABLE_UPDATE_IN_DATA_MAXNUM 60
-#define MC_CMD_TABLE_UPDATE_IN_DATA_MAXNUM_MCDI2 252
-
-/* MC_CMD_TABLE_UPDATE_OUT msgresponse */
-#define MC_CMD_TABLE_UPDATE_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_TABLE_DELETE
* Delete an existing entry in a table. May return EINVAL for unknown table ID
* or other bad request parameters, ENOENT if the entry does not exist, or
@@ -29702,5 +25797,124 @@
/* MC_CMD_TABLE_DELETE_OUT msgresponse */
#define MC_CMD_TABLE_DELETE_OUT_LEN 0
+/* MC_CMD_QUEUE_HANDLE structuredef: On X4, to distinguish between full-
+ * featured (X2-style) VIs and low-latency (X3-style) queues, we use the top
+ * bits of the queue handle to specify the queue type in all MCDI calls which
+ * refer to VIs/queues. These bits should be masked off when indexing into a
+ * queue in the BAR.
+ */
+#define MC_CMD_QUEUE_HANDLE_LEN 4
+/* Combined queue number and type. This is the ID returned by and passed into
+ * MCDI calls that use queues.
+ */
+#define MC_CMD_QUEUE_HANDLE_QUEUE_HANDLE_OFST 0
+#define MC_CMD_QUEUE_HANDLE_QUEUE_HANDLE_LEN 4
+#define MC_CMD_QUEUE_HANDLE_QUEUE_NUM_OFST 0
+#define MC_CMD_QUEUE_HANDLE_QUEUE_NUM_LBN 0
+#define MC_CMD_QUEUE_HANDLE_QUEUE_NUM_WIDTH 24
+#define MC_CMD_QUEUE_HANDLE_QUEUE_TYPE_OFST 0
+#define MC_CMD_QUEUE_HANDLE_QUEUE_TYPE_LBN 24
+#define MC_CMD_QUEUE_HANDLE_QUEUE_TYPE_WIDTH 8
+/* enum: Indicates that the queue instance is a full-featured VI */
+#define MC_CMD_QUEUE_HANDLE_QUEUE_TYPE_FF_VI 0x0
+/* enum: Indicates that the queue instance is a LL TXQ */
+#define MC_CMD_QUEUE_HANDLE_QUEUE_TYPE_LL_TXQ 0x1
+/* enum: Indicates that the queue instance is a LL RXQ */
+#define MC_CMD_QUEUE_HANDLE_QUEUE_TYPE_LL_RXQ 0x2
+/* enum: Indicates that the queue instance is a LL EVQ */
+#define MC_CMD_QUEUE_HANDLE_QUEUE_TYPE_LL_EVQ 0x3
+#define MC_CMD_QUEUE_HANDLE_QUEUE_HANDLE_LBN 0
+#define MC_CMD_QUEUE_HANDLE_QUEUE_HANDLE_WIDTH 32
+
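/*
 * Illustrative sketch, not part of this patch: splitting a queue handle
 * into its type and number using the QUEUE_TYPE/QUEUE_NUM subfields
 * defined above. The helper names are hypothetical; the number is what
 * a driver would use when indexing into the BAR, with the type bits
 * masked off as the comment above requires.
 */
static inline u32 example_queue_handle_type(u32 handle)
{
	return (handle >> MC_CMD_QUEUE_HANDLE_QUEUE_TYPE_LBN) &
	       ((1U << MC_CMD_QUEUE_HANDLE_QUEUE_TYPE_WIDTH) - 1);
}

static inline u32 example_queue_handle_num(u32 handle)
{
	return handle & ((1U << MC_CMD_QUEUE_HANDLE_QUEUE_NUM_WIDTH) - 1);
}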
+
+/***********************************/
+/* MC_CMD_ALLOC_LL_QUEUES
+ * Allocate low latency (X3-style) queues for current PCI function. Can be
+ * called more than once if desired to allocate more queues.
+ */
+#define MC_CMD_ALLOC_LL_QUEUES 0x1dd
+#undef MC_CMD_0x1dd_PRIVILEGE_CTG
+
+#define MC_CMD_0x1dd_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_ALLOC_LL_QUEUES_IN msgrequest */
+#define MC_CMD_ALLOC_LL_QUEUES_IN_LEN 24
+/* The minimum number of TXQs that is acceptable */
+#define MC_CMD_ALLOC_LL_QUEUES_IN_MIN_TXQ_COUNT_OFST 0
+#define MC_CMD_ALLOC_LL_QUEUES_IN_MIN_TXQ_COUNT_LEN 4
+/* The maximum number of TXQs that would be useful */
+#define MC_CMD_ALLOC_LL_QUEUES_IN_MAX_TXQ_COUNT_OFST 4
+#define MC_CMD_ALLOC_LL_QUEUES_IN_MAX_TXQ_COUNT_LEN 4
+/* The minimum number of RXQs that is acceptable */
+#define MC_CMD_ALLOC_LL_QUEUES_IN_MIN_RXQ_COUNT_OFST 8
+#define MC_CMD_ALLOC_LL_QUEUES_IN_MIN_RXQ_COUNT_LEN 4
+/* The maximum number of RXQs that would be useful */
+#define MC_CMD_ALLOC_LL_QUEUES_IN_MAX_RXQ_COUNT_OFST 12
+#define MC_CMD_ALLOC_LL_QUEUES_IN_MAX_RXQ_COUNT_LEN 4
+/* The minimum number of EVQs that is acceptable */
+#define MC_CMD_ALLOC_LL_QUEUES_IN_MIN_EVQ_COUNT_OFST 16
+#define MC_CMD_ALLOC_LL_QUEUES_IN_MIN_EVQ_COUNT_LEN 4
+/* The maximum number of EVQs that would be useful */
+#define MC_CMD_ALLOC_LL_QUEUES_IN_MAX_EVQ_COUNT_OFST 20
+#define MC_CMD_ALLOC_LL_QUEUES_IN_MAX_EVQ_COUNT_LEN 4
+
+/* MC_CMD_ALLOC_LL_QUEUES_OUT msgresponse */
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_LENMIN 16
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_LENMAX 252
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_LEN(num) (12+4*(num))
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_QUEUES_NUM(len) (((len)-12)/4)
+/* The number of TXQs allocated in this request */
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_TXQ_COUNT_OFST 0
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_TXQ_COUNT_LEN 4
+/* The number of RXQs allocated in this request */
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_RXQ_COUNT_OFST 4
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_RXQ_COUNT_LEN 4
+/* The number of EVQs allocated in this request */
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_EVQ_COUNT_OFST 8
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_EVQ_COUNT_LEN 4
+/* A list of allocated queues, returned as MC_CMD_QUEUE_HANDLEs, not
+ * necessarily contiguous. TXQs are first in the list, followed by RXQs then
+ * EVQs. The type of each queue is indicated by the top bits (see the
+ * QUEUE_TYPE enum)
+ */
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_QUEUES_OFST 12
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_QUEUES_LEN 4
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_QUEUES_MINNUM 1
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_QUEUES_MAXNUM 60
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_QUEUES_MAXNUM_MCDI2 252
+
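/*
 * Illustrative sketch, not part of this patch: requesting a single
 * TXQ/RXQ/EVQ set with the MCDI helpers used in the mcdi_port.c hunks
 * later in this patch. The function name is hypothetical. Only the first
 * returned handle is read here; walking the rest of the QUEUES array
 * would need the driver's MCDI array accessors, which this diff does not
 * show.
 */
static int example_alloc_ll_queues(struct efx_nic *efx, u32 *first_queue)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_ALLOC_LL_QUEUES_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_LL_QUEUES_OUT_LENMAX);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, ALLOC_LL_QUEUES_IN_MIN_TXQ_COUNT, 1);
	MCDI_SET_DWORD(inbuf, ALLOC_LL_QUEUES_IN_MAX_TXQ_COUNT, 1);
	MCDI_SET_DWORD(inbuf, ALLOC_LL_QUEUES_IN_MIN_RXQ_COUNT, 1);
	MCDI_SET_DWORD(inbuf, ALLOC_LL_QUEUES_IN_MAX_RXQ_COUNT, 1);
	MCDI_SET_DWORD(inbuf, ALLOC_LL_QUEUES_IN_MIN_EVQ_COUNT, 1);
	MCDI_SET_DWORD(inbuf, ALLOC_LL_QUEUES_IN_MAX_EVQ_COUNT, 1);

	rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_LL_QUEUES, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_ALLOC_LL_QUEUES_OUT_LENMIN)
		return -EIO;

	/* TXQs come first in the list, per the ordering described above */
	*first_queue = MCDI_DWORD(outbuf, ALLOC_LL_QUEUES_OUT_QUEUES);
	return 0;
}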
+
+/***********************************/
+/* MC_CMD_FREE_LL_QUEUES
+ * Free low latency (X3-style) queues for current PCI function.
+ */
+#define MC_CMD_FREE_LL_QUEUES 0x1de
+#undef MC_CMD_0x1de_PRIVILEGE_CTG
+
+#define MC_CMD_0x1de_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_FREE_LL_QUEUES_IN msgrequest */
+#define MC_CMD_FREE_LL_QUEUES_IN_LENMIN 8
+#define MC_CMD_FREE_LL_QUEUES_IN_LENMAX 252
+#define MC_CMD_FREE_LL_QUEUES_IN_LENMAX_MCDI2 1020
+#define MC_CMD_FREE_LL_QUEUES_IN_LEN(num) (4+4*(num))
+#define MC_CMD_FREE_LL_QUEUES_IN_QUEUES_NUM(len) (((len)-4)/4)
+/* The number of queues to free. */
+#define MC_CMD_FREE_LL_QUEUES_IN_QUEUE_COUNT_OFST 0
+#define MC_CMD_FREE_LL_QUEUES_IN_QUEUE_COUNT_LEN 4
+/* A list of queues to free, as a list of MC_CMD_QUEUE_HANDLEs. They must have
+ * all been previously allocated by MC_CMD_ALLOC_LL_QUEUES. The type of each
+ * queue should be indicated by the top bits.
+ */
+#define MC_CMD_FREE_LL_QUEUES_IN_QUEUES_OFST 4
+#define MC_CMD_FREE_LL_QUEUES_IN_QUEUES_LEN 4
+#define MC_CMD_FREE_LL_QUEUES_IN_QUEUES_MINNUM 1
+#define MC_CMD_FREE_LL_QUEUES_IN_QUEUES_MAXNUM 62
+#define MC_CMD_FREE_LL_QUEUES_IN_QUEUES_MAXNUM_MCDI2 254
+
+/* MC_CMD_FREE_LL_QUEUES_OUT msgresponse */
+#define MC_CMD_FREE_LL_QUEUES_OUT_LEN 0
+
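/*
 * Illustrative sketch, not part of this patch: returning one previously
 * allocated handle. With a single entry, the first element of the QUEUES
 * array can be written directly through MCDI_SET_DWORD, since the field
 * name resolves to the start of the array. The function name is
 * hypothetical, and it assumes efx_mcdi_rpc accepts a NULL response
 * buffer for a zero-length msgresponse, as elsewhere in this driver.
 */
static int example_free_ll_queue(struct efx_nic *efx, u32 queue_handle)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_FREE_LL_QUEUES_IN_LEN(1));

	MCDI_SET_DWORD(inbuf, FREE_LL_QUEUES_IN_QUEUE_COUNT, 1);
	MCDI_SET_DWORD(inbuf, FREE_LL_QUEUES_IN_QUEUES, queue_handle);

	return efx_mcdi_rpc(efx, MC_CMD_FREE_LL_QUEUES, inbuf, sizeof(inbuf),
			    NULL, 0, NULL);
}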
#endif /* MCDI_PCOL_H */
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c
index ad4694fa3dda..7b236d291d8c 100644
--- a/drivers/net/ethernet/sfc/mcdi_port.c
+++ b/drivers/net/ethernet/sfc/mcdi_port.c
@@ -17,58 +17,6 @@
#include "selftest.h"
#include "mcdi_port_common.h"
-static int efx_mcdi_mdio_read(struct net_device *net_dev,
- int prtad, int devad, u16 addr)
-{
- struct efx_nic *efx = efx_netdev_priv(net_dev);
- MCDI_DECLARE_BUF(inbuf, MC_CMD_MDIO_READ_IN_LEN);
- MCDI_DECLARE_BUF(outbuf, MC_CMD_MDIO_READ_OUT_LEN);
- size_t outlen;
- int rc;
-
- MCDI_SET_DWORD(inbuf, MDIO_READ_IN_BUS, efx->mdio_bus);
- MCDI_SET_DWORD(inbuf, MDIO_READ_IN_PRTAD, prtad);
- MCDI_SET_DWORD(inbuf, MDIO_READ_IN_DEVAD, devad);
- MCDI_SET_DWORD(inbuf, MDIO_READ_IN_ADDR, addr);
-
- rc = efx_mcdi_rpc(efx, MC_CMD_MDIO_READ, inbuf, sizeof(inbuf),
- outbuf, sizeof(outbuf), &outlen);
- if (rc)
- return rc;
-
- if (MCDI_DWORD(outbuf, MDIO_READ_OUT_STATUS) !=
- MC_CMD_MDIO_STATUS_GOOD)
- return -EIO;
-
- return (u16)MCDI_DWORD(outbuf, MDIO_READ_OUT_VALUE);
-}
-
-static int efx_mcdi_mdio_write(struct net_device *net_dev,
- int prtad, int devad, u16 addr, u16 value)
-{
- struct efx_nic *efx = efx_netdev_priv(net_dev);
- MCDI_DECLARE_BUF(inbuf, MC_CMD_MDIO_WRITE_IN_LEN);
- MCDI_DECLARE_BUF(outbuf, MC_CMD_MDIO_WRITE_OUT_LEN);
- size_t outlen;
- int rc;
-
- MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_BUS, efx->mdio_bus);
- MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_PRTAD, prtad);
- MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_DEVAD, devad);
- MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_ADDR, addr);
- MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_VALUE, value);
-
- rc = efx_mcdi_rpc(efx, MC_CMD_MDIO_WRITE, inbuf, sizeof(inbuf),
- outbuf, sizeof(outbuf), &outlen);
- if (rc)
- return rc;
-
- if (MCDI_DWORD(outbuf, MDIO_WRITE_OUT_STATUS) !=
- MC_CMD_MDIO_STATUS_GOOD)
- return -EIO;
-
- return 0;
-}
u32 efx_mcdi_phy_get_caps(struct efx_nic *efx)
{
@@ -97,12 +45,7 @@ int efx_mcdi_port_probe(struct efx_nic *efx)
{
int rc;
- /* Set up MDIO structure for PHY */
- efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
- efx->mdio.mdio_read = efx_mcdi_mdio_read;
- efx->mdio.mdio_write = efx_mcdi_mdio_write;
-
- /* Fill out MDIO structure, loopback modes, and initial link state */
+ /* Fill out loopback modes and initial link state */
rc = efx_mcdi_phy_probe(efx);
if (rc != 0)
return rc;
diff --git a/drivers/net/ethernet/sfc/mcdi_port_common.c b/drivers/net/ethernet/sfc/mcdi_port_common.c
index 76ea26722ca4..dae684194ac8 100644
--- a/drivers/net/ethernet/sfc/mcdi_port_common.c
+++ b/drivers/net/ethernet/sfc/mcdi_port_common.c
@@ -448,15 +448,6 @@ int efx_mcdi_phy_probe(struct efx_nic *efx)
efx->phy_data = phy_data;
efx->phy_type = phy_data->type;
- efx->mdio_bus = phy_data->channel;
- efx->mdio.prtad = phy_data->port;
- efx->mdio.mmds = phy_data->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22);
- efx->mdio.mode_support = 0;
- if (phy_data->mmd_mask & (1 << MC_CMD_MMD_CLAUSE22))
- efx->mdio.mode_support |= MDIO_SUPPORTS_C22;
- if (phy_data->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22))
- efx->mdio.mode_support |= MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
-
caps = MCDI_DWORD(outbuf, GET_LINK_OUT_CAP);
if (caps & (1 << MC_CMD_PHY_CAP_AN_LBN))
mcdi_to_ethtool_linkset(phy_data->media, caps,
@@ -546,8 +537,6 @@ void efx_mcdi_phy_get_link_ksettings(struct efx_nic *efx, struct ethtool_link_ks
cmd->base.port = mcdi_to_ethtool_media(phy_cfg->media);
cmd->base.phy_address = phy_cfg->port;
cmd->base.autoneg = !!(efx->link_advertising[0] & ADVERTISED_Autoneg);
- cmd->base.mdio_support = (efx->mdio.mode_support &
- (MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22));
mcdi_to_ethtool_linkset(phy_cfg->media, phy_cfg->supported_cap,
cmd->link_modes.supported);
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 620ba6ef3514..5c0f306fb019 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -15,7 +15,7 @@
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/timer.h>
-#include <linux/mdio.h>
+#include <linux/mii.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/device.h>
@@ -831,6 +831,7 @@ struct efx_arfs_rule {
/**
* struct efx_async_filter_insertion - Request to asynchronously insert a filter
* @net_dev: Reference to the netdevice
+ * @net_dev_tracker: reference tracker entry for @net_dev
* @spec: The filter to insert
* @work: Workitem for this request
* @rxq_index: Identifies the channel for which this request was made
@@ -838,6 +839,7 @@ struct efx_arfs_rule {
*/
struct efx_async_filter_insertion {
struct net_device *net_dev;
+ netdevice_tracker net_dev_tracker;
struct efx_filter_spec spec;
struct work_struct work;
u16 rxq_index;
@@ -954,8 +956,6 @@ struct efx_mae;
* @stats_buffer: DMA buffer for statistics
* @phy_type: PHY type
* @phy_data: PHY private data (including PHY-specific stats)
- * @mdio: PHY MDIO interface
- * @mdio_bus: PHY MDIO bus ID (only used by Siena)
* @phy_mode: PHY operating mode. Serialised by @mac_lock.
* @link_advertising: Autonegotiation advertising flags
* @fec_config: Forward Error Correction configuration flags. For bit positions
@@ -1004,6 +1004,7 @@ struct efx_mae;
* @dl_port: devlink port associated with the PF
* @mem_bar: The BAR that is mapped into membase.
* @reg_base: Offset from the start of the bar to the function control window.
+ * @reflash_mutex: Mutex for serialising firmware reflash operations.
* @monitor_work: Hardware monitor workitem
* @biu_lock: BIU (bus interface unit) lock
* @last_irq_cpu: Last CPU to handle a possible test interrupt. This
@@ -1129,8 +1130,6 @@ struct efx_nic {
unsigned int phy_type;
void *phy_data;
- struct mdio_if_info mdio;
- unsigned int mdio_bus;
enum efx_phy_mode phy_mode;
__ETHTOOL_DECLARE_LINK_MODE_MASK(link_advertising);
@@ -1189,6 +1188,7 @@ struct efx_nic {
struct devlink_port *dl_port;
unsigned int mem_bar;
u32 reg_base;
+ struct mutex reflash_mutex;
/* The following fields may be written more often */
@@ -1381,6 +1381,8 @@ struct efx_udp_tunnel {
* @can_rx_scatter: NIC is able to scatter packets to multiple buffers
* @always_rx_scatter: NIC will always scatter packets to multiple buffers
* @option_descriptors: NIC supports TX option descriptors
+ * @flash_auto_partition: firmware flash uses AUTO partition, driver does
+ * not need to perform image parsing
* @min_interrupt_mode: Lowest capability interrupt mode supported
* from &enum efx_int_mode.
* @timer_period_max: Maximum period of interrupt timer (in ticks)
@@ -1557,6 +1559,7 @@ struct efx_nic_type {
bool can_rx_scatter;
bool always_rx_scatter;
bool option_descriptors;
+ bool flash_auto_partition;
unsigned int min_interrupt_mode;
unsigned int timer_period_max;
netdev_features_t offload_features;
diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c
index ab358fe13e1d..f4f75299dfa9 100644
--- a/drivers/net/ethernet/sfc/rx_common.c
+++ b/drivers/net/ethernet/sfc/rx_common.c
@@ -285,7 +285,7 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
"shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));
- del_timer_sync(&rx_queue->slow_fill);
+ timer_delete_sync(&rx_queue->slow_fill);
if (rx_queue->grant_credits)
flush_work(&rx_queue->grant_work);
@@ -352,7 +352,8 @@ void efx_free_rx_buffers(struct efx_rx_queue *rx_queue,
void efx_rx_slow_fill(struct timer_list *t)
{
- struct efx_rx_queue *rx_queue = from_timer(rx_queue, t, slow_fill);
+ struct efx_rx_queue *rx_queue = timer_container_of(rx_queue, t,
+ slow_fill);
/* Post an event to cause NAPI to run and refill the queue */
efx_nic_generate_fill_event(rx_queue);
@@ -897,7 +898,7 @@ static void efx_filter_rfs_work(struct work_struct *data)
/* Release references */
clear_bit(slot_idx, &efx->rps_slot_map);
- dev_put(req->net_dev);
+ netdev_put(req->net_dev, &req->net_dev_tracker);
}
int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
@@ -989,7 +990,8 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
}
/* Queue the request */
- dev_hold(req->net_dev = net_dev);
+ req->net_dev = net_dev;
+ netdev_hold(req->net_dev, &req->net_dev_tracker, GFP_ATOMIC);
INIT_WORK(&req->work, efx_filter_rfs_work);
req->rxq_index = rxq_index;
req->flow_id = flow_id;
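The conversions above replace from_timer() with timer_container_of() and del_timer_sync() with timer_delete_sync(); the behaviour is unchanged, only the spelling of the timer API. A self-contained sketch of the new-style pattern, with illustrative struct and function names:

	/* Hedged sketch of the current timer API spelling; names are illustrative. */
	#include <linux/timer.h>

	struct demo_queue {
		struct timer_list slow_fill;
	};

	static void demo_slow_fill(struct timer_list *t)
	{
		struct demo_queue *q = timer_container_of(q, t, slow_fill);

		/* refill work would go here */
		(void)q;
	}

	static void demo_queue_init(struct demo_queue *q)
	{
		timer_setup(&q->slow_fill, demo_slow_fill, 0);
		mod_timer(&q->slow_fill, jiffies + HZ);
	}

	static void demo_queue_fini(struct demo_queue *q)
	{
		timer_delete_sync(&q->slow_fill);	/* replaces del_timer_sync() */
	}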
diff --git a/drivers/net/ethernet/sfc/siena/mcdi.c b/drivers/net/ethernet/sfc/siena/mcdi.c
index 3f7899daa86a..c8f0fb43e285 100644
--- a/drivers/net/ethernet/sfc/siena/mcdi.c
+++ b/drivers/net/ethernet/sfc/siena/mcdi.c
@@ -534,7 +534,7 @@ static bool efx_mcdi_complete_async(struct efx_mcdi_iface *mcdi, bool timeout)
* of it aborting the next request.
*/
if (!timeout)
- del_timer_sync(&mcdi->async_timer);
+ timer_delete_sync(&mcdi->async_timer);
spin_lock(&mcdi->async_lock);
async = list_first_entry(&mcdi->async_list,
@@ -609,7 +609,7 @@ static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno,
static void efx_mcdi_timeout_async(struct timer_list *t)
{
- struct efx_mcdi_iface *mcdi = from_timer(mcdi, t, async_timer);
+ struct efx_mcdi_iface *mcdi = timer_container_of(mcdi, t, async_timer);
efx_mcdi_complete_async(mcdi, true);
}
@@ -1145,7 +1145,7 @@ void efx_siena_mcdi_flush_async(struct efx_nic *efx)
/* We must be in poll or fail mode so no more requests can be queued */
BUG_ON(mcdi->mode == MCDI_MODE_EVENTS);
- del_timer_sync(&mcdi->async_timer);
+ timer_delete_sync(&mcdi->async_timer);
/* If a request is still running, make sure we give the MC
* time to complete it so that the response won't overwrite our
diff --git a/drivers/net/ethernet/sfc/siena/net_driver.h b/drivers/net/ethernet/sfc/siena/net_driver.h
index 9785eff10607..2be3bad3c993 100644
--- a/drivers/net/ethernet/sfc/siena/net_driver.h
+++ b/drivers/net/ethernet/sfc/siena/net_driver.h
@@ -753,6 +753,7 @@ struct efx_arfs_rule {
/**
* struct efx_async_filter_insertion - Request to asynchronously insert a filter
* @net_dev: Reference to the netdevice
+ * @net_dev_tracker: reference tracker entry for @net_dev
* @spec: The filter to insert
* @work: Workitem for this request
* @rxq_index: Identifies the channel for which this request was made
@@ -760,6 +761,7 @@ struct efx_arfs_rule {
*/
struct efx_async_filter_insertion {
struct net_device *net_dev;
+ netdevice_tracker net_dev_tracker;
struct efx_filter_spec spec;
struct work_struct work;
u16 rxq_index;
diff --git a/drivers/net/ethernet/sfc/siena/rx_common.c b/drivers/net/ethernet/sfc/siena/rx_common.c
index 082e35c6caaa..98d27174015d 100644
--- a/drivers/net/ethernet/sfc/siena/rx_common.c
+++ b/drivers/net/ethernet/sfc/siena/rx_common.c
@@ -284,7 +284,7 @@ void efx_siena_fini_rx_queue(struct efx_rx_queue *rx_queue)
netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
"shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));
- del_timer_sync(&rx_queue->slow_fill);
+ timer_delete_sync(&rx_queue->slow_fill);
/* Release RX buffers from the current read ptr to the write ptr */
if (rx_queue->buffer) {
@@ -349,7 +349,8 @@ void efx_siena_free_rx_buffers(struct efx_rx_queue *rx_queue,
void efx_siena_rx_slow_fill(struct timer_list *t)
{
- struct efx_rx_queue *rx_queue = from_timer(rx_queue, t, slow_fill);
+ struct efx_rx_queue *rx_queue = timer_container_of(rx_queue, t,
+ slow_fill);
/* Post an event to cause NAPI to run and refill the queue */
efx_nic_generate_fill_event(rx_queue);
@@ -888,7 +889,7 @@ static void efx_filter_rfs_work(struct work_struct *data)
/* Release references */
clear_bit(slot_idx, &efx->rps_slot_map);
- dev_put(req->net_dev);
+ netdev_put(req->net_dev, &req->net_dev_tracker);
}
int efx_siena_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
@@ -980,7 +981,8 @@ int efx_siena_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
}
/* Queue the request */
- dev_hold(req->net_dev = net_dev);
+ req->net_dev = net_dev;
+ netdev_hold(req->net_dev, &req->net_dev_tracker, GFP_ATOMIC);
INIT_WORK(&req->work, efx_filter_rfs_work);
req->rxq_index = rxq_index;
req->flow_id = flow_id;
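Both rx_common.c files also switch the RFS request from dev_hold()/dev_put() to netdev_hold()/netdev_put() with a netdevice_tracker, so any leaked reference can be attributed to its owner. A minimal sketch of the pairing, using illustrative names:

	/* Hedged sketch of tracked netdev references; the struct is illustrative. */
	#include <linux/netdevice.h>

	struct demo_request {
		struct net_device *net_dev;
		netdevice_tracker net_dev_tracker;
	};

	static void demo_request_start(struct demo_request *req,
				       struct net_device *dev)
	{
		req->net_dev = dev;
		netdev_hold(req->net_dev, &req->net_dev_tracker, GFP_ATOMIC);
	}

	static void demo_request_finish(struct demo_request *req)
	{
		netdev_put(req->net_dev, &req->net_dev_tracker);
	}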
diff --git a/drivers/net/ethernet/sfc/tc.c b/drivers/net/ethernet/sfc/tc.c
index 0d93164988fc..fa94aa3cd5fe 100644
--- a/drivers/net/ethernet/sfc/tc.c
+++ b/drivers/net/ethernet/sfc/tc.c
@@ -1043,7 +1043,7 @@ static int efx_tc_flower_handle_lhs_actions(struct efx_nic *efx,
return -EOPNOTSUPP;
}
if (fa->ct.action) {
- NL_SET_ERR_MSG_FMT_MOD(extack, "Unhandled ct.action %u for LHS rule\n",
+ NL_SET_ERR_MSG_FMT_MOD(extack, "Unhandled ct.action %u for LHS rule",
fa->ct.action);
return -EOPNOTSUPP;
}
@@ -1056,7 +1056,7 @@ static int efx_tc_flower_handle_lhs_actions(struct efx_nic *efx,
act->zone = ct_zone;
break;
default:
- NL_SET_ERR_MSG_FMT_MOD(extack, "Unhandled action %u for LHS rule\n",
+ NL_SET_ERR_MSG_FMT_MOD(extack, "Unhandled action %u for LHS rule",
fa->id);
return -EOPNOTSUPP;
}
@@ -1581,7 +1581,7 @@ static int efx_tc_flower_replace_foreign_lhs(struct efx_nic *efx,
type = efx_tc_indr_netdev_type(net_dev);
if (type == EFX_ENCAP_TYPE_NONE) {
- NL_SET_ERR_MSG_MOD(extack, "Egress encap match on unsupported tunnel device\n");
+ NL_SET_ERR_MSG_MOD(extack, "Egress encap match on unsupported tunnel device");
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/sfc/tc_conntrack.c b/drivers/net/ethernet/sfc/tc_conntrack.c
index d90206f27161..c0603f54cec3 100644
--- a/drivers/net/ethernet/sfc/tc_conntrack.c
+++ b/drivers/net/ethernet/sfc/tc_conntrack.c
@@ -16,7 +16,7 @@ static int efx_tc_flow_block(enum tc_setup_type type, void *type_data,
void *cb_priv);
static const struct rhashtable_params efx_tc_ct_zone_ht_params = {
- .key_len = offsetof(struct efx_tc_ct_zone, linkage),
+ .key_len = sizeof_field(struct efx_tc_ct_zone, zone),
.key_offset = 0,
.head_offset = offsetof(struct efx_tc_ct_zone, linkage),
};
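The previous key_len of offsetof(struct efx_tc_ct_zone, linkage) made the hash cover every byte up to the rhash_head, including padding and fields that are not part of the lookup key; keying on sizeof_field(..., zone) with key_offset 0 limits it to the zone itself. A sketch of the equivalent layout, assuming the key is the first member of the struct:

	/* Hedged sketch: rhashtable keyed on a leading 'zone' field only. */
	#include <linux/rhashtable.h>

	struct demo_ct_zone {
		u16 zone;			/* lookup key, must stay first */
		struct rhash_head linkage;
		/* other state that must not influence the hash */
	};

	static const struct rhashtable_params demo_ct_zone_ht_params = {
		.key_len	= sizeof_field(struct demo_ct_zone, zone),
		.key_offset	= 0,
		.head_offset	= offsetof(struct demo_ct_zone, linkage),
	};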
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index 4535579018c9..39731069d99e 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -570,7 +570,7 @@ static inline void ioc3_setup_duplex(struct ioc3_private *ip)
static void ioc3_timer(struct timer_list *t)
{
- struct ioc3_private *ip = from_timer(ip, t, ioc3_timer);
+ struct ioc3_private *ip = timer_container_of(ip, t, ioc3_timer);
/* Print the link status if it has changed */
mii_check_media(&ip->mii, 1, 0);
@@ -718,7 +718,7 @@ static void ioc3_init(struct net_device *dev)
struct ioc3_private *ip = netdev_priv(dev);
struct ioc3_ethregs *regs = ip->regs;
- del_timer_sync(&ip->ioc3_timer); /* Kill if running */
+ timer_delete_sync(&ip->ioc3_timer); /* Kill if running */
writel(EMCR_RST, &regs->emcr); /* Reset */
readl(&regs->emcr); /* Flush WB */
@@ -801,7 +801,7 @@ static int ioc3_close(struct net_device *dev)
{
struct ioc3_private *ip = netdev_priv(dev);
- del_timer_sync(&ip->ioc3_timer);
+ timer_delete_sync(&ip->ioc3_timer);
netif_stop_queue(dev);
@@ -950,7 +950,7 @@ static int ioc3eth_probe(struct platform_device *pdev)
return 0;
out_stop:
- del_timer_sync(&ip->ioc3_timer);
+ timer_delete_sync(&ip->ioc3_timer);
if (ip->rxr)
dma_free_coherent(ip->dma_dev, RX_RING_SIZE, ip->rxr,
ip->rxr_dma);
@@ -971,7 +971,7 @@ static void ioc3eth_remove(struct platform_device *pdev)
dma_free_coherent(ip->dma_dev, TX_RING_SIZE + SZ_16K - 1, ip->tx_ring, ip->txr_dma);
unregister_netdev(dev);
- del_timer_sync(&ip->ioc3_timer);
+ timer_delete_sync(&ip->ioc3_timer);
free_netdev(dev);
}
diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
index dda4e488c77a..15e46e6ac262 100644
--- a/drivers/net/ethernet/sis/sis190.c
+++ b/drivers/net/ethernet/sis/sis190.c
@@ -758,7 +758,7 @@ static irqreturn_t sis190_irq(int irq, void *__dev)
if (status & LinkChange) {
netif_info(tp, intr, dev, "link change\n");
- del_timer(&tp->timer);
+ timer_delete(&tp->timer);
schedule_work(&tp->phy_task);
}
@@ -1023,7 +1023,7 @@ out_unlock:
static void sis190_phy_timer(struct timer_list *t)
{
- struct sis190_private *tp = from_timer(tp, t, timer);
+ struct sis190_private *tp = timer_container_of(tp, t, timer);
struct net_device *dev = tp->dev;
if (likely(netif_running(dev)))
@@ -1034,7 +1034,7 @@ static inline void sis190_delete_timer(struct net_device *dev)
{
struct sis190_private *tp = netdev_priv(dev);
- del_timer_sync(&tp->timer);
+ timer_delete_sync(&tp->timer);
}
static inline void sis190_request_timer(struct net_device *dev)
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index 85b850372efe..b461918dc5f4 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -468,7 +468,7 @@ static int sis900_probe(struct pci_dev *pci_dev,
SET_NETDEV_DEV(net_dev, &pci_dev->dev);
/* We do a request_region() to register /proc/ioports info. */
- ret = pci_request_regions(pci_dev, "sis900");
+ ret = pcim_request_all_regions(pci_dev, "sis900");
if (ret)
goto err_out;
@@ -1314,7 +1314,8 @@ static void sis630_set_eq(struct net_device *net_dev, u8 revision)
static void sis900_timer(struct timer_list *t)
{
- struct sis900_private *sis_priv = from_timer(sis_priv, t, timer);
+ struct sis900_private *sis_priv = timer_container_of(sis_priv, t,
+ timer);
struct net_device *net_dev = sis_priv->mii_info.dev;
struct mii_phy *mii_phy = sis_priv->mii;
static const int next_tick = 5*HZ;
@@ -1983,7 +1984,7 @@ static int sis900_close(struct net_device *net_dev)
/* Stop the chip's Tx and Rx Status Machine */
sw32(cr, RxDIS | TxDIS | sr32(cr));
- del_timer(&sis_priv->timer);
+ timer_delete(&sis_priv->timer);
free_irq(pdev->irq, net_dev);
diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c
index 013e90d69182..45f703fe0e5a 100644
--- a/drivers/net/ethernet/smsc/epic100.c
+++ b/drivers/net/ethernet/smsc/epic100.c
@@ -850,7 +850,7 @@ static void check_media(struct net_device *dev)
static void epic_timer(struct timer_list *t)
{
- struct epic_private *ep = from_timer(ep, t, timer);
+ struct epic_private *ep = timer_container_of(ep, t, timer);
struct net_device *dev = ep->mii.dev;
void __iomem *ioaddr = ep->ioaddr;
int next_tick = 5*HZ;
@@ -1292,7 +1292,7 @@ static int epic_close(struct net_device *dev)
netdev_dbg(dev, "Shutting down ethercard, status was %2.2x.\n",
er32(INTSTAT));
- del_timer_sync(&ep->timer);
+ timer_delete_sync(&ep->timer);
epic_disable_int(dev, ep);
diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c
index 86e3ec25df07..cc0c75694351 100644
--- a/drivers/net/ethernet/smsc/smc91c92_cs.c
+++ b/drivers/net/ethernet/smsc/smc91c92_cs.c
@@ -1105,7 +1105,7 @@ static int smc_close(struct net_device *dev)
outw(CTL_POWERDOWN, ioaddr + CONTROL );
link->open--;
- del_timer_sync(&smc->media);
+ timer_delete_sync(&smc->media);
return 0;
} /* smc_close */
@@ -1713,7 +1713,7 @@ static void smc_reset(struct net_device *dev)
static void media_check(struct timer_list *t)
{
- struct smc_private *smc = from_timer(smc, t, media);
+ struct smc_private *smc = timer_container_of(smc, t, media);
struct net_device *dev = smc->mii_if.dev;
unsigned int ioaddr = dev->base_addr;
u_short i, media, saved_bank;
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index f539813878f5..2e1106097965 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -43,7 +43,6 @@
#include <linux/smsc911x.h>
#include <linux/device.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/of_net.h>
#include <linux/acpi.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index dc99821c6226..ee890de69ffe 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -970,7 +970,7 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
struct netsec_de *de = dring->vaddr + (DESC_SZ * idx);
struct netsec_desc *desc = &dring->desc[idx];
struct page *page = virt_to_page(desc->addr);
- u32 xdp_result = NETSEC_XDP_PASS;
+ u32 metasize, xdp_result = NETSEC_XDP_PASS;
struct sk_buff *skb = NULL;
u16 pkt_len, desc_len;
dma_addr_t dma_handle;
@@ -1019,7 +1019,7 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
prefetch(desc->addr);
xdp_prepare_buff(&xdp, desc->addr, NETSEC_RXBUF_HEADROOM,
- pkt_len, false);
+ pkt_len, true);
if (xdp_prog) {
xdp_result = netsec_run_xdp(priv, xdp_prog, &xdp);
@@ -1048,6 +1048,9 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
skb_reserve(skb, xdp.data - xdp.data_hard_start);
skb_put(skb, xdp.data_end - xdp.data);
+ metasize = xdp.data - xdp.data_meta;
+ if (metasize)
+ skb_metadata_set(skb, metasize);
skb->protocol = eth_type_trans(skb, priv->ndev);
if (priv->rx_cksum_offload_flag &&
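Passing true as the last argument of xdp_prepare_buff() tells the XDP core that metadata may be placed in front of the packet; whatever the program leaves there is then carried into the skb with skb_metadata_set(). A condensed sketch of the receive-path pattern, assuming the skb was built over the same buffer; names are illustrative:

	/* Hedged sketch: propagate XDP metadata to the skb; names illustrative. */
	#include <linux/skbuff.h>
	#include <linux/etherdevice.h>
	#include <net/xdp.h>

	static void demo_finish_rx_skb(struct sk_buff *skb, struct xdp_buff *xdp,
				       struct net_device *ndev)
	{
		u32 metasize = xdp->data - xdp->data_meta;

		skb_reserve(skb, xdp->data - xdp->data_hard_start);
		skb_put(skb, xdp->data_end - xdp->data);
		if (metasize)
			skb_metadata_set(skb, metasize);
		skb->protocol = eth_type_trans(skb, ndev);
	}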
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 6658536a4e17..67fa879b1e52 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -3,6 +3,7 @@ config STMMAC_ETH
tristate "STMicroelectronics Multi-Gigabit Ethernet driver"
depends on HAS_IOMEM && HAS_DMA
depends on PTP_1588_CLOCK_OPTIONAL
+ depends on ETHTOOL_NETLINK
select MII
select PCS_XPCS
select PAGE_POOL
@@ -131,6 +132,17 @@ config DWMAC_QCOM_ETHQOS
This selects the Qualcomm ETHQOS glue layer support for the
stmmac device driver.
+config DWMAC_RENESAS_GBETH
+ tristate "Renesas RZ/V2H(P) GBETH support"
+ default ARCH_RENESAS
+ depends on OF && (ARCH_RENESAS || COMPILE_TEST)
+ help
+ Support for Gigabit Ethernet Interface (GBETH) on Renesas
+ RZ/V2H(P) SoCs.
+
+ This selects the Renesas RZ/V2H(P) SoC specific glue layer support
+ for the stmmac device driver.
+
config DWMAC_ROCKCHIP
tristate "Rockchip dwmac support"
default ARCH_ROCKCHIP
@@ -154,6 +166,18 @@ config DWMAC_RZN1
the stmmac device driver. This support can make use of a custom MII
converter PCS device.
+config DWMAC_S32
+ tristate "NXP S32G/S32R GMAC support"
+ default ARCH_S32
+ depends on OF && (ARCH_S32 || COMPILE_TEST)
+ help
+ Support for the ethernet controller on NXP S32CC SoCs.
+
+ This selects NXP SoC glue layer support for the stmmac
+ device driver. This driver is used for the S32CC series
+ SoCs' GMAC ethernet controller, i.e. S32G2xx, S32G3xx and
+ S32R45.
+
config DWMAC_SOCFPGA
tristate "SOCFPGA dwmac support"
default ARCH_INTEL_SOCFPGA
@@ -169,6 +193,17 @@ config DWMAC_SOCFPGA
for the stmmac device driver. This driver is used for
arria5 and cyclone5 FPGA SoCs.
+config DWMAC_SOPHGO
+ tristate "Sophgo dwmac support"
+ depends on OF && (ARCH_SOPHGO || COMPILE_TEST)
+ default m if ARCH_SOPHGO
+ help
+ Support for ethernet controllers on Sophgo RISC-V SoCs.
+
+ This selects the Sophgo SoC specific glue layer support
+ for the stmmac device driver. This driver is used for the
+ ethernet controllers on various Sophgo SoCs.
+
config DWMAC_STARFIVE
tristate "StarFive dwmac support"
depends on OF && (ARCH_STARFIVE || COMPILE_TEST)
@@ -295,6 +330,7 @@ config DWMAC_INTEL
default X86
depends on X86 && STMMAC_ETH && PCI
depends on COMMON_CLK
+ depends on ACPI
help
This selects the Intel platform specific bus support for the
stmmac driver. This driver is used for Intel Quark/EHL/TGL.
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index 2389fd261344..b591d93f8503 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -6,7 +6,7 @@ stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \
mmc_core.o stmmac_hwtstamp.o stmmac_ptp.o dwmac4_descs.o \
dwmac4_dma.o dwmac4_lib.o dwmac4_core.o dwmac5.o hwif.o \
stmmac_tc.o dwxgmac2_core.o dwxgmac2_dma.o dwxgmac2_descs.o \
- stmmac_xdp.o stmmac_est.o stmmac_fpe.o \
+ stmmac_xdp.o stmmac_est.o stmmac_fpe.o stmmac_vlan.o \
$(stmmac-y)
stmmac-$(CONFIG_STMMAC_SELFTESTS) += stmmac_selftests.o
@@ -20,9 +20,12 @@ obj-$(CONFIG_DWMAC_LPC18XX) += dwmac-lpc18xx.o
obj-$(CONFIG_DWMAC_MEDIATEK) += dwmac-mediatek.o
obj-$(CONFIG_DWMAC_MESON) += dwmac-meson.o dwmac-meson8b.o
obj-$(CONFIG_DWMAC_QCOM_ETHQOS) += dwmac-qcom-ethqos.o
+obj-$(CONFIG_DWMAC_RENESAS_GBETH) += dwmac-renesas-gbeth.o
obj-$(CONFIG_DWMAC_ROCKCHIP) += dwmac-rk.o
obj-$(CONFIG_DWMAC_RZN1) += dwmac-rzn1.o
+obj-$(CONFIG_DWMAC_S32) += dwmac-s32.o
obj-$(CONFIG_DWMAC_SOCFPGA) += dwmac-altr-socfpga.o
+obj-$(CONFIG_DWMAC_SOPHGO) += dwmac-sophgo.o
obj-$(CONFIG_DWMAC_STARFIVE) += dwmac-starfive.o
obj-$(CONFIG_DWMAC_STI) += dwmac-sti.o
obj-$(CONFIG_DWMAC_STM32) += dwmac-stm32.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 1367fa5c9b8e..ea5da5793362 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -101,8 +101,8 @@ struct stmmac_rxq_stats {
/* Updates on each CPU protected by not allowing nested irqs. */
struct stmmac_pcpu_stats {
struct u64_stats_sync syncp;
- u64_stats_t rx_normal_irq_n[MTL_MAX_TX_QUEUES];
- u64_stats_t tx_normal_irq_n[MTL_MAX_RX_QUEUES];
+ u64_stats_t rx_normal_irq_n[MTL_MAX_RX_QUEUES];
+ u64_stats_t tx_normal_irq_n[MTL_MAX_TX_QUEUES];
};
/* Extra statistic and debug information exposed by ethtool */
@@ -257,6 +257,8 @@ struct stmmac_safety_stats {
#define CSR_F_150M 150000000
#define CSR_F_250M 250000000
#define CSR_F_300M 300000000
+#define CSR_F_500M 500000000
+#define CSR_F_800M 800000000
#define MAC_CSR_H_FRQ_MASK 0x20
@@ -528,6 +530,20 @@ struct dma_features {
#define STMMAC_DEFAULT_TWT_LS 0x1E
#define STMMAC_ET_MAX 0xFFFFF
+/* Common LPI register bits */
+#define LPI_CTRL_STATUS_LPITCSE BIT(21) /* LPI Tx Clock Stop Enable, gmac4, xgmac2 only */
+#define LPI_CTRL_STATUS_LPIATE BIT(20) /* LPI Timer Enable, gmac4 only */
+#define LPI_CTRL_STATUS_LPITXA BIT(19) /* Enable LPI TX Automate */
+#define LPI_CTRL_STATUS_PLSEN BIT(18) /* Enable PHY Link Status */
+#define LPI_CTRL_STATUS_PLS BIT(17) /* PHY Link Status */
+#define LPI_CTRL_STATUS_LPIEN BIT(16) /* LPI Enable */
+#define LPI_CTRL_STATUS_RLPIST BIT(9) /* Receive LPI state, gmac1000 only? */
+#define LPI_CTRL_STATUS_TLPIST BIT(8) /* Transmit LPI state, gmac1000 only? */
+#define LPI_CTRL_STATUS_RLPIEX BIT(3) /* Receive LPI Exit */
+#define LPI_CTRL_STATUS_RLPIEN BIT(2) /* Receive LPI Entry */
+#define LPI_CTRL_STATUS_TLPIEX BIT(1) /* Transmit LPI Exit */
+#define LPI_CTRL_STATUS_TLPIEN BIT(0) /* Transmit LPI Entry */
+
#define STMMAC_CHAIN_MODE 0x1
#define STMMAC_RING_MODE 0x2
@@ -543,18 +559,8 @@ struct dma_features {
#define STMMAC_VLAN_INSERT 0x2
#define STMMAC_VLAN_REPLACE 0x3
-extern const struct stmmac_desc_ops enh_desc_ops;
-extern const struct stmmac_desc_ops ndesc_ops;
-
struct mac_device_info;
-extern const struct stmmac_hwtimestamp stmmac_ptp;
-extern const struct stmmac_hwtimestamp dwmac1000_ptp;
-extern const struct stmmac_mode_ops dwmac4_ring_mode_ops;
-
-extern const struct ptp_clock_info stmmac_ptp_clock_ops;
-extern const struct ptp_clock_info dwmac1000_ptp_clock_ops;
-
struct mac_link {
u32 caps;
u32 speed_mask;
@@ -596,6 +602,7 @@ struct mac_device_info {
const struct stmmac_tc_ops *tc;
const struct stmmac_mmc_ops *mmc;
const struct stmmac_est_ops *est;
+ const struct stmmac_vlan_ops *vlan;
struct dw_xpcs *xpcs;
struct phylink_pcs *phylink_pcs;
struct mii_regs mii; /* MII register Addresses */
@@ -641,8 +648,4 @@ void stmmac_dwmac4_set_mac(void __iomem *ioaddr, bool enable);
void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
-extern const struct stmmac_mode_ops ring_mode_ops;
-extern const struct stmmac_mode_ops chain_mode_ops;
-extern const struct stmmac_desc_ops dwmac4_desc_ops;
-
#endif /* __COMMON_H__ */
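common.h now carries the LPI control/status bit definitions shared by the MAC cores. A sketch of a read-modify-write using them, assuming the caller passes the core-specific LPI control/status register address:

	/* Hedged sketch: enable LPI via the shared bit definitions above; the
	 * register offset is core-specific and supplied by the caller.
	 */
	#include <linux/io.h>

	static void demo_enable_lpi(void __iomem *lpi_ctrl_status)
	{
		u32 value = readl(lpi_ctrl_status);

		value |= LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA |
			 LPI_CTRL_STATUS_PLSEN;
		writel(value, lpi_ctrl_status);
	}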
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
index ef99ef3f1ab4..84072c8ed741 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
@@ -59,18 +59,18 @@ static void anarion_gmac_exit(struct platform_device *pdev, void *priv)
gmac_write_reg(gmac, GMAC_RESET_CONTROL_REG, 1);
}
-static struct anarion_gmac *anarion_config_dt(struct platform_device *pdev)
+static struct anarion_gmac *
+anarion_config_dt(struct platform_device *pdev,
+ struct plat_stmmacenet_data *plat_dat)
{
struct anarion_gmac *gmac;
- phy_interface_t phy_mode;
void __iomem *ctl_block;
- int err;
ctl_block = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(ctl_block)) {
- err = PTR_ERR(ctl_block);
- dev_err(&pdev->dev, "Cannot get reset region (%d)!\n", err);
- return ERR_PTR(err);
+ dev_err(&pdev->dev, "Cannot get reset region (%pe)!\n",
+ ctl_block);
+ return ERR_CAST(ctl_block);
}
gmac = devm_kzalloc(&pdev->dev, sizeof(*gmac), GFP_KERNEL);
@@ -79,21 +79,11 @@ static struct anarion_gmac *anarion_config_dt(struct platform_device *pdev)
gmac->ctl_block = ctl_block;
- err = of_get_phy_mode(pdev->dev.of_node, &phy_mode);
- if (err)
- return ERR_PTR(err);
-
- switch (phy_mode) {
- case PHY_INTERFACE_MODE_RGMII:
- fallthrough;
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
+ if (phy_interface_mode_is_rgmii(plat_dat->phy_interface)) {
gmac->phy_intf_sel = GMAC_CONFIG_INTF_RGMII;
- break;
- default:
- dev_err(&pdev->dev, "Unsupported phy-mode (%d)\n",
- phy_mode);
+ } else {
+ dev_err(&pdev->dev, "Unsupported phy-mode (%s)\n",
+ phy_modes(plat_dat->phy_interface));
return ERR_PTR(-ENOTSUPP);
}
@@ -111,20 +101,19 @@ static int anarion_dwmac_probe(struct platform_device *pdev)
if (ret)
return ret;
- gmac = anarion_config_dt(pdev);
- if (IS_ERR(gmac))
- return PTR_ERR(gmac);
-
plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
+ gmac = anarion_config_dt(pdev, plat_dat);
+ if (IS_ERR(gmac))
+ return PTR_ERR(gmac);
+
plat_dat->init = anarion_gmac_init;
plat_dat->exit = anarion_gmac_exit;
- anarion_gmac_init(pdev, gmac);
plat_dat->bsp_priv = gmac;
- return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res);
}
static const struct of_device_id anarion_dwmac_match[] = {
@@ -135,7 +124,6 @@ MODULE_DEVICE_TABLE(of, anarion_dwmac_match);
static struct platform_driver anarion_dwmac_driver = {
.probe = anarion_dwmac_probe,
- .remove = stmmac_pltfr_remove,
.driver = {
.name = "anarion-dwmac",
.pm = &stmmac_pltfr_pm_ops,
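phy_interface_mode_is_rgmii() is true for RGMII and all of its internal-delay variants, so the four-case switch collapses into a single test. A short sketch of the same check in isolation, with an illustrative error path:

	/* Hedged sketch: accept any RGMII variant, reject everything else. */
	#include <linux/device.h>
	#include <linux/phy.h>

	static int demo_check_phy_mode(struct device *dev, phy_interface_t mode)
	{
		if (phy_interface_mode_is_rgmii(mode))
			return 0;

		dev_err(dev, "Unsupported phy-mode (%s)\n", phy_modes(mode));
		return -EOPNOTSUPP;
	}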
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
index 83290e707df5..09ae16e026eb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
@@ -29,10 +29,6 @@ struct tegra_eqos {
void __iomem *regs;
struct reset_control *rst;
- struct clk *clk_master;
- struct clk *clk_slave;
- struct clk *clk_tx;
- struct clk *clk_rx;
struct gpio_desc *reset;
};
@@ -46,7 +42,9 @@ static int dwc_eth_dwmac_config_dt(struct platform_device *pdev,
u32 a_index = 0;
if (!plat_dat->axi) {
- plat_dat->axi = kzalloc(sizeof(struct stmmac_axi), GFP_KERNEL);
+ plat_dat->axi = devm_kzalloc(&pdev->dev,
+ sizeof(struct stmmac_axi),
+ GFP_KERNEL);
if (!plat_dat->axi)
return -ENOMEM;
@@ -123,49 +121,9 @@ static int dwc_qos_probe(struct platform_device *pdev,
struct plat_stmmacenet_data *plat_dat,
struct stmmac_resources *stmmac_res)
{
- int err;
-
- plat_dat->stmmac_clk = devm_clk_get(&pdev->dev, "apb_pclk");
- if (IS_ERR(plat_dat->stmmac_clk)) {
- dev_err(&pdev->dev, "apb_pclk clock not found.\n");
- return PTR_ERR(plat_dat->stmmac_clk);
- }
-
- err = clk_prepare_enable(plat_dat->stmmac_clk);
- if (err < 0) {
- dev_err(&pdev->dev, "failed to enable apb_pclk clock: %d\n",
- err);
- return err;
- }
-
- plat_dat->pclk = devm_clk_get(&pdev->dev, "phy_ref_clk");
- if (IS_ERR(plat_dat->pclk)) {
- dev_err(&pdev->dev, "phy_ref_clk clock not found.\n");
- err = PTR_ERR(plat_dat->pclk);
- goto disable;
- }
-
- err = clk_prepare_enable(plat_dat->pclk);
- if (err < 0) {
- dev_err(&pdev->dev, "failed to enable phy_ref clock: %d\n",
- err);
- goto disable;
- }
+ plat_dat->pclk = stmmac_pltfr_find_clk(plat_dat, "phy_ref_clk");
return 0;
-
-disable:
- clk_disable_unprepare(plat_dat->stmmac_clk);
- return err;
-}
-
-static void dwc_qos_remove(struct platform_device *pdev)
-{
- struct net_device *ndev = platform_get_drvdata(pdev);
- struct stmmac_priv *priv = netdev_priv(ndev);
-
- clk_disable_unprepare(priv->plat->pclk);
- clk_disable_unprepare(priv->plat->stmmac_clk);
}
#define SDMEMCOMPPADCTRL 0x8800
@@ -178,35 +136,34 @@ static void dwc_qos_remove(struct platform_device *pdev)
#define AUTO_CAL_STATUS 0x880c
#define AUTO_CAL_STATUS_ACTIVE BIT(31)
-static void tegra_eqos_fix_speed(void *priv, unsigned int speed, unsigned int mode)
+static void tegra_eqos_fix_speed(void *bsp_priv, int speed, unsigned int mode)
{
- struct tegra_eqos *eqos = priv;
- unsigned long rate = 125000000;
+ struct tegra_eqos *eqos = bsp_priv;
bool needs_calibration = false;
+ struct stmmac_priv *priv;
u32 value;
int err;
switch (speed) {
case SPEED_1000:
- needs_calibration = true;
- rate = 125000000;
- break;
-
case SPEED_100:
needs_calibration = true;
- rate = 25000000;
- break;
+ fallthrough;
case SPEED_10:
- rate = 2500000;
break;
default:
- dev_err(eqos->dev, "invalid speed %u\n", speed);
+ dev_err(eqos->dev, "invalid speed %d\n", speed);
break;
}
if (needs_calibration) {
+ priv = netdev_priv(dev_get_drvdata(eqos->dev));
+
+ /* Calibration should be done with the MDIO bus idle */
+ mutex_lock(&priv->mii->mdio_lock);
+
/* calibrate */
value = readl(eqos->regs + SDMEMCOMPPADCTRL);
value |= SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD;
@@ -240,33 +197,17 @@ static void tegra_eqos_fix_speed(void *priv, unsigned int speed, unsigned int mo
value = readl(eqos->regs + SDMEMCOMPPADCTRL);
value &= ~SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD;
writel(value, eqos->regs + SDMEMCOMPPADCTRL);
+
+ mutex_unlock(&priv->mii->mdio_lock);
} else {
value = readl(eqos->regs + AUTO_CAL_CONFIG);
value &= ~AUTO_CAL_CONFIG_ENABLE;
writel(value, eqos->regs + AUTO_CAL_CONFIG);
}
-
- err = clk_set_rate(eqos->clk_tx, rate);
- if (err < 0)
- dev_err(eqos->dev, "failed to set TX rate: %d\n", err);
-}
-
-static int tegra_eqos_init(struct platform_device *pdev, void *priv)
-{
- struct tegra_eqos *eqos = priv;
- unsigned long rate;
- u32 value;
-
- rate = clk_get_rate(eqos->clk_slave);
-
- value = (rate / 1000000) - 1;
- writel(value, eqos->regs + GMAC_1US_TIC_COUNTER);
-
- return 0;
}
static int tegra_eqos_probe(struct platform_device *pdev,
- struct plat_stmmacenet_data *data,
+ struct plat_stmmacenet_data *plat_dat,
struct stmmac_resources *res)
{
struct device *dev = &pdev->dev;
@@ -283,59 +224,19 @@ static int tegra_eqos_probe(struct platform_device *pdev,
if (!is_of_node(dev->fwnode))
goto bypass_clk_reset_gpio;
- eqos->clk_master = devm_clk_get(&pdev->dev, "master_bus");
- if (IS_ERR(eqos->clk_master)) {
- err = PTR_ERR(eqos->clk_master);
- goto error;
- }
-
- err = clk_prepare_enable(eqos->clk_master);
- if (err < 0)
- goto error;
-
- eqos->clk_slave = devm_clk_get(&pdev->dev, "slave_bus");
- if (IS_ERR(eqos->clk_slave)) {
- err = PTR_ERR(eqos->clk_slave);
- goto disable_master;
- }
-
- data->stmmac_clk = eqos->clk_slave;
-
- err = clk_prepare_enable(eqos->clk_slave);
- if (err < 0)
- goto disable_master;
-
- eqos->clk_rx = devm_clk_get(&pdev->dev, "rx");
- if (IS_ERR(eqos->clk_rx)) {
- err = PTR_ERR(eqos->clk_rx);
- goto disable_slave;
- }
-
- err = clk_prepare_enable(eqos->clk_rx);
- if (err < 0)
- goto disable_slave;
-
- eqos->clk_tx = devm_clk_get(&pdev->dev, "tx");
- if (IS_ERR(eqos->clk_tx)) {
- err = PTR_ERR(eqos->clk_tx);
- goto disable_rx;
- }
-
- err = clk_prepare_enable(eqos->clk_tx);
- if (err < 0)
- goto disable_rx;
+ plat_dat->clk_tx_i = stmmac_pltfr_find_clk(plat_dat, "tx");
eqos->reset = devm_gpiod_get(&pdev->dev, "phy-reset", GPIOD_OUT_HIGH);
if (IS_ERR(eqos->reset)) {
err = PTR_ERR(eqos->reset);
- goto disable_tx;
+ return err;
}
usleep_range(2000, 4000);
gpiod_set_value(eqos->reset, 0);
/* MDIO bus was already reset just above */
- data->mdio_bus_data->needs_reset = false;
+ plat_dat->mdio_bus_data->needs_reset = false;
eqos->rst = devm_reset_control_get(&pdev->dev, "eqos");
if (IS_ERR(eqos->rst)) {
@@ -356,29 +257,17 @@ static int tegra_eqos_probe(struct platform_device *pdev,
usleep_range(2000, 4000);
bypass_clk_reset_gpio:
- data->fix_mac_speed = tegra_eqos_fix_speed;
- data->init = tegra_eqos_init;
- data->bsp_priv = eqos;
- data->flags |= STMMAC_FLAG_SPH_DISABLE;
-
- err = tegra_eqos_init(pdev, eqos);
- if (err < 0)
- goto reset;
+ plat_dat->fix_mac_speed = tegra_eqos_fix_speed;
+ plat_dat->set_clk_tx_rate = stmmac_set_clk_tx_rate;
+ plat_dat->bsp_priv = eqos;
+ plat_dat->flags |= STMMAC_FLAG_SPH_DISABLE |
+ STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP;
return 0;
-reset:
- reset_control_assert(eqos->rst);
+
reset_phy:
gpiod_set_value(eqos->reset, 1);
-disable_tx:
- clk_disable_unprepare(eqos->clk_tx);
-disable_rx:
- clk_disable_unprepare(eqos->clk_rx);
-disable_slave:
- clk_disable_unprepare(eqos->clk_slave);
-disable_master:
- clk_disable_unprepare(eqos->clk_master);
-error:
+
return err;
}
@@ -388,27 +277,29 @@ static void tegra_eqos_remove(struct platform_device *pdev)
reset_control_assert(eqos->rst);
gpiod_set_value(eqos->reset, 1);
- clk_disable_unprepare(eqos->clk_tx);
- clk_disable_unprepare(eqos->clk_rx);
- clk_disable_unprepare(eqos->clk_slave);
- clk_disable_unprepare(eqos->clk_master);
}
struct dwc_eth_dwmac_data {
int (*probe)(struct platform_device *pdev,
- struct plat_stmmacenet_data *data,
+ struct plat_stmmacenet_data *plat_dat,
struct stmmac_resources *res);
void (*remove)(struct platform_device *pdev);
+ const char *stmmac_clk_name;
};
static const struct dwc_eth_dwmac_data dwc_qos_data = {
.probe = dwc_qos_probe,
- .remove = dwc_qos_remove,
+ .stmmac_clk_name = "apb_pclk",
};
static const struct dwc_eth_dwmac_data tegra_eqos_data = {
.probe = tegra_eqos_probe,
.remove = tegra_eqos_remove,
+ .stmmac_clk_name = "slave_bus",
+};
+
+static const struct dwc_eth_dwmac_data fsd_eqos_data = {
+ .stmmac_clk_name = "slave_bus",
};
static int dwc_eth_dwmac_probe(struct platform_device *pdev)
@@ -439,9 +330,23 @@ static int dwc_eth_dwmac_probe(struct platform_device *pdev)
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
- ret = data->probe(pdev, plat_dat, &stmmac_res);
+ ret = devm_clk_bulk_get_all(&pdev->dev, &plat_dat->clks);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret, "Failed to retrieve all required clocks\n");
+ plat_dat->num_clks = ret;
+
+ ret = clk_bulk_prepare_enable(plat_dat->num_clks, plat_dat->clks);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Failed to enable clocks\n");
+
+ plat_dat->stmmac_clk = stmmac_pltfr_find_clk(plat_dat,
+ data->stmmac_clk_name);
+
+ if (data->probe)
+ ret = data->probe(pdev, plat_dat, &stmmac_res);
if (ret < 0) {
dev_err_probe(&pdev->dev, ret, "failed to probe subdriver\n");
+ clk_bulk_disable_unprepare(plat_dat->num_clks, plat_dat->clks);
return ret;
}
@@ -456,7 +361,8 @@ static int dwc_eth_dwmac_probe(struct platform_device *pdev)
return ret;
remove:
- data->remove(pdev);
+ if (data->remove)
+ data->remove(pdev);
return ret;
}
@@ -464,15 +370,21 @@ remove:
static void dwc_eth_dwmac_remove(struct platform_device *pdev)
{
const struct dwc_eth_dwmac_data *data = device_get_match_data(&pdev->dev);
+ struct plat_stmmacenet_data *plat_dat = dev_get_platdata(&pdev->dev);
stmmac_dvr_remove(&pdev->dev);
- data->remove(pdev);
+ if (data->remove)
+ data->remove(pdev);
+
+ if (plat_dat)
+ clk_bulk_disable_unprepare(plat_dat->num_clks, plat_dat->clks);
}
static const struct of_device_id dwc_eth_dwmac_match[] = {
{ .compatible = "snps,dwc-qos-ethernet-4.10", .data = &dwc_qos_data },
{ .compatible = "nvidia,tegra186-eqos", .data = &tegra_eqos_data },
+ { .compatible = "tesla,fsd-ethqos", .data = &fsd_eqos_data },
{ }
};
MODULE_DEVICE_TABLE(of, dwc_eth_dwmac_match);
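The glue layer now grabs every clock in one go with devm_clk_bulk_get_all(), enables them as a group, and then picks out individual clocks by name via stmmac_pltfr_find_clk(). A generic sketch of the bulk half of that pattern, with illustrative names:

	/* Hedged sketch of the clk_bulk pattern used above; names illustrative. */
	#include <linux/clk.h>

	static int demo_get_clocks(struct device *dev, struct clk_bulk_data **clks,
				   int *num_clks)
	{
		int ret;

		ret = devm_clk_bulk_get_all(dev, clks);	/* returns clock count */
		if (ret < 0)
			return ret;
		*num_clks = ret;

		return clk_bulk_prepare_enable(*num_clks, *clks);
	}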
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
index 641f3cd019a3..889e2bb6f7f5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
@@ -36,6 +36,8 @@
#define MX93_GPR_ENET_QOS_INTF_SEL_RMII (0x4 << 1)
#define MX93_GPR_ENET_QOS_INTF_SEL_RGMII (0x1 << 1)
#define MX93_GPR_ENET_QOS_CLK_GEN_EN (0x1 << 0)
+#define MX93_GPR_ENET_QOS_CLK_SEL_MASK BIT_MASK(0)
+#define MX93_GPR_CLK_SEL_OFFSET (4)
#define DMA_BUS_MODE 0x00001000
#define DMA_BUS_MODE_SFT_RESET (0x1 << 0)
@@ -49,7 +51,7 @@ struct imx_dwmac_ops {
int (*fix_soc_reset)(void *priv, void __iomem *ioaddr);
int (*set_intf_mode)(struct plat_stmmacenet_data *plat_dat);
- void (*fix_mac_speed)(void *priv, unsigned int speed, unsigned int mode);
+ void (*fix_mac_speed)(void *priv, int speed, unsigned int mode);
};
struct imx_priv_data {
@@ -108,13 +110,21 @@ imx8dxl_set_intf_mode(struct plat_stmmacenet_data *plat_dat)
static int imx93_set_intf_mode(struct plat_stmmacenet_data *plat_dat)
{
struct imx_priv_data *dwmac = plat_dat->bsp_priv;
- int val;
+ int val, ret;
switch (plat_dat->mac_interface) {
case PHY_INTERFACE_MODE_MII:
val = MX93_GPR_ENET_QOS_INTF_SEL_MII;
break;
case PHY_INTERFACE_MODE_RMII:
+ if (dwmac->rmii_refclk_ext) {
+ ret = regmap_clear_bits(dwmac->intf_regmap,
+ dwmac->intf_reg_off +
+ MX93_GPR_CLK_SEL_OFFSET,
+ MX93_GPR_ENET_QOS_CLK_SEL_MASK);
+ if (ret)
+ return ret;
+ }
val = MX93_GPR_ENET_QOS_INTF_SEL_RMII;
break;
case PHY_INTERFACE_MODE_RGMII:
@@ -182,11 +192,24 @@ static void imx_dwmac_exit(struct platform_device *pdev, void *priv)
/* nothing to do now */
}
-static void imx_dwmac_fix_speed(void *priv, unsigned int speed, unsigned int mode)
+static int imx_dwmac_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
+ phy_interface_t interface, int speed)
+{
+ struct imx_priv_data *dwmac = bsp_priv;
+
+ interface = dwmac->plat_dat->mac_interface;
+ if (interface == PHY_INTERFACE_MODE_RMII ||
+ interface == PHY_INTERFACE_MODE_MII)
+ return 0;
+
+ return stmmac_set_clk_tx_rate(bsp_priv, clk_tx_i, interface, speed);
+}
+
+static void imx_dwmac_fix_speed(void *priv, int speed, unsigned int mode)
{
struct plat_stmmacenet_data *plat_dat;
struct imx_priv_data *dwmac = priv;
- unsigned long rate;
+ long rate;
int err;
plat_dat = dwmac->plat_dat;
@@ -196,18 +219,9 @@ static void imx_dwmac_fix_speed(void *priv, unsigned int speed, unsigned int mod
(plat_dat->mac_interface == PHY_INTERFACE_MODE_MII))
return;
- switch (speed) {
- case SPEED_1000:
- rate = 125000000;
- break;
- case SPEED_100:
- rate = 25000000;
- break;
- case SPEED_10:
- rate = 2500000;
- break;
- default:
- dev_err(dwmac->dev, "invalid speed %u\n", speed);
+ rate = rgmii_clock(speed);
+ if (rate < 0) {
+ dev_err(dwmac->dev, "invalid speed %d\n", speed);
return;
}
@@ -216,7 +230,7 @@ static void imx_dwmac_fix_speed(void *priv, unsigned int speed, unsigned int mod
dev_err(dwmac->dev, "failed to set tx rate %lu\n", rate);
}
-static void imx93_dwmac_fix_speed(void *priv, unsigned int speed, unsigned int mode)
+static void imx93_dwmac_fix_speed(void *priv, int speed, unsigned int mode)
{
struct imx_priv_data *dwmac = priv;
unsigned int iface;
@@ -301,15 +315,11 @@ imx_dwmac_parse_dt(struct imx_priv_data *dwmac, struct device *dev)
* is required by i.MX8MP, i.MX93.
* is optional for i.MX8DXL.
*/
- dwmac->intf_regmap = syscon_regmap_lookup_by_phandle(np, "intf_mode");
+ dwmac->intf_regmap =
+ syscon_regmap_lookup_by_phandle_args(np, "intf_mode", 1,
+ &dwmac->intf_reg_off);
if (IS_ERR(dwmac->intf_regmap))
return PTR_ERR(dwmac->intf_regmap);
-
- err = of_property_read_u32_index(np, "intf_mode", 1, &dwmac->intf_reg_off);
- if (err) {
- dev_err(dev, "Can't get intf mode reg offset (%d)\n", err);
- return err;
- }
}
return err;
@@ -361,7 +371,6 @@ static int imx_dwmac_probe(struct platform_device *pdev)
plat_dat->init = imx_dwmac_init;
plat_dat->exit = imx_dwmac_exit;
plat_dat->clks_config = imx_dwmac_clks_config;
- plat_dat->fix_mac_speed = imx_dwmac_fix_speed;
plat_dat->bsp_priv = dwmac;
dwmac->plat_dat = plat_dat;
dwmac->base_addr = stmmac_res.addr;
@@ -370,24 +379,19 @@ static int imx_dwmac_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = imx_dwmac_init(pdev, dwmac);
- if (ret)
- goto err_dwmac_init;
-
- if (dwmac->ops->fix_mac_speed)
+ if (dwmac->ops->fix_mac_speed) {
plat_dat->fix_mac_speed = dwmac->ops->fix_mac_speed;
+ } else if (!dwmac->ops->mac_rgmii_txclk_auto_adj) {
+ plat_dat->clk_tx_i = dwmac->clk_tx;
+ plat_dat->set_clk_tx_rate = imx_dwmac_set_clk_tx_rate;
+ }
+
dwmac->plat_dat->fix_soc_reset = dwmac->ops->fix_soc_reset;
- ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ ret = stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res);
if (ret)
- goto err_drv_probe;
-
- return 0;
+ imx_dwmac_clks_config(dwmac, false);
-err_drv_probe:
- imx_dwmac_exit(pdev, plat_dat->bsp_priv);
-err_dwmac_init:
- imx_dwmac_clks_config(dwmac, false);
return ret;
}
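rgmii_clock() from <linux/phy.h> maps a link speed to the standard RGMII clock rate (2.5, 25 or 125 MHz) and returns a negative errno for anything else, which is what lets the per-speed switch be dropped. A sketch of the call with an illustrative clock handle:

	/* Hedged sketch: set a TX clock from the link speed via rgmii_clock(). */
	#include <linux/clk.h>
	#include <linux/phy.h>

	static int demo_set_tx_clock(struct clk *tx_clk, int speed)
	{
		long rate = rgmii_clock(speed);

		if (rate < 0)
			return (int)rate;	/* unsupported speed */

		return clk_set_rate(tx_clk, rate);
	}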
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c
index 066783d66422..15abe214131f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c
@@ -56,6 +56,7 @@ enum ingenic_mac_version {
struct ingenic_mac {
const struct ingenic_soc_info *soc_info;
+ struct plat_stmmacenet_data *plat_dat;
struct device *dev;
struct regmap *regmap;
@@ -70,13 +71,13 @@ struct ingenic_soc_info {
int (*set_mode)(struct plat_stmmacenet_data *plat_dat);
};
-static int ingenic_mac_init(struct plat_stmmacenet_data *plat_dat)
+static int ingenic_mac_init(struct platform_device *pdev, void *bsp_priv)
{
- struct ingenic_mac *mac = plat_dat->bsp_priv;
+ struct ingenic_mac *mac = bsp_priv;
int ret;
if (mac->soc_info->set_mode) {
- ret = mac->soc_info->set_mode(plat_dat);
+ ret = mac->soc_info->set_mode(mac->plat_dat);
if (ret)
return ret;
}
@@ -284,44 +285,14 @@ static int ingenic_mac_probe(struct platform_device *pdev)
mac->soc_info = data;
mac->dev = &pdev->dev;
+ mac->plat_dat = plat_dat;
plat_dat->bsp_priv = mac;
+ plat_dat->init = ingenic_mac_init;
- ret = ingenic_mac_init(plat_dat);
- if (ret)
- return ret;
-
- return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int ingenic_mac_suspend(struct device *dev)
-{
- int ret;
-
- ret = stmmac_suspend(dev);
-
- return ret;
+ return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res);
}
-static int ingenic_mac_resume(struct device *dev)
-{
- struct net_device *ndev = dev_get_drvdata(dev);
- struct stmmac_priv *priv = netdev_priv(ndev);
- int ret;
-
- ret = ingenic_mac_init(priv->plat);
- if (ret)
- return ret;
-
- ret = stmmac_resume(dev);
-
- return ret;
-}
-#endif /* CONFIG_PM_SLEEP */
-
-static SIMPLE_DEV_PM_OPS(ingenic_mac_pm_ops, ingenic_mac_suspend, ingenic_mac_resume);
-
static struct ingenic_soc_info jz4775_soc_info = {
.version = ID_JZ4775,
.mask = MACPHYC_TXCLK_SEL_MASK | MACPHYC_SOFT_RST_MASK | MACPHYC_PHY_INFT_MASK,
@@ -370,10 +341,9 @@ MODULE_DEVICE_TABLE(of, ingenic_mac_of_matches);
static struct platform_driver ingenic_mac_driver = {
.probe = ingenic_mac_probe,
- .remove = stmmac_pltfr_remove,
.driver = {
.name = "ingenic-mac",
- .pm = pm_ptr(&ingenic_mac_pm_ops),
+ .pm = &stmmac_pltfr_pm_ops,
.of_match_table = ingenic_mac_of_matches,
},
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
index d94f0a150e93..4ea7b0a803d7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
@@ -22,45 +22,12 @@ struct intel_dwmac {
};
struct intel_dwmac_data {
- void (*fix_mac_speed)(void *priv, unsigned int speed, unsigned int mode);
unsigned long ptp_ref_clk_rate;
unsigned long tx_clk_rate;
bool tx_clk_en;
};
-static void kmb_eth_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode)
-{
- struct intel_dwmac *dwmac = priv;
- unsigned long rate;
- int ret;
-
- rate = clk_get_rate(dwmac->tx_clk);
-
- switch (speed) {
- case SPEED_1000:
- rate = 125000000;
- break;
-
- case SPEED_100:
- rate = 25000000;
- break;
-
- case SPEED_10:
- rate = 2500000;
- break;
-
- default:
- dev_err(dwmac->dev, "Invalid speed\n");
- break;
- }
-
- ret = clk_set_rate(dwmac->tx_clk, rate);
- if (ret)
- dev_err(dwmac->dev, "Failed to configure tx clock rate\n");
-}
-
static const struct intel_dwmac_data kmb_data = {
- .fix_mac_speed = kmb_eth_fix_mac_speed,
.ptp_ref_clk_rate = 200000000,
.tx_clk_rate = 125000000,
.tx_clk_en = true,
@@ -103,8 +70,6 @@ static int intel_eth_plat_probe(struct platform_device *pdev)
* platform_match().
*/
dwmac->data = device_get_match_data(&pdev->dev);
- if (dwmac->data->fix_mac_speed)
- plat_dat->fix_mac_speed = dwmac->data->fix_mac_speed;
/* Enable TX clock */
if (dwmac->data->tx_clk_en) {
@@ -146,15 +111,9 @@ static int intel_eth_plat_probe(struct platform_device *pdev)
}
}
+ plat_dat->clk_tx_i = dwmac->tx_clk;
+ plat_dat->set_clk_tx_rate = stmmac_set_clk_tx_rate;
plat_dat->bsp_priv = dwmac;
- plat_dat->eee_usecs_rate = plat_dat->clk_ptp_rate;
-
- if (plat_dat->eee_usecs_rate > 0) {
- u32 tx_lpi_usec;
-
- tx_lpi_usec = (plat_dat->eee_usecs_rate / 1000000) - 1;
- writel(tx_lpi_usec, stmmac_res.addr + GMAC_1US_TIC_COUNTER);
- }
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
if (ret)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
index 48acba5eb178..9a47015254bb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
@@ -5,15 +5,30 @@
#include <linux/clk-provider.h>
#include <linux/pci.h>
#include <linux/dmi.h>
+#include <linux/platform_data/x86/intel_pmc_ipc.h>
#include "dwmac-intel.h"
#include "dwmac4.h"
#include "stmmac.h"
#include "stmmac_ptp.h"
+struct pmc_serdes_regs {
+ u8 index;
+ u32 val;
+};
+
+struct pmc_serdes_reg_info {
+ const struct pmc_serdes_regs *regs;
+ u8 num_regs;
+};
+
struct intel_priv_data {
int mdio_adhoc_addr; /* mdio address for serdes & etc */
unsigned long crossts_adj;
bool is_pse;
+ const int *tsn_lane_regs;
+ int max_tsn_lane_regs;
+ struct pmc_serdes_reg_info pid_1g;
+ struct pmc_serdes_reg_info pid_2p5g;
};
/* This struct is used to associate PCI Function of MAC controller on a board,
@@ -35,6 +50,45 @@ struct stmmac_pci_info {
int (*setup)(struct pci_dev *pdev, struct plat_stmmacenet_data *plat);
};
+static const struct pmc_serdes_regs pid_modphy3_1g_regs[] = {
+ { PID_MODPHY3_B_MODPHY_PCR_LCPLL_DWORD0, B_MODPHY_PCR_LCPLL_DWORD0_1G },
+ { PID_MODPHY3_N_MODPHY_PCR_LCPLL_DWORD2, N_MODPHY_PCR_LCPLL_DWORD2_1G },
+ { PID_MODPHY3_N_MODPHY_PCR_LCPLL_DWORD7, N_MODPHY_PCR_LCPLL_DWORD7_1G },
+ { PID_MODPHY3_N_MODPHY_PCR_LPPLL_DWORD10, N_MODPHY_PCR_LPPLL_DWORD10_1G },
+ { PID_MODPHY3_N_MODPHY_PCR_CMN_ANA_DWORD30, N_MODPHY_PCR_CMN_ANA_DWORD30_1G },
+ {}
+};
+
+static const struct pmc_serdes_regs pid_modphy3_2p5g_regs[] = {
+ { PID_MODPHY3_B_MODPHY_PCR_LCPLL_DWORD0, B_MODPHY_PCR_LCPLL_DWORD0_2P5G },
+ { PID_MODPHY3_N_MODPHY_PCR_LCPLL_DWORD2, N_MODPHY_PCR_LCPLL_DWORD2_2P5G },
+ { PID_MODPHY3_N_MODPHY_PCR_LCPLL_DWORD7, N_MODPHY_PCR_LCPLL_DWORD7_2P5G },
+ { PID_MODPHY3_N_MODPHY_PCR_LPPLL_DWORD10, N_MODPHY_PCR_LPPLL_DWORD10_2P5G },
+ { PID_MODPHY3_N_MODPHY_PCR_CMN_ANA_DWORD30, N_MODPHY_PCR_CMN_ANA_DWORD30_2P5G },
+ {}
+};
+
+static const struct pmc_serdes_regs pid_modphy1_1g_regs[] = {
+ { PID_MODPHY1_B_MODPHY_PCR_LCPLL_DWORD0, B_MODPHY_PCR_LCPLL_DWORD0_1G },
+ { PID_MODPHY1_N_MODPHY_PCR_LCPLL_DWORD2, N_MODPHY_PCR_LCPLL_DWORD2_1G },
+ { PID_MODPHY1_N_MODPHY_PCR_LCPLL_DWORD7, N_MODPHY_PCR_LCPLL_DWORD7_1G },
+ { PID_MODPHY1_N_MODPHY_PCR_LPPLL_DWORD10, N_MODPHY_PCR_LPPLL_DWORD10_1G },
+ { PID_MODPHY1_N_MODPHY_PCR_CMN_ANA_DWORD30, N_MODPHY_PCR_CMN_ANA_DWORD30_1G },
+ {}
+};
+
+static const struct pmc_serdes_regs pid_modphy1_2p5g_regs[] = {
+ { PID_MODPHY1_B_MODPHY_PCR_LCPLL_DWORD0, B_MODPHY_PCR_LCPLL_DWORD0_2P5G },
+ { PID_MODPHY1_N_MODPHY_PCR_LCPLL_DWORD2, N_MODPHY_PCR_LCPLL_DWORD2_2P5G },
+ { PID_MODPHY1_N_MODPHY_PCR_LCPLL_DWORD7, N_MODPHY_PCR_LCPLL_DWORD7_2P5G },
+ { PID_MODPHY1_N_MODPHY_PCR_LPPLL_DWORD10, N_MODPHY_PCR_LPPLL_DWORD10_2P5G },
+ { PID_MODPHY1_N_MODPHY_PCR_CMN_ANA_DWORD30, N_MODPHY_PCR_CMN_ANA_DWORD30_2P5G },
+ {}
+};
+
+static const int ehl_tsn_lane_regs[] = {7, 8, 9, 10, 11};
+static const int adln_tsn_lane_regs[] = {6};
+
static int stmmac_pci_find_phy_addr(struct pci_dev *pdev,
const struct dmi_system_id *dmi_list)
{
@@ -93,7 +147,7 @@ static int intel_serdes_powerup(struct net_device *ndev, void *priv_data)
data &= ~SERDES_RATE_MASK;
data &= ~SERDES_PCLK_MASK;
- if (priv->plat->max_speed == 2500)
+ if (priv->plat->phy_interface == PHY_INTERFACE_MODE_2500BASEX)
data |= SERDES_RATE_PCIE_GEN2 << SERDES_RATE_PCIE_SHIFT |
SERDES_PCLK_37p5MHZ << SERDES_PCLK_SHIFT;
else
@@ -230,28 +284,28 @@ static void intel_serdes_powerdown(struct net_device *ndev, void *intel_data)
}
}
-static void intel_speed_mode_2500(struct net_device *ndev, void *intel_data)
+static void tgl_get_interfaces(struct stmmac_priv *priv, void *bsp_priv,
+ unsigned long *interfaces)
{
- struct intel_priv_data *intel_priv = intel_data;
- struct stmmac_priv *priv = netdev_priv(ndev);
- int serdes_phy_addr = 0;
- u32 data = 0;
-
- serdes_phy_addr = intel_priv->mdio_adhoc_addr;
+ struct intel_priv_data *intel_priv = bsp_priv;
+ phy_interface_t interface;
+ int data;
/* Determine the link speed mode: 2.5Gbps/1Gbps */
- data = mdiobus_read(priv->mii, serdes_phy_addr,
- SERDES_GCR);
+ data = mdiobus_read(priv->mii, intel_priv->mdio_adhoc_addr, SERDES_GCR);
+ if (data < 0)
+ return;
- if (((data & SERDES_LINK_MODE_MASK) >> SERDES_LINK_MODE_SHIFT) ==
- SERDES_LINK_MODE_2G5) {
+ if (FIELD_GET(SERDES_LINK_MODE_MASK, data) == SERDES_LINK_MODE_2G5) {
dev_info(priv->device, "Link Speed Mode: 2.5Gbps\n");
- priv->plat->max_speed = 2500;
- priv->plat->phy_interface = PHY_INTERFACE_MODE_2500BASEX;
priv->plat->mdio_bus_data->default_an_inband = false;
+ interface = PHY_INTERFACE_MODE_2500BASEX;
} else {
- priv->plat->max_speed = 1000;
+ interface = PHY_INTERFACE_MODE_SGMII;
}
+
+ __set_bit(interface, interfaces);
+ priv->plat->phy_interface = interface;
}
/* Program PTP Clock Frequency for different variant of
@@ -415,6 +469,95 @@ static void intel_mgbe_pse_crossts_adj(struct intel_priv_data *intel_priv,
}
}
+static int intel_tsn_lane_is_available(struct net_device *ndev,
+ struct intel_priv_data *intel_priv)
+{
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ struct pmc_ipc_cmd tmp = {};
+ struct pmc_ipc_rbuf rbuf = {};
+ int ret = 0, i, j;
+ const int max_fia_regs = 5;
+
+ tmp.cmd = IPC_SOC_REGISTER_ACCESS;
+ tmp.sub_cmd = IPC_SOC_SUB_CMD_READ;
+
+ for (i = 0; i < max_fia_regs; i++) {
+ tmp.wbuf[0] = R_PCH_FIA_15_PCR_LOS1_REG_BASE + i;
+
+ ret = intel_pmc_ipc(&tmp, &rbuf);
+ if (ret < 0) {
+ netdev_info(priv->dev, "Failed to read from PMC.\n");
+ return ret;
+ }
+
+ for (j = 0; j < intel_priv->max_tsn_lane_regs; j++)
+ if ((rbuf.buf[0] >>
+ (4 * (intel_priv->tsn_lane_regs[j] % 8)) &
+ B_PCH_FIA_PCR_L0O) == 0xB)
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int intel_set_reg_access(const struct pmc_serdes_regs *regs, int max_regs)
+{
+ int ret = 0, i;
+
+ for (i = 0; i < max_regs; i++) {
+ struct pmc_ipc_cmd tmp = {};
+ struct pmc_ipc_rbuf rbuf = {};
+
+ tmp.cmd = IPC_SOC_REGISTER_ACCESS;
+ tmp.sub_cmd = IPC_SOC_SUB_CMD_WRITE;
+ tmp.wbuf[0] = (u32)regs[i].index;
+ tmp.wbuf[1] = regs[i].val;
+
+ ret = intel_pmc_ipc(&tmp, &rbuf);
+ if (ret < 0)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int intel_mac_finish(struct net_device *ndev,
+ void *intel_data,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ struct intel_priv_data *intel_priv = intel_data;
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ const struct pmc_serdes_regs *regs;
+ int max_regs = 0;
+ int ret = 0;
+
+ ret = intel_tsn_lane_is_available(ndev, intel_priv);
+ if (ret < 0) {
+ netdev_info(priv->dev, "No TSN lane available to set the registers.\n");
+ return ret;
+ }
+
+ if (interface == PHY_INTERFACE_MODE_2500BASEX) {
+ regs = intel_priv->pid_2p5g.regs;
+ max_regs = intel_priv->pid_2p5g.num_regs;
+ } else {
+ regs = intel_priv->pid_1g.regs;
+ max_regs = intel_priv->pid_1g.num_regs;
+ }
+
+ ret = intel_set_reg_access(regs, max_regs);
+ if (ret < 0)
+ return ret;
+
+ priv->plat->phy_interface = interface;
+
+ intel_serdes_powerdown(ndev, intel_priv);
+ intel_serdes_powerup(ndev, intel_priv);
+
+ return ret;
+}
+
static void common_default_data(struct plat_stmmacenet_data *plat)
{
plat->clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
@@ -539,7 +682,6 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
plat->axi->axi_blen[2] = 16;
plat->ptp_max_adj = plat->clk_ptp_rate;
- plat->eee_usecs_rate = plat->clk_ptp_rate;
/* Set system clock */
sprintf(clk_name, "%s-%s", "stmmac", pci_name(pdev));
@@ -624,6 +766,8 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
static int ehl_common_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
+ struct intel_priv_data *intel_priv = plat->bsp_priv;
+
plat->rx_queues_to_use = 8;
plat->tx_queues_to_use = 8;
plat->flags |= STMMAC_FLAG_USE_PHY_WOL;
@@ -639,20 +783,29 @@ static int ehl_common_data(struct pci_dev *pdev,
plat->safety_feat_cfg->prtyen = 0;
plat->safety_feat_cfg->tmouten = 0;
+ intel_priv->tsn_lane_regs = ehl_tsn_lane_regs;
+ intel_priv->max_tsn_lane_regs = ARRAY_SIZE(ehl_tsn_lane_regs);
+
return intel_mgbe_common_data(pdev, plat);
}
static int ehl_sgmii_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
+ struct intel_priv_data *intel_priv = plat->bsp_priv;
+
plat->bus_id = 1;
plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
- plat->speed_mode_2500 = intel_speed_mode_2500;
plat->serdes_powerup = intel_serdes_powerup;
plat->serdes_powerdown = intel_serdes_powerdown;
-
+ plat->mac_finish = intel_mac_finish;
plat->clk_ptp_rate = 204800000;
+ intel_priv->pid_1g.regs = pid_modphy3_1g_regs;
+ intel_priv->pid_1g.num_regs = ARRAY_SIZE(pid_modphy3_1g_regs);
+ intel_priv->pid_2p5g.regs = pid_modphy3_2p5g_regs;
+ intel_priv->pid_2p5g.num_regs = ARRAY_SIZE(pid_modphy3_2p5g_regs);
+
return ehl_common_data(pdev, plat);
}
@@ -705,10 +858,18 @@ static struct stmmac_pci_info ehl_pse0_rgmii1g_info = {
static int ehl_pse0_sgmii1g_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
+ struct intel_priv_data *intel_priv = plat->bsp_priv;
+
plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
- plat->speed_mode_2500 = intel_speed_mode_2500;
plat->serdes_powerup = intel_serdes_powerup;
plat->serdes_powerdown = intel_serdes_powerdown;
+ plat->mac_finish = intel_mac_finish;
+
+ intel_priv->pid_1g.regs = pid_modphy1_1g_regs;
+ intel_priv->pid_1g.num_regs = ARRAY_SIZE(pid_modphy1_1g_regs);
+ intel_priv->pid_2p5g.regs = pid_modphy1_2p5g_regs;
+ intel_priv->pid_2p5g.num_regs = ARRAY_SIZE(pid_modphy1_2p5g_regs);
+
return ehl_pse0_common_data(pdev, plat);
}
@@ -746,10 +907,18 @@ static struct stmmac_pci_info ehl_pse1_rgmii1g_info = {
static int ehl_pse1_sgmii1g_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
+ struct intel_priv_data *intel_priv = plat->bsp_priv;
+
plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
- plat->speed_mode_2500 = intel_speed_mode_2500;
plat->serdes_powerup = intel_serdes_powerup;
plat->serdes_powerdown = intel_serdes_powerdown;
+ plat->mac_finish = intel_mac_finish;
+
+ intel_priv->pid_1g.regs = pid_modphy1_1g_regs;
+ intel_priv->pid_1g.num_regs = ARRAY_SIZE(pid_modphy1_1g_regs);
+ intel_priv->pid_2p5g.regs = pid_modphy1_2p5g_regs;
+ intel_priv->pid_2p5g.num_regs = ARRAY_SIZE(pid_modphy1_2p5g_regs);
+
return ehl_pse1_common_data(pdev, plat);
}
@@ -763,7 +932,7 @@ static int tgl_common_data(struct pci_dev *pdev,
plat->rx_queues_to_use = 6;
plat->tx_queues_to_use = 4;
plat->clk_ptp_rate = 204800000;
- plat->speed_mode_2500 = intel_speed_mode_2500;
+ plat->get_interfaces = tgl_get_interfaces;
plat->safety_feat_cfg->tsoee = 1;
plat->safety_feat_cfg->mrxpee = 0;
@@ -782,7 +951,6 @@ static int tgl_sgmii_phy0_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
plat->bus_id = 1;
- plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
plat->serdes_powerup = intel_serdes_powerup;
plat->serdes_powerdown = intel_serdes_powerdown;
return tgl_common_data(pdev, plat);
@@ -796,7 +964,6 @@ static int tgl_sgmii_phy1_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
plat->bus_id = 2;
- plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
plat->serdes_powerup = intel_serdes_powerup;
plat->serdes_powerdown = intel_serdes_powerdown;
return tgl_common_data(pdev, plat);
@@ -810,7 +977,6 @@ static int adls_sgmii_phy0_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
plat->bus_id = 1;
- plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
/* SerDes power up and power down are done in BIOS for ADL */
@@ -825,7 +991,6 @@ static int adls_sgmii_phy1_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
plat->bus_id = 2;
- plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
/* SerDes power up and power down are done in BIOS for ADL */
@@ -835,6 +1000,55 @@ static int adls_sgmii_phy1_data(struct pci_dev *pdev,
static struct stmmac_pci_info adls_sgmii1g_phy1_info = {
.setup = adls_sgmii_phy1_data,
};
+
+static int adln_common_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ struct intel_priv_data *intel_priv = plat->bsp_priv;
+
+ plat->rx_queues_to_use = 6;
+ plat->tx_queues_to_use = 4;
+ plat->clk_ptp_rate = 204800000;
+
+ plat->safety_feat_cfg->tsoee = 1;
+ plat->safety_feat_cfg->mrxpee = 0;
+ plat->safety_feat_cfg->mestee = 1;
+ plat->safety_feat_cfg->mrxee = 1;
+ plat->safety_feat_cfg->mtxee = 1;
+ plat->safety_feat_cfg->epsi = 0;
+ plat->safety_feat_cfg->edpp = 0;
+ plat->safety_feat_cfg->prtyen = 0;
+ plat->safety_feat_cfg->tmouten = 0;
+
+ intel_priv->tsn_lane_regs = adln_tsn_lane_regs;
+ intel_priv->max_tsn_lane_regs = ARRAY_SIZE(adln_tsn_lane_regs);
+
+ return intel_mgbe_common_data(pdev, plat);
+}
+
+static int adln_sgmii_phy0_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ struct intel_priv_data *intel_priv = plat->bsp_priv;
+
+ plat->bus_id = 1;
+ plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
+ plat->serdes_powerup = intel_serdes_powerup;
+ plat->serdes_powerdown = intel_serdes_powerdown;
+ plat->mac_finish = intel_mac_finish;
+
+ intel_priv->pid_1g.regs = pid_modphy1_1g_regs;
+ intel_priv->pid_1g.num_regs = ARRAY_SIZE(pid_modphy1_1g_regs);
+ intel_priv->pid_2p5g.regs = pid_modphy1_2p5g_regs;
+ intel_priv->pid_2p5g.num_regs = ARRAY_SIZE(pid_modphy1_2p5g_regs);
+
+ return adln_common_data(pdev, plat);
+}
+
+static struct stmmac_pci_info adln_sgmii1g_phy0_info = {
+ .setup = adln_sgmii_phy0_data,
+};
+
static const struct stmmac_pci_func_data galileo_stmmac_func_data[] = {
{
.func = 6,
@@ -1094,13 +1308,6 @@ static int intel_eth_pci_probe(struct pci_dev *pdev,
memset(&res, 0, sizeof(res));
res.addr = pcim_iomap_table(pdev)[0];
- if (plat->eee_usecs_rate > 0) {
- u32 tx_lpi_usec;
-
- tx_lpi_usec = (plat->eee_usecs_rate / 1000000) - 1;
- writel(tx_lpi_usec, res.addr + GMAC_1US_TIC_COUNTER);
- }
-
ret = stmmac_config_multi_msi(pdev, plat, &res);
if (ret) {
ret = stmmac_config_single_msi(pdev, plat, &res);
@@ -1217,8 +1424,8 @@ static const struct pci_device_id intel_eth_pci_id_table[] = {
{ PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_1, &tgl_sgmii1g_phy1_info) },
{ PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_0, &adls_sgmii1g_phy0_info) },
{ PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_1, &adls_sgmii1g_phy1_info) },
- { PCI_DEVICE_DATA(INTEL, ADLN_SGMII1G, &tgl_sgmii1g_phy0_info) },
- { PCI_DEVICE_DATA(INTEL, RPLP_SGMII1G, &tgl_sgmii1g_phy0_info) },
+ { PCI_DEVICE_DATA(INTEL, ADLN_SGMII1G, &adln_sgmii1g_phy0_info) },
+ { PCI_DEVICE_DATA(INTEL, RPLP_SGMII1G, &adln_sgmii1g_phy0_info) },
{}
};
MODULE_DEVICE_TABLE(pci, intel_eth_pci_id_table);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h
index 0a37987478c1..7511c224b312 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h
@@ -21,7 +21,6 @@
#define SERDES_RATE_MASK GENMASK(9, 8)
#define SERDES_PCLK_MASK GENMASK(14, 12) /* PCLK rate to PHY */
#define SERDES_LINK_MODE_MASK GENMASK(2, 1)
-#define SERDES_LINK_MODE_SHIFT 1
#define SERDES_PWR_ST_SHIFT 4
#define SERDES_PWR_ST_P0 0x0
#define SERDES_PWR_ST_P3 0x3
@@ -50,4 +49,33 @@
#define PCH_PTP_CLK_FREQ_19_2MHZ (GMAC_GPO0)
#define PCH_PTP_CLK_FREQ_200MHZ (0)
+/* Modphy Register index */
+#define R_PCH_FIA_15_PCR_LOS1_REG_BASE 8
+#define R_PCH_FIA_15_PCR_LOS2_REG_BASE 9
+#define R_PCH_FIA_15_PCR_LOS3_REG_BASE 10
+#define R_PCH_FIA_15_PCR_LOS4_REG_BASE 11
+#define R_PCH_FIA_15_PCR_LOS5_REG_BASE 12
+#define B_PCH_FIA_PCR_L0O GENMASK(3, 0)
+#define PID_MODPHY1_B_MODPHY_PCR_LCPLL_DWORD0 13
+#define PID_MODPHY1_N_MODPHY_PCR_LCPLL_DWORD2 14
+#define PID_MODPHY1_N_MODPHY_PCR_LCPLL_DWORD7 15
+#define PID_MODPHY1_N_MODPHY_PCR_LPPLL_DWORD10 16
+#define PID_MODPHY1_N_MODPHY_PCR_CMN_ANA_DWORD30 17
+#define PID_MODPHY3_B_MODPHY_PCR_LCPLL_DWORD0 18
+#define PID_MODPHY3_N_MODPHY_PCR_LCPLL_DWORD2 19
+#define PID_MODPHY3_N_MODPHY_PCR_LCPLL_DWORD7 20
+#define PID_MODPHY3_N_MODPHY_PCR_LPPLL_DWORD10 21
+#define PID_MODPHY3_N_MODPHY_PCR_CMN_ANA_DWORD30 22
+
+#define B_MODPHY_PCR_LCPLL_DWORD0_1G 0x46AAAA41
+#define N_MODPHY_PCR_LCPLL_DWORD2_1G 0x00000139
+#define N_MODPHY_PCR_LCPLL_DWORD7_1G 0x002A0003
+#define N_MODPHY_PCR_LPPLL_DWORD10_1G 0x00170008
+#define N_MODPHY_PCR_CMN_ANA_DWORD30_1G 0x0000D4AC
+#define B_MODPHY_PCR_LCPLL_DWORD0_2P5G 0x58555551
+#define N_MODPHY_PCR_LCPLL_DWORD2_2P5G 0x0000012D
+#define N_MODPHY_PCR_LCPLL_DWORD7_2P5G 0x001F0003
+#define N_MODPHY_PCR_LPPLL_DWORD10_2P5G 0x00170008
+#define N_MODPHY_PCR_CMN_ANA_DWORD30_2P5G 0x8200ACAC
+
#endif /* __DWMAC_INTEL_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
index 61227dcf56dc..ca4035cbb55b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
@@ -112,7 +112,7 @@ struct ipq806x_gmac {
phy_interface_t phy_mode;
};
-static int get_clk_div_sgmii(struct ipq806x_gmac *gmac, unsigned int speed)
+static int get_clk_div_sgmii(struct ipq806x_gmac *gmac, int speed)
{
struct device *dev = &gmac->pdev->dev;
int div;
@@ -138,7 +138,7 @@ static int get_clk_div_sgmii(struct ipq806x_gmac *gmac, unsigned int speed)
return div;
}
-static int get_clk_div_rgmii(struct ipq806x_gmac *gmac, unsigned int speed)
+static int get_clk_div_rgmii(struct ipq806x_gmac *gmac, int speed)
{
struct device *dev = &gmac->pdev->dev;
int div;
@@ -164,7 +164,7 @@ static int get_clk_div_rgmii(struct ipq806x_gmac *gmac, unsigned int speed)
return div;
}
-static int ipq806x_gmac_set_speed(struct ipq806x_gmac *gmac, unsigned int speed)
+static int ipq806x_gmac_set_speed(struct ipq806x_gmac *gmac, int speed)
{
uint32_t clk_bits, val;
int div;
@@ -211,16 +211,12 @@ static int ipq806x_gmac_set_speed(struct ipq806x_gmac *gmac, unsigned int speed)
return 0;
}
-static int ipq806x_gmac_of_parse(struct ipq806x_gmac *gmac)
+static int ipq806x_gmac_of_parse(struct ipq806x_gmac *gmac,
+ struct plat_stmmacenet_data *plat_dat)
{
struct device *dev = &gmac->pdev->dev;
- int ret;
- ret = of_get_phy_mode(dev->of_node, &gmac->phy_mode);
- if (ret) {
- dev_err(dev, "missing phy mode property\n");
- return -EINVAL;
- }
+ gmac->phy_mode = plat_dat->phy_interface;
if (of_property_read_u32(dev->of_node, "qcom,id", &gmac->id) < 0) {
dev_err(dev, "missing qcom id property\n");
@@ -260,11 +256,12 @@ static int ipq806x_gmac_of_parse(struct ipq806x_gmac *gmac)
return PTR_ERR_OR_ZERO(gmac->qsgmii_csr);
}
-static void ipq806x_gmac_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode)
+static int ipq806x_gmac_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
+ phy_interface_t interface, int speed)
{
- struct ipq806x_gmac *gmac = priv;
+ struct ipq806x_gmac *gmac = bsp_priv;
- ipq806x_gmac_set_speed(gmac, speed);
+ return ipq806x_gmac_set_speed(gmac, speed);
}
static int
@@ -397,7 +394,7 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
gmac->pdev = pdev;
- err = ipq806x_gmac_of_parse(gmac);
+ err = ipq806x_gmac_of_parse(gmac, plat_dat);
if (err) {
dev_err(dev, "device tree parsing error\n");
return err;
@@ -478,7 +475,7 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
plat_dat->has_gmac = true;
plat_dat->bsp_priv = gmac;
- plat_dat->fix_mac_speed = ipq806x_gmac_fix_mac_speed;
+ plat_dat->set_clk_tx_rate = ipq806x_gmac_set_clk_tx_rate;
plat_dat->multicast_filter_bins = 0;
plat_dat->tx_fifo_size = 8192;
plat_dat->rx_fifo_size = 8192;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
index bfe6e2d631bd..e1591e6217d4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
@@ -11,6 +11,8 @@
#include "dwmac_dma.h"
#include "dwmac1000.h"
+#define DRIVER_NAME "dwmac-loongson-pci"
+
/* Normal Loongson Tx Summary */
#define DMA_INTR_ENA_NIE_TX_LOONGSON 0x00040000
/* Normal Loongson Rx Summary */
@@ -64,12 +66,14 @@
DMA_STATUS_TPS | DMA_STATUS_TI | \
DMA_STATUS_MSK_COMMON_LOONGSON)
-#define PCI_DEVICE_ID_LOONGSON_GMAC 0x7a03
+#define PCI_DEVICE_ID_LOONGSON_GMAC1 0x7a03
+#define PCI_DEVICE_ID_LOONGSON_GMAC2 0x7a23
#define PCI_DEVICE_ID_LOONGSON_GNET 0x7a13
-#define DWMAC_CORE_LS_MULTICHAN 0x10 /* Loongson custom ID */
-#define CHANNEL_NUM 8
+#define DWMAC_CORE_MULTICHAN_V1 0x10 /* Loongson custom ID 0x10 */
+#define DWMAC_CORE_MULTICHAN_V2 0x12 /* Loongson custom ID 0x12 */
struct loongson_data {
+ u32 multichan;
u32 loongson_id;
struct device *dev;
};
@@ -81,6 +85,8 @@ struct stmmac_pci_info {
static void loongson_default_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
+ struct loongson_data *ld = plat->bsp_priv;
+
/* Get bus_id, this can be overwritten later */
plat->bus_id = pci_dev_id(pdev);
@@ -114,31 +120,37 @@ static void loongson_default_data(struct pci_dev *pdev,
plat->dma_cfg->pbl = 32;
plat->dma_cfg->pblx8 = true;
-}
-static int loongson_gmac_data(struct pci_dev *pdev,
- struct plat_stmmacenet_data *plat)
-{
- struct loongson_data *ld;
- int i;
-
- ld = plat->bsp_priv;
-
- loongson_default_data(pdev, plat);
-
- if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN) {
- plat->rx_queues_to_use = CHANNEL_NUM;
- plat->tx_queues_to_use = CHANNEL_NUM;
+ switch (ld->loongson_id) {
+ case DWMAC_CORE_MULTICHAN_V1:
+ ld->multichan = 1;
+ plat->rx_queues_to_use = 8;
+ plat->tx_queues_to_use = 8;
/* Only channel 0 supports checksum,
* so turn off checksum to enable multiple channels.
*/
- for (i = 1; i < CHANNEL_NUM; i++)
+ for (int i = 1; i < 8; i++)
plat->tx_queues_cfg[i].coe_unsupported = 1;
- } else {
+
+ break;
+ case DWMAC_CORE_MULTICHAN_V2:
+ ld->multichan = 1;
+ plat->rx_queues_to_use = 4;
+ plat->tx_queues_to_use = 4;
+ break;
+ default:
+ ld->multichan = 0;
plat->tx_queues_to_use = 1;
plat->rx_queues_to_use = 1;
+ break;
}
+}
+
+static int loongson_gmac_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ loongson_default_data(pdev, plat);
plat->phy_interface = PHY_INTERFACE_MODE_RGMII_ID;
@@ -149,8 +161,7 @@ static struct stmmac_pci_info loongson_gmac_pci_info = {
.setup = loongson_gmac_data,
};
-static void loongson_gnet_fix_speed(void *priv, unsigned int speed,
- unsigned int mode)
+static void loongson_gnet_fix_speed(void *priv, int speed, unsigned int mode)
{
struct loongson_data *ld = (struct loongson_data *)priv;
struct net_device *ndev = dev_get_drvdata(ld->dev);
@@ -171,27 +182,8 @@ static void loongson_gnet_fix_speed(void *priv, unsigned int speed,
static int loongson_gnet_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
- struct loongson_data *ld;
- int i;
-
- ld = plat->bsp_priv;
-
loongson_default_data(pdev, plat);
- if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN) {
- plat->rx_queues_to_use = CHANNEL_NUM;
- plat->tx_queues_to_use = CHANNEL_NUM;
-
- /* Only channel 0 supports checksum,
- * so turn off checksum to enable multiple channels.
- */
- for (i = 1; i < CHANNEL_NUM; i++)
- plat->tx_queues_cfg[i].coe_unsupported = 1;
- } else {
- plat->tx_queues_to_use = 1;
- plat->rx_queues_to_use = 1;
- }
-
plat->phy_interface = PHY_INTERFACE_MODE_GMII;
plat->mdio_bus_data->phy_mask = ~(u32)BIT(2);
plat->fix_mac_speed = loongson_gnet_fix_speed;
@@ -349,14 +341,14 @@ static struct mac_device_info *loongson_dwmac_setup(void *apriv)
return NULL;
/* The Loongson GMAC and GNET devices are based on the DW GMAC
- * v3.50a and v3.73a IP-cores. But the HW designers have changed the
- * GMAC_VERSION.SNPSVER field to the custom 0x10 value on the
- * network controllers with the multi-channels feature
+ * v3.50a and v3.73a IP-cores. But the HW designers have changed
+ * the GMAC_VERSION.SNPSVER field to the custom 0x10/0x12 value
+ * on the network controllers with the multi-channels feature
* available to emphasize the differences: multiple DMA-channels,
* AV feature and GMAC_INT_STATUS CSR flags layout. Get back the
* original value so the correct HW-interface would be selected.
*/
- if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN) {
+ if (ld->multichan) {
priv->synopsys_id = DWMAC_CORE_3_70;
*dma = dwmac1000_dma_ops;
dma->init_chan = loongson_dwmac_dma_init_channel;
@@ -377,13 +369,13 @@ static struct mac_device_info *loongson_dwmac_setup(void *apriv)
if (mac->multicast_filter_bins)
mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
- /* Loongson GMAC doesn't support the flow control. LS2K2000
- * GNET doesn't support the half-duplex link mode.
+ /* Loongson GMAC doesn't support the flow control. Loongson GNET
+ * without multi-channel doesn't support the half-duplex link mode.
*/
- if (pdev->device == PCI_DEVICE_ID_LOONGSON_GMAC) {
+ if (pdev->device != PCI_DEVICE_ID_LOONGSON_GNET) {
mac->link.caps = MAC_10 | MAC_100 | MAC_1000;
} else {
- if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN)
+ if (ld->multichan)
mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
MAC_10 | MAC_100 | MAC_1000;
else
@@ -412,9 +404,11 @@ static int loongson_dwmac_msi_config(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat,
struct stmmac_resources *res)
{
- int i, ret, vecs;
+ int i, ch_num, ret, vecs;
+
+ ch_num = min(plat->tx_queues_to_use, plat->rx_queues_to_use);
- vecs = roundup_pow_of_two(CHANNEL_NUM * 2 + 1);
+ vecs = roundup_pow_of_two(ch_num * 2 + 1);
ret = pci_alloc_irq_vectors(pdev, vecs, vecs, PCI_IRQ_MSI);
if (ret < 0) {
dev_warn(&pdev->dev, "Failed to allocate MSI IRQs\n");
@@ -423,14 +417,12 @@ static int loongson_dwmac_msi_config(struct pci_dev *pdev,
res->irq = pci_irq_vector(pdev, 0);
- for (i = 0; i < plat->rx_queues_to_use; i++) {
- res->rx_irq[CHANNEL_NUM - 1 - i] =
- pci_irq_vector(pdev, 1 + i * 2);
+ for (i = 0; i < ch_num; i++) {
+ res->rx_irq[ch_num - 1 - i] = pci_irq_vector(pdev, 1 + i * 2);
}
- for (i = 0; i < plat->tx_queues_to_use; i++) {
- res->tx_irq[CHANNEL_NUM - 1 - i] =
- pci_irq_vector(pdev, 2 + i * 2);
+ for (i = 0; i < ch_num; i++) {
+ res->tx_irq[ch_num - 1 - i] = pci_irq_vector(pdev, 2 + i * 2);
}
plat->flags |= STMMAC_FLAG_MULTI_MSI_EN;
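With ch_num = min(tx, rx) queues, the driver asks for roundup_pow_of_two(ch_num * 2 + 1) MSI vectors: vector 0 carries the common MAC interrupt and each channel then gets one RX and one TX vector. A quick stand-alone check of that arithmetic (the helper below stands in for the kernel's roundup_pow_of_two()):

#include <stdio.h>

static unsigned int roundup_pow2(unsigned int v)
{
	unsigned int p = 1;

	while (p < v)
		p <<= 1;
	return p;
}

int main(void)
{
	/* multichan v1: 8 channels -> 2*8+1 = 17 -> 32 vectors requested */
	/* multichan v2: 4 channels -> 2*4+1 =  9 -> 16 vectors requested */
	printf("%u %u\n", roundup_pow2(2 * 8 + 1), roundup_pow2(2 * 4 + 1));
	return 0;
}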
@@ -516,13 +508,26 @@ static int loongson_dwmac_acpi_config(struct pci_dev *pdev,
return 0;
}
+/* Loongson's DWMAC device may take nearly two seconds to complete DMA reset */
+static int loongson_dwmac_fix_reset(void *priv, void __iomem *ioaddr)
+{
+ u32 value = readl(ioaddr + DMA_BUS_MODE);
+
+ value |= DMA_BUS_MODE_SFT_RESET;
+ writel(value, ioaddr + DMA_BUS_MODE);
+
+ return readl_poll_timeout(ioaddr + DMA_BUS_MODE, value,
+ !(value & DMA_BUS_MODE_SFT_RESET),
+ 10000, 2000000);
+}
+
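The fix_soc_reset hook above keeps the standard soft-reset sequence but stretches the poll: readl_poll_timeout() re-reads DMA_BUS_MODE every 10 ms and gives up only after 2 s if the SFT_RESET bit never clears. A rough, comment-level sketch of what that call amounts to (pseudocode, not the kernel macro):

/* Approximate expansion of the readl_poll_timeout() call above (sketch):
 *
 *	deadline = now + 2 s;
 *	for (;;) {
 *		value = readl(ioaddr + DMA_BUS_MODE);
 *		if (!(value & DMA_BUS_MODE_SFT_RESET))
 *			return 0;
 *		if (past(deadline))
 *			return -ETIMEDOUT;
 *		sleep roughly 10 ms;
 *	}
 */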
static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct plat_stmmacenet_data *plat;
+ struct stmmac_resources res = {};
struct stmmac_pci_info *info;
- struct stmmac_resources res;
struct loongson_data *ld;
- int ret, i;
+ int ret;
plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
if (!plat)
@@ -552,20 +557,14 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
pci_set_master(pdev);
/* Get the base address of device */
- for (i = 0; i < PCI_STD_NUM_BARS; i++) {
- if (pci_resource_len(pdev, i) == 0)
- continue;
- ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
- if (ret)
- goto err_disable_device;
- break;
- }
-
- memset(&res, 0, sizeof(res));
- res.addr = pcim_iomap_table(pdev)[0];
+ res.addr = pcim_iomap_region(pdev, 0, DRIVER_NAME);
+ ret = PTR_ERR_OR_ZERO(res.addr);
+ if (ret)
+ goto err_disable_device;
plat->bsp_priv = ld;
plat->setup = loongson_dwmac_setup;
+ plat->fix_soc_reset = loongson_dwmac_fix_reset;
ld->dev = &pdev->dev;
ld->loongson_id = readl(res.addr + GMAC_VERSION) & 0xff;
@@ -574,6 +573,9 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
if (ret)
goto err_disable_device;
+ plat->tx_fifo_size = SZ_16K * plat->tx_queues_to_use;
+ plat->rx_fifo_size = SZ_16K * plat->rx_queues_to_use;
+
if (dev_of_node(&pdev->dev))
ret = loongson_dwmac_dt_config(pdev, plat, &res);
else
@@ -582,7 +584,7 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
goto err_disable_device;
/* Use the common MAC IRQ if per-channel MSIs allocation failed */
- if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN)
+ if (ld->multichan)
loongson_dwmac_msi_config(pdev, plat, &res);
ret = stmmac_dvr_probe(&pdev->dev, plat, &res);
@@ -594,7 +596,7 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
err_plat_clear:
if (dev_of_node(&pdev->dev))
loongson_dwmac_dt_clear(pdev, plat);
- if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN)
+ if (ld->multichan)
loongson_dwmac_msi_clear(pdev);
err_disable_device:
pci_disable_device(pdev);
@@ -606,7 +608,6 @@ static void loongson_dwmac_remove(struct pci_dev *pdev)
struct net_device *ndev = dev_get_drvdata(&pdev->dev);
struct stmmac_priv *priv = netdev_priv(ndev);
struct loongson_data *ld;
- int i;
ld = priv->plat->bsp_priv;
stmmac_dvr_remove(&pdev->dev);
@@ -614,16 +615,9 @@ static void loongson_dwmac_remove(struct pci_dev *pdev)
if (dev_of_node(&pdev->dev))
loongson_dwmac_dt_clear(pdev, priv->plat);
- if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN)
+ if (ld->multichan)
loongson_dwmac_msi_clear(pdev);
- for (i = 0; i < PCI_STD_NUM_BARS; i++) {
- if (pci_resource_len(pdev, i) == 0)
- continue;
- pcim_iounmap_regions(pdev, BIT(i));
- break;
- }
-
pci_disable_device(pdev);
}
@@ -666,14 +660,15 @@ static SIMPLE_DEV_PM_OPS(loongson_dwmac_pm_ops, loongson_dwmac_suspend,
loongson_dwmac_resume);
static const struct pci_device_id loongson_dwmac_id_table[] = {
- { PCI_DEVICE_DATA(LOONGSON, GMAC, &loongson_gmac_pci_info) },
+ { PCI_DEVICE_DATA(LOONGSON, GMAC1, &loongson_gmac_pci_info) },
+ { PCI_DEVICE_DATA(LOONGSON, GMAC2, &loongson_gmac_pci_info) },
{ PCI_DEVICE_DATA(LOONGSON, GNET, &loongson_gnet_pci_info) },
{}
};
MODULE_DEVICE_TABLE(pci, loongson_dwmac_id_table);
static struct pci_driver loongson_dwmac_driver = {
- .name = "dwmac-loongson-pci",
+ .name = DRIVER_NAME,
.id_table = loongson_dwmac_id_table,
.probe = loongson_dwmac_probe,
.remove = loongson_dwmac_remove,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
index c9636832a570..39421d6a34e4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
@@ -456,7 +456,6 @@ static int mediatek_dwmac_config_dt(struct mediatek_dwmac_plat_data *plat)
{
struct mac_delay_struct *mac_delay = &plat->mac_delay;
u32 tx_delay_ps, rx_delay_ps;
- int err;
plat->peri_regmap = syscon_regmap_lookup_by_phandle(plat->np, "mediatek,pericfg");
if (IS_ERR(plat->peri_regmap)) {
@@ -464,12 +463,6 @@ static int mediatek_dwmac_config_dt(struct mediatek_dwmac_plat_data *plat)
return PTR_ERR(plat->peri_regmap);
}
- err = of_get_phy_mode(plat->np, &plat->phy_mode);
- if (err) {
- dev_err(plat->dev, "not find phy-mode\n");
- return err;
- }
-
if (!of_property_read_u32(plat->np, "mediatek,tx-delay-ps", &tx_delay_ps)) {
if (tx_delay_ps < plat->variant->tx_delay_max) {
mac_delay->tx_delay = tx_delay_ps;
@@ -587,7 +580,7 @@ static int mediatek_dwmac_common_data(struct platform_device *pdev,
{
int i;
- plat->mac_interface = priv_plat->phy_mode;
+ priv_plat->phy_mode = plat->phy_interface;
if (priv_plat->mac_wol)
plat->flags &= ~STMMAC_FLAG_USE_PHY_WOL;
else
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
index 5469fa1b429e..07c504d07604 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
@@ -22,9 +22,10 @@ struct meson_dwmac {
void __iomem *reg;
};
-static void meson6_dwmac_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode)
+static int meson6_dwmac_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
+ phy_interface_t interface, int speed)
{
- struct meson_dwmac *dwmac = priv;
+ struct meson_dwmac *dwmac = bsp_priv;
unsigned int val;
val = readl(dwmac->reg);
@@ -39,6 +40,8 @@ static void meson6_dwmac_fix_mac_speed(void *priv, unsigned int speed, unsigned
}
writel(val, dwmac->reg);
+
+ return 0;
}
static int meson6_dwmac_probe(struct platform_device *pdev)
@@ -65,7 +68,7 @@ static int meson6_dwmac_probe(struct platform_device *pdev)
return PTR_ERR(dwmac->reg);
plat_dat->bsp_priv = dwmac;
- plat_dat->fix_mac_speed = meson6_dwmac_fix_mac_speed;
+ plat_dat->set_clk_tx_rate = meson6_dwmac_set_clk_tx_rate;
return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
index 9c2d62d133ad..a50782994b97 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
@@ -417,11 +417,7 @@ static int meson8b_dwmac_probe(struct platform_device *pdev)
return PTR_ERR(dwmac->regs);
dwmac->dev = &pdev->dev;
- ret = of_get_phy_mode(pdev->dev.of_node, &dwmac->phy_mode);
- if (ret) {
- dev_err(&pdev->dev, "missing phy-mode property\n");
- return ret;
- }
+ dwmac->phy_mode = plat_dat->phy_interface;
/* use 2ns as fallback since this value was previously hardcoded */
if (of_property_read_u32(pdev->dev.of_node, "amlogic,tx-delay-ns",
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
index 901a3c1959fa..e30bdf72331a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
@@ -106,12 +106,11 @@ struct qcom_ethqos {
struct platform_device *pdev;
void __iomem *rgmii_base;
void __iomem *mac_base;
- int (*configure_func)(struct qcom_ethqos *ethqos);
+ int (*configure_func)(struct qcom_ethqos *ethqos, int speed);
unsigned int link_clk_rate;
struct clk *link_clk;
struct phy *serdes_phy;
- unsigned int speed;
int serdes_speed;
phy_interface_t phy_mode;
@@ -169,30 +168,17 @@ static void rgmii_dump(void *priv)
rgmii_readl(ethqos, EMAC_SYSTEM_LOW_POWER_DEBUG));
}
-/* Clock rates */
-#define RGMII_1000_NOM_CLK_FREQ (250 * 1000 * 1000UL)
-#define RGMII_ID_MODE_100_LOW_SVS_CLK_FREQ (50 * 1000 * 1000UL)
-#define RGMII_ID_MODE_10_LOW_SVS_CLK_FREQ (5 * 1000 * 1000UL)
-
static void
-ethqos_update_link_clk(struct qcom_ethqos *ethqos, unsigned int speed)
+ethqos_update_link_clk(struct qcom_ethqos *ethqos, int speed)
{
+ long rate;
+
if (!phy_interface_mode_is_rgmii(ethqos->phy_mode))
return;
- switch (speed) {
- case SPEED_1000:
- ethqos->link_clk_rate = RGMII_1000_NOM_CLK_FREQ;
- break;
-
- case SPEED_100:
- ethqos->link_clk_rate = RGMII_ID_MODE_100_LOW_SVS_CLK_FREQ;
- break;
-
- case SPEED_10:
- ethqos->link_clk_rate = RGMII_ID_MODE_10_LOW_SVS_CLK_FREQ;
- break;
- }
+ rate = rgmii_clock(speed);
+ if (rate > 0)
+ ethqos->link_clk_rate = rate * 2;
clk_set_rate(ethqos->link_clk, ethqos->link_clk_rate);
}
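rgmii_clock() returns the nominal RGMII clock for the link speed, and the EMAC link clock here runs at twice that, which reproduces the constants removed above: 2 x 125 MHz = 250 MHz at 1000, 2 x 25 MHz = 50 MHz at 100, 2 x 2.5 MHz = 5 MHz at 10. A stand-alone sketch of the mapping, with the stand-in helper assumed to return those three rates and a negative value otherwise:

#include <stdio.h>

/* Stand-in for the kernel's rgmii_clock(); returns Hz, negative on error. */
static long rgmii_clock_sketch(int speed)
{
	switch (speed) {
	case 10:   return 2500000;
	case 100:  return 25000000;
	case 1000: return 125000000;
	default:   return -1;
	}
}

int main(void)
{
	static const int speeds[] = { 10, 100, 1000 };

	for (int i = 0; i < 3; i++)
		printf("speed %4d -> link clk %ld Hz\n",
		       speeds[i], 2 * rgmii_clock_sketch(speeds[i]));
	return 0;
}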
@@ -398,7 +384,7 @@ static int ethqos_dll_configure(struct qcom_ethqos *ethqos)
return 0;
}
-static int ethqos_rgmii_macro_init(struct qcom_ethqos *ethqos)
+static int ethqos_rgmii_macro_init(struct qcom_ethqos *ethqos, int speed)
{
struct device *dev = &ethqos->pdev->dev;
int phase_shift;
@@ -425,7 +411,7 @@ static int ethqos_rgmii_macro_init(struct qcom_ethqos *ethqos)
rgmii_updatel(ethqos, RGMII_CONFIG_INTF_SEL,
0, RGMII_IO_MACRO_CONFIG);
- switch (ethqos->speed) {
+ switch (speed) {
case SPEED_1000:
rgmii_updatel(ethqos, RGMII_CONFIG_DDR_MODE,
RGMII_CONFIG_DDR_MODE, RGMII_IO_MACRO_CONFIG);
@@ -545,14 +531,14 @@ static int ethqos_rgmii_macro_init(struct qcom_ethqos *ethqos)
loopback, RGMII_IO_MACRO_CONFIG);
break;
default:
- dev_err(dev, "Invalid speed %d\n", ethqos->speed);
+ dev_err(dev, "Invalid speed %d\n", speed);
return -EINVAL;
}
return 0;
}
-static int ethqos_configure_rgmii(struct qcom_ethqos *ethqos)
+static int ethqos_configure_rgmii(struct qcom_ethqos *ethqos, int speed)
{
struct device *dev = &ethqos->pdev->dev;
volatile unsigned int dll_lock;
@@ -575,7 +561,7 @@ static int ethqos_configure_rgmii(struct qcom_ethqos *ethqos)
SDCC_DLL_CONFIG_PDN, SDCC_HC_REG_DLL_CONFIG);
if (ethqos->has_emac_ge_3) {
- if (ethqos->speed == SPEED_1000) {
+ if (speed == SPEED_1000) {
rgmii_writel(ethqos, 0x1800000, SDCC_TEST_CTL);
rgmii_writel(ethqos, 0x2C010800, SDCC_USR_CTL);
rgmii_writel(ethqos, 0xA001, SDCC_HC_REG_DLL_CONFIG2);
@@ -593,7 +579,7 @@ static int ethqos_configure_rgmii(struct qcom_ethqos *ethqos)
rgmii_updatel(ethqos, SDCC_DLL_CONFIG_PDN, 0,
SDCC_HC_REG_DLL_CONFIG);
- if (ethqos->speed != SPEED_100 && ethqos->speed != SPEED_10) {
+ if (speed != SPEED_100 && speed != SPEED_10) {
/* Set DLL_EN */
rgmii_updatel(ethqos, SDCC_DLL_CONFIG_DLL_EN,
SDCC_DLL_CONFIG_DLL_EN, SDCC_HC_REG_DLL_CONFIG);
@@ -620,10 +606,10 @@ static int ethqos_configure_rgmii(struct qcom_ethqos *ethqos)
dev_err(dev, "Timeout while waiting for DLL lock\n");
}
- if (ethqos->speed == SPEED_1000)
+ if (speed == SPEED_1000)
ethqos_dll_configure(ethqos);
- ethqos_rgmii_macro_init(ethqos);
+ ethqos_rgmii_macro_init(ethqos, speed);
return 0;
}
@@ -639,7 +625,7 @@ static void ethqos_set_serdes_speed(struct qcom_ethqos *ethqos, int speed)
/* On interface toggle, MAC registers get reset.
 * Configure the MAC block for SGMII on Ethernet PHY link up
*/
-static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos)
+static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos, int speed)
{
struct net_device *dev = platform_get_drvdata(ethqos->pdev);
struct stmmac_priv *priv = netdev_priv(dev);
@@ -647,7 +633,7 @@ static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos)
val = readl(ethqos->mac_base + MAC_CTRL_REG);
- switch (ethqos->speed) {
+ switch (speed) {
case SPEED_2500:
val &= ~ETHQOS_MAC_CTRL_PORT_SEL;
rgmii_updatel(ethqos, RGMII_CONFIG2_RGMII_CLK_SEL_CFG,
@@ -686,27 +672,18 @@ static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos)
return val;
}
-static void qcom_ethqos_speed_mode_2500(struct net_device *ndev, void *data)
+static int ethqos_configure(struct qcom_ethqos *ethqos, int speed)
{
- struct stmmac_priv *priv = netdev_priv(ndev);
-
- priv->plat->max_speed = 2500;
- priv->plat->phy_interface = PHY_INTERFACE_MODE_2500BASEX;
+ return ethqos->configure_func(ethqos, speed);
}
-static int ethqos_configure(struct qcom_ethqos *ethqos)
-{
- return ethqos->configure_func(ethqos);
-}
-
-static void ethqos_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode)
+static void ethqos_fix_mac_speed(void *priv, int speed, unsigned int mode)
{
struct qcom_ethqos *ethqos = priv;
qcom_ethqos_set_sgmii_loopback(ethqos, false);
- ethqos->speed = speed;
ethqos_update_link_clk(ethqos, speed);
- ethqos_configure(ethqos);
+ ethqos_configure(ethqos, speed);
}
static int qcom_ethqos_serdes_powerup(struct net_device *ndev, void *priv)
@@ -722,7 +699,7 @@ static int qcom_ethqos_serdes_powerup(struct net_device *ndev, void *priv)
if (ret)
return ret;
- return phy_set_speed(ethqos->serdes_phy, ethqos->speed);
+ return phy_set_speed(ethqos->serdes_phy, ethqos->serdes_speed);
}
static void qcom_ethqos_serdes_powerdown(struct net_device *ndev, void *priv)
@@ -777,7 +754,7 @@ static void ethqos_ptp_clk_freq_config(struct stmmac_priv *priv)
netdev_err(priv->dev, "Failed to max out clk_ptp_ref: %d\n", err);
plat_dat->clk_ptp_rate = clk_get_rate(plat_dat->clk_ptp_ref);
- netdev_dbg(priv->dev, "PTP rate %d\n", plat_dat->clk_ptp_rate);
+ netdev_dbg(priv->dev, "PTP rate %lu\n", plat_dat->clk_ptp_rate);
}
static int qcom_ethqos_probe(struct platform_device *pdev)
@@ -807,9 +784,7 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
if (!ethqos)
return -ENOMEM;
- ret = of_get_phy_mode(np, &ethqos->phy_mode);
- if (ret)
- return dev_err_probe(dev, ret, "Failed to get phy mode\n");
+ ethqos->phy_mode = plat_dat->phy_interface;
switch (ethqos->phy_mode) {
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
@@ -818,8 +793,6 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
ethqos->configure_func = ethqos_configure_rgmii;
break;
case PHY_INTERFACE_MODE_2500BASEX:
- plat_dat->speed_mode_2500 = qcom_ethqos_speed_mode_2500;
- fallthrough;
case PHY_INTERFACE_MODE_SGMII:
ethqos->configure_func = ethqos_configure_sgmii;
break;
@@ -862,7 +835,6 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
return dev_err_probe(dev, PTR_ERR(ethqos->serdes_phy),
"Failed to get serdes phy\n");
- ethqos->speed = SPEED_1000;
ethqos->serdes_speed = SPEED_1000;
ethqos_update_link_clk(ethqos, SPEED_1000);
ethqos_set_func_clk_en(ethqos);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c
new file mode 100644
index 000000000000..9a774046455b
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * dwmac-renesas-gbeth.c - DWMAC Specific Glue layer for Renesas GBETH
+ *
+ * The Rx and Tx clocks are supplied as follows for the GBETH IP.
+ *
+ * Rx / Tx
+ * -------+------------- on / off -------
+ * |
+ * | Rx-180 / Tx-180
+ * +---- not ---- on / off -------
+ *
+ * Copyright (C) 2025 Renesas Electronics Corporation
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#include "stmmac_platform.h"
+
+struct renesas_gbeth {
+ struct plat_stmmacenet_data *plat_dat;
+ struct reset_control *rstc;
+ struct device *dev;
+};
+
+static const char *const renesas_gbeth_clks[] = {
+ "tx", "tx-180", "rx", "rx-180",
+};
+
+static int renesas_gbeth_init(struct platform_device *pdev, void *priv)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct renesas_gbeth *gbeth = priv;
+ int ret;
+
+ plat_dat = gbeth->plat_dat;
+
+ ret = reset_control_deassert(gbeth->rstc);
+ if (ret) {
+ dev_err(gbeth->dev, "Reset deassert failed\n");
+ return ret;
+ }
+
+ ret = clk_bulk_prepare_enable(plat_dat->num_clks,
+ plat_dat->clks);
+ if (ret)
+ reset_control_assert(gbeth->rstc);
+
+ return ret;
+}
+
+static void renesas_gbeth_exit(struct platform_device *pdev, void *priv)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct renesas_gbeth *gbeth = priv;
+ int ret;
+
+ plat_dat = gbeth->plat_dat;
+
+ clk_bulk_disable_unprepare(plat_dat->num_clks, plat_dat->clks);
+
+ ret = reset_control_assert(gbeth->rstc);
+ if (ret)
+ dev_err(gbeth->dev, "Reset assert failed\n");
+}
+
+static int renesas_gbeth_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
+ struct device *dev = &pdev->dev;
+ struct renesas_gbeth *gbeth;
+ unsigned int i;
+ int err;
+
+ err = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (err)
+ return dev_err_probe(dev, err,
+ "failed to get resources\n");
+
+ plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return dev_err_probe(dev, PTR_ERR(plat_dat),
+ "dt configuration failed\n");
+
+ gbeth = devm_kzalloc(dev, sizeof(*gbeth), GFP_KERNEL);
+ if (!gbeth)
+ return -ENOMEM;
+
+ plat_dat->num_clks = ARRAY_SIZE(renesas_gbeth_clks);
+ plat_dat->clks = devm_kcalloc(dev, plat_dat->num_clks,
+ sizeof(*plat_dat->clks), GFP_KERNEL);
+ if (!plat_dat->clks)
+ return -ENOMEM;
+
+ for (i = 0; i < plat_dat->num_clks; i++)
+ plat_dat->clks[i].id = renesas_gbeth_clks[i];
+
+ err = devm_clk_bulk_get(dev, plat_dat->num_clks, plat_dat->clks);
+ if (err < 0)
+ return err;
+
+ plat_dat->clk_tx_i = stmmac_pltfr_find_clk(plat_dat, "tx");
+ if (!plat_dat->clk_tx_i)
+ return dev_err_probe(dev, -EINVAL,
+ "error finding tx clock\n");
+
+ gbeth->rstc = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(gbeth->rstc))
+ return PTR_ERR(gbeth->rstc);
+
+ gbeth->dev = dev;
+ gbeth->plat_dat = plat_dat;
+ plat_dat->bsp_priv = gbeth;
+ plat_dat->set_clk_tx_rate = stmmac_set_clk_tx_rate;
+ plat_dat->init = renesas_gbeth_init;
+ plat_dat->exit = renesas_gbeth_exit;
+ plat_dat->flags |= STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY |
+ STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP |
+ STMMAC_FLAG_SPH_DISABLE;
+
+ return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res);
+}
+
+static const struct of_device_id renesas_gbeth_match[] = {
+ { .compatible = "renesas,rzv2h-gbeth", },
+ { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, renesas_gbeth_match);
+
+static struct platform_driver renesas_gbeth_driver = {
+ .probe = renesas_gbeth_probe,
+ .driver = {
+ .name = "renesas-gbeth",
+ .of_match_table = renesas_gbeth_match,
+ },
+};
+module_platform_driver(renesas_gbeth_driver);
+
+MODULE_AUTHOR("Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>");
+MODULE_DESCRIPTION("Renesas GBETH DWMAC Specific Glue layer");
+MODULE_LICENSE("GPL");
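The new glue driver leans on the generic stmmac plumbing: it fills a clk_bulk table with the four GBETH clocks, points plat_dat->clk_tx_i at the "tx" entry so the common stmmac_set_clk_tx_rate() helper can retune the Tx clock on link changes, and pairs the reset deassert/assert with enabling/disabling the whole bulk in init/exit. A minimal sketch of the "find clock by id" lookup that stmmac_pltfr_find_clk() performs over that table (the helper shown here is an illustrative reimplementation, not the kernel's):

#include <stddef.h>
#include <string.h>

struct clk;			/* opaque handle, as in the kernel */

struct clk_bulk_data {
	const char *id;
	struct clk *clk;
};

static struct clk *find_clk_by_id(const struct clk_bulk_data *clks,
				  size_t num, const char *id)
{
	for (size_t i = 0; i < num; i++)
		if (!strcmp(clks[i].id, id))
			return clks[i].clk;
	return NULL;		/* no clock with that id in the table */
}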
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 8cb374668b74..700858ff6f7c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -33,6 +33,8 @@ struct rk_gmac_ops {
void (*set_clock_selection)(struct rk_priv_data *bsp_priv, bool input,
bool enable);
void (*integrated_phy_powerup)(struct rk_priv_data *bsp_priv);
+ void (*integrated_phy_powerdown)(struct rk_priv_data *bsp_priv);
+ bool php_grf_required;
bool regs_valid;
u32 regs[];
};
@@ -91,6 +93,76 @@ struct rk_priv_data {
(((tx) ? soc##_GMAC_TXCLK_DLY_ENABLE : soc##_GMAC_TXCLK_DLY_DISABLE) | \
((rx) ? soc##_GMAC_RXCLK_DLY_ENABLE : soc##_GMAC_RXCLK_DLY_DISABLE))
+#define RK_GRF_MACPHY_CON0 0xb00
+#define RK_GRF_MACPHY_CON1 0xb04
+#define RK_GRF_MACPHY_CON2 0xb08
+#define RK_GRF_MACPHY_CON3 0xb0c
+
+#define RK_MACPHY_ENABLE GRF_BIT(0)
+#define RK_MACPHY_DISABLE GRF_CLR_BIT(0)
+#define RK_MACPHY_CFG_CLK_50M GRF_BIT(14)
+#define RK_GMAC2PHY_RMII_MODE (GRF_BIT(6) | GRF_CLR_BIT(7))
+#define RK_GRF_CON2_MACPHY_ID HIWORD_UPDATE(0x1234, 0xffff, 0)
+#define RK_GRF_CON3_MACPHY_ID HIWORD_UPDATE(0x35, 0x3f, 0)
+
+static void rk_gmac_integrated_ephy_powerup(struct rk_priv_data *priv)
+{
+ regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_MACPHY_CFG_CLK_50M);
+ regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_GMAC2PHY_RMII_MODE);
+
+ regmap_write(priv->grf, RK_GRF_MACPHY_CON2, RK_GRF_CON2_MACPHY_ID);
+ regmap_write(priv->grf, RK_GRF_MACPHY_CON3, RK_GRF_CON3_MACPHY_ID);
+
+ if (priv->phy_reset) {
+ /* PHY needs to be disabled before trying to reset it */
+ regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_MACPHY_DISABLE);
+ reset_control_assert(priv->phy_reset);
+ usleep_range(10, 20);
+ reset_control_deassert(priv->phy_reset);
+ usleep_range(10, 20);
+ regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_MACPHY_ENABLE);
+ msleep(30);
+ }
+}
+
+static void rk_gmac_integrated_ephy_powerdown(struct rk_priv_data *priv)
+{
+ regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_MACPHY_DISABLE);
+ if (priv->phy_reset)
+ reset_control_assert(priv->phy_reset);
+}
+
+#define RK_FEPHY_SHUTDOWN GRF_BIT(1)
+#define RK_FEPHY_POWERUP GRF_CLR_BIT(1)
+#define RK_FEPHY_INTERNAL_RMII_SEL GRF_BIT(6)
+#define RK_FEPHY_24M_CLK_SEL (GRF_BIT(8) | GRF_BIT(9))
+#define RK_FEPHY_PHY_ID GRF_BIT(11)
+
+static void rk_gmac_integrated_fephy_powerup(struct rk_priv_data *priv,
+ unsigned int reg)
+{
+ reset_control_assert(priv->phy_reset);
+ usleep_range(20, 30);
+
+ regmap_write(priv->grf, reg,
+ RK_FEPHY_POWERUP |
+ RK_FEPHY_INTERNAL_RMII_SEL |
+ RK_FEPHY_24M_CLK_SEL |
+ RK_FEPHY_PHY_ID);
+ usleep_range(10000, 12000);
+
+ reset_control_deassert(priv->phy_reset);
+ usleep_range(50000, 60000);
+}
+
+static void rk_gmac_integrated_fephy_powerdown(struct rk_priv_data *priv,
+ unsigned int reg)
+{
+ regmap_write(priv->grf, reg, RK_FEPHY_SHUTDOWN);
+}
+
#define PX30_GRF_GMAC_CON1 0x0904
/* PX30_GRF_GMAC_CON1 */
@@ -101,13 +173,6 @@ struct rk_priv_data {
static void px30_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
regmap_write(bsp_priv->grf, PX30_GRF_GMAC_CON1,
PX30_GMAC_PHY_INTF_SEL_RMII);
}
@@ -181,13 +246,6 @@ static const struct rk_gmac_ops px30_ops = {
static void rk3128_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
RK3128_GMAC_PHY_INTF_SEL_RGMII |
RK3128_GMAC_RMII_MODE_CLR);
@@ -199,13 +257,6 @@ static void rk3128_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rk3128_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
RK3128_GMAC_PHY_INTF_SEL_RMII | RK3128_GMAC_RMII_MODE);
}
@@ -214,11 +265,6 @@ static void rk3128_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
struct device *dev = &bsp_priv->pdev->dev;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
if (speed == 10)
regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
RK3128_GMAC_CLK_2_5M);
@@ -236,11 +282,6 @@ static void rk3128_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
struct device *dev = &bsp_priv->pdev->dev;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
if (speed == 10) {
regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
RK3128_GMAC_RMII_CLK_2_5M |
@@ -297,13 +338,6 @@ static const struct rk_gmac_ops rk3128_ops = {
static void rk3228_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
RK3228_GMAC_PHY_INTF_SEL_RGMII |
RK3228_GMAC_RMII_MODE_CLR |
@@ -316,13 +350,6 @@ static void rk3228_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rk3228_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
RK3228_GMAC_PHY_INTF_SEL_RMII |
RK3228_GMAC_RMII_MODE);
@@ -335,11 +362,6 @@ static void rk3228_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
struct device *dev = &bsp_priv->pdev->dev;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
if (speed == 10)
regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
RK3228_GMAC_CLK_2_5M);
@@ -357,11 +379,6 @@ static void rk3228_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
struct device *dev = &bsp_priv->pdev->dev;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
if (speed == 10)
regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
RK3228_GMAC_RMII_CLK_2_5M |
@@ -378,6 +395,8 @@ static void rk3228_integrated_phy_powerup(struct rk_priv_data *priv)
{
regmap_write(priv->grf, RK3228_GRF_CON_MUX,
RK3228_GRF_CON_MUX_GMAC_INTEGRATED_PHY);
+
+ rk_gmac_integrated_ephy_powerup(priv);
}
static const struct rk_gmac_ops rk3228_ops = {
@@ -385,7 +404,8 @@ static const struct rk_gmac_ops rk3228_ops = {
.set_to_rmii = rk3228_set_to_rmii,
.set_rgmii_speed = rk3228_set_rgmii_speed,
.set_rmii_speed = rk3228_set_rmii_speed,
- .integrated_phy_powerup = rk3228_integrated_phy_powerup,
+ .integrated_phy_powerup = rk3228_integrated_phy_powerup,
+ .integrated_phy_powerdown = rk_gmac_integrated_ephy_powerdown,
};
#define RK3288_GRF_SOC_CON1 0x0248
@@ -419,13 +439,6 @@ static const struct rk_gmac_ops rk3228_ops = {
static void rk3288_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
RK3288_GMAC_PHY_INTF_SEL_RGMII |
RK3288_GMAC_RMII_MODE_CLR);
@@ -437,13 +450,6 @@ static void rk3288_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rk3288_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
RK3288_GMAC_PHY_INTF_SEL_RMII | RK3288_GMAC_RMII_MODE);
}
@@ -452,11 +458,6 @@ static void rk3288_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
struct device *dev = &bsp_priv->pdev->dev;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
if (speed == 10)
regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
RK3288_GMAC_CLK_2_5M);
@@ -474,11 +475,6 @@ static void rk3288_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
struct device *dev = &bsp_priv->pdev->dev;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
if (speed == 10) {
regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
RK3288_GMAC_RMII_CLK_2_5M |
@@ -511,13 +507,6 @@ static const struct rk_gmac_ops rk3288_ops = {
static void rk3308_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
regmap_write(bsp_priv->grf, RK3308_GRF_MAC_CON0,
RK3308_GMAC_PHY_INTF_SEL_RMII);
}
@@ -526,11 +515,6 @@ static void rk3308_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
struct device *dev = &bsp_priv->pdev->dev;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
if (speed == 10) {
regmap_write(bsp_priv->grf, RK3308_GRF_MAC_CON0,
RK3308_GMAC_SPEED_10M);
@@ -583,13 +567,6 @@ static const struct rk_gmac_ops rk3308_ops = {
static void rk3328_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1,
RK3328_GMAC_PHY_INTF_SEL_RGMII |
RK3328_GMAC_RMII_MODE_CLR |
@@ -603,14 +580,8 @@ static void rk3328_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rk3328_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
unsigned int reg;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
reg = bsp_priv->integrated_phy ? RK3328_GRF_MAC_CON2 :
RK3328_GRF_MAC_CON1;
@@ -623,11 +594,6 @@ static void rk3328_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
struct device *dev = &bsp_priv->pdev->dev;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
if (speed == 10)
regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1,
RK3328_GMAC_CLK_2_5M);
@@ -646,11 +612,6 @@ static void rk3328_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
struct device *dev = &bsp_priv->pdev->dev;
unsigned int reg;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
reg = bsp_priv->integrated_phy ? RK3328_GRF_MAC_CON2 :
RK3328_GRF_MAC_CON1;
@@ -670,6 +631,8 @@ static void rk3328_integrated_phy_powerup(struct rk_priv_data *priv)
{
regmap_write(priv->grf, RK3328_GRF_MACPHY_CON1,
RK3328_MACPHY_RMII_MODE);
+
+ rk_gmac_integrated_ephy_powerup(priv);
}
static const struct rk_gmac_ops rk3328_ops = {
@@ -677,7 +640,8 @@ static const struct rk_gmac_ops rk3328_ops = {
.set_to_rmii = rk3328_set_to_rmii,
.set_rgmii_speed = rk3328_set_rgmii_speed,
.set_rmii_speed = rk3328_set_rmii_speed,
- .integrated_phy_powerup = rk3328_integrated_phy_powerup,
+ .integrated_phy_powerup = rk3328_integrated_phy_powerup,
+ .integrated_phy_powerdown = rk_gmac_integrated_ephy_powerdown,
};
#define RK3366_GRF_SOC_CON6 0x0418
@@ -711,13 +675,6 @@ static const struct rk_gmac_ops rk3328_ops = {
static void rk3366_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6,
RK3366_GMAC_PHY_INTF_SEL_RGMII |
RK3366_GMAC_RMII_MODE_CLR);
@@ -729,13 +686,6 @@ static void rk3366_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rk3366_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6,
RK3366_GMAC_PHY_INTF_SEL_RMII | RK3366_GMAC_RMII_MODE);
}
@@ -744,11 +694,6 @@ static void rk3366_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
struct device *dev = &bsp_priv->pdev->dev;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
if (speed == 10)
regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6,
RK3366_GMAC_CLK_2_5M);
@@ -766,11 +711,6 @@ static void rk3366_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
struct device *dev = &bsp_priv->pdev->dev;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
if (speed == 10) {
regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6,
RK3366_GMAC_RMII_CLK_2_5M |
@@ -822,13 +762,6 @@ static const struct rk_gmac_ops rk3366_ops = {
static void rk3368_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15,
RK3368_GMAC_PHY_INTF_SEL_RGMII |
RK3368_GMAC_RMII_MODE_CLR);
@@ -840,13 +773,6 @@ static void rk3368_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rk3368_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15,
RK3368_GMAC_PHY_INTF_SEL_RMII | RK3368_GMAC_RMII_MODE);
}
@@ -855,11 +781,6 @@ static void rk3368_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
struct device *dev = &bsp_priv->pdev->dev;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
if (speed == 10)
regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15,
RK3368_GMAC_CLK_2_5M);
@@ -877,11 +798,6 @@ static void rk3368_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
struct device *dev = &bsp_priv->pdev->dev;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
if (speed == 10) {
regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15,
RK3368_GMAC_RMII_CLK_2_5M |
@@ -933,13 +849,6 @@ static const struct rk_gmac_ops rk3368_ops = {
static void rk3399_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5,
RK3399_GMAC_PHY_INTF_SEL_RGMII |
RK3399_GMAC_RMII_MODE_CLR);
@@ -951,13 +860,6 @@ static void rk3399_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rk3399_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5,
RK3399_GMAC_PHY_INTF_SEL_RMII | RK3399_GMAC_RMII_MODE);
}
@@ -966,11 +868,6 @@ static void rk3399_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
struct device *dev = &bsp_priv->pdev->dev;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
if (speed == 10)
regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5,
RK3399_GMAC_CLK_2_5M);
@@ -988,11 +885,6 @@ static void rk3399_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
struct device *dev = &bsp_priv->pdev->dev;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
if (speed == 10) {
regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5,
RK3399_GMAC_RMII_CLK_2_5M |
@@ -1013,6 +905,149 @@ static const struct rk_gmac_ops rk3399_ops = {
.set_rmii_speed = rk3399_set_rmii_speed,
};
+#define RK3528_VO_GRF_GMAC_CON 0x0018
+#define RK3528_VO_GRF_MACPHY_CON0 0x001c
+#define RK3528_VO_GRF_MACPHY_CON1 0x0020
+#define RK3528_VPU_GRF_GMAC_CON5 0x0018
+#define RK3528_VPU_GRF_GMAC_CON6 0x001c
+
+#define RK3528_GMAC_RXCLK_DLY_ENABLE GRF_BIT(15)
+#define RK3528_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(15)
+#define RK3528_GMAC_TXCLK_DLY_ENABLE GRF_BIT(14)
+#define RK3528_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(14)
+
+#define RK3528_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0xFF, 8)
+#define RK3528_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0xFF, 0)
+
+#define RK3528_GMAC0_PHY_INTF_SEL_RMII GRF_BIT(1)
+#define RK3528_GMAC1_PHY_INTF_SEL_RGMII GRF_CLR_BIT(8)
+#define RK3528_GMAC1_PHY_INTF_SEL_RMII GRF_BIT(8)
+
+#define RK3528_GMAC1_CLK_SELECT_CRU GRF_CLR_BIT(12)
+#define RK3528_GMAC1_CLK_SELECT_IO GRF_BIT(12)
+
+#define RK3528_GMAC0_CLK_RMII_DIV2 GRF_BIT(3)
+#define RK3528_GMAC0_CLK_RMII_DIV20 GRF_CLR_BIT(3)
+#define RK3528_GMAC1_CLK_RMII_DIV2 GRF_BIT(10)
+#define RK3528_GMAC1_CLK_RMII_DIV20 GRF_CLR_BIT(10)
+
+#define RK3528_GMAC1_CLK_RGMII_DIV1 (GRF_CLR_BIT(11) | GRF_CLR_BIT(10))
+#define RK3528_GMAC1_CLK_RGMII_DIV5 (GRF_BIT(11) | GRF_BIT(10))
+#define RK3528_GMAC1_CLK_RGMII_DIV50 (GRF_BIT(11) | GRF_CLR_BIT(10))
+
+#define RK3528_GMAC0_CLK_RMII_GATE GRF_BIT(2)
+#define RK3528_GMAC0_CLK_RMII_NOGATE GRF_CLR_BIT(2)
+#define RK3528_GMAC1_CLK_RMII_GATE GRF_BIT(9)
+#define RK3528_GMAC1_CLK_RMII_NOGATE GRF_CLR_BIT(9)
+
+static void rk3528_set_to_rgmii(struct rk_priv_data *bsp_priv,
+ int tx_delay, int rx_delay)
+{
+ regmap_write(bsp_priv->grf, RK3528_VPU_GRF_GMAC_CON5,
+ RK3528_GMAC1_PHY_INTF_SEL_RGMII);
+
+ regmap_write(bsp_priv->grf, RK3528_VPU_GRF_GMAC_CON5,
+ DELAY_ENABLE(RK3528, tx_delay, rx_delay));
+
+ regmap_write(bsp_priv->grf, RK3528_VPU_GRF_GMAC_CON6,
+ RK3528_GMAC_CLK_RX_DL_CFG(rx_delay) |
+ RK3528_GMAC_CLK_TX_DL_CFG(tx_delay));
+}
+
+static void rk3528_set_to_rmii(struct rk_priv_data *bsp_priv)
+{
+ if (bsp_priv->id == 1)
+ regmap_write(bsp_priv->grf, RK3528_VPU_GRF_GMAC_CON5,
+ RK3528_GMAC1_PHY_INTF_SEL_RMII);
+ else
+ regmap_write(bsp_priv->grf, RK3528_VO_GRF_GMAC_CON,
+ RK3528_GMAC0_PHY_INTF_SEL_RMII |
+ RK3528_GMAC0_CLK_RMII_DIV2);
+}
+
+static void rk3528_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (speed == 10)
+ regmap_write(bsp_priv->grf, RK3528_VPU_GRF_GMAC_CON5,
+ RK3528_GMAC1_CLK_RGMII_DIV50);
+ else if (speed == 100)
+ regmap_write(bsp_priv->grf, RK3528_VPU_GRF_GMAC_CON5,
+ RK3528_GMAC1_CLK_RGMII_DIV5);
+ else if (speed == 1000)
+ regmap_write(bsp_priv->grf, RK3528_VPU_GRF_GMAC_CON5,
+ RK3528_GMAC1_CLK_RGMII_DIV1);
+ else
+ dev_err(dev, "unknown speed value for RGMII! speed=%d", speed);
+}
+
+static void rk3528_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+ unsigned int reg, val;
+
+ if (speed == 10)
+ val = bsp_priv->id == 1 ? RK3528_GMAC1_CLK_RMII_DIV20 :
+ RK3528_GMAC0_CLK_RMII_DIV20;
+ else if (speed == 100)
+ val = bsp_priv->id == 1 ? RK3528_GMAC1_CLK_RMII_DIV2 :
+ RK3528_GMAC0_CLK_RMII_DIV2;
+ else {
+ dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
+ return;
+ }
+
+ reg = bsp_priv->id == 1 ? RK3528_VPU_GRF_GMAC_CON5 :
+ RK3528_VO_GRF_GMAC_CON;
+
+ regmap_write(bsp_priv->grf, reg, val);
+}
+
+static void rk3528_set_clock_selection(struct rk_priv_data *bsp_priv,
+ bool input, bool enable)
+{
+ unsigned int val;
+
+ if (bsp_priv->id == 1) {
+ val = input ? RK3528_GMAC1_CLK_SELECT_IO :
+ RK3528_GMAC1_CLK_SELECT_CRU;
+ val |= enable ? RK3528_GMAC1_CLK_RMII_NOGATE :
+ RK3528_GMAC1_CLK_RMII_GATE;
+ regmap_write(bsp_priv->grf, RK3528_VPU_GRF_GMAC_CON5, val);
+ } else {
+ val = enable ? RK3528_GMAC0_CLK_RMII_NOGATE :
+ RK3528_GMAC0_CLK_RMII_GATE;
+ regmap_write(bsp_priv->grf, RK3528_VO_GRF_GMAC_CON, val);
+ }
+}
+
+static void rk3528_integrated_phy_powerup(struct rk_priv_data *bsp_priv)
+{
+ rk_gmac_integrated_fephy_powerup(bsp_priv, RK3528_VO_GRF_MACPHY_CON0);
+}
+
+static void rk3528_integrated_phy_powerdown(struct rk_priv_data *bsp_priv)
+{
+ rk_gmac_integrated_fephy_powerdown(bsp_priv, RK3528_VO_GRF_MACPHY_CON0);
+}
+
+static const struct rk_gmac_ops rk3528_ops = {
+ .set_to_rgmii = rk3528_set_to_rgmii,
+ .set_to_rmii = rk3528_set_to_rmii,
+ .set_rgmii_speed = rk3528_set_rgmii_speed,
+ .set_rmii_speed = rk3528_set_rmii_speed,
+ .set_clock_selection = rk3528_set_clock_selection,
+ .integrated_phy_powerup = rk3528_integrated_phy_powerup,
+ .integrated_phy_powerdown = rk3528_integrated_phy_powerdown,
+ .regs_valid = true,
+ .regs = {
+ 0xffbd0000, /* gmac0 */
+ 0xffbe0000, /* gmac1 */
+ 0x0, /* sentinel */
+ },
+};
+
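The RK3528 divider encodings follow the usual GMAC clocking scheme: assuming a 125 MHz source for the RGMII path, DIV1/DIV5/DIV50 yield 125, 25 and 2.5 MHz for 1000/100/10 Mbit/s, matching the speed switch in rk3528_set_rgmii_speed() above (the 125 MHz source rate is an inference from the divider names, not stated in the patch). A tiny sketch of that mapping:

/* Illustrative only: divider selected per link speed, mirroring
 * rk3528_set_rgmii_speed(); assumes a 125 MHz RGMII source clock. */
static unsigned int rk3528_rgmii_div_sketch(int speed)
{
	switch (speed) {
	case 1000: return 1;	/* 125 MHz */
	case 100:  return 5;	/* 25 MHz  */
	case 10:   return 50;	/* 2.5 MHz */
	default:   return 0;	/* unsupported speed */
	}
}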
#define RK3568_GRF_GMAC0_CON0 0x0380
#define RK3568_GRF_GMAC0_CON1 0x0384
#define RK3568_GRF_GMAC1_CON0 0x0388
@@ -1037,14 +1072,8 @@ static const struct rk_gmac_ops rk3399_ops = {
static void rk3568_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- struct device *dev = &bsp_priv->pdev->dev;
u32 con0, con1;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
con0 = (bsp_priv->id == 1) ? RK3568_GRF_GMAC1_CON0 :
RK3568_GRF_GMAC0_CON0;
con1 = (bsp_priv->id == 1) ? RK3568_GRF_GMAC1_CON1 :
@@ -1062,14 +1091,8 @@ static void rk3568_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rk3568_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
u32 con1;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
con1 = (bsp_priv->id == 1) ? RK3568_GRF_GMAC1_CON1 :
RK3568_GRF_GMAC0_CON1;
regmap_write(bsp_priv->grf, con1, RK3568_GMAC_PHY_INTF_SEL_RMII);
@@ -1079,20 +1102,11 @@ static void rk3568_set_gmac_speed(struct rk_priv_data *bsp_priv, int speed)
{
struct clk *clk_mac_speed = bsp_priv->clks[RK_CLK_MAC_SPEED].clk;
struct device *dev = &bsp_priv->pdev->dev;
- unsigned long rate;
+ long rate;
int ret;
- switch (speed) {
- case 10:
- rate = 2500000;
- break;
- case 100:
- rate = 25000000;
- break;
- case 1000:
- rate = 125000000;
- break;
- default:
+ rate = rgmii_clock(speed);
+ if (rate < 0) {
dev_err(dev, "unknown speed value for GMAC speed=%d", speed);
return;
}
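
The hunk above (and several later ones in this series) replaces a per-driver speed-to-rate switch with the rgmii_clock() helper. For reference, the helper amounts to roughly the sketch below; the name rgmii_clock_sketch and the -EINVAL return are illustrative, but the rates match the constants the old switch statements used (2.5/25/125 MHz for 10/100/1000 Mb/s):

/* Minimal sketch of the speed-to-rate mapping behind rgmii_clock() */
static inline long rgmii_clock_sketch(int speed)
{
	switch (speed) {
	case SPEED_10:
		return 2500000;
	case SPEED_100:
		return 25000000;
	case SPEED_1000:
		return 125000000;
	default:
		return -EINVAL;		/* callers test for rate < 0 */
	}
}
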
@@ -1156,14 +1170,8 @@ static const struct rk_gmac_ops rk3568_ops = {
static void rk3576_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- struct device *dev = &bsp_priv->pdev->dev;
unsigned int offset_con;
- if (IS_ERR(bsp_priv->grf) || IS_ERR(bsp_priv->php_grf)) {
- dev_err(dev, "Missing rockchip,grf or rockchip,php-grf property\n");
- return;
- }
-
offset_con = bsp_priv->id == 1 ? RK3576_GRF_GMAC_CON1 :
RK3576_GRF_GMAC_CON0;
@@ -1189,14 +1197,8 @@ static void rk3576_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rk3576_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
unsigned int offset_con;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
offset_con = bsp_priv->id == 1 ? RK3576_GRF_GMAC_CON1 :
RK3576_GRF_GMAC_CON0;
@@ -1263,6 +1265,7 @@ static const struct rk_gmac_ops rk3576_ops = {
.set_rgmii_speed = rk3576_set_gmac_speed,
.set_rmii_speed = rk3576_set_gmac_speed,
.set_clock_selection = rk3576_set_clock_selection,
+ .php_grf_required = true,
.regs_valid = true,
.regs = {
0x2a220000, /* gmac0 */
@@ -1315,14 +1318,8 @@ static const struct rk_gmac_ops rk3576_ops = {
static void rk3588_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- struct device *dev = &bsp_priv->pdev->dev;
u32 offset_con, id = bsp_priv->id;
- if (IS_ERR(bsp_priv->grf) || IS_ERR(bsp_priv->php_grf)) {
- dev_err(dev, "Missing rockchip,grf or rockchip,php_grf property\n");
- return;
- }
-
offset_con = bsp_priv->id == 1 ? RK3588_GRF_GMAC_CON9 :
RK3588_GRF_GMAC_CON8;
@@ -1343,13 +1340,6 @@ static void rk3588_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rk3588_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->php_grf)) {
- dev_err(dev, "%s: Missing rockchip,php_grf property\n", __func__);
- return;
- }
-
regmap_write(bsp_priv->php_grf, RK3588_GRF_GMAC_CON0,
RK3588_GMAC_PHY_INTF_SEL_RMII(bsp_priv->id));
@@ -1410,6 +1400,7 @@ static const struct rk_gmac_ops rk3588_ops = {
.set_rgmii_speed = rk3588_set_gmac_speed,
.set_rmii_speed = rk3588_set_gmac_speed,
.set_clock_selection = rk3588_set_clock_selection,
+ .php_grf_required = true,
.regs_valid = true,
.regs = {
0xfe1b0000, /* gmac0 */
@@ -1432,13 +1423,6 @@ static const struct rk_gmac_ops rk3588_ops = {
static void rv1108_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
regmap_write(bsp_priv->grf, RV1108_GRF_GMAC_CON0,
RV1108_GMAC_PHY_INTF_SEL_RMII);
}
@@ -1447,11 +1431,6 @@ static void rv1108_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
struct device *dev = &bsp_priv->pdev->dev;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
if (speed == 10) {
regmap_write(bsp_priv->grf, RV1108_GRF_GMAC_CON0,
RV1108_GMAC_RMII_CLK_2_5M |
@@ -1500,13 +1479,6 @@ static const struct rk_gmac_ops rv1108_ops = {
static void rv1126_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
regmap_write(bsp_priv->grf, RV1126_GRF_GMAC_CON0,
RV1126_GMAC_PHY_INTF_SEL_RGMII |
RV1126_GMAC_M0_RXCLK_DLY_ENABLE |
@@ -1525,13 +1497,6 @@ static void rv1126_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rv1126_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
regmap_write(bsp_priv->grf, RV1126_GRF_GMAC_CON0,
RV1126_GMAC_PHY_INTF_SEL_RMII);
}
@@ -1540,20 +1505,11 @@ static void rv1126_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
struct clk *clk_mac_speed = bsp_priv->clks[RK_CLK_MAC_SPEED].clk;
struct device *dev = &bsp_priv->pdev->dev;
- unsigned long rate;
+ long rate;
int ret;
- switch (speed) {
- case 10:
- rate = 2500000;
- break;
- case 100:
- rate = 25000000;
- break;
- case 1000:
- rate = 125000000;
- break;
- default:
+ rate = rgmii_clock(speed);
+ if (rate < 0) {
dev_err(dev, "unknown speed value for RGMII speed=%d", speed);
return;
}
@@ -1596,50 +1552,6 @@ static const struct rk_gmac_ops rv1126_ops = {
.set_rmii_speed = rv1126_set_rmii_speed,
};
-#define RK_GRF_MACPHY_CON0 0xb00
-#define RK_GRF_MACPHY_CON1 0xb04
-#define RK_GRF_MACPHY_CON2 0xb08
-#define RK_GRF_MACPHY_CON3 0xb0c
-
-#define RK_MACPHY_ENABLE GRF_BIT(0)
-#define RK_MACPHY_DISABLE GRF_CLR_BIT(0)
-#define RK_MACPHY_CFG_CLK_50M GRF_BIT(14)
-#define RK_GMAC2PHY_RMII_MODE (GRF_BIT(6) | GRF_CLR_BIT(7))
-#define RK_GRF_CON2_MACPHY_ID HIWORD_UPDATE(0x1234, 0xffff, 0)
-#define RK_GRF_CON3_MACPHY_ID HIWORD_UPDATE(0x35, 0x3f, 0)
-
-static void rk_gmac_integrated_phy_powerup(struct rk_priv_data *priv)
-{
- if (priv->ops->integrated_phy_powerup)
- priv->ops->integrated_phy_powerup(priv);
-
- regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_MACPHY_CFG_CLK_50M);
- regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_GMAC2PHY_RMII_MODE);
-
- regmap_write(priv->grf, RK_GRF_MACPHY_CON2, RK_GRF_CON2_MACPHY_ID);
- regmap_write(priv->grf, RK_GRF_MACPHY_CON3, RK_GRF_CON3_MACPHY_ID);
-
- if (priv->phy_reset) {
- /* PHY needs to be disabled before trying to reset it */
- regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_MACPHY_DISABLE);
- if (priv->phy_reset)
- reset_control_assert(priv->phy_reset);
- usleep_range(10, 20);
- if (priv->phy_reset)
- reset_control_deassert(priv->phy_reset);
- usleep_range(10, 20);
- regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_MACPHY_ENABLE);
- msleep(30);
- }
-}
-
-static void rk_gmac_integrated_phy_powerdown(struct rk_priv_data *priv)
-{
- regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_MACPHY_DISABLE);
- if (priv->phy_reset)
- reset_control_assert(priv->phy_reset);
-}
-
static int rk_gmac_clk_init(struct plat_stmmacenet_data *plat)
{
struct rk_priv_data *bsp_priv = plat->bsp_priv;
@@ -1767,7 +1679,7 @@ static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev,
if (!bsp_priv)
return ERR_PTR(-ENOMEM);
- of_get_phy_mode(dev->of_node, &bsp_priv->phy_iface);
+ bsp_priv->phy_iface = plat->phy_interface;
bsp_priv->ops = ops;
/* Some SoCs have multiple MAC controllers, which need
@@ -1830,8 +1742,22 @@ static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev,
bsp_priv->grf = syscon_regmap_lookup_by_phandle(dev->of_node,
"rockchip,grf");
- bsp_priv->php_grf = syscon_regmap_lookup_by_phandle(dev->of_node,
- "rockchip,php-grf");
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err_probe(dev, PTR_ERR(bsp_priv->grf),
+ "failed to lookup rockchip,grf\n");
+ return ERR_CAST(bsp_priv->grf);
+ }
+
+ if (ops->php_grf_required) {
+ bsp_priv->php_grf =
+ syscon_regmap_lookup_by_phandle(dev->of_node,
+ "rockchip,php-grf");
+ if (IS_ERR(bsp_priv->php_grf)) {
+ dev_err_probe(dev, PTR_ERR(bsp_priv->php_grf),
+ "failed to lookup rockchip,php-grf\n");
+ return ERR_CAST(bsp_priv->php_grf);
+ }
+ }
if (plat->phy_node) {
bsp_priv->integrated_phy = of_property_read_bool(plat->phy_node,
@@ -1921,16 +1847,16 @@ static int rk_gmac_powerup(struct rk_priv_data *bsp_priv)
pm_runtime_get_sync(dev);
- if (bsp_priv->integrated_phy)
- rk_gmac_integrated_phy_powerup(bsp_priv);
+ if (bsp_priv->integrated_phy && bsp_priv->ops->integrated_phy_powerup)
+ bsp_priv->ops->integrated_phy_powerup(bsp_priv);
return 0;
}
static void rk_gmac_powerdown(struct rk_priv_data *gmac)
{
- if (gmac->integrated_phy)
- rk_gmac_integrated_phy_powerdown(gmac);
+ if (gmac->integrated_phy && gmac->ops->integrated_phy_powerdown)
+ gmac->ops->integrated_phy_powerdown(gmac);
pm_runtime_put_sync(&gmac->pdev->dev);
@@ -1938,9 +1864,10 @@ static void rk_gmac_powerdown(struct rk_priv_data *gmac)
gmac_clk_enable(gmac, false);
}
-static void rk_fix_speed(void *priv, unsigned int speed, unsigned int mode)
+static int rk_set_clk_tx_rate(void *bsp_priv_, struct clk *clk_tx_i,
+ phy_interface_t interface, int speed)
{
- struct rk_priv_data *bsp_priv = priv;
+ struct rk_priv_data *bsp_priv = bsp_priv_;
struct device *dev = &bsp_priv->pdev->dev;
switch (bsp_priv->phy_iface) {
@@ -1958,6 +1885,8 @@ static void rk_fix_speed(void *priv, unsigned int speed, unsigned int mode)
default:
dev_err(dev, "unsupported interface %d", bsp_priv->phy_iface);
}
+
+ return 0;
}
static int rk_gmac_probe(struct platform_device *pdev)
@@ -1984,9 +1913,13 @@ static int rk_gmac_probe(struct platform_device *pdev)
/* If the stmmac is not already selected as gmac4,
* then make sure we fallback to gmac.
*/
- if (!plat_dat->has_gmac4)
+ if (!plat_dat->has_gmac4) {
plat_dat->has_gmac = true;
- plat_dat->fix_mac_speed = rk_fix_speed;
+ plat_dat->rx_fifo_size = 4096;
+ plat_dat->tx_fifo_size = 2048;
+ }
+
+ plat_dat->set_clk_tx_rate = rk_set_clk_tx_rate;
plat_dat->bsp_priv = rk_gmac_setup(pdev, plat_dat, data);
if (IS_ERR(plat_dat->bsp_priv))
@@ -2062,6 +1995,7 @@ static const struct of_device_id rk_gmac_dwmac_match[] = {
{ .compatible = "rockchip,rk3366-gmac", .data = &rk3366_ops },
{ .compatible = "rockchip,rk3368-gmac", .data = &rk3368_ops },
{ .compatible = "rockchip,rk3399-gmac", .data = &rk3399_ops },
+ { .compatible = "rockchip,rk3528-gmac", .data = &rk3528_ops },
{ .compatible = "rockchip,rk3568-gmac", .data = &rk3568_ops },
{ .compatible = "rockchip,rk3576-gmac", .data = &rk3576_ops },
{ .compatible = "rockchip,rk3588-gmac", .data = &rk3588_ops },
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c
new file mode 100644
index 000000000000..221539d760bc
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c
@@ -0,0 +1,186 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NXP S32G/R GMAC glue layer
+ *
+ * Copyright 2019-2024 NXP
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/ethtool.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_mdio.h>
+#include <linux/of_address.h>
+#include <linux/phy.h>
+#include <linux/phylink.h>
+#include <linux/platform_device.h>
+#include <linux/stmmac.h>
+
+#include "stmmac_platform.h"
+
+#define GMAC_INTF_RATE_125M 125000000 /* 125MHz */
+
+/* SoC PHY interface control register */
+#define PHY_INTF_SEL_MII 0x00
+#define PHY_INTF_SEL_SGMII 0x01
+#define PHY_INTF_SEL_RGMII 0x02
+#define PHY_INTF_SEL_RMII 0x08
+
+struct s32_priv_data {
+ void __iomem *ioaddr;
+ void __iomem *ctrl_sts;
+ struct device *dev;
+ phy_interface_t *intf_mode;
+ struct clk *tx_clk;
+ struct clk *rx_clk;
+};
+
+static int s32_gmac_write_phy_intf_select(struct s32_priv_data *gmac)
+{
+ writel(PHY_INTF_SEL_RGMII, gmac->ctrl_sts);
+
+ dev_dbg(gmac->dev, "PHY mode set to %s\n", phy_modes(*gmac->intf_mode));
+
+ return 0;
+}
+
+static int s32_gmac_init(struct platform_device *pdev, void *priv)
+{
+ struct s32_priv_data *gmac = priv;
+ int ret;
+
+ /* Set initial TX interface clock */
+ ret = clk_prepare_enable(gmac->tx_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't enable tx clock\n");
+ return ret;
+ }
+ ret = clk_set_rate(gmac->tx_clk, GMAC_INTF_RATE_125M);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't set tx clock\n");
+ goto err_tx_disable;
+ }
+
+ /* Set initial RX interface clock */
+ ret = clk_prepare_enable(gmac->rx_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't enable rx clock\n");
+ goto err_tx_disable;
+ }
+ ret = clk_set_rate(gmac->rx_clk, GMAC_INTF_RATE_125M);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't set rx clock\n");
+ goto err_txrx_disable;
+ }
+
+ /* Set interface mode */
+ ret = s32_gmac_write_phy_intf_select(gmac);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't set PHY interface mode\n");
+ goto err_txrx_disable;
+ }
+
+ return 0;
+
+err_txrx_disable:
+ clk_disable_unprepare(gmac->rx_clk);
+err_tx_disable:
+ clk_disable_unprepare(gmac->tx_clk);
+ return ret;
+}
+
+static void s32_gmac_exit(struct platform_device *pdev, void *priv)
+{
+ struct s32_priv_data *gmac = priv;
+
+ clk_disable_unprepare(gmac->tx_clk);
+ clk_disable_unprepare(gmac->rx_clk);
+}
+
+static int s32_dwmac_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat;
+ struct device *dev = &pdev->dev;
+ struct stmmac_resources res;
+ struct s32_priv_data *gmac;
+ int ret;
+
+ gmac = devm_kzalloc(&pdev->dev, sizeof(*gmac), GFP_KERNEL);
+ if (!gmac)
+ return -ENOMEM;
+
+ gmac->dev = &pdev->dev;
+
+ ret = stmmac_get_platform_resources(pdev, &res);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to get platform resources\n");
+
+ plat = devm_stmmac_probe_config_dt(pdev, res.mac);
+ if (IS_ERR(plat))
+ return dev_err_probe(dev, PTR_ERR(plat),
+ "dt configuration failed\n");
+
+ /* PHY interface mode control reg */
+ gmac->ctrl_sts = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
+ if (IS_ERR(gmac->ctrl_sts))
+ return dev_err_probe(dev, PTR_ERR(gmac->ctrl_sts),
+ "S32CC config region is missing\n");
+
+ /* tx clock */
+ gmac->tx_clk = devm_clk_get(&pdev->dev, "tx");
+ if (IS_ERR(gmac->tx_clk))
+ return dev_err_probe(dev, PTR_ERR(gmac->tx_clk),
+ "tx clock not found\n");
+
+ /* rx clock */
+ gmac->rx_clk = devm_clk_get(&pdev->dev, "rx");
+ if (IS_ERR(gmac->rx_clk))
+ return dev_err_probe(dev, PTR_ERR(gmac->rx_clk),
+ "rx clock not found\n");
+
+ gmac->intf_mode = &plat->phy_interface;
+ gmac->ioaddr = res.addr;
+
+ /* S32CC core feature set */
+ plat->has_gmac4 = true;
+ plat->pmt = 1;
+ plat->flags |= STMMAC_FLAG_SPH_DISABLE;
+ plat->rx_fifo_size = 20480;
+ plat->tx_fifo_size = 20480;
+
+ plat->init = s32_gmac_init;
+ plat->exit = s32_gmac_exit;
+
+ plat->clk_tx_i = gmac->tx_clk;
+ plat->set_clk_tx_rate = stmmac_set_clk_tx_rate;
+
+ plat->bsp_priv = gmac;
+
+ return stmmac_pltfr_probe(pdev, plat, &res);
+}
+
+static const struct of_device_id s32_dwmac_match[] = {
+ { .compatible = "nxp,s32g2-dwmac" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, s32_dwmac_match);
+
+static struct platform_driver s32_dwmac_driver = {
+ .probe = s32_dwmac_probe,
+ .remove = stmmac_pltfr_remove,
+ .driver = {
+ .name = "s32-dwmac",
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = s32_dwmac_match,
+ },
+};
+module_platform_driver(s32_dwmac_driver);
+
+MODULE_AUTHOR("Jan Petrous (OSS) <jan.petrous@oss.nxp.com>");
+MODULE_DESCRIPTION("NXP S32G/R common chassis GMAC driver");
+MODULE_LICENSE("GPL");
+
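
Note that dwmac-s32 does not implement its own speed callback: it hands the "tx" clock to the core via plat->clk_tx_i and reuses the generic stmmac_set_clk_tx_rate() helper. A plausible sketch of such a generic callback is shown below; the body is an assumption based on the rgmii_clock() conversions elsewhere in this series, not a copy of the upstream implementation:

/* Assumed shape of a generic set_clk_tx_rate() callback: retune the TX
 * interface clock passed in plat->clk_tx_i to match the negotiated speed.
 */
static int generic_set_clk_tx_rate_sketch(void *bsp_priv, struct clk *clk_tx_i,
					  phy_interface_t interface, int speed)
{
	long rate = rgmii_clock(speed);

	/* Speeds without a fixed RGMII rate (e.g. 2500) are left alone */
	if (rate < 0)
		return 0;

	return clk_set_rate(clk_tx_i, rate);
}
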
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index 16020b72dec8..72b50f6d72f4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -50,6 +50,7 @@ struct socfpga_dwmac {
u32 reg_offset;
u32 reg_shift;
struct device *dev;
+ struct plat_stmmacenet_data *plat_dat;
struct regmap *sys_mgr_base_addr;
struct reset_control *stmmac_rst;
struct reset_control *stmmac_ocp_rst;
@@ -58,17 +59,15 @@ struct socfpga_dwmac {
void __iomem *sgmii_adapter_base;
bool f2h_ptp_ref_clk;
const struct socfpga_dwmac_ops *ops;
- struct mdio_device *pcs_mdiodev;
};
-static void socfpga_dwmac_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode)
+static void socfpga_dwmac_fix_mac_speed(void *bsp_priv, int speed,
+ unsigned int mode)
{
- struct socfpga_dwmac *dwmac = (struct socfpga_dwmac *)priv;
+ struct socfpga_dwmac *dwmac = (struct socfpga_dwmac *)bsp_priv;
+ struct stmmac_priv *priv = netdev_priv(dev_get_drvdata(dwmac->dev));
void __iomem *splitter_base = dwmac->splitter_base;
void __iomem *sgmii_adapter_base = dwmac->sgmii_adapter_base;
- struct device *dev = dwmac->dev;
- struct net_device *ndev = dev_get_drvdata(dev);
- struct phy_device *phy_dev = ndev->phydev;
u32 val;
if (sgmii_adapter_base)
@@ -95,7 +94,9 @@ static void socfpga_dwmac_fix_mac_speed(void *priv, unsigned int speed, unsigned
writel(val, splitter_base + EMAC_SPLITTER_CTRL_REG);
}
- if (phy_dev && sgmii_adapter_base)
+ if ((priv->plat->phy_interface == PHY_INTERFACE_MODE_SGMII ||
+ priv->plat->phy_interface == PHY_INTERFACE_MODE_1000BASEX) &&
+ sgmii_adapter_base)
writew(SGMII_ADAPTER_ENABLE,
sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
}
@@ -233,10 +234,7 @@ err_node_put:
static int socfpga_get_plat_phymode(struct socfpga_dwmac *dwmac)
{
- struct net_device *ndev = dev_get_drvdata(dwmac->dev);
- struct stmmac_priv *priv = netdev_priv(ndev);
-
- return priv->plat->mac_interface;
+ return dwmac->plat_dat->mac_interface;
}
static void socfpga_sgmii_config(struct socfpga_dwmac *dwmac, bool enable)
@@ -258,6 +256,7 @@ static int socfpga_set_phy_mode_common(int phymode, u32 *val)
case PHY_INTERFACE_MODE_MII:
case PHY_INTERFACE_MODE_GMII:
case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
*val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII;
break;
case PHY_INTERFACE_MODE_RMII:
@@ -435,6 +434,13 @@ static struct phylink_pcs *socfpga_dwmac_select_pcs(struct stmmac_priv *priv,
return priv->hw->phylink_pcs;
}
+static int socfpga_dwmac_init(struct platform_device *pdev, void *bsp_priv)
+{
+ struct socfpga_dwmac *dwmac = bsp_priv;
+
+ return dwmac->ops->set_phy_mode(dwmac);
+}
+
static int socfpga_dwmac_probe(struct platform_device *pdev)
{
struct plat_stmmacenet_data *plat_dat;
@@ -442,8 +448,6 @@ static int socfpga_dwmac_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
int ret;
struct socfpga_dwmac *dwmac;
- struct net_device *ndev;
- struct stmmac_priv *stpriv;
const struct socfpga_dwmac_ops *ops;
ops = device_get_match_data(&pdev->dev);
@@ -479,9 +483,17 @@ static int socfpga_dwmac_probe(struct platform_device *pdev)
return ret;
}
+ /* The socfpga driver needs to control the stmmac reset to set the phy
+ * mode. Create a copy of the core reset handle so it can be used by
+ * the driver later.
+ */
+ dwmac->stmmac_rst = plat_dat->stmmac_rst;
dwmac->ops = ops;
+ dwmac->plat_dat = plat_dat;
+
plat_dat->bsp_priv = dwmac;
plat_dat->fix_mac_speed = socfpga_dwmac_fix_mac_speed;
+ plat_dat->init = socfpga_dwmac_init;
plat_dat->pcs_init = socfpga_dwmac_pcs_init;
plat_dat->pcs_exit = socfpga_dwmac_pcs_exit;
plat_dat->select_pcs = socfpga_dwmac_select_pcs;
@@ -489,85 +501,9 @@ static int socfpga_dwmac_probe(struct platform_device *pdev)
plat_dat->riwt_off = 1;
- ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
- if (ret)
- return ret;
-
- ndev = platform_get_drvdata(pdev);
- stpriv = netdev_priv(ndev);
-
- /* The socfpga driver needs to control the stmmac reset to set the phy
- * mode. Create a copy of the core reset handle so it can be used by
- * the driver later.
- */
- dwmac->stmmac_rst = stpriv->plat->stmmac_rst;
-
- ret = ops->set_phy_mode(dwmac);
- if (ret)
- goto err_dvr_remove;
-
- return 0;
-
-err_dvr_remove:
- stmmac_dvr_remove(&pdev->dev);
-
- return ret;
+ return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res);
}
-#ifdef CONFIG_PM_SLEEP
-static int socfpga_dwmac_resume(struct device *dev)
-{
- struct net_device *ndev = dev_get_drvdata(dev);
- struct stmmac_priv *priv = netdev_priv(ndev);
- struct socfpga_dwmac *dwmac_priv = get_stmmac_bsp_priv(dev);
-
- dwmac_priv->ops->set_phy_mode(priv->plat->bsp_priv);
-
- /* Before the enet controller is suspended, the phy is suspended.
- * This causes the phy clock to be gated. The enet controller is
- * resumed before the phy, so the clock is still gated "off" when
- * the enet controller is resumed. This code makes sure the phy
- * is "resumed" before reinitializing the enet controller since
- * the enet controller depends on an active phy clock to complete
- * a DMA reset. A DMA reset will "time out" if executed
- * with no phy clock input on the Synopsys enet controller.
- * Verified through Synopsys Case #8000711656.
- *
- * Note that the phy clock is also gated when the phy is isolated.
- * Phy "suspend" and "isolate" controls are located in phy basic
- * control register 0, and can be modified by the phy driver
- * framework.
- */
- if (ndev->phydev)
- phy_resume(ndev->phydev);
-
- return stmmac_resume(dev);
-}
-#endif /* CONFIG_PM_SLEEP */
-
-static int __maybe_unused socfpga_dwmac_runtime_suspend(struct device *dev)
-{
- struct net_device *ndev = dev_get_drvdata(dev);
- struct stmmac_priv *priv = netdev_priv(ndev);
-
- stmmac_bus_clks_config(priv, false);
-
- return 0;
-}
-
-static int __maybe_unused socfpga_dwmac_runtime_resume(struct device *dev)
-{
- struct net_device *ndev = dev_get_drvdata(dev);
- struct stmmac_priv *priv = netdev_priv(ndev);
-
- return stmmac_bus_clks_config(priv, true);
-}
-
-static const struct dev_pm_ops socfpga_dwmac_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(stmmac_suspend, socfpga_dwmac_resume)
- SET_RUNTIME_PM_OPS(socfpga_dwmac_runtime_suspend, socfpga_dwmac_runtime_resume, NULL)
-};
-
static const struct socfpga_dwmac_ops socfpga_gen5_ops = {
.set_phy_mode = socfpga_gen5_set_phy_mode,
};
@@ -585,10 +521,9 @@ MODULE_DEVICE_TABLE(of, socfpga_dwmac_match);
static struct platform_driver socfpga_dwmac_driver = {
.probe = socfpga_dwmac_probe,
- .remove = stmmac_pltfr_remove,
.driver = {
.name = "socfpga-dwmac",
- .pm = &socfpga_dwmac_pm_ops,
+ .pm = &stmmac_pltfr_pm_ops,
.of_match_table = socfpga_dwmac_match,
},
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sophgo.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sophgo.c
new file mode 100644
index 000000000000..3303784cbbf8
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sophgo.c
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Sophgo DWMAC platform driver
+ *
+ * Copyright (C) 2024 Inochi Amaoto <inochiama@gmail.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+
+#include "stmmac_platform.h"
+
+static int sophgo_sg2044_dwmac_init(struct platform_device *pdev,
+ struct plat_stmmacenet_data *plat_dat,
+ struct stmmac_resources *stmmac_res)
+{
+ plat_dat->clk_tx_i = devm_clk_get_enabled(&pdev->dev, "tx");
+ if (IS_ERR(plat_dat->clk_tx_i))
+ return dev_err_probe(&pdev->dev, PTR_ERR(plat_dat->clk_tx_i),
+ "failed to get tx clock\n");
+
+ plat_dat->flags |= STMMAC_FLAG_SPH_DISABLE;
+ plat_dat->set_clk_tx_rate = stmmac_set_clk_tx_rate;
+ plat_dat->multicast_filter_bins = 0;
+ plat_dat->unicast_filter_entries = 1;
+
+ return 0;
+}
+
+static int sophgo_dwmac_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to get platform resources\n");
+
+ plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return dev_err_probe(dev, PTR_ERR(plat_dat),
+ "failed to parse DT parameters\n");
+
+ ret = sophgo_sg2044_dwmac_init(pdev, plat_dat, &stmmac_res);
+ if (ret)
+ return ret;
+
+ return stmmac_dvr_probe(dev, plat_dat, &stmmac_res);
+}
+
+static const struct of_device_id sophgo_dwmac_match[] = {
+ { .compatible = "sophgo,sg2044-dwmac" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sophgo_dwmac_match);
+
+static struct platform_driver sophgo_dwmac_driver = {
+ .probe = sophgo_dwmac_probe,
+ .remove = stmmac_pltfr_remove,
+ .driver = {
+ .name = "sophgo-dwmac",
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = sophgo_dwmac_match,
+ },
+};
+module_platform_driver(sophgo_dwmac_driver);
+
+MODULE_AUTHOR("Inochi Amaoto <inochiama@gmail.com>");
+MODULE_DESCRIPTION("Sophgo DWMAC platform driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
index 421666279dd3..2013d7477eb7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
@@ -27,38 +27,9 @@ struct starfive_dwmac_data {
struct starfive_dwmac {
struct device *dev;
- struct clk *clk_tx;
const struct starfive_dwmac_data *data;
};
-static void starfive_dwmac_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode)
-{
- struct starfive_dwmac *dwmac = priv;
- unsigned long rate;
- int err;
-
- rate = clk_get_rate(dwmac->clk_tx);
-
- switch (speed) {
- case SPEED_1000:
- rate = 125000000;
- break;
- case SPEED_100:
- rate = 25000000;
- break;
- case SPEED_10:
- rate = 2500000;
- break;
- default:
- dev_err(dwmac->dev, "invalid speed %u\n", speed);
- break;
- }
-
- err = clk_set_rate(dwmac->clk_tx, rate);
- if (err)
- dev_err(dwmac->dev, "failed to set tx rate %lu\n", rate);
-}
-
static int starfive_dwmac_set_mode(struct plat_stmmacenet_data *plat_dat)
{
struct starfive_dwmac *dwmac = plat_dat->bsp_priv;
@@ -133,9 +104,9 @@ static int starfive_dwmac_probe(struct platform_device *pdev)
dwmac->data = device_get_match_data(&pdev->dev);
- dwmac->clk_tx = devm_clk_get_enabled(&pdev->dev, "tx");
- if (IS_ERR(dwmac->clk_tx))
- return dev_err_probe(&pdev->dev, PTR_ERR(dwmac->clk_tx),
+ plat_dat->clk_tx_i = devm_clk_get_enabled(&pdev->dev, "tx");
+ if (IS_ERR(plat_dat->clk_tx_i))
+ return dev_err_probe(&pdev->dev, PTR_ERR(plat_dat->clk_tx_i),
"error getting tx clock\n");
clk_gtx = devm_clk_get_enabled(&pdev->dev, "gtx");
@@ -150,9 +121,10 @@ static int starfive_dwmac_probe(struct platform_device *pdev)
* internally, because rgmii_rxin will be adaptively adjusted.
*/
if (!device_property_read_bool(&pdev->dev, "starfive,tx-use-rgmii-clk"))
- plat_dat->fix_mac_speed = starfive_dwmac_fix_mac_speed;
+ plat_dat->set_clk_tx_rate = stmmac_set_clk_tx_rate;
dwmac->dev = &pdev->dev;
+ plat_dat->flags |= STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP;
plat_dat->bsp_priv = dwmac;
plat_dat->dma_cfg->dche = true;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
index a6ff02d905a9..53d5ce1f6dc6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
@@ -21,17 +21,9 @@
#include "stmmac_platform.h"
-#define DWMAC_125MHZ 125000000
#define DWMAC_50MHZ 50000000
-#define DWMAC_25MHZ 25000000
-#define DWMAC_2_5MHZ 2500000
-#define IS_PHY_IF_MODE_RGMII(iface) (iface == PHY_INTERFACE_MODE_RGMII || \
- iface == PHY_INTERFACE_MODE_RGMII_ID || \
- iface == PHY_INTERFACE_MODE_RGMII_RXID || \
- iface == PHY_INTERFACE_MODE_RGMII_TXID)
-
-#define IS_PHY_IF_MODE_GBIT(iface) (IS_PHY_IF_MODE_RGMII(iface) || \
+#define IS_PHY_IF_MODE_GBIT(iface) (phy_interface_mode_is_rgmii(iface) || \
iface == PHY_INTERFACE_MODE_GMII)
/* STiH4xx register definitions (STiH407/STiH410 families)
@@ -102,12 +94,12 @@ struct sti_dwmac {
int clk_sel_reg; /* GMAC ext clk selection register */
struct regmap *regmap;
bool gmac_en;
- u32 speed;
- void (*fix_retime_src)(void *priv, unsigned int speed, unsigned int mode);
+ int speed;
+ void (*fix_retime_src)(void *priv, int speed, unsigned int mode);
};
struct sti_dwmac_of_data {
- void (*fix_retime_src)(void *priv, unsigned int speed, unsigned int mode);
+ void (*fix_retime_src)(void *priv, int speed, unsigned int mode);
};
static u32 phy_intf_sels[] = {
@@ -135,12 +127,12 @@ static u32 stih4xx_tx_retime_val[] = {
| STIH4XX_ETH_SEL_INTERNAL_NOTEXT_PHYCLK,
};
-static void stih4xx_fix_retime_src(void *priv, u32 spd, unsigned int mode)
+static void stih4xx_fix_retime_src(void *priv, int spd, unsigned int mode)
{
struct sti_dwmac *dwmac = priv;
u32 src = dwmac->tx_retime_src;
u32 reg = dwmac->ctrl_reg;
- u32 freq = 0;
+ long freq = 0;
if (dwmac->interface == PHY_INTERFACE_MODE_MII) {
src = TX_RETIME_SRC_TXCLK;
@@ -151,21 +143,16 @@ static void stih4xx_fix_retime_src(void *priv, u32 spd, unsigned int mode)
src = TX_RETIME_SRC_CLKGEN;
freq = DWMAC_50MHZ;
}
- } else if (IS_PHY_IF_MODE_RGMII(dwmac->interface)) {
+ } else if (phy_interface_mode_is_rgmii(dwmac->interface)) {
/* On GiGa clk source can be either ext or from clkgen */
- if (spd == SPEED_1000) {
- freq = DWMAC_125MHZ;
- } else {
+ freq = rgmii_clock(spd);
+
+ if (spd != SPEED_1000 && freq > 0)
/* Switch to clkgen for these speeds */
src = TX_RETIME_SRC_CLKGEN;
- if (spd == SPEED_100)
- freq = DWMAC_25MHZ;
- else if (spd == SPEED_10)
- freq = DWMAC_2_5MHZ;
- }
}
- if (src == TX_RETIME_SRC_CLKGEN && freq)
+ if (src == TX_RETIME_SRC_CLKGEN && freq > 0)
clk_set_rate(dwmac->clk, freq);
regmap_update_bits(dwmac->regmap, reg, STIH4XX_RETIME_SRC_MASK,
@@ -193,7 +180,8 @@ static int sti_dwmac_set_mode(struct sti_dwmac *dwmac)
}
static int sti_dwmac_parse_data(struct sti_dwmac *dwmac,
- struct platform_device *pdev)
+ struct platform_device *pdev,
+ struct plat_stmmacenet_data *plat_dat)
{
struct resource *res;
struct device *dev = &pdev->dev;
@@ -207,22 +195,12 @@ static int sti_dwmac_parse_data(struct sti_dwmac *dwmac,
if (res)
dwmac->clk_sel_reg = res->start;
- regmap = syscon_regmap_lookup_by_phandle(np, "st,syscon");
+ regmap = syscon_regmap_lookup_by_phandle_args(np, "st,syscon",
+ 1, &dwmac->ctrl_reg);
if (IS_ERR(regmap))
return PTR_ERR(regmap);
- err = of_property_read_u32_index(np, "st,syscon", 1, &dwmac->ctrl_reg);
- if (err) {
- dev_err(dev, "Can't get sysconfig ctrl offset (%d)\n", err);
- return err;
- }
-
- err = of_get_phy_mode(np, &dwmac->interface);
- if (err && err != -ENODEV) {
- dev_err(dev, "Can't get phy-mode\n");
- return err;
- }
-
+ dwmac->interface = plat_dat->phy_interface;
dwmac->regmap = regmap;
dwmac->gmac_en = of_property_read_bool(np, "st,gmac_en");
dwmac->ext_phyclk = of_property_read_bool(np, "st,ext-phyclk");
@@ -255,6 +233,29 @@ static int sti_dwmac_parse_data(struct sti_dwmac *dwmac,
return 0;
}
+static int sti_dwmac_init(struct platform_device *pdev, void *bsp_priv)
+{
+ struct sti_dwmac *dwmac = bsp_priv;
+ int ret;
+
+ ret = clk_prepare_enable(dwmac->clk);
+ if (ret)
+ return ret;
+
+ ret = sti_dwmac_set_mode(dwmac);
+ if (ret)
+ clk_disable_unprepare(dwmac->clk);
+
+ return ret;
+}
+
+static void sti_dwmac_exit(struct platform_device *pdev, void *bsp_priv)
+{
+ struct sti_dwmac *dwmac = bsp_priv;
+
+ clk_disable_unprepare(dwmac->clk);
+}
+
static int sti_dwmac_probe(struct platform_device *pdev)
{
struct plat_stmmacenet_data *plat_dat;
@@ -281,7 +282,7 @@ static int sti_dwmac_probe(struct platform_device *pdev)
if (!dwmac)
return -ENOMEM;
- ret = sti_dwmac_parse_data(dwmac, pdev);
+ ret = sti_dwmac_parse_data(dwmac, pdev, plat_dat);
if (ret) {
dev_err(&pdev->dev, "Unable to parse OF data\n");
return ret;
@@ -291,61 +292,12 @@ static int sti_dwmac_probe(struct platform_device *pdev)
plat_dat->bsp_priv = dwmac;
plat_dat->fix_mac_speed = data->fix_retime_src;
+ plat_dat->init = sti_dwmac_init;
+ plat_dat->exit = sti_dwmac_exit;
- ret = clk_prepare_enable(dwmac->clk);
- if (ret)
- return ret;
-
- ret = sti_dwmac_set_mode(dwmac);
- if (ret)
- goto disable_clk;
-
- ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
- if (ret)
- goto disable_clk;
-
- return 0;
-
-disable_clk:
- clk_disable_unprepare(dwmac->clk);
-
- return ret;
+ return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res);
}
-static void sti_dwmac_remove(struct platform_device *pdev)
-{
- struct sti_dwmac *dwmac = get_stmmac_bsp_priv(&pdev->dev);
-
- stmmac_dvr_remove(&pdev->dev);
-
- clk_disable_unprepare(dwmac->clk);
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int sti_dwmac_suspend(struct device *dev)
-{
- struct sti_dwmac *dwmac = get_stmmac_bsp_priv(dev);
- int ret = stmmac_suspend(dev);
-
- clk_disable_unprepare(dwmac->clk);
-
- return ret;
-}
-
-static int sti_dwmac_resume(struct device *dev)
-{
- struct sti_dwmac *dwmac = get_stmmac_bsp_priv(dev);
-
- clk_prepare_enable(dwmac->clk);
- sti_dwmac_set_mode(dwmac);
-
- return stmmac_resume(dev);
-}
-#endif /* CONFIG_PM_SLEEP */
-
-static SIMPLE_DEV_PM_OPS(sti_dwmac_pm_ops, sti_dwmac_suspend,
- sti_dwmac_resume);
-
static const struct sti_dwmac_of_data stih4xx_dwmac_data = {
.fix_retime_src = stih4xx_fix_retime_src,
};
@@ -358,10 +310,9 @@ MODULE_DEVICE_TABLE(of, sti_dwmac_match);
static struct platform_driver sti_dwmac_driver = {
.probe = sti_dwmac_probe,
- .remove = sti_dwmac_remove,
.driver = {
.name = "sti-dwmac",
- .pm = &sti_dwmac_pm_ops,
+ .pm = &stmmac_pltfr_pm_ops,
.of_match_table = sti_dwmac_match,
},
};
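
The sti (and socfpga) conversions above fold the SoC-specific clock and mode setup into plat->init/plat->exit, so the shared stmmac_pltfr_pm_ops can replace the bespoke suspend/resume handlers: the platform core calls init() at probe and resume, and exit() at remove and suspend. The skeleton below illustrates the resulting glue-driver shape; the example_glue_* names are placeholders standing in for callbacks such as sti_dwmac_init()/sti_dwmac_exit():

/* Illustrative glue probe after conversion to the pltfr helpers */
static int example_glue_probe(struct platform_device *pdev)
{
	struct plat_stmmacenet_data *plat_dat;
	struct stmmac_resources res;
	int ret;

	ret = stmmac_get_platform_resources(pdev, &res);
	if (ret)
		return ret;

	plat_dat = devm_stmmac_probe_config_dt(pdev, res.mac);
	if (IS_ERR(plat_dat))
		return PTR_ERR(plat_dat);

	plat_dat->init = example_glue_init;	/* called at probe and resume  */
	plat_dat->exit = example_glue_exit;	/* called at remove and suspend */

	return devm_stmmac_pltfr_probe(pdev, plat_dat, &res);
}
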
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
index 1e8bac665cc9..1eb16eec9c0d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
@@ -119,7 +119,7 @@ struct stm32_ops {
u32 syscfg_clr_off;
};
-static int stm32_dwmac_clk_enable(struct stm32_dwmac *dwmac, bool resume)
+static int stm32_dwmac_clk_enable(struct stm32_dwmac *dwmac)
{
int ret;
@@ -127,11 +127,9 @@ static int stm32_dwmac_clk_enable(struct stm32_dwmac *dwmac, bool resume)
if (ret)
goto err_clk_tx;
- if (!dwmac->ops->clk_rx_enable_in_suspend || !resume) {
- ret = clk_prepare_enable(dwmac->clk_rx);
- if (ret)
- goto err_clk_rx;
- }
+ ret = clk_prepare_enable(dwmac->clk_rx);
+ if (ret)
+ goto err_clk_rx;
ret = clk_prepare_enable(dwmac->syscfg_clk);
if (ret)
@@ -148,15 +146,14 @@ static int stm32_dwmac_clk_enable(struct stm32_dwmac *dwmac, bool resume)
err_clk_eth_ck:
clk_disable_unprepare(dwmac->syscfg_clk);
err_syscfg_clk:
- if (!dwmac->ops->clk_rx_enable_in_suspend || !resume)
- clk_disable_unprepare(dwmac->clk_rx);
+ clk_disable_unprepare(dwmac->clk_rx);
err_clk_rx:
clk_disable_unprepare(dwmac->clk_tx);
err_clk_tx:
return ret;
}
-static int stm32_dwmac_init(struct plat_stmmacenet_data *plat_dat, bool resume)
+static int stm32_dwmac_init(struct plat_stmmacenet_data *plat_dat)
{
struct stm32_dwmac *dwmac = plat_dat->bsp_priv;
int ret;
@@ -167,7 +164,7 @@ static int stm32_dwmac_init(struct plat_stmmacenet_data *plat_dat, bool resume)
return ret;
}
- return stm32_dwmac_clk_enable(dwmac, resume);
+ return stm32_dwmac_clk_enable(dwmac);
}
static int stm32mp1_select_ethck_external(struct plat_stmmacenet_data *plat_dat)
@@ -382,12 +379,10 @@ static int stm32mcu_set_mode(struct plat_stmmacenet_data *plat_dat)
SYSCFG_MCU_ETH_MASK, val << 23);
}
-static void stm32_dwmac_clk_disable(struct stm32_dwmac *dwmac, bool suspend)
+static void stm32_dwmac_clk_disable(struct stm32_dwmac *dwmac)
{
clk_disable_unprepare(dwmac->clk_tx);
- if (!dwmac->ops->clk_rx_enable_in_suspend || !suspend)
- clk_disable_unprepare(dwmac->clk_rx);
-
+ clk_disable_unprepare(dwmac->clk_rx);
clk_disable_unprepare(dwmac->syscfg_clk);
if (dwmac->enable_eth_ck)
clk_disable_unprepare(dwmac->clk_eth_ck);
@@ -419,16 +414,11 @@ static int stm32_dwmac_parse_data(struct stm32_dwmac *dwmac,
}
/* Get mode register */
- dwmac->regmap = syscon_regmap_lookup_by_phandle(np, "st,syscon");
+ dwmac->regmap = syscon_regmap_lookup_by_phandle_args(np, "st,syscon",
+ 1, &dwmac->mode_reg);
if (IS_ERR(dwmac->regmap))
return PTR_ERR(dwmac->regmap);
- err = of_property_read_u32_index(np, "st,syscon", 1, &dwmac->mode_reg);
- if (err) {
- dev_err(dev, "Can't get sysconfig mode offset (%d)\n", err);
- return err;
- }
-
if (dwmac->ops->is_mp2)
return 0;
@@ -543,20 +533,35 @@ static int stm32_dwmac_probe(struct platform_device *pdev)
return ret;
}
+ plat_dat->flags |= STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP;
plat_dat->bsp_priv = dwmac;
- ret = stm32_dwmac_init(plat_dat, false);
+ ret = stm32_dwmac_init(plat_dat);
if (ret)
return ret;
+ /* If this platform requires the clock to be running in suspend,
+ * prepare and enable the receive clock an additional time to keep
+ * it running.
+ */
+ if (dwmac->ops->clk_rx_enable_in_suspend) {
+ ret = clk_prepare_enable(dwmac->clk_rx);
+ if (ret)
+ goto err_clk_disable;
+ }
+
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
if (ret)
- goto err_clk_disable;
+ goto err_clk_disable_suspend;
return 0;
+err_clk_disable_suspend:
+ if (dwmac->ops->clk_rx_enable_in_suspend)
+ clk_disable_unprepare(dwmac->clk_rx);
+
err_clk_disable:
- stm32_dwmac_clk_disable(dwmac, false);
+ stm32_dwmac_clk_disable(dwmac);
return ret;
}
@@ -569,7 +574,15 @@ static void stm32_dwmac_remove(struct platform_device *pdev)
stmmac_dvr_remove(&pdev->dev);
- stm32_dwmac_clk_disable(dwmac, false);
+ /* If this platform requires the clock to be running in suspend,
+ * we need to disable and unprepare the receive clock an additional
+ * time to balance the extra clk_prepare_enable() in the probe
+ * function.
+ */
+ if (dwmac->ops->clk_rx_enable_in_suspend)
+ clk_disable_unprepare(dwmac->clk_rx);
+
+ stm32_dwmac_clk_disable(dwmac);
if (dwmac->irq_pwr_wakeup >= 0) {
dev_pm_clear_wake_irq(&pdev->dev);
@@ -600,7 +613,7 @@ static int stm32_dwmac_suspend(struct device *dev)
if (ret)
return ret;
- stm32_dwmac_clk_disable(dwmac, true);
+ stm32_dwmac_clk_disable(dwmac);
if (dwmac->ops->suspend)
ret = dwmac->ops->suspend(dwmac);
@@ -618,7 +631,7 @@ static int stm32_dwmac_resume(struct device *dev)
if (dwmac->ops->resume)
dwmac->ops->resume(dwmac);
- ret = stm32_dwmac_init(priv->plat, true);
+ ret = stm32_dwmac_init(priv->plat);
if (ret)
return ret;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index 4b7b2582a120..2796dc426943 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -964,7 +964,7 @@ static int sun8i_dwmac_set_syscon(struct device *dev,
/* of_mdio_parse_addr returns a valid (0 ~ 31) PHY
* address. No need to mask it again.
*/
- reg |= 1 << H3_EPHY_ADDR_SHIFT;
+ reg |= ret << H3_EPHY_ADDR_SHIFT;
} else {
/* For SoCs without internal PHY the PHY selection bit should be
* set to 0 (external PHY).
@@ -1155,11 +1155,10 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
struct stmmac_resources stmmac_res;
struct sunxi_priv_data *gmac;
struct device *dev = &pdev->dev;
- phy_interface_t interface;
- int ret;
struct stmmac_priv *priv;
struct net_device *ndev;
struct regmap *regmap;
+ int ret;
ret = stmmac_get_platform_resources(pdev, &stmmac_res);
if (ret)
@@ -1219,10 +1218,6 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
return ret;
}
- ret = of_get_phy_mode(dev->of_node, &interface);
- if (ret)
- return -EINVAL;
-
plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
@@ -1230,7 +1225,6 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
/* platform data specifying hardware features and callbacks.
* hardware features were copied from Allwinner drivers.
*/
- plat_dat->mac_interface = interface;
plat_dat->rx_coe = STMMAC_RX_COE_TYPE2;
plat_dat->tx_coe = 1;
plat_dat->flags |= STMMAC_FLAG_HAS_SUN8I;
@@ -1245,14 +1239,10 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = sun8i_dwmac_init(pdev, plat_dat->bsp_priv);
+ ret = stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res);
if (ret)
goto dwmac_syscon;
- ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
- if (ret)
- goto dwmac_exit;
-
ndev = dev_get_drvdata(&pdev->dev);
priv = netdev_priv(ndev);
@@ -1289,9 +1279,7 @@ dwmac_mux:
clk_put(gmac->ephy_clk);
dwmac_remove:
pm_runtime_put_noidle(&pdev->dev);
- stmmac_dvr_remove(&pdev->dev);
-dwmac_exit:
- sun8i_dwmac_exit(pdev, gmac);
+ stmmac_pltfr_remove(pdev);
dwmac_syscon:
sun8i_dwmac_unset_syscon(gmac);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
index 9ae318436c4a..1eadcf5d1ad6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
@@ -72,28 +72,28 @@ static void sun7i_gmac_exit(struct platform_device *pdev, void *priv)
regulator_disable(gmac->regulator);
}
-static void sun7i_fix_speed(void *priv, unsigned int speed, unsigned int mode)
+static int sun7i_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
+ phy_interface_t interface, int speed)
{
- struct sunxi_priv_data *gmac = priv;
-
- /* only GMII mode requires us to reconfigure the clock lines */
- if (gmac->interface != PHY_INTERFACE_MODE_GMII)
- return;
-
- if (gmac->clk_enabled) {
- clk_disable(gmac->tx_clk);
- gmac->clk_enabled = 0;
- }
- clk_unprepare(gmac->tx_clk);
-
- if (speed == 1000) {
- clk_set_rate(gmac->tx_clk, SUN7I_GMAC_GMII_RGMII_RATE);
- clk_prepare_enable(gmac->tx_clk);
- gmac->clk_enabled = 1;
- } else {
- clk_set_rate(gmac->tx_clk, SUN7I_GMAC_MII_RATE);
- clk_prepare(gmac->tx_clk);
+ struct sunxi_priv_data *gmac = bsp_priv;
+
+ if (interface == PHY_INTERFACE_MODE_GMII) {
+ if (gmac->clk_enabled) {
+ clk_disable(gmac->tx_clk);
+ gmac->clk_enabled = 0;
+ }
+ clk_unprepare(gmac->tx_clk);
+
+ if (speed == 1000) {
+ clk_set_rate(gmac->tx_clk, SUN7I_GMAC_GMII_RGMII_RATE);
+ clk_prepare_enable(gmac->tx_clk);
+ gmac->clk_enabled = 1;
+ } else {
+ clk_set_rate(gmac->tx_clk, SUN7I_GMAC_MII_RATE);
+ clk_prepare(gmac->tx_clk);
+ }
}
+ return 0;
}
static int sun7i_gmac_probe(struct platform_device *pdev)
@@ -116,11 +116,7 @@ static int sun7i_gmac_probe(struct platform_device *pdev)
if (!gmac)
return -ENOMEM;
- ret = of_get_phy_mode(dev->of_node, &gmac->interface);
- if (ret && ret != -ENODEV) {
- dev_err(dev, "Can't get phy-mode\n");
- return ret;
- }
+ gmac->interface = plat_dat->phy_interface;
gmac->tx_clk = devm_clk_get(dev, "allwinner_gmac_tx");
if (IS_ERR(gmac->tx_clk)) {
@@ -144,24 +140,11 @@ static int sun7i_gmac_probe(struct platform_device *pdev)
plat_dat->bsp_priv = gmac;
plat_dat->init = sun7i_gmac_init;
plat_dat->exit = sun7i_gmac_exit;
- plat_dat->fix_mac_speed = sun7i_fix_speed;
+ plat_dat->set_clk_tx_rate = sun7i_set_clk_tx_rate;
plat_dat->tx_fifo_size = 4096;
plat_dat->rx_fifo_size = 16384;
- ret = sun7i_gmac_init(pdev, plat_dat->bsp_priv);
- if (ret)
- return ret;
-
- ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
- if (ret)
- goto err_gmac_exit;
-
- return 0;
-
-err_gmac_exit:
- sun7i_gmac_exit(pdev, plat_dat->bsp_priv);
-
- return ret;
+ return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res);
}
static const struct of_device_id sun7i_dwmac_match[] = {
@@ -172,7 +155,6 @@ MODULE_DEVICE_TABLE(of, sun7i_dwmac_match);
static struct platform_driver sun7i_dwmac_driver = {
.probe = sun7i_gmac_probe,
- .remove = stmmac_pltfr_remove,
.driver = {
.name = "sun7i-dwmac",
.pm = &stmmac_pltfr_pm_ops,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c
index 3827997d2132..dc903b846b1b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/module.h>
@@ -19,6 +20,8 @@ struct tegra_mgbe {
struct reset_control *rst_mac;
struct reset_control *rst_pcs;
+ u32 iommu_sid;
+
void __iomem *hv;
void __iomem *regs;
void __iomem *xpcs;
@@ -50,7 +53,6 @@ struct tegra_mgbe {
#define MGBE_WRAP_COMMON_INTR_ENABLE 0x8704
#define MAC_SBD_INTR BIT(2)
#define MGBE_WRAP_AXI_ASID0_CTRL 0x8400
-#define MGBE_SID 0x6
static int __maybe_unused tegra_mgbe_suspend(struct device *dev)
{
@@ -84,7 +86,7 @@ static int __maybe_unused tegra_mgbe_resume(struct device *dev)
writel(MAC_SBD_INTR, mgbe->regs + MGBE_WRAP_COMMON_INTR_ENABLE);
/* Program SID */
- writel(MGBE_SID, mgbe->hv + MGBE_WRAP_AXI_ASID0_CTRL);
+ writel(mgbe->iommu_sid, mgbe->hv + MGBE_WRAP_AXI_ASID0_CTRL);
value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_STATUS);
if ((value & XPCS_WRAP_UPHY_STATUS_TX_P_UP) == 0) {
@@ -241,6 +243,12 @@ static int tegra_mgbe_probe(struct platform_device *pdev)
if (IS_ERR(mgbe->xpcs))
return PTR_ERR(mgbe->xpcs);
+ /* get controller's stream id from iommu property in device tree */
+ if (!tegra_dev_iommu_get_stream_id(mgbe->dev, &mgbe->iommu_sid)) {
+ dev_err(mgbe->dev, "failed to get iommu stream id\n");
+ return -EINVAL;
+ }
+
res.addr = mgbe->regs;
res.irq = irq;
@@ -346,7 +354,7 @@ static int tegra_mgbe_probe(struct platform_device *pdev)
writel(MAC_SBD_INTR, mgbe->regs + MGBE_WRAP_COMMON_INTR_ENABLE);
/* Program SID */
- writel(MGBE_SID, mgbe->hv + MGBE_WRAP_AXI_ASID0_CTRL);
+ writel(mgbe->iommu_sid, mgbe->hv + MGBE_WRAP_AXI_ASID0_CTRL);
plat->flags |= STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c
index dce84ed184e9..c72ee759aae5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c
@@ -45,9 +45,6 @@
#define TXCLK_DIR_OUTPUT FIELD_PREP(TXCLK_DIR_MASK, 0)
#define TXCLK_DIR_INPUT FIELD_PREP(TXCLK_DIR_MASK, 1)
-#define GMAC_GMII_RGMII_RATE 125000000
-#define GMAC_MII_RATE 25000000
-
struct thead_dwmac {
struct plat_stmmacenet_data *plat;
void __iomem *apb_base;
@@ -104,11 +101,13 @@ static int thead_dwmac_set_txclk_dir(struct plat_stmmacenet_data *plat)
return 0;
}
-static void thead_dwmac_fix_speed(void *priv, unsigned int speed, unsigned int mode)
+static int thead_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
+ phy_interface_t interface, int speed)
{
+ struct thead_dwmac *dwmac = bsp_priv;
struct plat_stmmacenet_data *plat;
- struct thead_dwmac *dwmac = priv;
unsigned long rate;
+ long tx_rate;
u32 div, reg;
plat = dwmac->plat;
@@ -116,44 +115,37 @@ static void thead_dwmac_fix_speed(void *priv, unsigned int speed, unsigned int m
switch (plat->mac_interface) {
/* For MII, rxc/txc is provided by phy */
case PHY_INTERFACE_MODE_MII:
- return;
+ return 0;
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_RGMII_TXID:
rate = clk_get_rate(plat->stmmac_clk);
- if (!rate || rate % GMAC_GMII_RGMII_RATE != 0 ||
- rate % GMAC_MII_RATE != 0) {
- dev_err(dwmac->dev, "invalid gmac rate %ld\n", rate);
- return;
- }
writel(0, dwmac->apb_base + GMAC_PLLCLK_DIV);
- switch (speed) {
- case SPEED_1000:
- div = rate / GMAC_GMII_RGMII_RATE;
- break;
- case SPEED_100:
- div = rate / GMAC_MII_RATE;
- break;
- case SPEED_10:
- div = rate * 10 / GMAC_MII_RATE;
- break;
- default:
- dev_err(dwmac->dev, "invalid speed %u\n", speed);
- return;
+ tx_rate = rgmii_clock(speed);
+ if (tx_rate < 0) {
+ dev_err(dwmac->dev, "invalid speed %d\n", speed);
+ return tx_rate;
+ }
+
+ div = rate / tx_rate;
+ if (rate != tx_rate * div) {
+ dev_err(dwmac->dev, "invalid gmac rate %lu\n", rate);
+ return -EINVAL;
}
reg = FIELD_PREP(GMAC_PLLCLK_DIV_EN, 1) |
FIELD_PREP(GMAC_PLLCLK_DIV_NUM, div);
writel(reg, dwmac->apb_base + GMAC_PLLCLK_DIV);
- break;
+ return 0;
+
default:
dev_err(dwmac->dev, "unsupported phy interface %d\n",
plat->mac_interface);
- return;
+ return -EINVAL;
}
}
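
A concrete example of the T-HEAD divider computed above, assuming a hypothetical 500 MHz GMAC PLL clock (not a value taken from this patch):

/* rate = 500 MHz (assumed stmmac_clk):
 *   speed 1000 -> tx_rate = 125 MHz -> div = 4
 *   speed  100 -> tx_rate =  25 MHz -> div = 20
 *   speed   10 -> tx_rate = 2.5 MHz -> div = 200
 * If rate is not an exact multiple of tx_rate (rate != tx_rate * div),
 * the function now rejects the clock instead of programming a wrong divider.
 */
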
@@ -245,7 +237,7 @@ static int thead_dwmac_probe(struct platform_device *pdev)
dwmac->plat = plat;
dwmac->apb_base = apb;
plat->bsp_priv = dwmac;
- plat->fix_mac_speed = thead_dwmac_fix_speed;
+ plat->set_clk_tx_rate = thead_set_clk_tx_rate;
plat->init = thead_dwmac_init;
return devm_stmmac_pltfr_probe(pdev, plat, &stmmac_res);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
index eccf7f537467..5e6ac82a89b9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
@@ -51,21 +51,14 @@ struct visconti_eth {
u32 phy_intf_sel;
struct clk *phy_ref_clk;
struct device *dev;
- spinlock_t lock; /* lock to protect register update */
};
-static void visconti_eth_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode)
+static int visconti_eth_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
+ phy_interface_t interface, int speed)
{
- struct visconti_eth *dwmac = priv;
+ struct visconti_eth *dwmac = bsp_priv;
struct net_device *netdev = dev_get_drvdata(dwmac->dev);
unsigned int val, clk_sel_val = 0;
- unsigned long flags;
-
- spin_lock_irqsave(&dwmac->lock, flags);
-
- /* adjust link */
- val = readl(dwmac->reg + MAC_CTRL_REG);
- val &= ~(GMAC_CONFIG_PS | GMAC_CONFIG_FES);
switch (speed) {
case SPEED_1000:
@@ -77,24 +70,19 @@ static void visconti_eth_fix_mac_speed(void *priv, unsigned int speed, unsigned
clk_sel_val = ETHER_CLK_SEL_FREQ_SEL_25M;
if (dwmac->phy_intf_sel == ETHER_CONFIG_INTF_RMII)
clk_sel_val = ETHER_CLK_SEL_DIV_SEL_2;
- val |= GMAC_CONFIG_PS | GMAC_CONFIG_FES;
break;
case SPEED_10:
if (dwmac->phy_intf_sel == ETHER_CONFIG_INTF_RGMII)
clk_sel_val = ETHER_CLK_SEL_FREQ_SEL_2P5M;
if (dwmac->phy_intf_sel == ETHER_CONFIG_INTF_RMII)
clk_sel_val = ETHER_CLK_SEL_DIV_SEL_20;
- val |= GMAC_CONFIG_PS;
break;
default:
/* No bit control */
netdev_err(netdev, "Unsupported speed request (%d)", speed);
- spin_unlock_irqrestore(&dwmac->lock, flags);
- return;
+ return -EINVAL;
}
- writel(val, dwmac->reg + MAC_CTRL_REG);
-
/* Stop internal clock */
val = readl(dwmac->reg + REG_ETHER_CLOCK_SEL);
val &= ~(ETHER_CLK_SEL_RMII_CLK_EN | ETHER_CLK_SEL_RX_TX_CLK_EN);
@@ -136,7 +124,7 @@ static void visconti_eth_fix_mac_speed(void *priv, unsigned int speed, unsigned
break;
}
- spin_unlock_irqrestore(&dwmac->lock, flags);
+ return 0;
}
static int visconti_eth_init_hw(struct platform_device *pdev, struct plat_stmmacenet_data *plat_dat)
@@ -228,11 +216,10 @@ static int visconti_eth_dwmac_probe(struct platform_device *pdev)
if (!dwmac)
return -ENOMEM;
- spin_lock_init(&dwmac->lock);
dwmac->reg = stmmac_res.addr;
dwmac->dev = &pdev->dev;
plat_dat->bsp_priv = dwmac;
- plat_dat->fix_mac_speed = visconti_eth_fix_mac_speed;
+ plat_dat->set_clk_tx_rate = visconti_eth_set_clk_tx_rate;
ret = visconti_eth_clock_probe(pdev, plat_dat);
if (ret)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
index 600fea8f712f..0c011a47d5a3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
@@ -59,22 +59,11 @@ enum power_event {
/* Energy Efficient Ethernet (EEE)
*
* LPI status, timer and control register offset
+ * For LPI control and status bit definitions, see common.h.
*/
#define LPI_CTRL_STATUS 0x0030
#define LPI_TIMER_CTRL 0x0034
-/* LPI control and status defines */
-#define LPI_CTRL_STATUS_LPITXA 0x00080000 /* Enable LPI TX Automate */
-#define LPI_CTRL_STATUS_PLSEN 0x00040000 /* Enable PHY Link Status */
-#define LPI_CTRL_STATUS_PLS 0x00020000 /* PHY Link Status */
-#define LPI_CTRL_STATUS_LPIEN 0x00010000 /* LPI Enable */
-#define LPI_CTRL_STATUS_RLPIST 0x00000200 /* Receive LPI state */
-#define LPI_CTRL_STATUS_TLPIST 0x00000100 /* Transmit LPI state */
-#define LPI_CTRL_STATUS_RLPIEX 0x00000008 /* Receive LPI Exit */
-#define LPI_CTRL_STATUS_RLPIEN 0x00000004 /* Receive LPI Entry */
-#define LPI_CTRL_STATUS_TLPIEX 0x00000002 /* Transmit LPI Exit */
-#define LPI_CTRL_STATUS_TLPIEN 0x00000001 /* Transmit LPI Entry */
-
/* GMAC HW ADDR regs */
#define GMAC_ADDR_HIGH(reg) ((reg > 15) ? 0x00000800 + (reg - 16) * 8 : \
0x00000040 + (reg * 8))
@@ -331,8 +320,8 @@ enum rtc_control {
/* PTP and timestamping registers */
-#define GMAC3_X_ATSNS GENMASK(19, 16)
-#define GMAC3_X_ATSNS_SHIFT 16
+#define GMAC3_X_ATSNS GENMASK(29, 25)
+#define GMAC3_X_ATSNS_SHIFT 25
#define GMAC_PTP_TCR_ATSFC BIT(24)
#define GMAC_PTP_TCR_ATSEN0 BIT(25)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index 96bcda0856ec..56b76aaa58f0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -16,6 +16,7 @@
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/io.h>
+#include <linux/string_choices.h>
#include "stmmac.h"
#include "stmmac_pcs.h"
#include "stmmac_ptp.h"
@@ -342,31 +343,24 @@ static int dwmac1000_irq_status(struct mac_device_info *hw,
return ret;
}
-static void dwmac1000_set_eee_mode(struct mac_device_info *hw,
- bool en_tx_lpi_clockgating)
+static int dwmac1000_set_lpi_mode(struct mac_device_info *hw,
+ enum stmmac_lpi_mode mode,
+ bool en_tx_lpi_clockgating, u32 et)
{
void __iomem *ioaddr = hw->pcsr;
u32 value;
- /*TODO - en_tx_lpi_clockgating treatment */
+ if (mode == STMMAC_LPI_TIMER)
+ return -EOPNOTSUPP;
- /* Enable the link status receive on RGMII, SGMII ore SMII
- * receive path and instruct the transmit to enter in LPI
- * state.
- */
value = readl(ioaddr + LPI_CTRL_STATUS);
- value |= LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA;
+ if (mode == STMMAC_LPI_FORCED)
+ value |= LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA;
+ else
+ value &= ~(LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA);
writel(value, ioaddr + LPI_CTRL_STATUS);
-}
-
-static void dwmac1000_reset_eee_mode(struct mac_device_info *hw)
-{
- void __iomem *ioaddr = hw->pcsr;
- u32 value;
- value = readl(ioaddr + LPI_CTRL_STATUS);
- value &= ~(LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA);
- writel(value, ioaddr + LPI_CTRL_STATUS);
+ return 0;
}
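
The new set_lpi_mode callback replaces the set_eee_mode/reset_eee_mode pair with a single entry point keyed on an LPI mode. The enumerator names appear in the hunks above; the ordering and exact definition live in the core headers and are an assumption in this sketch:

/* Sketch of the mode selector passed to set_lpi_mode() */
enum stmmac_lpi_mode {
	STMMAC_LPI_DISABLE,	/* LPI signalling off                          */
	STMMAC_LPI_FORCED,	/* software-forced LPI (LPIEN | LPITXA)        */
	STMMAC_LPI_TIMER,	/* hardware entry timer; dwmac1000 returns -EOPNOTSUPP */
};
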
static void dwmac1000_set_eee_pls(struct mac_device_info *hw, int link)
@@ -509,8 +503,7 @@ const struct stmmac_ops dwmac1000_ops = {
.pmt = dwmac1000_pmt,
.set_umac_addr = dwmac1000_set_umac_addr,
.get_umac_addr = dwmac1000_get_umac_addr,
- .set_eee_mode = dwmac1000_set_eee_mode,
- .reset_eee_mode = dwmac1000_reset_eee_mode,
+ .set_lpi_mode = dwmac1000_set_lpi_mode,
.set_eee_timer = dwmac1000_set_eee_timer,
.set_eee_pls = dwmac1000_set_eee_pls,
.debug = dwmac1000_debug,
@@ -560,7 +553,7 @@ void dwmac1000_get_ptptime(void __iomem *ptpaddr, u64 *ptp_time)
u64 ns;
ns = readl(ptpaddr + GMAC_PTP_ATNR);
- ns += readl(ptpaddr + GMAC_PTP_ATSR) * NSEC_PER_SEC;
+ ns += (u64)readl(ptpaddr + GMAC_PTP_ATSR) * NSEC_PER_SEC;
*ptp_time = ns;
}
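
The (u64) cast added above guards the seconds-to-nanoseconds conversion: GMAC_PTP_ATSR holds whole seconds, and on 32-bit builds the unwidened multiply by NSEC_PER_SEC is done in 32-bit arithmetic and wraps. A small worked example with illustrative values:

/* 5 seconds of auxiliary snapshot time:
 *   correct: 5 * 1000000000      = 5000000000 (needs 33 bits)
 *   wrapped: 5000000000 mod 2^32 =  705032704
 * Widening the seconds value to u64 before the multiply keeps the full
 * nanosecond count.
 */
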
@@ -633,7 +626,7 @@ int dwmac1000_ptp_enable(struct ptp_clock_info *ptp,
}
netdev_dbg(priv->dev, "Auxiliary Snapshot %s.\n",
- on ? "enabled" : "disabled");
+ str_enabled_disabled(on));
writel(tcr_val, ptpaddr + PTP_TCR);
/* wait for auxts fifo clear to finish */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
index 184d41a306af..f4694fd576f5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
@@ -17,11 +17,7 @@
#define GMAC_EXT_CONFIG 0x00000004
#define GMAC_PACKET_FILTER 0x00000008
#define GMAC_HASH_TAB(x) (0x10 + (x) * 4)
-#define GMAC_VLAN_TAG 0x00000050
-#define GMAC_VLAN_TAG_DATA 0x00000054
-#define GMAC_VLAN_HASH_TABLE 0x00000058
#define GMAC_RX_FLOW_CTRL 0x00000090
-#define GMAC_VLAN_INCL 0x00000060
#define GMAC_QX_TX_FLOW_CTRL(x) (0x70 + x * 4)
#define GMAC_TXQ_PRTY_MAP0 0x98
#define GMAC_TXQ_PRTY_MAP1 0x9C
@@ -31,7 +27,6 @@
#define GMAC_RXQ_CTRL3 0x000000ac
#define GMAC_INT_STATUS 0x000000b0
#define GMAC_INT_EN 0x000000b4
-#define GMAC_1US_TIC_COUNTER 0x000000dc
#define GMAC_PCS_BASE 0x000000e0
#define GMAC_PHYIF_CONTROL_STATUS 0x000000f8
#define GMAC_PMT 0x000000c0
@@ -82,42 +77,6 @@
#define GMAC_MAX_PERFECT_ADDRESSES 128
-/* MAC VLAN */
-#define GMAC_VLAN_EDVLP BIT(26)
-#define GMAC_VLAN_VTHM BIT(25)
-#define GMAC_VLAN_DOVLTC BIT(20)
-#define GMAC_VLAN_ESVL BIT(18)
-#define GMAC_VLAN_ETV BIT(16)
-#define GMAC_VLAN_VID GENMASK(15, 0)
-#define GMAC_VLAN_VLTI BIT(20)
-#define GMAC_VLAN_CSVL BIT(19)
-#define GMAC_VLAN_VLC GENMASK(17, 16)
-#define GMAC_VLAN_VLC_SHIFT 16
-#define GMAC_VLAN_VLHT GENMASK(15, 0)
-
-/* MAC VLAN Tag */
-#define GMAC_VLAN_TAG_VID GENMASK(15, 0)
-#define GMAC_VLAN_TAG_ETV BIT(16)
-
-/* MAC VLAN Tag Control */
-#define GMAC_VLAN_TAG_CTRL_OB BIT(0)
-#define GMAC_VLAN_TAG_CTRL_CT BIT(1)
-#define GMAC_VLAN_TAG_CTRL_OFS_MASK GENMASK(6, 2)
-#define GMAC_VLAN_TAG_CTRL_OFS_SHIFT 2
-#define GMAC_VLAN_TAG_CTRL_EVLS_MASK GENMASK(22, 21)
-#define GMAC_VLAN_TAG_CTRL_EVLS_SHIFT 21
-#define GMAC_VLAN_TAG_CTRL_EVLRXS BIT(24)
-
-#define GMAC_VLAN_TAG_STRIP_NONE (0x0 << GMAC_VLAN_TAG_CTRL_EVLS_SHIFT)
-#define GMAC_VLAN_TAG_STRIP_PASS (0x1 << GMAC_VLAN_TAG_CTRL_EVLS_SHIFT)
-#define GMAC_VLAN_TAG_STRIP_FAIL (0x2 << GMAC_VLAN_TAG_CTRL_EVLS_SHIFT)
-#define GMAC_VLAN_TAG_STRIP_ALL (0x3 << GMAC_VLAN_TAG_CTRL_EVLS_SHIFT)
-
-/* MAC VLAN Tag Data/Filter */
-#define GMAC_VLAN_TAG_DATA_VID GENMASK(15, 0)
-#define GMAC_VLAN_TAG_DATA_VEN BIT(16)
-#define GMAC_VLAN_TAG_DATA_ETV BIT(17)
-
/* MAC RX Queue Enable */
#define GMAC_RX_QUEUE_CLEAR(queue) ~(GENMASK(1, 0) << ((queue) * 2))
#define GMAC_RX_AV_QUEUE_ENABLE(queue) BIT((queue) * 2)
@@ -177,23 +136,13 @@ enum power_event {
/* Energy Efficient Ethernet (EEE) for GMAC4
*
* LPI status, timer and control register offset
+ * For LPI control and status bit definitions, see common.h.
*/
#define GMAC4_LPI_CTRL_STATUS 0xd0
#define GMAC4_LPI_TIMER_CTRL 0xd4
#define GMAC4_LPI_ENTRY_TIMER 0xd8
#define GMAC4_MAC_ONEUS_TIC_COUNTER 0xdc
-/* LPI control and status defines */
-#define GMAC4_LPI_CTRL_STATUS_LPITCSE BIT(21) /* LPI Tx Clock Stop Enable */
-#define GMAC4_LPI_CTRL_STATUS_LPIATE BIT(20) /* LPI Timer Enable */
-#define GMAC4_LPI_CTRL_STATUS_LPITXA BIT(19) /* Enable LPI TX Automate */
-#define GMAC4_LPI_CTRL_STATUS_PLS BIT(17) /* PHY Link Status */
-#define GMAC4_LPI_CTRL_STATUS_LPIEN BIT(16) /* LPI Enable */
-#define GMAC4_LPI_CTRL_STATUS_RLPIEX BIT(3) /* Receive LPI Exit */
-#define GMAC4_LPI_CTRL_STATUS_RLPIEN BIT(2) /* Receive LPI Entry */
-#define GMAC4_LPI_CTRL_STATUS_TLPIEX BIT(1) /* Transmit LPI Exit */
-#define GMAC4_LPI_CTRL_STATUS_TLPIEN BIT(0) /* Transmit LPI Entry */
-
/* MAC Debug bitmap */
#define GMAC_DEBUG_TFCSTS_MASK GENMASK(18, 17)
#define GMAC_DEBUG_TFCSTS_SHIFT 17
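The GMAC4_LPI_CTRL_STATUS_* macros removed above move to shared LPI_CTRL_STATUS_* definitions in common.h, which this section does not show. Going by the bit positions deleted here (and the identical dwmac1000 layout used at the top of this diff), the shared definitions are presumably equivalent to the following sketch:

/* Sketch of the consolidated common.h definitions; bit positions are
 * copied from the GMAC4_ macros removed above. The actual common.h
 * change is not part of this hunk.
 */
#define LPI_CTRL_STATUS_LPITCSE	BIT(21)	/* LPI Tx Clock Stop Enable */
#define LPI_CTRL_STATUS_LPIATE	BIT(20)	/* LPI Timer Enable */
#define LPI_CTRL_STATUS_LPITXA	BIT(19)	/* Enable LPI TX Automate */
#define LPI_CTRL_STATUS_PLS	BIT(17)	/* PHY Link Status */
#define LPI_CTRL_STATUS_LPIEN	BIT(16)	/* LPI Enable */
#define LPI_CTRL_STATUS_RLPIEX	BIT(3)	/* Receive LPI Exit */
#define LPI_CTRL_STATUS_RLPIEN	BIT(2)	/* Receive LPI Entry */
#define LPI_CTRL_STATUS_TLPIEX	BIT(1)	/* Transmit LPI Exit */
#define LPI_CTRL_STATUS_TLPIEN	BIT(0)	/* Transmit LPI Entry */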
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index c25781874aa7..9c2549d4100f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -18,6 +18,7 @@
#include "stmmac.h"
#include "stmmac_fpe.h"
#include "stmmac_pcs.h"
+#include "stmmac_vlan.h"
#include "dwmac4.h"
#include "dwmac5.h"
@@ -27,7 +28,7 @@ static void dwmac4_core_init(struct mac_device_info *hw,
struct stmmac_priv *priv = netdev_priv(dev);
void __iomem *ioaddr = hw->pcsr;
u32 value = readl(ioaddr + GMAC_CONFIG);
- u32 clk_rate;
+ unsigned long clk_rate;
value |= GMAC_CORE_INIT;
@@ -376,33 +377,46 @@ static void dwmac4_get_umac_addr(struct mac_device_info *hw,
GMAC_ADDR_LOW(reg_n));
}
-static void dwmac4_set_eee_mode(struct mac_device_info *hw,
- bool en_tx_lpi_clockgating)
+static int dwmac4_set_lpi_mode(struct mac_device_info *hw,
+ enum stmmac_lpi_mode mode,
+ bool en_tx_lpi_clockgating, u32 et)
{
void __iomem *ioaddr = hw->pcsr;
- u32 value;
+ u32 value, mask;
- /* Enable the link status receive on RGMII, SGMII ore SMII
- * receive path and instruct the transmit to enter in LPI
- * state.
- */
- value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
- value |= GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA;
+ if (mode == STMMAC_LPI_DISABLE) {
+ value = 0;
+ } else {
+ value = LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA;
- if (en_tx_lpi_clockgating)
- value |= GMAC4_LPI_CTRL_STATUS_LPITCSE;
+ if (mode == STMMAC_LPI_TIMER) {
+ /* Return ERANGE if the timer is larger than the
+ * register field.
+ */
+ if (et > STMMAC_ET_MAX)
+ return -ERANGE;
- writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
-}
+ /* Set the hardware LPI entry timer */
+ writel(et, ioaddr + GMAC4_LPI_ENTRY_TIMER);
-static void dwmac4_reset_eee_mode(struct mac_device_info *hw)
-{
- void __iomem *ioaddr = hw->pcsr;
- u32 value;
+ /* Interpret a zero LPI entry timer to mean
+ * immediate entry into LPI mode.
+ */
+ if (et)
+ value |= LPI_CTRL_STATUS_LPIATE;
+ }
- value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
- value &= ~(GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA);
+ if (en_tx_lpi_clockgating)
+ value |= LPI_CTRL_STATUS_LPITCSE;
+ }
+
+ mask = LPI_CTRL_STATUS_LPIATE | LPI_CTRL_STATUS_LPIEN |
+ LPI_CTRL_STATUS_LPITXA | LPI_CTRL_STATUS_LPITCSE;
+
+ value |= readl(ioaddr + GMAC4_LPI_CTRL_STATUS) & ~mask;
writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
+
+ return 0;
}
static void dwmac4_set_eee_pls(struct mac_device_info *hw, int link)
@@ -413,34 +427,13 @@ static void dwmac4_set_eee_pls(struct mac_device_info *hw, int link)
value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
if (link)
- value |= GMAC4_LPI_CTRL_STATUS_PLS;
+ value |= LPI_CTRL_STATUS_PLS;
else
- value &= ~GMAC4_LPI_CTRL_STATUS_PLS;
+ value &= ~LPI_CTRL_STATUS_PLS;
writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
}
-static void dwmac4_set_eee_lpi_entry_timer(struct mac_device_info *hw, int et)
-{
- void __iomem *ioaddr = hw->pcsr;
- int value = et & STMMAC_ET_MAX;
- int regval;
-
- /* Program LPI entry timer value into register */
- writel(value, ioaddr + GMAC4_LPI_ENTRY_TIMER);
-
- /* Enable/disable LPI entry timer */
- regval = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
- regval |= GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA;
-
- if (et)
- regval |= GMAC4_LPI_CTRL_STATUS_LPIATE;
- else
- regval &= ~GMAC4_LPI_CTRL_STATUS_LPIATE;
-
- writel(regval, ioaddr + GMAC4_LPI_CTRL_STATUS);
-}
-
static void dwmac4_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
{
void __iomem *ioaddr = hw->pcsr;
@@ -456,165 +449,6 @@ static void dwmac4_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
writel(value, ioaddr + GMAC4_LPI_TIMER_CTRL);
}
-static void dwmac4_write_single_vlan(struct net_device *dev, u16 vid)
-{
- void __iomem *ioaddr = (void __iomem *)dev->base_addr;
- u32 val;
-
- val = readl(ioaddr + GMAC_VLAN_TAG);
- val &= ~GMAC_VLAN_TAG_VID;
- val |= GMAC_VLAN_TAG_ETV | vid;
-
- writel(val, ioaddr + GMAC_VLAN_TAG);
-}
-
-static int dwmac4_write_vlan_filter(struct net_device *dev,
- struct mac_device_info *hw,
- u8 index, u32 data)
-{
- void __iomem *ioaddr = (void __iomem *)dev->base_addr;
- int ret;
- u32 val;
-
- if (index >= hw->num_vlan)
- return -EINVAL;
-
- writel(data, ioaddr + GMAC_VLAN_TAG_DATA);
-
- val = readl(ioaddr + GMAC_VLAN_TAG);
- val &= ~(GMAC_VLAN_TAG_CTRL_OFS_MASK |
- GMAC_VLAN_TAG_CTRL_CT |
- GMAC_VLAN_TAG_CTRL_OB);
- val |= (index << GMAC_VLAN_TAG_CTRL_OFS_SHIFT) | GMAC_VLAN_TAG_CTRL_OB;
-
- writel(val, ioaddr + GMAC_VLAN_TAG);
-
- ret = readl_poll_timeout(ioaddr + GMAC_VLAN_TAG, val,
- !(val & GMAC_VLAN_TAG_CTRL_OB),
- 1000, 500000);
- if (ret) {
- netdev_err(dev, "Timeout accessing MAC_VLAN_Tag_Filter\n");
- return -EBUSY;
- }
-
- return 0;
-}
-
-static int dwmac4_add_hw_vlan_rx_fltr(struct net_device *dev,
- struct mac_device_info *hw,
- __be16 proto, u16 vid)
-{
- int index = -1;
- u32 val = 0;
- int i, ret;
-
- if (vid > 4095)
- return -EINVAL;
-
- /* Single Rx VLAN Filter */
- if (hw->num_vlan == 1) {
- /* For single VLAN filter, VID 0 means VLAN promiscuous */
- if (vid == 0) {
- netdev_warn(dev, "Adding VLAN ID 0 is not supported\n");
- return -EPERM;
- }
-
- if (hw->vlan_filter[0] & GMAC_VLAN_TAG_VID) {
- netdev_err(dev, "Only single VLAN ID supported\n");
- return -EPERM;
- }
-
- hw->vlan_filter[0] = vid;
- dwmac4_write_single_vlan(dev, vid);
-
- return 0;
- }
-
- /* Extended Rx VLAN Filter Enable */
- val |= GMAC_VLAN_TAG_DATA_ETV | GMAC_VLAN_TAG_DATA_VEN | vid;
-
- for (i = 0; i < hw->num_vlan; i++) {
- if (hw->vlan_filter[i] == val)
- return 0;
- else if (!(hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VEN))
- index = i;
- }
-
- if (index == -1) {
- netdev_err(dev, "MAC_VLAN_Tag_Filter full (size: %0u)\n",
- hw->num_vlan);
- return -EPERM;
- }
-
- ret = dwmac4_write_vlan_filter(dev, hw, index, val);
-
- if (!ret)
- hw->vlan_filter[index] = val;
-
- return ret;
-}
-
-static int dwmac4_del_hw_vlan_rx_fltr(struct net_device *dev,
- struct mac_device_info *hw,
- __be16 proto, u16 vid)
-{
- int i, ret = 0;
-
- /* Single Rx VLAN Filter */
- if (hw->num_vlan == 1) {
- if ((hw->vlan_filter[0] & GMAC_VLAN_TAG_VID) == vid) {
- hw->vlan_filter[0] = 0;
- dwmac4_write_single_vlan(dev, 0);
- }
- return 0;
- }
-
- /* Extended Rx VLAN Filter Enable */
- for (i = 0; i < hw->num_vlan; i++) {
- if ((hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VID) == vid) {
- ret = dwmac4_write_vlan_filter(dev, hw, i, 0);
-
- if (!ret)
- hw->vlan_filter[i] = 0;
- else
- return ret;
- }
- }
-
- return ret;
-}
-
-static void dwmac4_restore_hw_vlan_rx_fltr(struct net_device *dev,
- struct mac_device_info *hw)
-{
- void __iomem *ioaddr = hw->pcsr;
- u32 value;
- u32 hash;
- u32 val;
- int i;
-
- /* Single Rx VLAN Filter */
- if (hw->num_vlan == 1) {
- dwmac4_write_single_vlan(dev, hw->vlan_filter[0]);
- return;
- }
-
- /* Extended Rx VLAN Filter Enable */
- for (i = 0; i < hw->num_vlan; i++) {
- if (hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VEN) {
- val = hw->vlan_filter[i];
- dwmac4_write_vlan_filter(dev, hw, i, val);
- }
- }
-
- hash = readl(ioaddr + GMAC_VLAN_HASH_TABLE);
- if (hash & GMAC_VLAN_VLHT) {
- value = readl(ioaddr + GMAC_VLAN_TAG);
- value |= GMAC_VLAN_VTHM;
- writel(value, ioaddr + GMAC_VLAN_TAG);
- }
-}
-
static void dwmac4_set_filter(struct mac_device_info *hw,
struct net_device *dev)
{
@@ -849,17 +683,17 @@ static int dwmac4_irq_status(struct mac_device_info *hw,
/* Clear LPI interrupt by reading MAC_LPI_Control_Status */
u32 status = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
- if (status & GMAC4_LPI_CTRL_STATUS_TLPIEN) {
+ if (status & LPI_CTRL_STATUS_TLPIEN) {
ret |= CORE_IRQ_TX_PATH_IN_LPI_MODE;
x->irq_tx_path_in_lpi_mode_n++;
}
- if (status & GMAC4_LPI_CTRL_STATUS_TLPIEX) {
+ if (status & LPI_CTRL_STATUS_TLPIEX) {
ret |= CORE_IRQ_TX_PATH_EXIT_LPI_MODE;
x->irq_tx_path_exit_lpi_mode_n++;
}
- if (status & GMAC4_LPI_CTRL_STATUS_RLPIEN)
+ if (status & LPI_CTRL_STATUS_RLPIEN)
x->irq_rx_path_in_lpi_mode_n++;
- if (status & GMAC4_LPI_CTRL_STATUS_RLPIEX)
+ if (status & LPI_CTRL_STATUS_RLPIEX)
x->irq_rx_path_exit_lpi_mode_n++;
}
@@ -973,45 +807,6 @@ static void dwmac4_set_mac_loopback(void __iomem *ioaddr, bool enable)
writel(value, ioaddr + GMAC_CONFIG);
}
-static void dwmac4_update_vlan_hash(struct mac_device_info *hw, u32 hash,
- u16 perfect_match, bool is_double)
-{
- void __iomem *ioaddr = hw->pcsr;
- u32 value;
-
- writel(hash, ioaddr + GMAC_VLAN_HASH_TABLE);
-
- value = readl(ioaddr + GMAC_VLAN_TAG);
-
- if (hash) {
- value |= GMAC_VLAN_VTHM | GMAC_VLAN_ETV;
- if (is_double) {
- value |= GMAC_VLAN_EDVLP;
- value |= GMAC_VLAN_ESVL;
- value |= GMAC_VLAN_DOVLTC;
- }
-
- writel(value, ioaddr + GMAC_VLAN_TAG);
- } else if (perfect_match) {
- u32 value = GMAC_VLAN_ETV;
-
- if (is_double) {
- value |= GMAC_VLAN_EDVLP;
- value |= GMAC_VLAN_ESVL;
- value |= GMAC_VLAN_DOVLTC;
- }
-
- writel(value | perfect_match, ioaddr + GMAC_VLAN_TAG);
- } else {
- value &= ~(GMAC_VLAN_VTHM | GMAC_VLAN_ETV);
- value &= ~(GMAC_VLAN_EDVLP | GMAC_VLAN_ESVL);
- value &= ~GMAC_VLAN_DOVLTC;
- value &= ~GMAC_VLAN_VID;
-
- writel(value, ioaddr + GMAC_VLAN_TAG);
- }
-}
-
static void dwmac4_sarc_configure(void __iomem *ioaddr, int val)
{
u32 value = readl(ioaddr + GMAC_CONFIG);
@@ -1022,19 +817,6 @@ static void dwmac4_sarc_configure(void __iomem *ioaddr, int val)
writel(value, ioaddr + GMAC_CONFIG);
}
-static void dwmac4_enable_vlan(struct mac_device_info *hw, u32 type)
-{
- void __iomem *ioaddr = hw->pcsr;
- u32 value;
-
- value = readl(ioaddr + GMAC_VLAN_INCL);
- value |= GMAC_VLAN_VLTI;
- value |= GMAC_VLAN_CSVL; /* Only use SVLAN */
- value &= ~GMAC_VLAN_VLC;
- value |= (type << GMAC_VLAN_VLC_SHIFT) & GMAC_VLAN_VLC;
- writel(value, ioaddr + GMAC_VLAN_INCL);
-}
-
static void dwmac4_set_arp_offload(struct mac_device_info *hw, bool en,
u32 addr)
{
@@ -1151,35 +933,6 @@ static int dwmac4_config_l4_filter(struct mac_device_info *hw, u32 filter_no,
return 0;
}
-static void dwmac4_rx_hw_vlan(struct mac_device_info *hw,
- struct dma_desc *rx_desc, struct sk_buff *skb)
-{
- if (hw->desc->get_rx_vlan_valid(rx_desc)) {
- u16 vid = hw->desc->get_rx_vlan_tci(rx_desc);
-
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
- }
-}
-
-static void dwmac4_set_hw_vlan_mode(struct mac_device_info *hw)
-{
- void __iomem *ioaddr = hw->pcsr;
- u32 value = readl(ioaddr + GMAC_VLAN_TAG);
-
- value &= ~GMAC_VLAN_TAG_CTRL_EVLS_MASK;
-
- if (hw->hw_vlan_en)
- /* Always strip VLAN on Receive */
- value |= GMAC_VLAN_TAG_STRIP_ALL;
- else
- /* Do not strip VLAN on Receive */
- value |= GMAC_VLAN_TAG_STRIP_NONE;
-
- /* Enable outer VLAN Tag in Rx DMA descriptor */
- value |= GMAC_VLAN_TAG_CTRL_EVLRXS;
- writel(value, ioaddr + GMAC_VLAN_TAG);
-}
-
const struct stmmac_ops dwmac4_ops = {
.core_init = dwmac4_core_init,
.update_caps = dwmac4_update_caps,
@@ -1201,9 +954,7 @@ const struct stmmac_ops dwmac4_ops = {
.pmt = dwmac4_pmt,
.set_umac_addr = dwmac4_set_umac_addr,
.get_umac_addr = dwmac4_get_umac_addr,
- .set_eee_mode = dwmac4_set_eee_mode,
- .reset_eee_mode = dwmac4_reset_eee_mode,
- .set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer,
+ .set_lpi_mode = dwmac4_set_lpi_mode,
.set_eee_timer = dwmac4_set_eee_timer,
.set_eee_pls = dwmac4_set_eee_pls,
.pcs_ctrl_ane = dwmac4_ctrl_ane,
@@ -1211,17 +962,10 @@ const struct stmmac_ops dwmac4_ops = {
.debug = dwmac4_debug,
.set_filter = dwmac4_set_filter,
.set_mac_loopback = dwmac4_set_mac_loopback,
- .update_vlan_hash = dwmac4_update_vlan_hash,
.sarc_configure = dwmac4_sarc_configure,
- .enable_vlan = dwmac4_enable_vlan,
.set_arp_offload = dwmac4_set_arp_offload,
.config_l3_filter = dwmac4_config_l3_filter,
.config_l4_filter = dwmac4_config_l4_filter,
- .add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
- .del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
- .restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
- .rx_hw_vlan = dwmac4_rx_hw_vlan,
- .set_hw_vlan_mode = dwmac4_set_hw_vlan_mode,
};
const struct stmmac_ops dwmac410_ops = {
@@ -1245,9 +989,7 @@ const struct stmmac_ops dwmac410_ops = {
.pmt = dwmac4_pmt,
.set_umac_addr = dwmac4_set_umac_addr,
.get_umac_addr = dwmac4_get_umac_addr,
- .set_eee_mode = dwmac4_set_eee_mode,
- .reset_eee_mode = dwmac4_reset_eee_mode,
- .set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer,
+ .set_lpi_mode = dwmac4_set_lpi_mode,
.set_eee_timer = dwmac4_set_eee_timer,
.set_eee_pls = dwmac4_set_eee_pls,
.pcs_ctrl_ane = dwmac4_ctrl_ane,
@@ -1256,18 +998,11 @@ const struct stmmac_ops dwmac410_ops = {
.set_filter = dwmac4_set_filter,
.flex_pps_config = dwmac5_flex_pps_config,
.set_mac_loopback = dwmac4_set_mac_loopback,
- .update_vlan_hash = dwmac4_update_vlan_hash,
.sarc_configure = dwmac4_sarc_configure,
- .enable_vlan = dwmac4_enable_vlan,
.set_arp_offload = dwmac4_set_arp_offload,
.config_l3_filter = dwmac4_config_l3_filter,
.config_l4_filter = dwmac4_config_l4_filter,
.fpe_map_preemption_class = dwmac5_fpe_map_preemption_class,
- .add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
- .del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
- .restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
- .rx_hw_vlan = dwmac4_rx_hw_vlan,
- .set_hw_vlan_mode = dwmac4_set_hw_vlan_mode,
};
const struct stmmac_ops dwmac510_ops = {
@@ -1291,9 +1026,7 @@ const struct stmmac_ops dwmac510_ops = {
.pmt = dwmac4_pmt,
.set_umac_addr = dwmac4_set_umac_addr,
.get_umac_addr = dwmac4_get_umac_addr,
- .set_eee_mode = dwmac4_set_eee_mode,
- .reset_eee_mode = dwmac4_reset_eee_mode,
- .set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer,
+ .set_lpi_mode = dwmac4_set_lpi_mode,
.set_eee_timer = dwmac4_set_eee_timer,
.set_eee_pls = dwmac4_set_eee_pls,
.pcs_ctrl_ane = dwmac4_ctrl_ane,
@@ -1306,51 +1039,13 @@ const struct stmmac_ops dwmac510_ops = {
.rxp_config = dwmac5_rxp_config,
.flex_pps_config = dwmac5_flex_pps_config,
.set_mac_loopback = dwmac4_set_mac_loopback,
- .update_vlan_hash = dwmac4_update_vlan_hash,
.sarc_configure = dwmac4_sarc_configure,
- .enable_vlan = dwmac4_enable_vlan,
.set_arp_offload = dwmac4_set_arp_offload,
.config_l3_filter = dwmac4_config_l3_filter,
.config_l4_filter = dwmac4_config_l4_filter,
.fpe_map_preemption_class = dwmac5_fpe_map_preemption_class,
- .add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
- .del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
- .restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
- .rx_hw_vlan = dwmac4_rx_hw_vlan,
- .set_hw_vlan_mode = dwmac4_set_hw_vlan_mode,
};
-static u32 dwmac4_get_num_vlan(void __iomem *ioaddr)
-{
- u32 val, num_vlan;
-
- val = readl(ioaddr + GMAC_HW_FEATURE3);
- switch (val & GMAC_HW_FEAT_NRVF) {
- case 0:
- num_vlan = 1;
- break;
- case 1:
- num_vlan = 4;
- break;
- case 2:
- num_vlan = 8;
- break;
- case 3:
- num_vlan = 16;
- break;
- case 4:
- num_vlan = 24;
- break;
- case 5:
- num_vlan = 32;
- break;
- default:
- num_vlan = 1;
- }
-
- return num_vlan;
-}
-
int dwmac4_setup(struct stmmac_priv *priv)
{
struct mac_device_info *mac = priv->hw;
@@ -1382,7 +1077,7 @@ int dwmac4_setup(struct stmmac_priv *priv)
mac->mii.reg_mask = GENMASK(20, 16);
mac->mii.clk_csr_shift = 8;
mac->mii.clk_csr_mask = GENMASK(11, 8);
- mac->num_vlan = dwmac4_get_num_vlan(priv->ioaddr);
+ mac->num_vlan = stmmac_get_num_vlan(priv->ioaddr);
return 0;
}
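The dwmac4 conversion above gives set_lpi_mode three behaviours: STMMAC_LPI_DISABLE clears the enable/automate bits, STMMAC_LPI_FORCED sets them immediately, and STMMAC_LPI_TIMER additionally programs GMAC4_LPI_ENTRY_TIMER and sets LPIATE, returning -ERANGE when the requested entry timer exceeds STMMAC_ET_MAX. A hedged caller-side sketch (the real call sites are in the stmmac core, outside this diff):

/* Illustrative only: request hardware-timed LPI entry and fall back to
 * forced mode if the timer does not fit the register field. Argument
 * order follows the stmmac_set_lpi_mode() macro in hwif.h below.
 */
static int example_enable_lpi(struct stmmac_priv *priv, u32 et)
{
	int ret;

	ret = stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_TIMER,
				  priv->tx_lpi_clk_stop, et);
	if (ret == -ERANGE)
		ret = stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_FORCED,
					  priv->tx_lpi_clk_stop, 0);
	return ret;
}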
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
index 1ce6f43d545a..806555976496 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
@@ -144,4 +144,7 @@
/* TDS3 use for both format (read and write back) */
#define RDES3_OWN BIT(31)
+extern const struct stmmac_mode_ops dwmac4_ring_mode_ops;
+extern const struct stmmac_desc_ops dwmac4_desc_ops;
+
#endif /* __DWMAC4_DESCS_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
index a04a79003692..0d408ee17f33 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
@@ -57,19 +57,6 @@
#define XGMAC_FILTER_PR BIT(0)
#define XGMAC_HASH_TABLE(x) (0x00000010 + (x) * 4)
#define XGMAC_MAX_HASH_TABLE 8
-#define XGMAC_VLAN_TAG 0x00000050
-#define XGMAC_VLAN_EDVLP BIT(26)
-#define XGMAC_VLAN_VTHM BIT(25)
-#define XGMAC_VLAN_DOVLTC BIT(20)
-#define XGMAC_VLAN_ESVL BIT(18)
-#define XGMAC_VLAN_ETV BIT(16)
-#define XGMAC_VLAN_VID GENMASK(15, 0)
-#define XGMAC_VLAN_HASH_TABLE 0x00000058
-#define XGMAC_VLAN_INCL 0x00000060
-#define XGMAC_VLAN_VLTI BIT(20)
-#define XGMAC_VLAN_CSVL BIT(19)
-#define XGMAC_VLAN_VLC GENMASK(17, 16)
-#define XGMAC_VLAN_VLC_SHIFT 16
#define XGMAC_RXQ_CTRL0 0x000000a0
#define XGMAC_RXQEN(x) GENMASK((x) * 2 + 1, (x) * 2)
#define XGMAC_RXQEN_SHIFT(x) ((x) * 2)
@@ -112,14 +99,7 @@
#define XGMAC_MGKPKTEN BIT(1)
#define XGMAC_PWRDWN BIT(0)
#define XGMAC_LPI_CTRL 0x000000d0
-#define XGMAC_TXCGE BIT(21)
-#define XGMAC_LPITXA BIT(19)
-#define XGMAC_PLS BIT(17)
-#define XGMAC_LPITXEN BIT(16)
-#define XGMAC_RLPIEX BIT(3)
-#define XGMAC_RLPIEN BIT(2)
-#define XGMAC_TLPIEX BIT(1)
-#define XGMAC_TLPIEN BIT(0)
+/* For definitions, see LPI_CTRL_STATUS_xxx in common.h */
#define XGMAC_LPI_TIMER_CTRL 0x000000d4
#define XGMAC_HW_FEATURE0 0x0000011c
#define XGMAC_HWFEAT_EDMA BIT(31)
@@ -484,6 +464,7 @@
#define XGMAC_RDES3_RSV BIT(26)
#define XGMAC_RDES3_L34T GENMASK(23, 20)
#define XGMAC_RDES3_L34T_SHIFT 20
+#define XGMAC_RDES3_ET_LT GENMASK(19, 16)
#define XGMAC_L34T_IP4TCP 0x1
#define XGMAC_L34T_IP4UDP 0x2
#define XGMAC_L34T_IP6TCP 0x9
@@ -493,4 +474,20 @@
#define XGMAC_RDES3_TSD BIT(6)
#define XGMAC_RDES3_TSA BIT(4)
+/* RDES0 (write back format) */
+#define XGMAC_RDES0_VLAN_TAG_MASK GENMASK(15, 0)
+
+/* Error Type or L2 Type(ET/LT) Field Number */
+#define XGMAC_ET_LT_VLAN_STAG 8
+#define XGMAC_ET_LT_VLAN_CTAG 9
+#define XGMAC_ET_LT_DVLAN_CTAG_CTAG 10
+#define XGMAC_ET_LT_DVLAN_STAG_STAG 11
+#define XGMAC_ET_LT_DVLAN_CTAG_STAG 12
+#define XGMAC_ET_LT_DVLAN_STAG_CTAG 13
+
+extern const struct stmmac_ops dwxgmac210_ops;
+extern const struct stmmac_ops dwxlgmac2_ops;
+extern const struct stmmac_dma_ops dwxgmac210_dma_ops;
+extern const struct stmmac_desc_ops dwxgmac210_desc_ops;
+
#endif /* __STMMAC_DWXGMAC2_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
index 9a60a6e8f633..6cadf8de4fdf 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -10,6 +10,7 @@
#include "stmmac.h"
#include "stmmac_fpe.h"
#include "stmmac_ptp.h"
+#include "stmmac_vlan.h"
#include "dwxlgmac2.h"
#include "dwxgmac2.h"
@@ -316,17 +317,17 @@ static int dwxgmac2_host_irq_status(struct mac_device_info *hw,
if (stat & XGMAC_LPIIS) {
u32 lpi = readl(ioaddr + XGMAC_LPI_CTRL);
- if (lpi & XGMAC_TLPIEN) {
+ if (lpi & LPI_CTRL_STATUS_TLPIEN) {
ret |= CORE_IRQ_TX_PATH_IN_LPI_MODE;
x->irq_tx_path_in_lpi_mode_n++;
}
- if (lpi & XGMAC_TLPIEX) {
+ if (lpi & LPI_CTRL_STATUS_TLPIEX) {
ret |= CORE_IRQ_TX_PATH_EXIT_LPI_MODE;
x->irq_tx_path_exit_lpi_mode_n++;
}
- if (lpi & XGMAC_RLPIEN)
+ if (lpi & LPI_CTRL_STATUS_RLPIEN)
x->irq_rx_path_in_lpi_mode_n++;
- if (lpi & XGMAC_RLPIEX)
+ if (lpi & LPI_CTRL_STATUS_RLPIEX)
x->irq_rx_path_exit_lpi_mode_n++;
}
@@ -425,29 +426,28 @@ static void dwxgmac2_get_umac_addr(struct mac_device_info *hw,
addr[5] = (hi_addr >> 8) & 0xff;
}
-static void dwxgmac2_set_eee_mode(struct mac_device_info *hw,
- bool en_tx_lpi_clockgating)
+static int dwxgmac2_set_lpi_mode(struct mac_device_info *hw,
+ enum stmmac_lpi_mode mode,
+ bool en_tx_lpi_clockgating, u32 et)
{
void __iomem *ioaddr = hw->pcsr;
u32 value;
- value = readl(ioaddr + XGMAC_LPI_CTRL);
-
- value |= XGMAC_LPITXEN | XGMAC_LPITXA;
- if (en_tx_lpi_clockgating)
- value |= XGMAC_TXCGE;
-
- writel(value, ioaddr + XGMAC_LPI_CTRL);
-}
-
-static void dwxgmac2_reset_eee_mode(struct mac_device_info *hw)
-{
- void __iomem *ioaddr = hw->pcsr;
- u32 value;
+ if (mode == STMMAC_LPI_TIMER)
+ return -EOPNOTSUPP;
value = readl(ioaddr + XGMAC_LPI_CTRL);
- value &= ~(XGMAC_LPITXEN | XGMAC_LPITXA | XGMAC_TXCGE);
+ if (mode == STMMAC_LPI_FORCED) {
+ value |= LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA;
+ if (en_tx_lpi_clockgating)
+ value |= LPI_CTRL_STATUS_LPITCSE;
+ } else {
+ value &= ~(LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA |
+ LPI_CTRL_STATUS_LPITCSE);
+ }
writel(value, ioaddr + XGMAC_LPI_CTRL);
+
+ return 0;
}
static void dwxgmac2_set_eee_pls(struct mac_device_info *hw, int link)
@@ -457,9 +457,9 @@ static void dwxgmac2_set_eee_pls(struct mac_device_info *hw, int link)
value = readl(ioaddr + XGMAC_LPI_CTRL);
if (link)
- value |= XGMAC_PLS;
+ value |= LPI_CTRL_STATUS_PLS;
else
- value &= ~XGMAC_PLS;
+ value &= ~LPI_CTRL_STATUS_PLS;
writel(value, ioaddr + XGMAC_LPI_CTRL);
}
@@ -615,76 +615,6 @@ static int dwxgmac2_rss_configure(struct mac_device_info *hw,
return 0;
}
-static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash,
- u16 perfect_match, bool is_double)
-{
- void __iomem *ioaddr = hw->pcsr;
-
- writel(hash, ioaddr + XGMAC_VLAN_HASH_TABLE);
-
- if (hash) {
- u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
-
- value |= XGMAC_FILTER_VTFE;
-
- writel(value, ioaddr + XGMAC_PACKET_FILTER);
-
- value = readl(ioaddr + XGMAC_VLAN_TAG);
-
- value |= XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV;
- if (is_double) {
- value |= XGMAC_VLAN_EDVLP;
- value |= XGMAC_VLAN_ESVL;
- value |= XGMAC_VLAN_DOVLTC;
- } else {
- value &= ~XGMAC_VLAN_EDVLP;
- value &= ~XGMAC_VLAN_ESVL;
- value &= ~XGMAC_VLAN_DOVLTC;
- }
-
- value &= ~XGMAC_VLAN_VID;
- writel(value, ioaddr + XGMAC_VLAN_TAG);
- } else if (perfect_match) {
- u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
-
- value |= XGMAC_FILTER_VTFE;
-
- writel(value, ioaddr + XGMAC_PACKET_FILTER);
-
- value = readl(ioaddr + XGMAC_VLAN_TAG);
-
- value &= ~XGMAC_VLAN_VTHM;
- value |= XGMAC_VLAN_ETV;
- if (is_double) {
- value |= XGMAC_VLAN_EDVLP;
- value |= XGMAC_VLAN_ESVL;
- value |= XGMAC_VLAN_DOVLTC;
- } else {
- value &= ~XGMAC_VLAN_EDVLP;
- value &= ~XGMAC_VLAN_ESVL;
- value &= ~XGMAC_VLAN_DOVLTC;
- }
-
- value &= ~XGMAC_VLAN_VID;
- writel(value | perfect_match, ioaddr + XGMAC_VLAN_TAG);
- } else {
- u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
-
- value &= ~XGMAC_FILTER_VTFE;
-
- writel(value, ioaddr + XGMAC_PACKET_FILTER);
-
- value = readl(ioaddr + XGMAC_VLAN_TAG);
-
- value &= ~(XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV);
- value &= ~(XGMAC_VLAN_EDVLP | XGMAC_VLAN_ESVL);
- value &= ~XGMAC_VLAN_DOVLTC;
- value &= ~XGMAC_VLAN_VID;
-
- writel(value, ioaddr + XGMAC_VLAN_TAG);
- }
-}
-
struct dwxgmac3_error_desc {
bool valid;
const char *desc;
@@ -1301,19 +1231,6 @@ static void dwxgmac2_sarc_configure(void __iomem *ioaddr, int val)
writel(value, ioaddr + XGMAC_TX_CONFIG);
}
-static void dwxgmac2_enable_vlan(struct mac_device_info *hw, u32 type)
-{
- void __iomem *ioaddr = hw->pcsr;
- u32 value;
-
- value = readl(ioaddr + XGMAC_VLAN_INCL);
- value |= XGMAC_VLAN_VLTI;
- value |= XGMAC_VLAN_CSVL; /* Only use SVLAN */
- value &= ~XGMAC_VLAN_VLC;
- value |= (type << XGMAC_VLAN_VLC_SHIFT) & XGMAC_VLAN_VLC;
- writel(value, ioaddr + XGMAC_VLAN_INCL);
-}
-
static int dwxgmac2_filter_wait(struct mac_device_info *hw)
{
void __iomem *ioaddr = hw->pcsr;
@@ -1525,8 +1442,7 @@ const struct stmmac_ops dwxgmac210_ops = {
.pmt = dwxgmac2_pmt,
.set_umac_addr = dwxgmac2_set_umac_addr,
.get_umac_addr = dwxgmac2_get_umac_addr,
- .set_eee_mode = dwxgmac2_set_eee_mode,
- .reset_eee_mode = dwxgmac2_reset_eee_mode,
+ .set_lpi_mode = dwxgmac2_set_lpi_mode,
.set_eee_timer = dwxgmac2_set_eee_timer,
.set_eee_pls = dwxgmac2_set_eee_pls,
.debug = NULL,
@@ -1536,12 +1452,10 @@ const struct stmmac_ops dwxgmac210_ops = {
.safety_feat_dump = dwxgmac3_safety_feat_dump,
.set_mac_loopback = dwxgmac2_set_mac_loopback,
.rss_configure = dwxgmac2_rss_configure,
- .update_vlan_hash = dwxgmac2_update_vlan_hash,
.rxp_config = dwxgmac3_rxp_config,
.get_mac_tx_timestamp = dwxgmac2_get_mac_tx_timestamp,
.flex_pps_config = dwxgmac2_flex_pps_config,
.sarc_configure = dwxgmac2_sarc_configure,
- .enable_vlan = dwxgmac2_enable_vlan,
.config_l3_filter = dwxgmac2_config_l3_filter,
.config_l4_filter = dwxgmac2_config_l4_filter,
.set_arp_offload = dwxgmac2_set_arp_offload,
@@ -1582,8 +1496,7 @@ const struct stmmac_ops dwxlgmac2_ops = {
.pmt = dwxgmac2_pmt,
.set_umac_addr = dwxgmac2_set_umac_addr,
.get_umac_addr = dwxgmac2_get_umac_addr,
- .set_eee_mode = dwxgmac2_set_eee_mode,
- .reset_eee_mode = dwxgmac2_reset_eee_mode,
+ .set_lpi_mode = dwxgmac2_set_lpi_mode,
.set_eee_timer = dwxgmac2_set_eee_timer,
.set_eee_pls = dwxgmac2_set_eee_pls,
.debug = NULL,
@@ -1593,12 +1506,10 @@ const struct stmmac_ops dwxlgmac2_ops = {
.safety_feat_dump = dwxgmac3_safety_feat_dump,
.set_mac_loopback = dwxgmac2_set_mac_loopback,
.rss_configure = dwxgmac2_rss_configure,
- .update_vlan_hash = dwxgmac2_update_vlan_hash,
.rxp_config = dwxgmac3_rxp_config,
.get_mac_tx_timestamp = dwxgmac2_get_mac_tx_timestamp,
.flex_pps_config = dwxgmac2_flex_pps_config,
.sarc_configure = dwxgmac2_sarc_configure,
- .enable_vlan = dwxgmac2_enable_vlan,
.config_l3_filter = dwxgmac2_config_l3_filter,
.config_l4_filter = dwxgmac2_config_l4_filter,
.set_arp_offload = dwxgmac2_set_arp_offload,
@@ -1641,6 +1552,7 @@ int dwxgmac2_setup(struct stmmac_priv *priv)
mac->mii.reg_mask = GENMASK(15, 0);
mac->mii.clk_csr_shift = 19;
mac->mii.clk_csr_mask = GENMASK(21, 19);
+ mac->num_vlan = stmmac_get_num_vlan(priv->ioaddr);
return 0;
}
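dwxgmac2_setup() now also fills mac->num_vlan via the shared stmmac_get_num_vlan() helper. That helper is not shown here, but it presumably mirrors the dwmac4_get_num_vlan() logic removed earlier in this patch; the sketch below reproduces that mapping for reference (the XGMAC variant may read a different feature register):

/* Sketch only: NRVF-to-filter-count mapping copied from the removed
 * dwmac4_get_num_vlan(); the real helper lives in stmmac_vlan.c.
 */
static u32 example_get_num_vlan(void __iomem *ioaddr)
{
	u32 val = readl(ioaddr + GMAC_HW_FEATURE3);

	switch (val & GMAC_HW_FEAT_NRVF) {
	case 0:  return 1;
	case 1:  return 4;
	case 2:  return 8;
	case 3:  return 16;
	case 4:  return 24;
	case 5:  return 32;
	default: return 1;
	}
}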
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
index 389aad7b5c1e..a2980482fcce 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
@@ -4,6 +4,7 @@
* stmmac XGMAC support.
*/
+#include <linux/bitfield.h>
#include <linux/stmmac.h>
#include "common.h"
#include "dwxgmac2.h"
@@ -69,6 +70,21 @@ static int dwxgmac2_get_tx_ls(struct dma_desc *p)
return (le32_to_cpu(p->des3) & XGMAC_RDES3_LD) > 0;
}
+static u16 dwxgmac2_wrback_get_rx_vlan_tci(struct dma_desc *p)
+{
+ return le32_to_cpu(p->des0) & XGMAC_RDES0_VLAN_TAG_MASK;
+}
+
+static bool dwxgmac2_wrback_get_rx_vlan_valid(struct dma_desc *p)
+{
+ u32 et_lt;
+
+ et_lt = FIELD_GET(XGMAC_RDES3_ET_LT, le32_to_cpu(p->des3));
+
+ return et_lt >= XGMAC_ET_LT_VLAN_STAG &&
+ et_lt <= XGMAC_ET_LT_DVLAN_STAG_CTAG;
+}
+
static int dwxgmac2_get_rx_frame_len(struct dma_desc *p, int rx_coe)
{
return (le32_to_cpu(p->des3) & XGMAC_RDES3_PL);
@@ -351,6 +367,8 @@ const struct stmmac_desc_ops dwxgmac210_desc_ops = {
.set_tx_owner = dwxgmac2_set_tx_owner,
.set_rx_owner = dwxgmac2_set_rx_owner,
.get_tx_ls = dwxgmac2_get_tx_ls,
+ .get_rx_vlan_tci = dwxgmac2_wrback_get_rx_vlan_tci,
+ .get_rx_vlan_valid = dwxgmac2_wrback_get_rx_vlan_valid,
.get_rx_frame_len = dwxgmac2_get_rx_frame_len,
.enable_tx_timestamp = dwxgmac2_enable_tx_timestamp,
.get_tx_timestamp_status = dwxgmac2_get_tx_timestamp_status,
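The two new write-back descriptor callbacks supply exactly what the shared VLAN RX path needs to tag the skb; the dwmac4_rx_hw_vlan() body removed earlier in this patch shows the expected consumer pattern, reproduced here as a sketch:

/* Illustrative consumer of get_rx_vlan_valid()/get_rx_vlan_tci(),
 * modelled on the dwmac4_rx_hw_vlan() removed earlier in this patch.
 */
static void example_rx_hw_vlan(struct mac_device_info *hw,
			       struct dma_desc *rx_desc, struct sk_buff *skb)
{
	if (hw->desc->get_rx_vlan_valid(rx_desc)) {
		u16 vid = hw->desc->get_rx_vlan_tci(rx_desc);

		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
	}
}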
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.c b/drivers/net/ethernet/stmicro/stmmac/hwif.c
index a72d336a8350..99635b37044a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.c
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.c
@@ -9,6 +9,9 @@
#include "stmmac_fpe.h"
#include "stmmac_ptp.h"
#include "stmmac_est.h"
+#include "stmmac_vlan.h"
+#include "dwmac4_descs.h"
+#include "dwxgmac2.h"
static u32 stmmac_get_id(struct stmmac_priv *priv, u32 id_reg)
{
@@ -118,6 +121,7 @@ static const struct stmmac_hwif_entry {
const void *tc;
const void *mmc;
const void *est;
+ const void *vlan;
int (*setup)(struct stmmac_priv *priv);
int (*quirks)(struct stmmac_priv *priv);
} stmmac_hw[] = {
@@ -173,6 +177,7 @@ static const struct stmmac_hwif_entry {
.desc = &dwmac4_desc_ops,
.dma = &dwmac4_dma_ops,
.mac = &dwmac4_ops,
+ .vlan = &dwmac_vlan_ops,
.hwtimestamp = &stmmac_ptp,
.ptp = &stmmac_ptp_clock_ops,
.mode = NULL,
@@ -195,6 +200,7 @@ static const struct stmmac_hwif_entry {
.desc = &dwmac4_desc_ops,
.dma = &dwmac4_dma_ops,
.mac = &dwmac410_ops,
+ .vlan = &dwmac_vlan_ops,
.hwtimestamp = &stmmac_ptp,
.ptp = &stmmac_ptp_clock_ops,
.mode = &dwmac4_ring_mode_ops,
@@ -217,6 +223,7 @@ static const struct stmmac_hwif_entry {
.desc = &dwmac4_desc_ops,
.dma = &dwmac410_dma_ops,
.mac = &dwmac410_ops,
+ .vlan = &dwmac_vlan_ops,
.hwtimestamp = &stmmac_ptp,
.ptp = &stmmac_ptp_clock_ops,
.mode = &dwmac4_ring_mode_ops,
@@ -239,6 +246,7 @@ static const struct stmmac_hwif_entry {
.desc = &dwmac4_desc_ops,
.dma = &dwmac410_dma_ops,
.mac = &dwmac510_ops,
+ .vlan = &dwmac_vlan_ops,
.hwtimestamp = &stmmac_ptp,
.ptp = &stmmac_ptp_clock_ops,
.mode = &dwmac4_ring_mode_ops,
@@ -262,10 +270,11 @@ static const struct stmmac_hwif_entry {
.desc = &dwxgmac210_desc_ops,
.dma = &dwxgmac210_dma_ops,
.mac = &dwxgmac210_ops,
+ .vlan = &dwxgmac210_vlan_ops,
.hwtimestamp = &stmmac_ptp,
.ptp = &stmmac_ptp_clock_ops,
.mode = NULL,
- .tc = &dwxgmac_tc_ops,
+ .tc = &dwmac510_tc_ops,
.mmc = &dwxgmac_mmc_ops,
.est = &dwmac510_est_ops,
.setup = dwxgmac2_setup,
@@ -285,10 +294,11 @@ static const struct stmmac_hwif_entry {
.desc = &dwxgmac210_desc_ops,
.dma = &dwxgmac210_dma_ops,
.mac = &dwxlgmac2_ops,
+ .vlan = &dwxlgmac2_vlan_ops,
.hwtimestamp = &stmmac_ptp,
.ptp = &stmmac_ptp_clock_ops,
.mode = NULL,
- .tc = &dwxgmac_tc_ops,
+ .tc = &dwmac510_tc_ops,
.mmc = &dwxgmac_mmc_ops,
.est = &dwmac510_est_ops,
.setup = dwxlgmac2_setup,
@@ -366,6 +376,7 @@ int stmmac_hwif_init(struct stmmac_priv *priv)
mac->tc = mac->tc ? : entry->tc;
mac->mmc = mac->mmc ? : entry->mmc;
mac->est = mac->est ? : entry->est;
+ mac->vlan = mac->vlan ? : entry->vlan;
priv->hw = mac;
priv->fpe_cfg.reg = entry->regs.fpe_reg;
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index 64f8ed67dcc4..ae4efffb785f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -306,6 +306,12 @@ struct stmmac_pps_cfg;
struct stmmac_rss;
struct stmmac_est;
+enum stmmac_lpi_mode {
+ STMMAC_LPI_DISABLE,
+ STMMAC_LPI_FORCED,
+ STMMAC_LPI_TIMER,
+};
+
/* Helpers to program the MAC core */
struct stmmac_ops {
/* MAC core initialization */
@@ -360,10 +366,9 @@ struct stmmac_ops {
unsigned int reg_n);
void (*get_umac_addr)(struct mac_device_info *hw, unsigned char *addr,
unsigned int reg_n);
- void (*set_eee_mode)(struct mac_device_info *hw,
- bool en_tx_lpi_clockgating);
- void (*reset_eee_mode)(struct mac_device_info *hw);
- void (*set_eee_lpi_entry_timer)(struct mac_device_info *hw, int et);
+ int (*set_lpi_mode)(struct mac_device_info *hw,
+ enum stmmac_lpi_mode mode,
+ bool en_tx_lpi_clockgating, u32 et);
void (*set_eee_timer)(struct mac_device_info *hw, int ls, int tw);
void (*set_eee_pls)(struct mac_device_info *hw, int link);
void (*debug)(struct stmmac_priv *priv, void __iomem *ioaddr,
@@ -393,21 +398,6 @@ struct stmmac_ops {
/* RSS */
int (*rss_configure)(struct mac_device_info *hw,
struct stmmac_rss *cfg, u32 num_rxq);
- /* VLAN */
- void (*update_vlan_hash)(struct mac_device_info *hw, u32 hash,
- u16 perfect_match, bool is_double);
- void (*enable_vlan)(struct mac_device_info *hw, u32 type);
- void (*rx_hw_vlan)(struct mac_device_info *hw, struct dma_desc *rx_desc,
- struct sk_buff *skb);
- void (*set_hw_vlan_mode)(struct mac_device_info *hw);
- int (*add_hw_vlan_rx_fltr)(struct net_device *dev,
- struct mac_device_info *hw,
- __be16 proto, u16 vid);
- int (*del_hw_vlan_rx_fltr)(struct net_device *dev,
- struct mac_device_info *hw,
- __be16 proto, u16 vid);
- void (*restore_hw_vlan_rx_fltr)(struct net_device *dev,
- struct mac_device_info *hw);
/* TX Timestamp */
int (*get_mac_tx_timestamp)(struct mac_device_info *hw, u64 *ts);
/* Source Address Insertion / Replacement */
@@ -467,12 +457,8 @@ struct stmmac_ops {
stmmac_do_void_callback(__priv, mac, set_umac_addr, __args)
#define stmmac_get_umac_addr(__priv, __args...) \
stmmac_do_void_callback(__priv, mac, get_umac_addr, __args)
-#define stmmac_set_eee_mode(__priv, __args...) \
- stmmac_do_void_callback(__priv, mac, set_eee_mode, __args)
-#define stmmac_reset_eee_mode(__priv, __args...) \
- stmmac_do_void_callback(__priv, mac, reset_eee_mode, __args)
-#define stmmac_set_eee_lpi_timer(__priv, __args...) \
- stmmac_do_void_callback(__priv, mac, set_eee_lpi_entry_timer, __args)
+#define stmmac_set_lpi_mode(__priv, __args...) \
+ stmmac_do_callback(__priv, mac, set_lpi_mode, __args)
#define stmmac_set_eee_timer(__priv, __args...) \
stmmac_do_void_callback(__priv, mac, set_eee_timer, __args)
#define stmmac_set_eee_pls(__priv, __args...) \
@@ -497,20 +483,6 @@ struct stmmac_ops {
stmmac_do_void_callback(__priv, mac, set_mac_loopback, __args)
#define stmmac_rss_configure(__priv, __args...) \
stmmac_do_callback(__priv, mac, rss_configure, __args)
-#define stmmac_update_vlan_hash(__priv, __args...) \
- stmmac_do_void_callback(__priv, mac, update_vlan_hash, __args)
-#define stmmac_enable_vlan(__priv, __args...) \
- stmmac_do_void_callback(__priv, mac, enable_vlan, __args)
-#define stmmac_rx_hw_vlan(__priv, __args...) \
- stmmac_do_void_callback(__priv, mac, rx_hw_vlan, __args)
-#define stmmac_set_hw_vlan_mode(__priv, __args...) \
- stmmac_do_void_callback(__priv, mac, set_hw_vlan_mode, __args)
-#define stmmac_add_hw_vlan_rx_fltr(__priv, __args...) \
- stmmac_do_callback(__priv, mac, add_hw_vlan_rx_fltr, __args)
-#define stmmac_del_hw_vlan_rx_fltr(__priv, __args...) \
- stmmac_do_callback(__priv, mac, del_hw_vlan_rx_fltr, __args)
-#define stmmac_restore_hw_vlan_rx_fltr(__priv, __args...) \
- stmmac_do_void_callback(__priv, mac, restore_hw_vlan_rx_fltr, __args)
#define stmmac_get_mac_tx_timestamp(__priv, __args...) \
stmmac_do_callback(__priv, mac, get_mac_tx_timestamp, __args)
#define stmmac_sarc_configure(__priv, __args...) \
@@ -658,6 +630,39 @@ struct stmmac_est_ops {
#define stmmac_est_irq_status(__priv, __args...) \
stmmac_do_void_callback(__priv, est, irq_status, __args)
+struct stmmac_vlan_ops {
+ /* VLAN */
+ void (*update_vlan_hash)(struct mac_device_info *hw, u32 hash,
+ u16 perfect_match, bool is_double);
+ void (*enable_vlan)(struct mac_device_info *hw, u32 type);
+ void (*rx_hw_vlan)(struct mac_device_info *hw, struct dma_desc *rx_desc,
+ struct sk_buff *skb);
+ void (*set_hw_vlan_mode)(struct mac_device_info *hw);
+ int (*add_hw_vlan_rx_fltr)(struct net_device *dev,
+ struct mac_device_info *hw,
+ __be16 proto, u16 vid);
+ int (*del_hw_vlan_rx_fltr)(struct net_device *dev,
+ struct mac_device_info *hw,
+ __be16 proto, u16 vid);
+ void (*restore_hw_vlan_rx_fltr)(struct net_device *dev,
+ struct mac_device_info *hw);
+};
+
+#define stmmac_update_vlan_hash(__priv, __args...) \
+ stmmac_do_void_callback(__priv, vlan, update_vlan_hash, __args)
+#define stmmac_enable_vlan(__priv, __args...) \
+ stmmac_do_void_callback(__priv, vlan, enable_vlan, __args)
+#define stmmac_rx_hw_vlan(__priv, __args...) \
+ stmmac_do_void_callback(__priv, vlan, rx_hw_vlan, __args)
+#define stmmac_set_hw_vlan_mode(__priv, __args...) \
+ stmmac_do_void_callback(__priv, vlan, set_hw_vlan_mode, __args)
+#define stmmac_add_hw_vlan_rx_fltr(__priv, __args...) \
+ stmmac_do_callback(__priv, vlan, add_hw_vlan_rx_fltr, __args)
+#define stmmac_del_hw_vlan_rx_fltr(__priv, __args...) \
+ stmmac_do_callback(__priv, vlan, del_hw_vlan_rx_fltr, __args)
+#define stmmac_restore_hw_vlan_rx_fltr(__priv, __args...) \
+ stmmac_do_void_callback(__priv, vlan, restore_hw_vlan_rx_fltr, __args)
+
struct stmmac_regs_off {
const struct stmmac_fpe_reg *fpe_reg;
u32 ptp_off;
@@ -665,6 +670,15 @@ struct stmmac_regs_off {
u32 est_off;
};
+extern const struct stmmac_desc_ops enh_desc_ops;
+extern const struct stmmac_desc_ops ndesc_ops;
+
+extern const struct stmmac_hwtimestamp stmmac_ptp;
+extern const struct stmmac_hwtimestamp dwmac1000_ptp;
+
+extern const struct stmmac_mode_ops ring_mode_ops;
+extern const struct stmmac_mode_ops chain_mode_ops;
+
extern const struct stmmac_ops dwmac100_ops;
extern const struct stmmac_dma_ops dwmac100_dma_ops;
extern const struct stmmac_ops dwmac1000_ops;
@@ -676,14 +690,6 @@ extern const struct stmmac_dma_ops dwmac410_dma_ops;
extern const struct stmmac_ops dwmac510_ops;
extern const struct stmmac_tc_ops dwmac4_tc_ops;
extern const struct stmmac_tc_ops dwmac510_tc_ops;
-extern const struct stmmac_tc_ops dwxgmac_tc_ops;
-extern const struct stmmac_ops dwxgmac210_ops;
-extern const struct stmmac_ops dwxlgmac2_ops;
-extern const struct stmmac_dma_ops dwxgmac210_dma_ops;
-extern const struct stmmac_desc_ops dwxgmac210_desc_ops;
-extern const struct stmmac_mmc_ops dwmac_mmc_ops;
-extern const struct stmmac_mmc_ops dwxgmac_mmc_ops;
-extern const struct stmmac_est_ops dwmac510_est_ops;
#define GMAC_VERSION 0x00000020 /* GMAC CORE Version */
#define GMAC4_VERSION 0x00000110 /* GMAC4+ CORE Version */
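Moving the VLAN callbacks into struct stmmac_vlan_ops changes only which ops table the wrapper macros dispatch through ("vlan" instead of "mac"); the call signatures are unchanged. A hedged example of an unchanged call site (the real ones live in stmmac_main.c, outside this diff):

/* Illustrative ndo_vlan_rx_add_vid handler; argument order follows the
 * stmmac_add_hw_vlan_rx_fltr() macro defined above.
 */
static int example_vlan_rx_add_vid(struct net_device *ndev,
				   __be16 proto, u16 vid)
{
	struct stmmac_priv *priv = netdev_priv(ndev);

	return stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
}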
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc.h b/drivers/net/ethernet/stmicro/stmmac/mmc.h
index 5d1ea3e07459..1cba39fb2c44 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc.h
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc.h
@@ -139,4 +139,7 @@ struct stmmac_counters {
unsigned int mmc_rx_fpe_fragment_cntr;
};
+extern const struct stmmac_mmc_ops dwmac_mmc_ops;
+extern const struct stmmac_mmc_ops dwxgmac_mmc_ops;
+
#endif /* __MMC_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 1d86439b8a14..cda09cf5dcca 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -106,6 +106,8 @@ struct stmmac_metadata_request {
struct stmmac_priv *priv;
struct dma_desc *tx_desc;
bool *set_ic;
+ struct dma_edesc *edesc;
+ int tbs;
};
struct stmmac_xsk_tx_complete {
@@ -126,7 +128,7 @@ struct stmmac_rx_queue {
unsigned int cur_rx;
unsigned int dirty_rx;
unsigned int buf_alloc_num;
- u32 rx_zeroc_thresh;
+ unsigned int napi_skb_frag_size;
dma_addr_t dma_rx_phy;
u32 rx_tail_addr;
unsigned int state_saved;
@@ -147,21 +149,9 @@ struct stmmac_channel {
};
struct stmmac_fpe_cfg {
- /* Serialize access to MAC Merge state between ethtool requests
- * and link state updates.
- */
- spinlock_t lock;
-
+ struct ethtool_mmsv mmsv;
const struct stmmac_fpe_reg *reg;
- u32 fpe_csr; /* MAC_FPE_CTRL_STS reg cache */
-
- enum ethtool_mm_verify_status status;
- struct timer_list verify_timer;
- bool verify_enabled;
- int verify_retries;
- bool pmac_enabled;
- u32 verify_time;
- bool tx_enabled;
+ u32 fpe_csr; /* MAC_FPE_CTRL_STS reg cache */
};
struct stmmac_tc_entry {
@@ -257,7 +247,7 @@ struct stmmac_priv {
/* Frequently used values are kept adjacent for cache effect */
u32 tx_coal_frames[MTL_MAX_TX_QUEUES];
u32 tx_coal_timer[MTL_MAX_TX_QUEUES];
- u32 rx_coal_frames[MTL_MAX_TX_QUEUES];
+ u32 rx_coal_frames[MTL_MAX_RX_QUEUES];
int hwts_tx_en;
bool tx_path_in_lpi_mode;
@@ -265,9 +255,7 @@ struct stmmac_priv {
int sph;
int sph_cap;
u32 sarc_type;
-
- unsigned int rx_copybreak;
- u32 rx_riwt[MTL_MAX_TX_QUEUES];
+ u32 rx_riwt[MTL_MAX_RX_QUEUES];
int hwts_rx_en;
void __iomem *ioaddr;
@@ -282,9 +270,7 @@ struct stmmac_priv {
/* Generic channel for NAPI */
struct stmmac_channel channel[STMMAC_CH_MAX];
- int speed;
- unsigned int flow_ctrl;
- unsigned int pause;
+ unsigned int pause_time;
struct mii_bus *mii;
struct phylink_config phylink_config;
@@ -307,16 +293,15 @@ struct stmmac_priv {
int clk_csr;
struct timer_list eee_ctrl_timer;
int lpi_irq;
- int eee_enabled;
- int eee_active;
- int tx_lpi_timer;
- int tx_lpi_enabled;
- int eee_tw_timer;
+ u32 tx_lpi_timer;
+ bool tx_lpi_clk_stop;
+ bool eee_enabled;
+ bool eee_active;
bool eee_sw_timer_en;
unsigned int mode;
unsigned int chain_mode;
int extend_desc;
- struct hwtstamp_config tstamp_config;
+ struct kernel_hwtstamp_config tstamp_config;
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_clock_ops;
unsigned int default_addend;
@@ -346,7 +331,7 @@ struct stmmac_priv {
char int_name_sfty[IFNAMSIZ + 10];
char int_name_sfty_ce[IFNAMSIZ + 10];
char int_name_sfty_ue[IFNAMSIZ + 10];
- char int_name_rx_irq[MTL_MAX_TX_QUEUES][IFNAMSIZ + 14];
+ char int_name_rx_irq[MTL_MAX_RX_QUEUES][IFNAMSIZ + 14];
char int_name_tx_irq[MTL_MAX_TX_QUEUES][IFNAMSIZ + 18];
#ifdef CONFIG_DEBUG_FS
@@ -407,25 +392,17 @@ void stmmac_dvr_remove(struct device *dev);
int stmmac_dvr_probe(struct device *device,
struct plat_stmmacenet_data *plat_dat,
struct stmmac_resources *res);
-void stmmac_disable_eee_mode(struct stmmac_priv *priv);
-bool stmmac_eee_init(struct stmmac_priv *priv);
int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt);
int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size);
int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled);
+int stmmac_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
+ phy_interface_t interface, int speed);
static inline bool stmmac_xdp_is_enabled(struct stmmac_priv *priv)
{
return !!priv->xdp_prog;
}
-static inline unsigned int stmmac_rx_offset(struct stmmac_priv *priv)
-{
- if (stmmac_xdp_is_enabled(priv))
- return XDP_PACKET_HEADROOM;
-
- return 0;
-}
-
void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue);
void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue);
void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c
index c9693f77e1f6..ac6f2e3a3fcd 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c
@@ -32,6 +32,11 @@ static int est_configure(struct stmmac_priv *priv, struct stmmac_est *cfg,
int i, ret = 0;
u32 ctrl;
+ if (!ptp_rate) {
+ netdev_warn(priv->dev, "Invalid PTP rate");
+ return -EINVAL;
+ }
+
ret |= est_write(est_addr, EST_BTR_LOW, cfg->btr[0], false);
ret |= est_write(est_addr, EST_BTR_HIGH, cfg->btr[1], false);
ret |= est_write(est_addr, EST_TER, cfg->ter, false);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_est.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_est.h
index 7a858c566e7e..d247fa383a6e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_est.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_est.h
@@ -62,3 +62,5 @@
#define EST_SRWO BIT(0)
#define EST_GCL_DATA 0x00000034
+
+extern const struct stmmac_est_ops dwmac510_est_ops;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 1d77389ce953..f702f7b7bf9f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -37,7 +37,7 @@
#define ETHTOOL_DMA_OFFSET 55
struct stmmac_stats {
- char stat_string[ETH_GSTRING_LEN];
+ char stat_string[ETH_GSTRING_LEN] __nonstring;
int sizeof_stat;
int stat_offset;
};
@@ -654,7 +654,7 @@ static void stmmac_get_ethtool_stats(struct net_device *dev,
(*(u32 *)p);
}
}
- if (priv->eee_enabled) {
+ if (priv->dma_cap.eee) {
int val = phylink_get_eee_err(priv->phylink);
if (val)
priv->xstats.phy_eee_wakeup_error_n = val;
@@ -898,9 +898,6 @@ static int stmmac_ethtool_op_get_eee(struct net_device *dev,
if (!priv->dma_cap.eee)
return -EOPNOTSUPP;
- edata->tx_lpi_timer = priv->tx_lpi_timer;
- edata->tx_lpi_enabled = priv->tx_lpi_enabled;
-
return phylink_ethtool_get_eee(priv->phylink, edata);
}
@@ -908,29 +905,11 @@ static int stmmac_ethtool_op_set_eee(struct net_device *dev,
struct ethtool_keee *edata)
{
struct stmmac_priv *priv = netdev_priv(dev);
- int ret;
if (!priv->dma_cap.eee)
return -EOPNOTSUPP;
- if (priv->tx_lpi_enabled != edata->tx_lpi_enabled)
- netdev_warn(priv->dev,
- "Setting EEE tx-lpi is not supported\n");
-
- if (!edata->eee_enabled)
- stmmac_disable_eee_mode(priv);
-
- ret = phylink_ethtool_set_eee(priv->phylink, edata);
- if (ret)
- return ret;
-
- if (edata->eee_enabled &&
- priv->tx_lpi_timer != edata->tx_lpi_timer) {
- priv->tx_lpi_timer = edata->tx_lpi_timer;
- stmmac_eee_init(priv);
- }
-
- return 0;
+ return phylink_ethtool_set_eee(priv->phylink, edata);
}
static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv)
@@ -1227,77 +1206,20 @@ static int stmmac_get_ts_info(struct net_device *dev,
return ethtool_op_get_ts_info(dev, info);
}
-static int stmmac_get_tunable(struct net_device *dev,
- const struct ethtool_tunable *tuna, void *data)
-{
- struct stmmac_priv *priv = netdev_priv(dev);
- int ret = 0;
-
- switch (tuna->id) {
- case ETHTOOL_RX_COPYBREAK:
- *(u32 *)data = priv->rx_copybreak;
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
-static int stmmac_set_tunable(struct net_device *dev,
- const struct ethtool_tunable *tuna,
- const void *data)
-{
- struct stmmac_priv *priv = netdev_priv(dev);
- int ret = 0;
-
- switch (tuna->id) {
- case ETHTOOL_RX_COPYBREAK:
- priv->rx_copybreak = *(u32 *)data;
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
static int stmmac_get_mm(struct net_device *ndev,
struct ethtool_mm_state *state)
{
struct stmmac_priv *priv = netdev_priv(ndev);
- unsigned long flags;
u32 frag_size;
if (!stmmac_fpe_supported(priv))
return -EOPNOTSUPP;
- spin_lock_irqsave(&priv->fpe_cfg.lock, flags);
-
- state->max_verify_time = STMMAC_FPE_MM_MAX_VERIFY_TIME_MS;
- state->verify_enabled = priv->fpe_cfg.verify_enabled;
- state->pmac_enabled = priv->fpe_cfg.pmac_enabled;
- state->verify_time = priv->fpe_cfg.verify_time;
- state->tx_enabled = priv->fpe_cfg.tx_enabled;
- state->verify_status = priv->fpe_cfg.status;
state->rx_min_frag_size = ETH_ZLEN;
-
- /* FPE active if common tx_enabled and
- * (verification success or disabled(forced))
- */
- if (state->tx_enabled &&
- (state->verify_status == ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED ||
- state->verify_status == ETHTOOL_MM_VERIFY_STATUS_DISABLED))
- state->tx_active = true;
- else
- state->tx_active = false;
-
frag_size = stmmac_fpe_get_add_frag_size(priv);
state->tx_min_frag_size = ethtool_mm_frag_size_add_to_min(frag_size);
- spin_unlock_irqrestore(&priv->fpe_cfg.lock, flags);
+ ethtool_mmsv_get_mm(&priv->fpe_cfg.mmsv, state);
return 0;
}
@@ -1306,8 +1228,6 @@ static int stmmac_set_mm(struct net_device *ndev, struct ethtool_mm_cfg *cfg,
struct netlink_ext_ack *extack)
{
struct stmmac_priv *priv = netdev_priv(ndev);
- struct stmmac_fpe_cfg *fpe_cfg = &priv->fpe_cfg;
- unsigned long flags;
u32 frag_size;
int err;
@@ -1316,23 +1236,8 @@ static int stmmac_set_mm(struct net_device *ndev, struct ethtool_mm_cfg *cfg,
if (err)
return err;
- /* Wait for the verification that's currently in progress to finish */
- timer_shutdown_sync(&fpe_cfg->verify_timer);
-
- spin_lock_irqsave(&fpe_cfg->lock, flags);
-
- fpe_cfg->verify_enabled = cfg->verify_enabled;
- fpe_cfg->pmac_enabled = cfg->pmac_enabled;
- fpe_cfg->verify_time = cfg->verify_time;
- fpe_cfg->tx_enabled = cfg->tx_enabled;
-
- if (!cfg->verify_enabled)
- fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_DISABLED;
-
stmmac_fpe_set_add_frag_size(priv, frag_size);
- stmmac_fpe_apply(priv);
-
- spin_unlock_irqrestore(&fpe_cfg->lock, flags);
+ ethtool_mmsv_set_mm(&priv->fpe_cfg.mmsv, cfg);
return 0;
}
@@ -1390,8 +1295,6 @@ static const struct ethtool_ops stmmac_ethtool_ops = {
.set_per_queue_coalesce = stmmac_set_per_queue_coalesce,
.get_channels = stmmac_get_channels,
.set_channels = stmmac_set_channels,
- .get_tunable = stmmac_get_tunable,
- .set_tunable = stmmac_set_tunable,
.get_link_ksettings = stmmac_ethtool_get_link_ksettings,
.set_link_ksettings = stmmac_ethtool_set_link_ksettings,
.get_mm = stmmac_get_mm,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.c
index 3a4bee029c7f..75b470ee621a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.c
@@ -27,12 +27,6 @@
#define STMMAC_MAC_FPE_CTRL_STS_SVER BIT(1)
#define STMMAC_MAC_FPE_CTRL_STS_EFPE BIT(0)
-/* FPE link-partner hand-shaking mPacket type */
-enum stmmac_mpacket_type {
- MPACKET_VERIFY = 0,
- MPACKET_RESPONSE = 1,
-};
-
struct stmmac_fpe_reg {
const u32 mac_fpe_reg; /* offset of MAC_FPE_CTRL_STS */
const u32 mtl_fpe_reg; /* offset of MTL_FPE_CTRL_STS */
@@ -48,10 +42,10 @@ bool stmmac_fpe_supported(struct stmmac_priv *priv)
priv->hw->mac->fpe_map_preemption_class;
}
-static void stmmac_fpe_configure(struct stmmac_priv *priv, bool tx_enable,
- bool pmac_enable)
+static void stmmac_fpe_configure_tx(struct ethtool_mmsv *mmsv, bool tx_enable)
{
- struct stmmac_fpe_cfg *cfg = &priv->fpe_cfg;
+ struct stmmac_fpe_cfg *cfg = container_of(mmsv, struct stmmac_fpe_cfg, mmsv);
+ struct stmmac_priv *priv = container_of(cfg, struct stmmac_priv, fpe_cfg);
const struct stmmac_fpe_reg *reg = cfg->reg;
u32 num_rxq = priv->plat->rx_queues_to_use;
void __iomem *ioaddr = priv->ioaddr;
@@ -68,6 +62,15 @@ static void stmmac_fpe_configure(struct stmmac_priv *priv, bool tx_enable,
cfg->fpe_csr = 0;
}
writel(cfg->fpe_csr, ioaddr + reg->mac_fpe_reg);
+}
+
+static void stmmac_fpe_configure_pmac(struct ethtool_mmsv *mmsv, bool pmac_enable)
+{
+ struct stmmac_fpe_cfg *cfg = container_of(mmsv, struct stmmac_fpe_cfg, mmsv);
+ struct stmmac_priv *priv = container_of(cfg, struct stmmac_priv, fpe_cfg);
+ const struct stmmac_fpe_reg *reg = cfg->reg;
+ void __iomem *ioaddr = priv->ioaddr;
+ u32 value;
value = readl(ioaddr + reg->int_en_reg);
@@ -85,47 +88,45 @@ static void stmmac_fpe_configure(struct stmmac_priv *priv, bool tx_enable,
writel(value, ioaddr + reg->int_en_reg);
}
-static void stmmac_fpe_send_mpacket(struct stmmac_priv *priv,
- enum stmmac_mpacket_type type)
+static void stmmac_fpe_send_mpacket(struct ethtool_mmsv *mmsv,
+ enum ethtool_mpacket type)
{
- const struct stmmac_fpe_reg *reg = priv->fpe_cfg.reg;
+ struct stmmac_fpe_cfg *cfg = container_of(mmsv, struct stmmac_fpe_cfg, mmsv);
+ struct stmmac_priv *priv = container_of(cfg, struct stmmac_priv, fpe_cfg);
+ const struct stmmac_fpe_reg *reg = cfg->reg;
void __iomem *ioaddr = priv->ioaddr;
- u32 value = priv->fpe_cfg.fpe_csr;
+ u32 value = cfg->fpe_csr;
- if (type == MPACKET_VERIFY)
+ if (type == ETHTOOL_MPACKET_VERIFY)
value |= STMMAC_MAC_FPE_CTRL_STS_SVER;
- else if (type == MPACKET_RESPONSE)
+ else if (type == ETHTOOL_MPACKET_RESPONSE)
value |= STMMAC_MAC_FPE_CTRL_STS_SRSP;
writel(value, ioaddr + reg->mac_fpe_reg);
}
+static const struct ethtool_mmsv_ops stmmac_mmsv_ops = {
+ .configure_tx = stmmac_fpe_configure_tx,
+ .configure_pmac = stmmac_fpe_configure_pmac,
+ .send_mpacket = stmmac_fpe_send_mpacket,
+};
+
static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
{
struct stmmac_fpe_cfg *fpe_cfg = &priv->fpe_cfg;
+ struct ethtool_mmsv *mmsv = &fpe_cfg->mmsv;
- /* This is interrupt context, just spin_lock() */
- spin_lock(&fpe_cfg->lock);
-
- if (!fpe_cfg->pmac_enabled || status == FPE_EVENT_UNKNOWN)
- goto unlock_out;
+ if (status == FPE_EVENT_UNKNOWN)
+ return;
- /* LP has sent verify mPacket */
if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER)
- stmmac_fpe_send_mpacket(priv, MPACKET_RESPONSE);
+ ethtool_mmsv_event_handle(mmsv, ETHTOOL_MMSV_LP_SENT_VERIFY_MPACKET);
- /* Local has sent verify mPacket */
- if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER &&
- fpe_cfg->status != ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED)
- fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_VERIFYING;
+ if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER)
+ ethtool_mmsv_event_handle(mmsv, ETHTOOL_MMSV_LD_SENT_VERIFY_MPACKET);
- /* LP has sent response mPacket */
- if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP &&
- fpe_cfg->status == ETHTOOL_MM_VERIFY_STATUS_VERIFYING)
- fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED;
-
-unlock_out:
- spin_unlock(&fpe_cfg->lock);
+ if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
+ ethtool_mmsv_event_handle(mmsv, ETHTOOL_MMSV_LP_SENT_RESPONSE_MPACKET);
}
void stmmac_fpe_irq_status(struct stmmac_priv *priv)
@@ -164,119 +165,16 @@ void stmmac_fpe_irq_status(struct stmmac_priv *priv)
stmmac_fpe_event_status(priv, status);
}
-/**
- * stmmac_fpe_verify_timer - Timer for MAC Merge verification
- * @t: timer_list struct containing private info
- *
- * Verify the MAC Merge capability in the local TX direction, by
- * transmitting Verify mPackets up to 3 times. Wait until link
- * partner responds with a Response mPacket, otherwise fail.
- */
-static void stmmac_fpe_verify_timer(struct timer_list *t)
-{
- struct stmmac_fpe_cfg *fpe_cfg = from_timer(fpe_cfg, t, verify_timer);
- struct stmmac_priv *priv = container_of(fpe_cfg, struct stmmac_priv,
- fpe_cfg);
- unsigned long flags;
- bool rearm = false;
-
- spin_lock_irqsave(&fpe_cfg->lock, flags);
-
- switch (fpe_cfg->status) {
- case ETHTOOL_MM_VERIFY_STATUS_INITIAL:
- case ETHTOOL_MM_VERIFY_STATUS_VERIFYING:
- if (fpe_cfg->verify_retries != 0) {
- stmmac_fpe_send_mpacket(priv, MPACKET_VERIFY);
- rearm = true;
- } else {
- fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_FAILED;
- }
-
- fpe_cfg->verify_retries--;
- break;
-
- case ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED:
- stmmac_fpe_configure(priv, true, true);
- break;
-
- default:
- break;
- }
-
- if (rearm) {
- mod_timer(&fpe_cfg->verify_timer,
- jiffies + msecs_to_jiffies(fpe_cfg->verify_time));
- }
-
- spin_unlock_irqrestore(&fpe_cfg->lock, flags);
-}
-
-static void stmmac_fpe_verify_timer_arm(struct stmmac_fpe_cfg *fpe_cfg)
-{
- if (fpe_cfg->pmac_enabled && fpe_cfg->tx_enabled &&
- fpe_cfg->verify_enabled &&
- fpe_cfg->status != ETHTOOL_MM_VERIFY_STATUS_FAILED &&
- fpe_cfg->status != ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED) {
- timer_setup(&fpe_cfg->verify_timer, stmmac_fpe_verify_timer, 0);
- mod_timer(&fpe_cfg->verify_timer, jiffies);
- }
-}
-
void stmmac_fpe_init(struct stmmac_priv *priv)
{
- priv->fpe_cfg.verify_retries = STMMAC_FPE_MM_MAX_VERIFY_RETRIES;
- priv->fpe_cfg.verify_time = STMMAC_FPE_MM_MAX_VERIFY_TIME_MS;
- priv->fpe_cfg.status = ETHTOOL_MM_VERIFY_STATUS_DISABLED;
- timer_setup(&priv->fpe_cfg.verify_timer, stmmac_fpe_verify_timer, 0);
- spin_lock_init(&priv->fpe_cfg.lock);
+ ethtool_mmsv_init(&priv->fpe_cfg.mmsv, priv->dev,
+ &stmmac_mmsv_ops);
if ((!priv->fpe_cfg.reg || !priv->hw->mac->fpe_map_preemption_class) &&
priv->dma_cap.fpesel)
dev_info(priv->device, "FPE is not supported by driver.\n");
}
-void stmmac_fpe_apply(struct stmmac_priv *priv)
-{
- struct stmmac_fpe_cfg *fpe_cfg = &priv->fpe_cfg;
-
- /* If verification is disabled, configure FPE right away.
- * Otherwise let the timer code do it.
- */
- if (!fpe_cfg->verify_enabled) {
- stmmac_fpe_configure(priv, fpe_cfg->tx_enabled,
- fpe_cfg->pmac_enabled);
- } else {
- fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_INITIAL;
- fpe_cfg->verify_retries = STMMAC_FPE_MM_MAX_VERIFY_RETRIES;
-
- if (netif_running(priv->dev))
- stmmac_fpe_verify_timer_arm(fpe_cfg);
- }
-}
-
-void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
-{
- struct stmmac_fpe_cfg *fpe_cfg = &priv->fpe_cfg;
- unsigned long flags;
-
- timer_shutdown_sync(&fpe_cfg->verify_timer);
-
- spin_lock_irqsave(&fpe_cfg->lock, flags);
-
- if (is_up && fpe_cfg->pmac_enabled) {
- /* VERIFY process requires pmac enabled when NIC comes up */
- stmmac_fpe_configure(priv, false, true);
-
- /* New link => maybe new partner => new verification process */
- stmmac_fpe_apply(priv);
- } else {
- /* No link => turn off EFPE */
- stmmac_fpe_configure(priv, false, false);
- }
-
- spin_unlock_irqrestore(&fpe_cfg->lock, flags);
-}
-
int stmmac_fpe_get_add_frag_size(struct stmmac_priv *priv)
{
const struct stmmac_fpe_reg *reg = priv->fpe_cfg.reg;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.h
index b884eac7142d..3fc46acf7001 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.h
@@ -9,15 +9,10 @@
#include <linux/types.h>
#include <linux/netdevice.h>
-#define STMMAC_FPE_MM_MAX_VERIFY_RETRIES 3
-#define STMMAC_FPE_MM_MAX_VERIFY_TIME_MS 128
-
struct stmmac_priv;
-void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up);
bool stmmac_fpe_supported(struct stmmac_priv *priv);
void stmmac_fpe_init(struct stmmac_priv *priv);
-void stmmac_fpe_apply(struct stmmac_priv *priv);
void stmmac_fpe_irq_status(struct stmmac_priv *priv);
int stmmac_fpe_get_add_frag_size(struct stmmac_priv *priv);
void stmmac_fpe_set_add_frag_size(struct stmmac_priv *priv, u32 add_frag_size);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
index 0f59aa982604..e2840fa241f2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
@@ -222,7 +222,7 @@ static void get_ptptime(void __iomem *ptpaddr, u64 *ptp_time)
u64 ns;
ns = readl(ptpaddr + PTP_ATNR);
- ns += readl(ptpaddr + PTP_ATSR) * NSEC_PER_SEC;
+ ns += (u64)readl(ptpaddr + PTP_ATSR) * NSEC_PER_SEC;
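/* Descriptive note on the cast above: readl() returns u32 and NSEC_PER_SEC is
 * a long constant, so on 32-bit builds the multiplication would otherwise be
 * performed in 32 bits and wrap once the seconds register exceeds 4
 * (5 * 10^9 does not fit in 32 bits). Promoting one operand to u64 keeps the
 * full 64-bit product.
 */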
*ptp_time = ns;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 9b262cdad60b..b948df1bff9a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -77,7 +77,6 @@ module_param(phyaddr, int, 0444);
MODULE_PARM_DESC(phyaddr, "Physical device address");
#define STMMAC_TX_THRESH(x) ((x)->dma_conf.dma_tx_size / 4)
-#define STMMAC_RX_THRESH(x) ((x)->dma_conf.dma_rx_size / 4)
/* Limit to make sure XDP TX and slow path can coexist */
#define STMMAC_XSK_TX_BUDGET_MAX 256
@@ -89,33 +88,32 @@ MODULE_PARM_DESC(phyaddr, "Physical device address");
#define STMMAC_XDP_TX BIT(1)
#define STMMAC_XDP_REDIRECT BIT(2)
-static int flow_ctrl = FLOW_AUTO;
+static int flow_ctrl = 0xdead;
module_param(flow_ctrl, int, 0644);
-MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
+MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off] (obsolete)");
static int pause = PAUSE_TIME;
module_param(pause, int, 0644);
-MODULE_PARM_DESC(pause, "Flow Control Pause Time");
+MODULE_PARM_DESC(pause, "Flow Control Pause Time (units of 512 bit times)");
#define TC_DEFAULT 64
static int tc = TC_DEFAULT;
module_param(tc, int, 0644);
MODULE_PARM_DESC(tc, "DMA threshold control value");
+/* This is unused; the buf_sz module parameter is kept only for compatibility */
#define DEFAULT_BUFSIZE 1536
static int buf_sz = DEFAULT_BUFSIZE;
module_param(buf_sz, int, 0644);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");
-#define STMMAC_RX_COPYBREAK 256
-
static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
NETIF_MSG_LINK | NETIF_MSG_IFUP |
NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
#define STMMAC_DEFAULT_LPI_TIMER 1000
-static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
-module_param(eee_timer, int, 0644);
+static unsigned int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
+module_param(eee_timer, uint, 0644);
MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
#define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
@@ -181,6 +179,38 @@ int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
/**
+ * stmmac_set_clk_tx_rate() - set the clock rate for the MAC transmit clock
+ * @bsp_priv: BSP private data structure (unused)
+ * @clk_tx_i: the transmit clock
+ * @interface: the selected interface mode
+ * @speed: the speed that the MAC will be operating at
+ *
+ * Set the transmit clock rate for the MAC, normally 2.5MHz for 10Mbps,
+ * 25MHz for 100Mbps and 125MHz for 1Gbps. This is suitable for at least
+ * MII, GMII, RGMII and RMII interface modes. Platforms can hook this into
+ * the plat_data->set_clk_tx_rate method directly, call it via their own
+ * implementation, or implement their own method should they have more
+ * complex requirements. It is intended to be used only via this method.
+ *
+ * plat_data->clk_tx_i must be filled in.
+ */
+int stmmac_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
+ phy_interface_t interface, int speed)
+{
+ long rate = rgmii_clock(speed);
+
+ /* Silently ignore unsupported speeds as rgmii_clock() only
+ * supports 10, 100 and 1000Mbps. We do not want to spit
+ * errors for 2500 and higher speeds here.
+ */
+ if (rate < 0)
+ return 0;
+
+ return clk_set_rate(clk_tx_i, rate);
+}
+EXPORT_SYMBOL_GPL(stmmac_set_clk_tx_rate);
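/* A minimal platform-glue sketch of how the helper above can be hooked up at
 * probe time, as the kernel-doc suggests. The foo_dwmac name and the "tx"
 * clock lookup are illustrative assumptions, not taken from this patch.
 */
static int foo_dwmac_setup_clk_tx(struct platform_device *pdev,
				  struct plat_stmmacenet_data *plat_dat)
{
	plat_dat->clk_tx_i = devm_clk_get(&pdev->dev, "tx");
	if (IS_ERR(plat_dat->clk_tx_i))
		return PTR_ERR(plat_dat->clk_tx_i);

	plat_dat->set_clk_tx_rate = stmmac_set_clk_tx_rate;
	return 0;
}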
+
+/**
* stmmac_verify_args - verify the driver parameters.
* Description: it checks the driver parameters and set a default in case of
* errors.
@@ -189,16 +219,11 @@ static void stmmac_verify_args(void)
{
if (unlikely(watchdog < 0))
watchdog = TX_TIMEO;
- if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
- buf_sz = DEFAULT_BUFSIZE;
- if (unlikely(flow_ctrl > 1))
- flow_ctrl = FLOW_AUTO;
- else if (likely(flow_ctrl < 0))
- flow_ctrl = FLOW_OFF;
if (unlikely((pause < 0) || (pause > 0xffff)))
pause = PAUSE_TIME;
- if (eee_timer < 0)
- eee_timer = STMMAC_DEFAULT_LPI_TIMER;
+
+ if (flow_ctrl != 0xdead)
+ pr_warn("stmmac: module parameter 'flow_ctrl' is obsolete - please remove from your module configuration\n");
}
static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
@@ -301,7 +326,7 @@ static void stmmac_global_err(struct stmmac_priv *priv)
*/
static void stmmac_clk_csr_set(struct stmmac_priv *priv)
{
- u32 clk_rate;
+ unsigned long clk_rate;
clk_rate = clk_get_rate(priv->plat->stmmac_clk);
@@ -325,6 +350,10 @@ static void stmmac_clk_csr_set(struct stmmac_priv *priv)
priv->clk_csr = STMMAC_CSR_150_250M;
else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
priv->clk_csr = STMMAC_CSR_250_300M;
+ else if ((clk_rate >= CSR_F_300M) && (clk_rate < CSR_F_500M))
+ priv->clk_csr = STMMAC_CSR_300_500M;
+ else if ((clk_rate >= CSR_F_500M) && (clk_rate < CSR_F_800M))
+ priv->clk_csr = STMMAC_CSR_500_800M;
}
if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
@@ -391,23 +420,7 @@ static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
return dirty;
}
-static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
-{
- int tx_lpi_timer;
-
- /* Clear/set the SW EEE timer flag based on LPI ET enablement */
- priv->eee_sw_timer_en = en ? 0 : 1;
- tx_lpi_timer = en ? priv->tx_lpi_timer : 0;
- stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
-}
-
-/**
- * stmmac_enable_eee_mode - check and enter in LPI mode
- * @priv: driver private structure
- * Description: this function is to verify and enter in LPI mode in case of
- * EEE.
- */
-static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
+static bool stmmac_eee_tx_busy(struct stmmac_priv *priv)
{
u32 tx_cnt = priv->plat->tx_queues_to_use;
u32 queue;
@@ -417,31 +430,45 @@ static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
if (tx_q->dirty_tx != tx_q->cur_tx)
- return -EBUSY; /* still unfinished work */
+ return true; /* still unfinished work */
}
- /* Check and enter in LPI mode */
- if (!priv->tx_path_in_lpi_mode)
- stmmac_set_eee_mode(priv, priv->hw,
- priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING);
- return 0;
+ return false;
+}
+
+static void stmmac_restart_sw_lpi_timer(struct stmmac_priv *priv)
+{
+ mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
}
/**
- * stmmac_disable_eee_mode - disable and exit from LPI mode
+ * stmmac_try_to_start_sw_lpi - check and enter in LPI mode
* @priv: driver private structure
- * Description: this function is to exit and disable EEE in case of
- * LPI state is true. This is called by the xmit.
+ * Description: this function is to verify and enter in LPI mode in case of
+ * EEE.
*/
-void stmmac_disable_eee_mode(struct stmmac_priv *priv)
+static void stmmac_try_to_start_sw_lpi(struct stmmac_priv *priv)
{
- if (!priv->eee_sw_timer_en) {
- stmmac_lpi_entry_timer_config(priv, 0);
+ if (stmmac_eee_tx_busy(priv)) {
+ stmmac_restart_sw_lpi_timer(priv);
return;
}
- stmmac_reset_eee_mode(priv, priv->hw);
- del_timer_sync(&priv->eee_ctrl_timer);
+ /* Check and enter in LPI mode */
+ if (!priv->tx_path_in_lpi_mode)
+ stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_FORCED,
+ priv->tx_lpi_clk_stop, 0);
+}
+
+/**
+ * stmmac_stop_sw_lpi - stop transmitting LPI
+ * @priv: driver private structure
+ * Description: When using software-controlled LPI, stop transmitting LPI state.
+ */
+static void stmmac_stop_sw_lpi(struct stmmac_priv *priv)
+{
+ timer_delete_sync(&priv->eee_ctrl_timer);
+ stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_DISABLE, false, 0);
priv->tx_path_in_lpi_mode = false;
}
@@ -454,69 +481,9 @@ void stmmac_disable_eee_mode(struct stmmac_priv *priv)
*/
static void stmmac_eee_ctrl_timer(struct timer_list *t)
{
- struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
-
- if (stmmac_enable_eee_mode(priv))
- mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
-}
-
-/**
- * stmmac_eee_init - init EEE
- * @priv: driver private structure
- * Description:
- * if the GMAC supports the EEE (from the HW cap reg) and the phy device
- * can also manage EEE, this function enable the LPI state and start related
- * timer.
- */
-bool stmmac_eee_init(struct stmmac_priv *priv)
-{
- int eee_tw_timer = priv->eee_tw_timer;
-
- /* Check if MAC core supports the EEE feature. */
- if (!priv->dma_cap.eee)
- return false;
-
- mutex_lock(&priv->lock);
-
- /* Check if it needs to be deactivated */
- if (!priv->eee_active) {
- if (priv->eee_enabled) {
- netdev_dbg(priv->dev, "disable EEE\n");
- stmmac_lpi_entry_timer_config(priv, 0);
- del_timer_sync(&priv->eee_ctrl_timer);
- stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
- if (priv->hw->xpcs)
- xpcs_config_eee(priv->hw->xpcs,
- priv->plat->mult_fact_100ns,
- false);
- }
- mutex_unlock(&priv->lock);
- return false;
- }
-
- if (priv->eee_active && !priv->eee_enabled) {
- timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
- stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
- eee_tw_timer);
- if (priv->hw->xpcs)
- xpcs_config_eee(priv->hw->xpcs,
- priv->plat->mult_fact_100ns,
- true);
- }
+ struct stmmac_priv *priv = timer_container_of(priv, t, eee_ctrl_timer);
- if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
- del_timer_sync(&priv->eee_ctrl_timer);
- priv->tx_path_in_lpi_mode = false;
- stmmac_lpi_entry_timer_config(priv, 1);
- } else {
- stmmac_lpi_entry_timer_config(priv, 0);
- mod_timer(&priv->eee_ctrl_timer,
- STMMAC_LPI_T(priv->tx_lpi_timer));
- }
-
- mutex_unlock(&priv->lock);
- netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
- return true;
+ stmmac_try_to_start_sw_lpi(priv);
}
/* stmmac_get_tx_hwtstamp - get HW TX timestamps
@@ -601,18 +568,19 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
/**
* stmmac_hwtstamp_set - control hardware timestamping.
* @dev: device pointer.
- * @ifr: An IOCTL specific structure, that can contain a pointer to
- * a proprietary structure used to pass information to the driver.
+ * @config: the timestamping configuration.
+ * @extack: netlink extended ack structure for error reporting.
* Description:
 * This function configures the MAC to enable/disable both outgoing (TX)
 * and incoming (RX) packet timestamping based on user input.
* Return Value:
* 0 on success and an appropriate -ve integer on failure.
*/
-static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
+static int stmmac_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct stmmac_priv *priv = netdev_priv(dev);
- struct hwtstamp_config config;
u32 ptp_v2 = 0;
u32 tstamp_all = 0;
u32 ptp_over_ipv4_udp = 0;
@@ -623,34 +591,36 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
u32 ts_event_en = 0;
if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
- netdev_alert(priv->dev, "No support for HW time stamping\n");
+ NL_SET_ERR_MSG_MOD(extack, "No support for HW time stamping");
priv->hwts_tx_en = 0;
priv->hwts_rx_en = 0;
return -EOPNOTSUPP;
}
- if (copy_from_user(&config, ifr->ifr_data,
- sizeof(config)))
- return -EFAULT;
+ if (!netif_running(dev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot change timestamping configuration while down");
+ return -ENODEV;
+ }
netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
- __func__, config.flags, config.tx_type, config.rx_filter);
+ __func__, config->flags, config->tx_type, config->rx_filter);
- if (config.tx_type != HWTSTAMP_TX_OFF &&
- config.tx_type != HWTSTAMP_TX_ON)
+ if (config->tx_type != HWTSTAMP_TX_OFF &&
+ config->tx_type != HWTSTAMP_TX_ON)
return -ERANGE;
if (priv->adv_ts) {
- switch (config.rx_filter) {
+ switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
/* time stamp no incoming packet at all */
- config.rx_filter = HWTSTAMP_FILTER_NONE;
+ config->rx_filter = HWTSTAMP_FILTER_NONE;
break;
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
/* PTP v1, UDP, any kind of event packet */
- config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
/* 'xmac' hardware can support Sync, Pdelay_Req and
* Pdelay_resp by setting bit14 and bits17/16 to 01
* This leaves Delay_Req timestamps out.
@@ -664,7 +634,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
/* PTP v1, UDP, Sync packet */
- config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
/* take time stamp for SYNC messages only */
ts_event_en = PTP_TCR_TSEVNTENA;
@@ -674,7 +644,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
/* PTP v1, UDP, Delay_req packet */
- config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
/* take time stamp for Delay_Req messages only */
ts_master_en = PTP_TCR_TSMSTRENA;
ts_event_en = PTP_TCR_TSEVNTENA;
@@ -685,7 +655,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
/* PTP v2, UDP, any kind of event packet */
- config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
ptp_v2 = PTP_TCR_TSVER2ENA;
/* take time stamp for all event messages */
snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
@@ -696,7 +666,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
/* PTP v2, UDP, Sync packet */
- config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
ptp_v2 = PTP_TCR_TSVER2ENA;
/* take time stamp for SYNC messages only */
ts_event_en = PTP_TCR_TSEVNTENA;
@@ -707,7 +677,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
/* PTP v2, UDP, Delay_req packet */
- config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
ptp_v2 = PTP_TCR_TSVER2ENA;
/* take time stamp for Delay_Req messages only */
ts_master_en = PTP_TCR_TSMSTRENA;
@@ -719,7 +689,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_EVENT:
/* PTP v2/802.AS1 any layer, any kind of event packet */
- config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
ptp_v2 = PTP_TCR_TSVER2ENA;
snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
if (priv->synopsys_id < DWMAC_CORE_4_10)
@@ -731,7 +701,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_SYNC:
/* PTP v2/802.AS1, any layer, Sync packet */
- config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
ptp_v2 = PTP_TCR_TSVER2ENA;
/* take time stamp for SYNC messages only */
ts_event_en = PTP_TCR_TSEVNTENA;
@@ -743,7 +713,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
/* PTP v2/802.AS1, any layer, Delay_req packet */
- config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
ptp_v2 = PTP_TCR_TSVER2ENA;
/* take time stamp for Delay_Req messages only */
ts_master_en = PTP_TCR_TSMSTRENA;
@@ -757,7 +727,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_NTP_ALL:
case HWTSTAMP_FILTER_ALL:
/* time stamp any incoming packet */
- config.rx_filter = HWTSTAMP_FILTER_ALL;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
tstamp_all = PTP_TCR_TSENALL;
break;
@@ -765,18 +735,18 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
return -ERANGE;
}
} else {
- switch (config.rx_filter) {
+ switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
- config.rx_filter = HWTSTAMP_FILTER_NONE;
+ config->rx_filter = HWTSTAMP_FILTER_NONE;
break;
default:
/* PTP v1, UDP, any kind of event packet */
- config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
break;
}
}
- priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
- priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
+ priv->hwts_rx_en = config->rx_filter != HWTSTAMP_FILTER_NONE;
+ priv->hwts_tx_en = config->tx_type == HWTSTAMP_TX_ON;
priv->systime_flags = STMMAC_HWTS_ACTIVE;
@@ -789,31 +759,30 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
- memcpy(&priv->tstamp_config, &config, sizeof(config));
+ priv->tstamp_config = *config;
- return copy_to_user(ifr->ifr_data, &config,
- sizeof(config)) ? -EFAULT : 0;
+ return 0;
}
/**
* stmmac_hwtstamp_get - read hardware timestamping.
* @dev: device pointer.
- * @ifr: An IOCTL specific structure, that can contain a pointer to
- * a proprietary structure used to pass information to the driver.
+ * @config: the timestamping configuration.
* Description:
 * This function obtains the current hardware timestamping settings
* as requested.
*/
-static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+static int stmmac_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *config)
{
struct stmmac_priv *priv = netdev_priv(dev);
- struct hwtstamp_config *config = &priv->tstamp_config;
if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
return -EOPNOTSUPP;
- return copy_to_user(ifr->ifr_data, config,
- sizeof(*config)) ? -EFAULT : 0;
+ *config = priv->tstamp_config;
+
+ return 0;
}
/**
@@ -836,6 +805,11 @@ int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
return -EOPNOTSUPP;
+ if (!priv->plat->clk_ptp_rate) {
+ netdev_err(priv->dev, "Invalid PTP clock rate");
+ return -EINVAL;
+ }
+
stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
priv->systime_flags = systime_flags;
@@ -920,14 +894,16 @@ static void stmmac_release_ptp(struct stmmac_priv *priv)
* stmmac_mac_flow_ctrl - Configure flow control in all queues
* @priv: driver private structure
* @duplex: duplex passed to the next function
+ * @flow_ctrl: desired flow control modes
* Description: It is used for configuring the flow control in all queues
*/
-static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
+static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex,
+ unsigned int flow_ctrl)
{
u32 tx_cnt = priv->plat->tx_queues_to_use;
- stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
- priv->pause, tx_cnt);
+ stmmac_flow_ctrl(priv, priv->hw, duplex, flow_ctrl, priv->pause_time,
+ tx_cnt);
}
static unsigned long stmmac_mac_get_caps(struct phylink_config *config,
@@ -973,13 +949,11 @@ static void stmmac_mac_link_down(struct phylink_config *config,
struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
stmmac_mac_set(priv, priv->ioaddr, false);
- priv->eee_active = false;
- priv->tx_lpi_enabled = false;
- priv->eee_enabled = stmmac_eee_init(priv);
- stmmac_set_eee_pls(priv, priv->hw, false);
+ if (priv->dma_cap.eee)
+ stmmac_set_eee_pls(priv, priv->hw, false);
if (stmmac_fpe_supported(priv))
- stmmac_fpe_link_state_handle(priv, false);
+ ethtool_mmsv_link_state_handle(&priv->fpe_cfg.mmsv, false);
}
static void stmmac_mac_link_up(struct phylink_config *config,
@@ -989,7 +963,9 @@ static void stmmac_mac_link_up(struct phylink_config *config,
bool tx_pause, bool rx_pause)
{
struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+ unsigned int flow_ctrl;
u32 old_ctrl, ctrl;
+ int ret;
if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
priv->plat->serdes_powerup)
@@ -1057,8 +1033,6 @@ static void stmmac_mac_link_up(struct phylink_config *config,
}
}
- priv->speed = speed;
-
if (priv->plat->fix_mac_speed)
priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
@@ -1069,42 +1043,121 @@ static void stmmac_mac_link_up(struct phylink_config *config,
/* Flow Control operation */
if (rx_pause && tx_pause)
- priv->flow_ctrl = FLOW_AUTO;
+ flow_ctrl = FLOW_AUTO;
else if (rx_pause && !tx_pause)
- priv->flow_ctrl = FLOW_RX;
+ flow_ctrl = FLOW_RX;
else if (!rx_pause && tx_pause)
- priv->flow_ctrl = FLOW_TX;
+ flow_ctrl = FLOW_TX;
else
- priv->flow_ctrl = FLOW_OFF;
+ flow_ctrl = FLOW_OFF;
- stmmac_mac_flow_ctrl(priv, duplex);
+ stmmac_mac_flow_ctrl(priv, duplex, flow_ctrl);
if (ctrl != old_ctrl)
writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
+ if (priv->plat->set_clk_tx_rate) {
+ ret = priv->plat->set_clk_tx_rate(priv->plat->bsp_priv,
+ priv->plat->clk_tx_i,
+ interface, speed);
+ if (ret < 0)
+ netdev_err(priv->dev,
+ "failed to configure transmit clock for %dMbps: %pe\n",
+ speed, ERR_PTR(ret));
+ }
+
stmmac_mac_set(priv, priv->ioaddr, true);
- if (phy && priv->dma_cap.eee) {
- priv->eee_active =
- phy_init_eee(phy, !(priv->plat->flags &
- STMMAC_FLAG_RX_CLK_RUNS_IN_LPI)) >= 0;
- priv->eee_enabled = stmmac_eee_init(priv);
- priv->tx_lpi_enabled = priv->eee_enabled;
+ if (priv->dma_cap.eee)
stmmac_set_eee_pls(priv, priv->hw, true);
- }
if (stmmac_fpe_supported(priv))
- stmmac_fpe_link_state_handle(priv, true);
+ ethtool_mmsv_link_state_handle(&priv->fpe_cfg.mmsv, true);
if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
stmmac_hwtstamp_correct_latency(priv, priv);
}
+static void stmmac_mac_disable_tx_lpi(struct phylink_config *config)
+{
+ struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+
+ priv->eee_active = false;
+
+ mutex_lock(&priv->lock);
+
+ priv->eee_enabled = false;
+
+ netdev_dbg(priv->dev, "disable EEE\n");
+ priv->eee_sw_timer_en = false;
+ timer_delete_sync(&priv->eee_ctrl_timer);
+ stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_DISABLE, false, 0);
+ priv->tx_path_in_lpi_mode = false;
+
+ stmmac_set_eee_timer(priv, priv->hw, 0, STMMAC_DEFAULT_TWT_LS);
+ mutex_unlock(&priv->lock);
+}
+
+static int stmmac_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
+ bool tx_clk_stop)
+{
+ struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+ int ret;
+
+ priv->tx_lpi_timer = timer;
+ priv->eee_active = true;
+
+ mutex_lock(&priv->lock);
+
+ priv->eee_enabled = true;
+
+ /* Update the transmit clock stop according to PHY capability if
+ * the platform allows
+ */
+ if (priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP)
+ priv->tx_lpi_clk_stop = tx_clk_stop;
+
+ stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
+ STMMAC_DEFAULT_TWT_LS);
+
+	/* Try to configure the hardware timer. */
+ ret = stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_TIMER,
+ priv->tx_lpi_clk_stop, priv->tx_lpi_timer);
+
+ if (ret) {
+ /* Hardware timer mode not supported, or value out of range.
+ * Fall back to using software LPI mode
+ */
+ priv->eee_sw_timer_en = true;
+ stmmac_restart_sw_lpi_timer(priv);
+ }
+
+ mutex_unlock(&priv->lock);
+ netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
+
+ return 0;
+}
+
+static int stmmac_mac_finish(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+
+ if (priv->plat->mac_finish)
+ priv->plat->mac_finish(ndev, priv->plat->bsp_priv, mode, interface);
+
+ return 0;
+}
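/* A sketch of a glue-level mac_finish() implementation matching the call
 * above. The signature (including the int return, which the core currently
 * ignores) is inferred from how priv->plat->mac_finish() is invoked here,
 * and the foo_dwmac name is an illustrative assumption. A glue driver would
 * assign plat_dat->mac_finish = foo_dwmac_mac_finish at probe time.
 */
static int foo_dwmac_mac_finish(struct net_device *ndev, void *bsp_priv,
				unsigned int mode, phy_interface_t interface)
{
	/* e.g. switch an SoC-level interface mux now that the MAC has been
	 * reconfigured for the new interface mode.
	 */
	return 0;
}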
+
static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
.mac_get_caps = stmmac_mac_get_caps,
.mac_select_pcs = stmmac_mac_select_pcs,
.mac_config = stmmac_mac_config,
.mac_link_down = stmmac_mac_link_down,
.mac_link_up = stmmac_mac_link_up,
+ .mac_disable_tx_lpi = stmmac_mac_disable_tx_lpi,
+ .mac_enable_tx_lpi = stmmac_mac_enable_tx_lpi,
+ .mac_finish = stmmac_mac_finish,
};
/**
@@ -1177,15 +1230,27 @@ static int stmmac_init_phy(struct net_device *dev)
return -ENODEV;
}
- if (priv->dma_cap.eee)
- phy_support_eee(phydev);
-
ret = phylink_connect_phy(priv->phylink, phydev);
} else {
fwnode_handle_put(phy_fwnode);
ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
}
+ if (ret == 0) {
+ struct ethtool_keee eee;
+
+ /* Configure phylib's copy of the LPI timer. Normally,
+ * phylink_config.lpi_timer_default would do this, but there is
+ * a chance that userspace could change the eee_timer setting
+ * via sysfs before the first open. Thus, preserve existing
+ * behaviour.
+ */
+ if (!phylink_ethtool_get_eee(priv->phylink, &eee)) {
+ eee.tx_lpi_timer = priv->tx_lpi_timer;
+ phylink_ethtool_set_eee(priv->phylink, &eee);
+ }
+ }
+
if (!priv->plat->pmt) {
struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
@@ -1200,38 +1265,74 @@ static int stmmac_init_phy(struct net_device *dev)
static int stmmac_phy_setup(struct stmmac_priv *priv)
{
struct stmmac_mdio_bus_data *mdio_bus_data;
- int mode = priv->plat->phy_interface;
+ struct phylink_config *config;
struct fwnode_handle *fwnode;
+ struct phylink_pcs *pcs;
struct phylink *phylink;
- priv->phylink_config.dev = &priv->dev->dev;
- priv->phylink_config.type = PHYLINK_NETDEV;
- priv->phylink_config.mac_managed_pm = true;
+ config = &priv->phylink_config;
+
+ config->dev = &priv->dev->dev;
+ config->type = PHYLINK_NETDEV;
+ config->mac_managed_pm = true;
/* Stmmac always requires an RX clock for hardware initialization */
- priv->phylink_config.mac_requires_rxc = true;
+ config->mac_requires_rxc = true;
+
+ if (!(priv->plat->flags & STMMAC_FLAG_RX_CLK_RUNS_IN_LPI))
+ config->eee_rx_clk_stop_enable = true;
+
+ /* Set the default transmit clock stop bit based on the platform glue */
+ priv->tx_lpi_clk_stop = priv->plat->flags &
+ STMMAC_FLAG_EN_TX_LPI_CLOCKGATING;
mdio_bus_data = priv->plat->mdio_bus_data;
if (mdio_bus_data)
- priv->phylink_config.default_an_inband =
- mdio_bus_data->default_an_inband;
+ config->default_an_inband = mdio_bus_data->default_an_inband;
- /* Set the platform/firmware specified interface mode. Note, phylink
- * deals with the PHY interface mode, not the MAC interface mode.
+ /* Get the PHY interface modes (at the PHY end of the link) that
+ * are supported by the platform.
*/
- __set_bit(mode, priv->phylink_config.supported_interfaces);
+ if (priv->plat->get_interfaces)
+ priv->plat->get_interfaces(priv, priv->plat->bsp_priv,
+ config->supported_interfaces);
+
+	/* If the supported interfaces have not already been provided, fall
+	 * back to the platform/firmware specified phy_interface mode as a
+	 * last resort.
+	 */
+ if (phy_interface_empty(config->supported_interfaces))
+ __set_bit(priv->plat->phy_interface,
+ config->supported_interfaces);
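/* A sketch of a platform get_interfaces() hook; foo_dwmac and the chosen
 * modes are illustrative assumptions, and the signature follows the call
 * above. It reports the PHY-side interface modes the board actually
 * supports so the core does not have to rely on the single phy_interface
 * fallback.
 */
static void foo_dwmac_get_interfaces(struct stmmac_priv *priv, void *bsp_priv,
				     unsigned long *interfaces)
{
	__set_bit(PHY_INTERFACE_MODE_RGMII_ID, interfaces);
	__set_bit(PHY_INTERFACE_MODE_RMII, interfaces);
}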
/* If we have an xpcs, it defines which PHY interfaces are supported. */
if (priv->hw->xpcs)
- xpcs_get_interfaces(priv->hw->xpcs,
- priv->phylink_config.supported_interfaces);
+ pcs = xpcs_to_phylink_pcs(priv->hw->xpcs);
+ else
+ pcs = priv->hw->phylink_pcs;
+
+ if (pcs)
+ phy_interface_or(config->supported_interfaces,
+ config->supported_interfaces,
+ pcs->supported_interfaces);
+
+ if (priv->dma_cap.eee) {
+ /* Assume all supported interfaces also support LPI */
+ memcpy(config->lpi_interfaces, config->supported_interfaces,
+ sizeof(config->lpi_interfaces));
+
+		/* All full duplex speeds of 100Mbps and above are supported */
+ config->lpi_capabilities = ~(MAC_1000FD - 1) | MAC_100FD;
+ config->lpi_timer_default = eee_timer * 1000;
+ config->eee_enabled_default = true;
+ }
fwnode = priv->plat->port_node;
if (!fwnode)
fwnode = dev_fwnode(priv->device);
- phylink = phylink_create(&priv->phylink_config, fwnode,
- mode, &stmmac_phylink_mac_ops);
+ phylink = phylink_create(config, fwnode, priv->plat->phy_interface,
+ &stmmac_phylink_mac_ops);
if (IS_ERR(phylink))
return PTR_ERR(phylink);
@@ -1307,6 +1408,14 @@ static void stmmac_display_rings(struct stmmac_priv *priv,
stmmac_display_tx_rings(priv, dma_conf);
}
+static unsigned int stmmac_rx_offset(struct stmmac_priv *priv)
+{
+ if (stmmac_xdp_is_enabled(priv))
+ return XDP_PACKET_HEADROOM;
+
+ return NET_SKB_PAD;
+}
+
static int stmmac_set_bfsize(int mtu, int bufsize)
{
int ret = bufsize;
@@ -2003,22 +2112,31 @@ static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
struct stmmac_channel *ch = &priv->channel[queue];
bool xdp_prog = stmmac_xdp_is_enabled(priv);
struct page_pool_params pp_params = { 0 };
- unsigned int num_pages;
+ unsigned int dma_buf_sz_pad, num_pages;
unsigned int napi_id;
int ret;
+ dma_buf_sz_pad = stmmac_rx_offset(priv) + dma_conf->dma_buf_sz +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ num_pages = DIV_ROUND_UP(dma_buf_sz_pad, PAGE_SIZE);
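/* Worked sizing example (illustrative, assuming 4 KiB pages): with the
 * default 1536 byte dma_buf_sz, NET_SKB_PAD (or XDP_PACKET_HEADROOM) in
 * front and the skb_shared_info trailer behind, dma_buf_sz_pad stays below
 * PAGE_SIZE, so num_pages is 1, the pool hands out order-0 pages and
 * napi_skb_frag_size below equals PAGE_SIZE, which is the layout
 * napi_build_skb() in the RX path expects.
 */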
+
rx_q->queue_index = queue;
rx_q->priv_data = priv;
+ rx_q->napi_skb_frag_size = num_pages * PAGE_SIZE;
pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
pp_params.pool_size = dma_conf->dma_rx_size;
- num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
- pp_params.order = ilog2(num_pages);
+ pp_params.order = order_base_2(num_pages);
pp_params.nid = dev_to_node(priv->device);
pp_params.dev = priv->device;
pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
pp_params.offset = stmmac_rx_offset(priv);
- pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
+ pp_params.max_len = dma_conf->dma_buf_sz;
+
+ if (priv->sph) {
+ pp_params.offset = 0;
+ pp_params.max_len += stmmac_rx_offset(priv);
+ }
rx_q->page_pool = page_pool_create(&pp_params);
if (IS_ERR(rx_q->page_pool)) {
@@ -2445,9 +2563,20 @@ static u64 stmmac_xsk_fill_timestamp(void *_priv)
return 0;
}
+static void stmmac_xsk_request_launch_time(u64 launch_time, void *_priv)
+{
+ struct timespec64 ts = ns_to_timespec64(launch_time);
+ struct stmmac_metadata_request *meta_req = _priv;
+
+ if (meta_req->tbs & STMMAC_TBS_EN)
+ stmmac_set_desc_tbs(meta_req->priv, meta_req->edesc, ts.tv_sec,
+ ts.tv_nsec);
+}
+
static const struct xsk_tx_metadata_ops stmmac_xsk_tx_metadata_ops = {
.tmo_request_timestamp = stmmac_xsk_request_timestamp,
.tmo_fill_timestamp = stmmac_xsk_fill_timestamp,
+ .tmo_request_launch_time = stmmac_xsk_request_launch_time,
};
static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
@@ -2531,6 +2660,8 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
meta_req.priv = priv;
meta_req.tx_desc = tx_desc;
meta_req.set_ic = &set_ic;
+ meta_req.tbs = tx_q->tbs;
+ meta_req.edesc = &tx_q->dma_entx[entry];
xsk_tx_metadata_request(meta, &stmmac_xsk_tx_metadata_ops,
&meta_req);
if (set_ic) {
@@ -2757,11 +2888,8 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
xmits = budget;
}
- if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
- priv->eee_sw_timer_en) {
- if (stmmac_enable_eee_mode(priv))
- mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
- }
+ if (priv->eee_sw_timer_en && !priv->tx_path_in_lpi_mode)
+ stmmac_restart_sw_lpi_timer(priv);
/* We still have pending packets, let's call for a new scheduling */
if (tx_q->dirty_tx != tx_q->cur_tx)
@@ -2996,7 +3124,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
int ret = 0;
if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
- dev_err(priv->device, "Invalid DMA configuration\n");
+ netdev_err(priv->dev, "Invalid DMA configuration\n");
return -EINVAL;
}
@@ -3005,7 +3133,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
ret = stmmac_reset(priv, priv->ioaddr);
if (ret) {
- dev_err(priv->device, "Failed to reset the dma\n");
+ netdev_err(priv->dev, "Failed to reset the dma\n");
return ret;
}
@@ -3123,8 +3251,7 @@ static void stmmac_init_coalesce(struct stmmac_priv *priv)
priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
- hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- tx_q->txtimer.function = stmmac_tx_timer;
+ hrtimer_setup(&tx_q->txtimer, stmmac_tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
}
for (chan = 0; chan < rx_channel_count; chan++)
@@ -3372,9 +3499,18 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
if (priv->hw->phylink_pcs)
phylink_pcs_pre_init(priv->phylink, priv->hw->phylink_pcs);
+ /* Note that clk_rx_i must be running for reset to complete. This
+ * clock may also be required when setting the MAC address.
+ *
+ * Block the receive clock stop for LPI mode at the PHY in case
+ * the link is established with EEE mode active.
+ */
+ phylink_rx_clk_stop_block(priv->phylink);
+
/* DMA initialization and SW reset */
ret = stmmac_init_dma_engine(priv);
if (ret < 0) {
+ phylink_rx_clk_stop_unblock(priv->phylink);
netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
__func__);
return ret;
@@ -3382,6 +3518,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
/* Copy the MAC addr into the HW */
stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
+ phylink_rx_clk_stop_unblock(priv->phylink);
/* PS and related bits will be programmed according to the speed */
if (priv->hw->pcs) {
@@ -3436,12 +3573,6 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
else if (ptp_register)
stmmac_ptp_register(priv);
- priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
-
- /* Convert the timer from msec to usec */
- if (!priv->tx_lpi_timer)
- priv->tx_lpi_timer = eee_timer * 1000;
-
if (priv->use_riwt) {
u32 queue;
@@ -3498,7 +3629,9 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
/* Start the ball rolling... */
stmmac_start_all_dma(priv);
+ phylink_rx_clk_stop_block(priv->phylink);
stmmac_set_hw_vlan_mode(priv, priv->hw);
+ phylink_rx_clk_stop_unblock(priv->phylink);
return 0;
}
@@ -3570,7 +3703,6 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
enum request_irq_err irq_err;
- cpumask_t cpu_mask;
int irq_idx = 0;
char *int_name;
int ret;
@@ -3699,9 +3831,8 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev)
irq_idx = i;
goto irq_error;
}
- cpumask_clear(&cpu_mask);
- cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
- irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
+ irq_set_affinity_hint(priv->rx_irq[i],
+ cpumask_of(i % num_online_cpus()));
}
/* Request Tx MSI irq */
@@ -3724,9 +3855,8 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev)
irq_idx = i;
goto irq_error;
}
- cpumask_clear(&cpu_mask);
- cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
- irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
+ irq_set_affinity_hint(priv->tx_irq[i],
+ cpumask_of(i % num_online_cpus()));
}
return 0;
@@ -3908,6 +4038,10 @@ static int __stmmac_open(struct net_device *dev,
u32 chan;
int ret;
+ /* Initialise the tx lpi timer, converting from msec to usec */
+ if (!priv->tx_lpi_timer)
+ priv->tx_lpi_timer = eee_timer * 1000;
+
ret = pm_runtime_resume_and_get(priv->device);
if (ret < 0)
return ret;
@@ -3923,9 +4057,6 @@ static int __stmmac_open(struct net_device *dev,
}
}
- priv->rx_copybreak = STMMAC_RX_COPYBREAK;
-
- buf_sz = dma_conf->dma_buf_sz;
for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
@@ -4024,20 +4155,12 @@ static int stmmac_release(struct net_device *dev)
/* Free the IRQ lines */
stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
- if (priv->eee_enabled) {
- priv->tx_path_in_lpi_mode = false;
- del_timer_sync(&priv->eee_ctrl_timer);
- }
-
/* Stop TX/RX DMA and clear the descriptors */
stmmac_stop_all_dma(priv);
/* Release and free the Rx/Tx resources */
free_dma_desc_resources(priv, &priv->dma_conf);
- /* Disable the MAC Rx/Tx */
- stmmac_mac_set(priv, priv->ioaddr, false);
-
/* Powerdown Serdes if there is */
if (priv->plat->serdes_powerdown)
priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
@@ -4045,7 +4168,7 @@ static int stmmac_release(struct net_device *dev)
stmmac_release_ptp(priv);
if (stmmac_fpe_supported(priv))
- timer_shutdown_sync(&priv->fpe_cfg.verify_timer);
+ ethtool_mmsv_stop(&priv->fpe_cfg.mmsv);
pm_runtime_put(priv->device);
@@ -4117,11 +4240,7 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
desc = &tx_q->dma_tx[tx_q->cur_tx];
curr_addr = des + (total_len - tmp_len);
- if (priv->dma_cap.addr64 <= 32)
- desc->des0 = cpu_to_le32(curr_addr);
- else
- stmmac_set_desc_addr(priv, desc, curr_addr);
-
+ stmmac_set_desc_addr(priv, desc, curr_addr);
buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
TSO_MAX_BUFF_SIZE : tmp_len;
@@ -4167,17 +4286,27 @@ static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
* First Descriptor
* --------
* | DES0 |---> buffer1 = L2/L3/L4 header
- * | DES1 |---> TCP Payload (can continue on next descr...)
- * | DES2 |---> buffer 1 and 2 len
+ * | DES1 |---> can be used as buffer2 for TCP Payload if the DMA AXI address
+ * | | width is 32-bit, but we never use it.
+ * | | Also can be used as the most-significant 8-bits or 16-bits of
+ * | | buffer1 address pointer if the DMA AXI address width is 40-bit
+ * | | or 48-bit, and we always use it.
+ * | DES2 |---> buffer1 len
* | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
* --------
+ * --------
+ * | DES0 |---> buffer1 = TCP Payload (can continue on next descr...)
+ * | DES1 |---> same as the First Descriptor
+ * | DES2 |---> buffer1 len
+ * | DES3 |
+ * --------
* |
* ...
* |
* --------
- * | DES0 | --| Split TCP Payload on Buffers 1 and 2
- * | DES1 | --|
- * | DES2 | --> buffer 1 and 2 len
+ * | DES0 |---> buffer1 = Split TCP Payload
+ * | DES1 |---> same as the First Descriptor
+ * | DES2 |---> buffer1 len
* | DES3 |
* --------
*
@@ -4187,15 +4316,14 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct dma_desc *desc, *first, *mss_desc = NULL;
struct stmmac_priv *priv = netdev_priv(dev);
- int tmp_pay_len = 0, first_tx, nfrags;
unsigned int first_entry, tx_packets;
struct stmmac_txq_stats *txq_stats;
struct stmmac_tx_queue *tx_q;
u32 pay_len, mss, queue;
+ int i, first_tx, nfrags;
u8 proto_hdr_len, hdr;
dma_addr_t des;
bool set_ic;
- int i;
/* Always insert VLAN tag to SKB payload for TSO frames.
*
@@ -4280,23 +4408,9 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
if (dma_mapping_error(priv->device, des))
goto dma_map_err;
- if (priv->dma_cap.addr64 <= 32) {
- first->des0 = cpu_to_le32(des);
-
- /* Fill start of payload in buff2 of first descriptor */
- if (pay_len)
- first->des1 = cpu_to_le32(des + proto_hdr_len);
-
- /* If needed take extra descriptors to fill the remaining payload */
- tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
- } else {
- stmmac_set_desc_addr(priv, first, des);
- tmp_pay_len = pay_len;
- des += proto_hdr_len;
- pay_len = 0;
- }
-
- stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
+ stmmac_set_desc_addr(priv, first, des);
+ stmmac_tso_allocator(priv, des + proto_hdr_len, pay_len,
+ (nfrags == 0), queue);
/* In case two or more DMA transmit descriptors are allocated for this
* non-paged SKB data, the DMA buffer address should be saved to
@@ -4390,8 +4504,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
if (priv->sarc_type)
stmmac_set_desc_sarc(priv, first, priv->sarc_type);
- skb_tx_timestamp(skb);
-
if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
priv->hwts_tx_en)) {
/* declare that device is doing timestamping */
@@ -4400,11 +4512,9 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
}
/* Complete the first descriptor before granting the DMA */
- stmmac_prepare_tso_tx_desc(priv, first, 1,
- proto_hdr_len,
- pay_len,
- 1, tx_q->tx_skbuff_dma[first_entry].last_segment,
- hdr / 4, (skb->len - proto_hdr_len));
+ stmmac_prepare_tso_tx_desc(priv, first, 1, proto_hdr_len, 0, 1,
+ tx_q->tx_skbuff_dma[first_entry].last_segment,
+ hdr / 4, (skb->len - proto_hdr_len));
/* If context desc is used to change MSS */
if (mss_desc) {
@@ -4426,6 +4536,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
}
netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
+ skb_tx_timestamp(skb);
stmmac_flush_tx_descriptors(priv, queue);
stmmac_tx_timer_arm(priv, queue);
@@ -4491,7 +4602,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
first_tx = tx_q->cur_tx;
if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
- stmmac_disable_eee_mode(priv);
+ stmmac_stop_sw_lpi(priv);
/* Manage oversized TCP frames for GMAC4 device */
if (skb_is_gso(skb) && priv->tso) {
@@ -4669,8 +4780,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
if (priv->sarc_type)
stmmac_set_desc_sarc(priv, first, priv->sarc_type);
- skb_tx_timestamp(skb);
-
/* Ready to fill the first descriptor and set the OWN bit w/o any
* problems because all the descriptors are actually ready to be
* passed to the DMA engine.
@@ -4717,7 +4826,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
-
+ skb_tx_timestamp(skb);
stmmac_flush_tx_descriptors(priv, queue);
stmmac_tx_timer_arm(priv, queue);
@@ -5398,10 +5507,10 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
struct sk_buff *skb = NULL;
struct stmmac_xdp_buff ctx;
int xdp_status = 0;
- int buf_sz;
+ int bufsz;
dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
- buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
+ bufsz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
if (netif_msg_rx_status(priv)) {
@@ -5472,7 +5581,7 @@ read_again:
if (priv->extend_desc)
stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
if (unlikely(status == discard_frame)) {
- page_pool_recycle_direct(rx_q->page_pool, buf->page);
+ page_pool_put_page(rx_q->page_pool, buf->page, 0, true);
buf->page = NULL;
error = 1;
if (!priv->hwts_rx_en)
@@ -5490,10 +5599,6 @@ read_again:
/* Buffer is good. Go on. */
- prefetch(page_address(buf->page) + buf->page_offset);
- if (buf->sec_page)
- prefetch(page_address(buf->sec_page));
-
buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
len += buf1_len;
buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
@@ -5515,8 +5620,10 @@ read_again:
dma_sync_single_for_cpu(priv->device, buf->addr,
buf1_len, dma_dir);
+ net_prefetch(page_address(buf->page) +
+ buf->page_offset);
- xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq);
+ xdp_init_buff(&ctx.xdp, bufsz, &rx_q->xdp_rxq);
xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
buf->page_offset, buf1_len, true);
@@ -5568,22 +5675,26 @@ read_again:
}
if (!skb) {
+ unsigned int head_pad_len;
+
/* XDP program may expand or reduce tail */
buf1_len = ctx.xdp.data_end - ctx.xdp.data;
- skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
+ skb = napi_build_skb(page_address(buf->page),
+ rx_q->napi_skb_frag_size);
if (!skb) {
+ page_pool_recycle_direct(rx_q->page_pool,
+ buf->page);
rx_dropped++;
count++;
goto drain_data;
}
/* XDP program may adjust header */
- skb_copy_to_linear_data(skb, ctx.xdp.data, buf1_len);
+ head_pad_len = ctx.xdp.data - ctx.xdp.data_hard_start;
+ skb_reserve(skb, head_pad_len);
skb_put(skb, buf1_len);
-
- /* Data payload copied into SKB, page ready for recycle */
- page_pool_recycle_direct(rx_q->page_pool, buf->page);
+ skb_mark_for_recycle(skb);
buf->page = NULL;
} else if (buf1_len) {
dma_sync_single_for_cpu(priv->device, buf->addr,
@@ -5591,9 +5702,6 @@ read_again:
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
buf->page, buf->page_offset, buf1_len,
priv->dma_conf.dma_buf_sz);
-
- /* Data payload appended into SKB */
- skb_mark_for_recycle(skb);
buf->page = NULL;
}
@@ -5603,9 +5711,6 @@ read_again:
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
buf->sec_page, 0, buf2_len,
priv->dma_conf.dma_buf_sz);
-
- /* Data payload appended into SKB */
- skb_mark_for_recycle(skb);
buf->sec_page = NULL;
}
@@ -5804,6 +5909,9 @@ static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
* whenever multicast addresses must be enabled/disabled.
* Return value:
* void.
+ *
+ * FIXME: This may need RXC to be running, but it may be called with BH
+ * disabled, which means we can't call phylink_rx_clk_stop*().
*/
static void stmmac_set_rx_mode(struct net_device *dev)
{
@@ -5936,7 +6044,9 @@ static int stmmac_set_features(struct net_device *netdev,
else
priv->hw->hw_vlan_en = false;
+ phylink_rx_clk_stop_block(priv->phylink);
stmmac_set_hw_vlan_mode(priv, priv->hw);
+ phylink_rx_clk_stop_unblock(priv->phylink);
return 0;
}
@@ -6122,12 +6232,6 @@ static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
case SIOCSMIIREG:
ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
break;
- case SIOCSHWTSTAMP:
- ret = stmmac_hwtstamp_set(dev, rq);
- break;
- case SIOCGHWTSTAMP:
- ret = stmmac_hwtstamp_get(dev, rq);
- break;
default:
break;
}
@@ -6220,7 +6324,9 @@ static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
if (ret)
goto set_mac_error;
+ phylink_rx_clk_stop_block(priv->phylink);
stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
+ phylink_rx_clk_stop_unblock(priv->phylink);
set_mac_error:
pm_runtime_put(priv->device);
@@ -6488,11 +6594,7 @@ static int stmmac_device_event(struct notifier_block *unused,
switch (event) {
case NETDEV_CHANGENAME:
- if (priv->dbgfs_dir)
- priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
- priv->dbgfs_dir,
- stmmac_fs_dir,
- dev->name);
+ debugfs_change_name(priv->dbgfs_dir, "%s", dev->name);
break;
}
done:
@@ -6580,6 +6682,9 @@ static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
}
+/* FIXME: This may need RXC to be running, but it may be called with BH
+ * disabled, which means we can't call phylink_rx_clk_stop*().
+ */
static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
struct stmmac_priv *priv = netdev_priv(ndev);
@@ -6611,6 +6716,9 @@ err_pm_put:
return ret;
}
+/* FIXME: This may need RXC to be running, but it may be called with BH
+ * disabled, which means we can't call phylink_rx_clk_stop*().
+ */
static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
struct stmmac_priv *priv = netdev_priv(ndev);
@@ -6922,8 +7030,7 @@ int stmmac_xdp_open(struct net_device *dev)
stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
tx_q->tx_tail_addr, chan);
- hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- tx_q->txtimer.function = stmmac_tx_timer;
+ hrtimer_setup(&tx_q->txtimer, stmmac_tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
}
/* Enable the MAC Rx/Tx */
@@ -7063,6 +7170,8 @@ static const struct net_device_ops stmmac_netdev_ops = {
.ndo_bpf = stmmac_bpf,
.ndo_xdp_xmit = stmmac_xdp_xmit,
.ndo_xsk_wakeup = stmmac_xsk_wakeup,
+ .ndo_hwtstamp_get = stmmac_hwtstamp_get,
+ .ndo_hwtstamp_set = stmmac_hwtstamp_set,
};
static void stmmac_reset_subtask(struct stmmac_priv *priv)
@@ -7174,6 +7283,36 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
if (priv->dma_cap.tsoen)
dev_info(priv->device, "TSO supported\n");
+ if (priv->dma_cap.number_rx_queues &&
+ priv->plat->rx_queues_to_use > priv->dma_cap.number_rx_queues) {
+ dev_warn(priv->device,
+ "Number of Rx queues (%u) exceeds dma capability\n",
+ priv->plat->rx_queues_to_use);
+ priv->plat->rx_queues_to_use = priv->dma_cap.number_rx_queues;
+ }
+ if (priv->dma_cap.number_tx_queues &&
+ priv->plat->tx_queues_to_use > priv->dma_cap.number_tx_queues) {
+ dev_warn(priv->device,
+ "Number of Tx queues (%u) exceeds dma capability\n",
+ priv->plat->tx_queues_to_use);
+ priv->plat->tx_queues_to_use = priv->dma_cap.number_tx_queues;
+ }
+
+ if (priv->dma_cap.rx_fifo_size &&
+ priv->plat->rx_fifo_size > priv->dma_cap.rx_fifo_size) {
+ dev_warn(priv->device,
+ "Rx FIFO size (%u) exceeds dma capability\n",
+ priv->plat->rx_fifo_size);
+ priv->plat->rx_fifo_size = priv->dma_cap.rx_fifo_size;
+ }
+ if (priv->dma_cap.tx_fifo_size &&
+ priv->plat->tx_fifo_size > priv->dma_cap.tx_fifo_size) {
+ dev_warn(priv->device,
+ "Tx FIFO size (%u) exceeds dma capability\n",
+ priv->plat->tx_fifo_size);
+ priv->plat->tx_fifo_size = priv->dma_cap.tx_fifo_size;
+ }
+
priv->hw->vlan_fail_q_en =
(priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
@@ -7366,7 +7505,7 @@ int stmmac_dvr_probe(struct device *device,
return -ENOMEM;
stmmac_set_ethtool_ops(ndev);
- priv->pause = pause;
+ priv->pause_time = pause;
priv->plat = plat_dat;
priv->ioaddr = res->addr;
priv->dev->base_addr = (unsigned long)res->addr;
@@ -7406,6 +7545,8 @@ int stmmac_dvr_probe(struct device *device,
INIT_WORK(&priv->service_task, stmmac_service_task);
+ timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
+
/* Override with kernel parameters if supplied XXX CRS XXX
* this needs to have multiple instances
*/
@@ -7512,7 +7653,7 @@ int stmmac_dvr_probe(struct device *device,
#ifdef STMMAC_VLAN_TAG_USED
/* Both mac100 and gmac support receive VLAN tag detection */
ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
- if (priv->plat->has_gmac4) {
+ if (priv->plat->has_gmac4 || priv->plat->has_xgmac) {
ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
priv->hw->hw_vlan_en = true;
}
@@ -7560,9 +7701,6 @@ int stmmac_dvr_probe(struct device *device,
"%s: warning: maxmtu having invalid value (%d)\n",
__func__, priv->plat->maxmtu);
- if (flow_ctrl)
- priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
-
ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
/* Setup channels NAPI */
@@ -7598,9 +7736,6 @@ int stmmac_dvr_probe(struct device *device,
goto error_mdio_register;
}
- if (priv->plat->speed_mode_2500)
- priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
-
ret = stmmac_pcs_setup(ndev);
if (ret)
goto error_pcs_setup;
@@ -7664,8 +7799,6 @@ void stmmac_dvr_remove(struct device *dev)
pm_runtime_get_sync(dev);
- stmmac_stop_all_dma(priv);
- stmmac_mac_set(priv, priv->ioaddr, false);
unregister_netdev(ndev);
#ifdef CONFIG_DEBUG_FS
@@ -7713,9 +7846,9 @@ int stmmac_suspend(struct device *dev)
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
- if (priv->eee_enabled) {
+ if (priv->eee_sw_timer_en) {
priv->tx_path_in_lpi_mode = false;
- del_timer_sync(&priv->eee_ctrl_timer);
+ timer_delete_sync(&priv->eee_ctrl_timer);
}
/* Stop TX/RX DMA */
@@ -7736,19 +7869,16 @@ int stmmac_suspend(struct device *dev)
mutex_unlock(&priv->lock);
rtnl_lock();
- if (device_may_wakeup(priv->device) && priv->plat->pmt) {
- phylink_suspend(priv->phylink, true);
- } else {
- if (device_may_wakeup(priv->device))
- phylink_speed_down(priv->phylink, false);
- phylink_suspend(priv->phylink, false);
- }
+ if (device_may_wakeup(priv->device) && !priv->plat->pmt)
+ phylink_speed_down(priv->phylink, false);
+
+ phylink_suspend(priv->phylink,
+ device_may_wakeup(priv->device) && priv->plat->pmt);
rtnl_unlock();
if (stmmac_fpe_supported(priv))
- timer_shutdown_sync(&priv->fpe_cfg.verify_timer);
+ ethtool_mmsv_stop(&priv->fpe_cfg.mmsv);
- priv->speed = SPEED_UNKNOWN;
return 0;
}
EXPORT_SYMBOL_GPL(stmmac_suspend);
@@ -7832,16 +7962,12 @@ int stmmac_resume(struct device *dev)
}
rtnl_lock();
- if (device_may_wakeup(priv->device) && priv->plat->pmt) {
- phylink_resume(priv->phylink);
- } else {
- phylink_resume(priv->phylink);
- if (device_may_wakeup(priv->device))
- phylink_speed_up(priv->phylink);
- }
- rtnl_unlock();
- rtnl_lock();
+ /* Prepare the PHY to resume, ensuring that its clocks which are
+ * necessary for the MAC DMA reset to complete are running
+ */
+ phylink_prepare_resume(priv->phylink);
+
mutex_lock(&priv->lock);
stmmac_reset_queues_param(priv);
@@ -7851,14 +7977,25 @@ int stmmac_resume(struct device *dev)
stmmac_hw_setup(ndev, false);
stmmac_init_coalesce(priv);
+ phylink_rx_clk_stop_block(priv->phylink);
stmmac_set_rx_mode(ndev);
stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
+ phylink_rx_clk_stop_unblock(priv->phylink);
stmmac_enable_all_queues(priv);
stmmac_enable_all_dma_irq(priv);
mutex_unlock(&priv->lock);
+
+ /* phylink_resume() must be called after the hardware has been
+ * initialised because it may bring the link up immediately in a
+ * workqueue thread, which will race with initialisation.
+ */
+ phylink_resume(priv->phylink);
+ if (device_may_wakeup(priv->device) && !priv->plat->pmt)
+ phylink_speed_up(priv->phylink);
+
rtnl_unlock();
netif_device_attach(ndev);
@@ -7881,9 +8018,6 @@ static int __init stmmac_cmdline_opt(char *str)
} else if (!strncmp(opt, "phyaddr:", 8)) {
if (kstrtoint(opt + 8, 0, &phyaddr))
goto err;
- } else if (!strncmp(opt, "buf_sz:", 7)) {
- if (kstrtoint(opt + 7, 0, &buf_sz))
- goto err;
} else if (!strncmp(opt, "tc:", 3)) {
if (kstrtoint(opt + 3, 0, &tc))
goto err;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index 0c7d81ddd440..836f2848dfeb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -524,6 +524,9 @@ int stmmac_pcs_setup(struct net_device *ndev)
if (ret)
return dev_err_probe(priv->device, ret, "No xPCS found\n");
+ if (xpcs)
+ xpcs_config_eee_mult_fact(xpcs, priv->plat->mult_fact_100ns);
+
priv->hw->xpcs = xpcs;
return 0;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 352b01678c22..9c1b54b701f7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -155,9 +155,9 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
{
struct stmmac_pci_info *info = (struct stmmac_pci_info *)id->driver_data;
struct plat_stmmacenet_data *plat;
- struct stmmac_resources res;
- int i;
+ struct stmmac_resources res = {};
int ret;
+ int i;
plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
if (!plat)
@@ -192,9 +192,9 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
for (i = 0; i < PCI_STD_NUM_BARS; i++) {
if (pci_resource_len(pdev, i) == 0)
continue;
- ret = pcim_iomap_regions(pdev, BIT(i), pci_name(pdev));
- if (ret)
- return ret;
+ res.addr = pcim_iomap_region(pdev, i, STMMAC_RESOURCE_NAME);
+ if (IS_ERR(res.addr))
+ return PTR_ERR(res.addr);
break;
}
@@ -204,8 +204,6 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
if (ret)
return ret;
- memset(&res, 0, sizeof(res));
- res.addr = pcim_iomap_table(pdev)[i];
res.wol_irq = pdev->irq;
res.irq = pdev->irq;
@@ -226,21 +224,11 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
* stmmac_pci_remove
*
* @pdev: platform device pointer
- * Description: this function calls the main to free the net resources
- * and releases the PCI resources.
+ * Description: this function calls the main to free the net resources.
*/
static void stmmac_pci_remove(struct pci_dev *pdev)
{
- int i;
-
stmmac_dvr_remove(&pdev->dev);
-
- for (i = 0; i < PCI_STD_NUM_BARS; i++) {
- if (pci_resource_len(pdev, i) == 0)
- continue;
- pcim_iounmap_regions(pdev, BIT(i));
- break;
- }
}
static int __maybe_unused stmmac_pci_suspend(struct device *dev)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 3ac32444e492..b80c1efdb323 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -405,21 +405,16 @@ static int stmmac_of_get_mac_mode(struct device_node *np)
return -ENODEV;
}
-/**
- * stmmac_remove_config_dt - undo the effects of stmmac_probe_config_dt()
- * @pdev: platform_device structure
- * @plat: driver data platform structure
- *
- * Release resources claimed by stmmac_probe_config_dt().
- */
-static void stmmac_remove_config_dt(struct platform_device *pdev,
- struct plat_stmmacenet_data *plat)
-{
- clk_disable_unprepare(plat->stmmac_clk);
- clk_disable_unprepare(plat->pclk);
- of_node_put(plat->phy_node);
- of_node_put(plat->mdio_node);
-}
+/* Compatible string array for all gmac4 devices */
+static const char * const stmmac_gmac4_compats[] = {
+ "snps,dwmac-4.00",
+ "snps,dwmac-4.10a",
+ "snps,dwmac-4.20a",
+ "snps,dwmac-5.10a",
+ "snps,dwmac-5.20",
+ "snps,dwmac-5.30a",
+ NULL
+};
/**
* stmmac_probe_config_dt - parse device-tree driver parameters
@@ -435,6 +430,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
struct device_node *np = pdev->dev.of_node;
struct plat_stmmacenet_data *plat;
struct stmmac_dma_cfg *dma_cfg;
+ static int bus_id = -ENODEV;
int phy_mode;
void *ret;
int rc;
@@ -470,8 +466,14 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
of_property_read_u32(np, "max-speed", &plat->max_speed);
plat->bus_id = of_alias_get_id(np, "ethernet");
- if (plat->bus_id < 0)
- plat->bus_id = 0;
+ if (plat->bus_id < 0) {
+ if (bus_id < 0)
+ bus_id = of_alias_get_highest_id("ethernet");
+ /* No ethernet alias found, init at -1 so first bus_id is 0 */
+ if (bus_id < 0)
+ bus_id = -1;
+ plat->bus_id = ++bus_id;
+ }
/* Default to phy auto-detection */
plat->phy_addr = -1;
@@ -490,8 +492,10 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n");
rc = stmmac_mdio_setup(plat, np, &pdev->dev);
- if (rc)
- return ERR_PTR(rc);
+ if (rc) {
+ ret = ERR_PTR(rc);
+ goto error_put_phy;
+ }
of_property_read_u32(np, "tx-fifo-depth", &plat->tx_fifo_size);
@@ -500,8 +504,11 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
plat->force_sf_dma_mode =
of_property_read_bool(np, "snps,force_sf_dma_mode");
- if (of_property_read_bool(np, "snps,en-tx-lpi-clockgating"))
+ if (of_property_read_bool(np, "snps,en-tx-lpi-clockgating")) {
+ dev_warn(&pdev->dev,
+ "OF property snps,en-tx-lpi-clockgating is deprecated, please convert driver to use STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP\n");
plat->flags |= STMMAC_FLAG_EN_TX_LPI_CLOCKGATING;
+ }
/* Set the maxmtu to a default of JUMBO_LEN in case the
* parameter is not present in the device tree.
@@ -552,11 +559,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
plat->pmt = 1;
}
- if (of_device_is_compatible(np, "snps,dwmac-4.00") ||
- of_device_is_compatible(np, "snps,dwmac-4.10a") ||
- of_device_is_compatible(np, "snps,dwmac-4.20a") ||
- of_device_is_compatible(np, "snps,dwmac-5.10a") ||
- of_device_is_compatible(np, "snps,dwmac-5.20")) {
+ if (of_device_compatible_match(np, stmmac_gmac4_compats)) {
plat->has_gmac4 = 1;
plat->has_gmac = 0;
plat->pmt = 1;
@@ -581,8 +584,8 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg),
GFP_KERNEL);
if (!dma_cfg) {
- stmmac_remove_config_dt(pdev, plat);
- return ERR_PTR(-ENOMEM);
+ ret = ERR_PTR(-ENOMEM);
+ goto error_put_mdio;
}
plat->dma_cfg = dma_cfg;
@@ -610,8 +613,8 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
rc = stmmac_mtl_setup(pdev, plat);
if (rc) {
- stmmac_remove_config_dt(pdev, plat);
- return ERR_PTR(rc);
+ ret = ERR_PTR(rc);
+ goto error_put_mdio;
}
/* clock setup */
@@ -640,7 +643,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
dev_info(&pdev->dev, "PTP uses main clock\n");
} else {
plat->clk_ptp_rate = clk_get_rate(plat->clk_ptp_ref);
- dev_dbg(&pdev->dev, "PTP rate %d\n", plat->clk_ptp_rate);
+ dev_dbg(&pdev->dev, "PTP rate %lu\n", plat->clk_ptp_rate);
}
plat->stmmac_rst = devm_reset_control_get_optional(&pdev->dev,
@@ -663,6 +666,10 @@ error_hw_init:
clk_disable_unprepare(plat->pclk);
error_pclk_get:
clk_disable_unprepare(plat->stmmac_clk);
+error_put_mdio:
+ of_node_put(plat->mdio_node);
+error_put_phy:
+ of_node_put(plat->phy_node);
return ret;
}
@@ -671,16 +678,17 @@ static void devm_stmmac_remove_config_dt(void *data)
{
struct plat_stmmacenet_data *plat = data;
- /* Platform data argument is unused */
- stmmac_remove_config_dt(NULL, plat);
+ clk_disable_unprepare(plat->stmmac_clk);
+ clk_disable_unprepare(plat->pclk);
+ of_node_put(plat->mdio_node);
+ of_node_put(plat->phy_node);
}
/**
* devm_stmmac_probe_config_dt
* @pdev: platform_device structure
* @mac: MAC address to use
- * Description: Devres variant of stmmac_probe_config_dt(). Does not require
- * the user to call stmmac_remove_config_dt() at driver detach.
+ * Description: Devres variant of stmmac_probe_config_dt().
*/
struct plat_stmmacenet_data *
devm_stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
@@ -708,6 +716,17 @@ devm_stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
#endif /* CONFIG_OF */
EXPORT_SYMBOL_GPL(devm_stmmac_probe_config_dt);
+struct clk *stmmac_pltfr_find_clk(struct plat_stmmacenet_data *plat_dat,
+ const char *name)
+{
+ for (int i = 0; i < plat_dat->num_clks; i++)
+ if (strcmp(plat_dat->clks[i].id, name) == 0)
+ return plat_dat->clks[i].clk;
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(stmmac_pltfr_find_clk);
+
int stmmac_get_platform_resources(struct platform_device *pdev,
struct stmmac_resources *stmmac_res)
{
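A worked example may help with the bus_id fallback above (board details hypothetical): on a device tree that defines aliases ethernet0 and ethernet1, the first alias-less stmmac instance sees of_alias_get_highest_id("ethernet") return 1 and takes bus_id 2, the next alias-less instance takes 3, and so on. On a tree with no ethernet aliases at all, the static counter is initialized to -1 and alias-less instances get 0, 1, 2, ... Either way every probe ends up with a unique MDIO bus id.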
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
index 72dc1a32e46d..6e6561e29d6e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
@@ -14,6 +14,9 @@
struct plat_stmmacenet_data *
devm_stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac);
+struct clk *stmmac_pltfr_find_clk(struct plat_stmmacenet_data *plat_dat,
+ const char *name);
+
int stmmac_get_platform_resources(struct platform_device *pdev,
struct stmmac_resources *stmmac_res);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index 429b2d357813..3767ba495e78 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -317,7 +317,7 @@ void stmmac_ptp_register(struct stmmac_priv *priv)
/* Calculate the clock domain crossing (CDC) error if necessary */
priv->plat->cdc_error_adj = 0;
- if (priv->plat->has_gmac4 && priv->plat->clk_ptp_rate)
+ if (priv->plat->has_gmac4)
priv->plat->cdc_error_adj = (2 * NSEC_PER_SEC) / priv->plat->clk_ptp_rate;
/* Update the ptp clock parameters based on feature discovery, when
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
index 4cc70480ce0f..3fe0e3a80e80 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
@@ -104,4 +104,7 @@ int dwmac1000_ptp_enable(struct ptp_clock_info *ptp,
void dwmac1000_get_ptptime(void __iomem *ptpaddr, u64 *ptp_time);
void dwmac1000_timestamp_interrupt(struct stmmac_priv *priv);
+extern const struct ptp_clock_info stmmac_ptp_clock_ops;
+extern const struct ptp_clock_info dwmac1000_ptp_clock_ops;
+
#endif /* __STMMAC_PTP_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
index 3ca1c2a816ff..a01bc394d1ac 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
@@ -382,14 +382,14 @@ static int stmmac_test_phy_loopback(struct stmmac_priv *priv)
if (!priv->dev->phydev)
return -EOPNOTSUPP;
- ret = phy_loopback(priv->dev->phydev, true);
+ ret = phy_loopback(priv->dev->phydev, true, 0);
if (ret)
return ret;
attr.dst = priv->dev->dev_addr;
ret = __stmmac_test_loopback(priv, &attr);
- phy_loopback(priv->dev->phydev, false);
+ phy_loopback(priv->dev->phydev, false, 0);
return ret;
}
@@ -1985,7 +1985,7 @@ void stmmac_selftest_run(struct net_device *dev,
case STMMAC_LOOPBACK_PHY:
ret = -EOPNOTSUPP;
if (dev->phydev)
- ret = phy_loopback(dev->phydev, true);
+ ret = phy_loopback(dev->phydev, true, 0);
if (!ret)
break;
fallthrough;
@@ -2018,7 +2018,7 @@ void stmmac_selftest_run(struct net_device *dev,
case STMMAC_LOOPBACK_PHY:
ret = -EOPNOTSUPP;
if (dev->phydev)
- ret = phy_loopback(dev->phydev, false);
+ ret = phy_loopback(dev->phydev, false, 0);
if (!ret)
break;
fallthrough;
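phy_loopback() now takes a third argument selecting a loopback speed; this file passes 0 everywhere, which requests no particular speed. A short sketch of a variant that forces a speed instead (illustrative only, the test helper name is hypothetical):

	ret = phy_loopback(ndev->phydev, true, SPEED_1000);
	if (ret)
		return ret;

	ret = foo_run_loopback_test(ndev);	/* hypothetical test helper */

	phy_loopback(ndev->phydev, false, 0);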
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 6a79e6a111ed..694d6ee14381 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -1284,14 +1284,3 @@ const struct stmmac_tc_ops dwmac510_tc_ops = {
.query_caps = tc_query_caps,
.setup_mqprio = tc_setup_dwmac510_mqprio,
};
-
-const struct stmmac_tc_ops dwxgmac_tc_ops = {
- .init = tc_init,
- .setup_cls_u32 = tc_setup_cls_u32,
- .setup_cbs = tc_setup_cbs,
- .setup_cls = tc_setup_cls,
- .setup_taprio = tc_setup_taprio,
- .setup_etf = tc_setup_etf,
- .query_caps = tc_query_caps,
- .setup_mqprio = tc_setup_dwmac510_mqprio,
-};
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c
new file mode 100644
index 000000000000..0b6f6228ae35
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c
@@ -0,0 +1,374 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2025, Altera Corporation
+ * stmmac VLAN (802.1Q) handling
+ */
+
+#include "stmmac.h"
+#include "stmmac_vlan.h"
+
+static void vlan_write_single(struct net_device *dev, u16 vid)
+{
+ void __iomem *ioaddr = (void __iomem *)dev->base_addr;
+ u32 val;
+
+ val = readl(ioaddr + VLAN_TAG);
+ val &= ~VLAN_TAG_VID;
+ val |= VLAN_TAG_ETV | vid;
+
+ writel(val, ioaddr + VLAN_TAG);
+}
+
+static int vlan_write_filter(struct net_device *dev,
+ struct mac_device_info *hw,
+ u8 index, u32 data)
+{
+ void __iomem *ioaddr = (void __iomem *)dev->base_addr;
+ int ret;
+ u32 val;
+
+ if (index >= hw->num_vlan)
+ return -EINVAL;
+
+ writel(data, ioaddr + VLAN_TAG_DATA);
+
+ val = readl(ioaddr + VLAN_TAG);
+ val &= ~(VLAN_TAG_CTRL_OFS_MASK |
+ VLAN_TAG_CTRL_CT |
+ VLAN_TAG_CTRL_OB);
+ val |= (index << VLAN_TAG_CTRL_OFS_SHIFT) | VLAN_TAG_CTRL_OB;
+
+ writel(val, ioaddr + VLAN_TAG);
+
+ ret = readl_poll_timeout(ioaddr + VLAN_TAG, val,
+ !(val & VLAN_TAG_CTRL_OB),
+ 1000, 500000);
+ if (ret) {
+ netdev_err(dev, "Timeout accessing MAC_VLAN_Tag_Filter\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int vlan_add_hw_rx_fltr(struct net_device *dev,
+ struct mac_device_info *hw,
+ __be16 proto, u16 vid)
+{
+ int index = -1;
+ u32 val = 0;
+ int i, ret;
+
+ if (vid > 4095)
+ return -EINVAL;
+
+ /* Single Rx VLAN Filter */
+ if (hw->num_vlan == 1) {
+ /* For single VLAN filter, VID 0 means VLAN promiscuous */
+ if (vid == 0) {
+ netdev_warn(dev, "Adding VLAN ID 0 is not supported\n");
+ return -EPERM;
+ }
+
+ if (hw->vlan_filter[0] & VLAN_TAG_VID) {
+ netdev_err(dev, "Only single VLAN ID supported\n");
+ return -EPERM;
+ }
+
+ hw->vlan_filter[0] = vid;
+ vlan_write_single(dev, vid);
+
+ return 0;
+ }
+
+ /* Extended Rx VLAN Filter Enable */
+ val |= VLAN_TAG_DATA_ETV | VLAN_TAG_DATA_VEN | vid;
+
+ for (i = 0; i < hw->num_vlan; i++) {
+ if (hw->vlan_filter[i] == val)
+ return 0;
+ else if (!(hw->vlan_filter[i] & VLAN_TAG_DATA_VEN))
+ index = i;
+ }
+
+ if (index == -1) {
+ netdev_err(dev, "MAC_VLAN_Tag_Filter full (size: %0u)\n",
+ hw->num_vlan);
+ return -EPERM;
+ }
+
+ ret = vlan_write_filter(dev, hw, index, val);
+
+ if (!ret)
+ hw->vlan_filter[index] = val;
+
+ return ret;
+}
+
+static int vlan_del_hw_rx_fltr(struct net_device *dev,
+ struct mac_device_info *hw,
+ __be16 proto, u16 vid)
+{
+ int i, ret = 0;
+
+ /* Single Rx VLAN Filter */
+ if (hw->num_vlan == 1) {
+ if ((hw->vlan_filter[0] & VLAN_TAG_VID) == vid) {
+ hw->vlan_filter[0] = 0;
+ vlan_write_single(dev, 0);
+ }
+ return 0;
+ }
+
+ /* Extended Rx VLAN Filter Enable */
+ for (i = 0; i < hw->num_vlan; i++) {
+ if ((hw->vlan_filter[i] & VLAN_TAG_DATA_VID) == vid) {
+ ret = vlan_write_filter(dev, hw, i, 0);
+
+ if (!ret)
+ hw->vlan_filter[i] = 0;
+ else
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static void vlan_restore_hw_rx_fltr(struct net_device *dev,
+ struct mac_device_info *hw)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+ u32 hash;
+ u32 val;
+ int i;
+
+ /* Single Rx VLAN Filter */
+ if (hw->num_vlan == 1) {
+ vlan_write_single(dev, hw->vlan_filter[0]);
+ return;
+ }
+
+ /* Extended Rx VLAN Filter Enable */
+ for (i = 0; i < hw->num_vlan; i++) {
+ if (hw->vlan_filter[i] & VLAN_TAG_DATA_VEN) {
+ val = hw->vlan_filter[i];
+ vlan_write_filter(dev, hw, i, val);
+ }
+ }
+
+ hash = readl(ioaddr + VLAN_HASH_TABLE);
+ if (hash & VLAN_VLHT) {
+ value = readl(ioaddr + VLAN_TAG);
+ value |= VLAN_VTHM;
+ writel(value, ioaddr + VLAN_TAG);
+ }
+}
+
+static void vlan_update_hash(struct mac_device_info *hw, u32 hash,
+ u16 perfect_match, bool is_double)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ writel(hash, ioaddr + VLAN_HASH_TABLE);
+
+ value = readl(ioaddr + VLAN_TAG);
+
+ if (hash) {
+ value |= VLAN_VTHM | VLAN_ETV;
+ if (is_double) {
+ value |= VLAN_EDVLP;
+ value |= VLAN_ESVL;
+ value |= VLAN_DOVLTC;
+ }
+
+ writel(value, ioaddr + VLAN_TAG);
+ } else if (perfect_match) {
+ u32 value = VLAN_ETV;
+
+ if (is_double) {
+ value |= VLAN_EDVLP;
+ value |= VLAN_ESVL;
+ value |= VLAN_DOVLTC;
+ }
+
+ writel(value | perfect_match, ioaddr + VLAN_TAG);
+ } else {
+ value &= ~(VLAN_VTHM | VLAN_ETV);
+ value &= ~(VLAN_EDVLP | VLAN_ESVL);
+ value &= ~VLAN_DOVLTC;
+ value &= ~VLAN_VID;
+
+ writel(value, ioaddr + VLAN_TAG);
+ }
+}
+
+static void vlan_enable(struct mac_device_info *hw, u32 type)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ value = readl(ioaddr + VLAN_INCL);
+ value |= VLAN_VLTI;
+ value |= VLAN_CSVL; /* Only use SVLAN */
+ value &= ~VLAN_VLC;
+ value |= (type << VLAN_VLC_SHIFT) & VLAN_VLC;
+ writel(value, ioaddr + VLAN_INCL);
+}
+
+static void vlan_rx_hw(struct mac_device_info *hw,
+ struct dma_desc *rx_desc, struct sk_buff *skb)
+{
+ if (hw->desc->get_rx_vlan_valid(rx_desc)) {
+ u16 vid = hw->desc->get_rx_vlan_tci(rx_desc);
+
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
+ }
+}
+
+static void vlan_set_hw_mode(struct mac_device_info *hw)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value = readl(ioaddr + VLAN_TAG);
+
+ value &= ~VLAN_TAG_CTRL_EVLS_MASK;
+
+ if (hw->hw_vlan_en)
+ /* Always strip VLAN on Receive */
+ value |= VLAN_TAG_STRIP_ALL;
+ else
+ /* Do not strip VLAN on Receive */
+ value |= VLAN_TAG_STRIP_NONE;
+
+ /* Enable outer VLAN Tag in Rx DMA descriptor */
+ value |= VLAN_TAG_CTRL_EVLRXS;
+ writel(value, ioaddr + VLAN_TAG);
+}
+
+static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash,
+ u16 perfect_match, bool is_double)
+{
+ void __iomem *ioaddr = hw->pcsr;
+
+ writel(hash, ioaddr + VLAN_HASH_TABLE);
+
+ if (hash) {
+ u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
+
+ value |= XGMAC_FILTER_VTFE;
+
+ writel(value, ioaddr + XGMAC_PACKET_FILTER);
+
+ value = readl(ioaddr + VLAN_TAG);
+
+ value |= VLAN_VTHM | VLAN_ETV;
+ if (is_double) {
+ value |= VLAN_EDVLP;
+ value |= VLAN_ESVL;
+ value |= VLAN_DOVLTC;
+ } else {
+ value &= ~VLAN_EDVLP;
+ value &= ~VLAN_ESVL;
+ value &= ~VLAN_DOVLTC;
+ }
+
+ value &= ~VLAN_VID;
+ writel(value, ioaddr + VLAN_TAG);
+ } else if (perfect_match) {
+ u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
+
+ value |= XGMAC_FILTER_VTFE;
+
+ writel(value, ioaddr + XGMAC_PACKET_FILTER);
+
+ value = readl(ioaddr + VLAN_TAG);
+
+ value &= ~VLAN_VTHM;
+ value |= VLAN_ETV;
+ if (is_double) {
+ value |= VLAN_EDVLP;
+ value |= VLAN_ESVL;
+ value |= VLAN_DOVLTC;
+ } else {
+ value &= ~VLAN_EDVLP;
+ value &= ~VLAN_ESVL;
+ value &= ~VLAN_DOVLTC;
+ }
+
+ value &= ~VLAN_VID;
+ writel(value | perfect_match, ioaddr + VLAN_TAG);
+ } else {
+ u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
+
+ value &= ~XGMAC_FILTER_VTFE;
+
+ writel(value, ioaddr + XGMAC_PACKET_FILTER);
+
+ value = readl(ioaddr + VLAN_TAG);
+
+ value &= ~(VLAN_VTHM | VLAN_ETV);
+ value &= ~(VLAN_EDVLP | VLAN_ESVL);
+ value &= ~VLAN_DOVLTC;
+ value &= ~VLAN_VID;
+
+ writel(value, ioaddr + VLAN_TAG);
+ }
+}
+
+const struct stmmac_vlan_ops dwmac_vlan_ops = {
+ .update_vlan_hash = vlan_update_hash,
+ .enable_vlan = vlan_enable,
+ .add_hw_vlan_rx_fltr = vlan_add_hw_rx_fltr,
+ .del_hw_vlan_rx_fltr = vlan_del_hw_rx_fltr,
+ .restore_hw_vlan_rx_fltr = vlan_restore_hw_rx_fltr,
+ .rx_hw_vlan = vlan_rx_hw,
+ .set_hw_vlan_mode = vlan_set_hw_mode,
+};
+
+const struct stmmac_vlan_ops dwxlgmac2_vlan_ops = {
+ .update_vlan_hash = dwxgmac2_update_vlan_hash,
+ .enable_vlan = vlan_enable,
+};
+
+const struct stmmac_vlan_ops dwxgmac210_vlan_ops = {
+ .update_vlan_hash = dwxgmac2_update_vlan_hash,
+ .enable_vlan = vlan_enable,
+ .add_hw_vlan_rx_fltr = vlan_add_hw_rx_fltr,
+ .del_hw_vlan_rx_fltr = vlan_del_hw_rx_fltr,
+ .restore_hw_vlan_rx_fltr = vlan_restore_hw_rx_fltr,
+ .rx_hw_vlan = vlan_rx_hw,
+ .set_hw_vlan_mode = vlan_set_hw_mode,
+};
+
+u32 stmmac_get_num_vlan(void __iomem *ioaddr)
+{
+ u32 val, num_vlan;
+
+ val = readl(ioaddr + HW_FEATURE3);
+ switch (val & VLAN_HW_FEAT_NRVF) {
+ case 0:
+ num_vlan = 1;
+ break;
+ case 1:
+ num_vlan = 4;
+ break;
+ case 2:
+ num_vlan = 8;
+ break;
+ case 3:
+ num_vlan = 16;
+ break;
+ case 4:
+ num_vlan = 24;
+ break;
+ case 5:
+ num_vlan = 32;
+ break;
+ default:
+ num_vlan = 1;
+ }
+
+ return num_vlan;
+}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.h
new file mode 100644
index 000000000000..c24f89a9049b
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2025, Altera Corporation
+ * stmmac VLAN(802.1Q) handling
+ */
+
+#ifndef __STMMAC_VLAN_H__
+#define __STMMAC_VLAN_H__
+
+#include <linux/bitfield.h>
+#include "dwxgmac2.h"
+
+#define VLAN_TAG 0x00000050
+#define VLAN_TAG_DATA 0x00000054
+#define VLAN_HASH_TABLE 0x00000058
+#define VLAN_INCL 0x00000060
+
+/* MAC VLAN */
+#define VLAN_EDVLP BIT(26)
+#define VLAN_VTHM BIT(25)
+#define VLAN_DOVLTC BIT(20)
+#define VLAN_ESVL BIT(18)
+#define VLAN_ETV BIT(16)
+#define VLAN_VID GENMASK(15, 0)
+#define VLAN_VLTI BIT(20)
+#define VLAN_CSVL BIT(19)
+#define VLAN_VLC GENMASK(17, 16)
+#define VLAN_VLC_SHIFT 16
+#define VLAN_VLHT GENMASK(15, 0)
+
+/* MAC VLAN Tag */
+#define VLAN_TAG_VID GENMASK(15, 0)
+#define VLAN_TAG_ETV BIT(16)
+
+/* MAC VLAN Tag Control */
+#define VLAN_TAG_CTRL_OB BIT(0)
+#define VLAN_TAG_CTRL_CT BIT(1)
+#define VLAN_TAG_CTRL_OFS_MASK GENMASK(6, 2)
+#define VLAN_TAG_CTRL_OFS_SHIFT 2
+#define VLAN_TAG_CTRL_EVLS_MASK GENMASK(22, 21)
+#define VLAN_TAG_CTRL_EVLS_SHIFT 21
+#define VLAN_TAG_CTRL_EVLRXS BIT(24)
+
+#define VLAN_TAG_STRIP_NONE FIELD_PREP(VLAN_TAG_CTRL_EVLS_MASK, 0x0)
+#define VLAN_TAG_STRIP_PASS FIELD_PREP(VLAN_TAG_CTRL_EVLS_MASK, 0x1)
+#define VLAN_TAG_STRIP_FAIL FIELD_PREP(VLAN_TAG_CTRL_EVLS_MASK, 0x2)
+#define VLAN_TAG_STRIP_ALL FIELD_PREP(VLAN_TAG_CTRL_EVLS_MASK, 0x3)
+
+/* MAC VLAN Tag Data/Filter */
+#define VLAN_TAG_DATA_VID GENMASK(15, 0)
+#define VLAN_TAG_DATA_VEN BIT(16)
+#define VLAN_TAG_DATA_ETV BIT(17)
+
+/* MAC VLAN HW FEAT */
+#define HW_FEATURE3 0x00000128
+#define VLAN_HW_FEAT_NRVF GENMASK(2, 0)
+
+extern const struct stmmac_vlan_ops dwmac_vlan_ops;
+extern const struct stmmac_vlan_ops dwxgmac210_vlan_ops;
+extern const struct stmmac_vlan_ops dwxlgmac2_vlan_ops;
+
+u32 stmmac_get_num_vlan(void __iomem *ioaddr);
+
+#endif /* __STMMAC_VLAN_H__ */
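The strip-mode macros above use FIELD_PREP() from <linux/bitfield.h> to place the two-bit EVLS value into bits 22:21 of the VLAN tag control register. Worked expansion for reference:

/*
 *   VLAN_TAG_CTRL_EVLS_MASK = GENMASK(22, 21)                        = 0x00600000
 *   VLAN_TAG_STRIP_PASS     = FIELD_PREP(VLAN_TAG_CTRL_EVLS_MASK, 1) = 0x00200000
 *   VLAN_TAG_STRIP_FAIL     = FIELD_PREP(VLAN_TAG_CTRL_EVLS_MASK, 2) = 0x00400000
 *   VLAN_TAG_STRIP_ALL      = FIELD_PREP(VLAN_TAG_CTRL_EVLS_MASK, 3) = 0x00600000
 */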
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.h
index 896dc987d4ef..77ce8cfbe976 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.h
@@ -4,7 +4,6 @@
#ifndef _STMMAC_XDP_H_
#define _STMMAC_XDP_H_
-#define STMMAC_MAX_RX_BUF_SIZE(num) (((num) * PAGE_SIZE) - XDP_PACKET_HEADROOM)
#define STMMAC_RX_DMA_ATTR (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
int stmmac_xdp_setup_pool(struct stmmac_priv *priv, struct xsk_buff_pool *pool,
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index b8948d5b779a..acfb523214b9 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -3779,7 +3779,7 @@ static void cas_shutdown(struct cas *cp)
/* Make us not-running to avoid timers respawning */
cp->hw_running = 0;
- del_timer_sync(&cp->link_timer);
+ timer_delete_sync(&cp->link_timer);
/* Stop the reset task */
#if 0
@@ -4021,7 +4021,7 @@ done:
static void cas_link_timer(struct timer_list *t)
{
- struct cas *cp = from_timer(cp, t, link_timer);
+ struct cas *cp = timer_container_of(cp, t, link_timer);
int mask, pending = 0, reset = 0;
unsigned long flags;
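The timer updates in cassini and the following sun/* drivers are mechanical API renames: del_timer()/del_timer_sync() become timer_delete()/timer_delete_sync(), and from_timer() becomes timer_container_of() with unchanged argument order. A minimal sketch of the callback pattern, with hypothetical names:

#include <linux/timer.h>

struct foo_priv {
	struct timer_list link_timer;
	/* ... */
};

static void foo_link_timer(struct timer_list *t)
{
	/* Recover the enclosing structure from the timer_list pointer */
	struct foo_priv *fp = timer_container_of(fp, t, link_timer);

	/* ... do the periodic work, possibly mod_timer(&fp->link_timer, ...) ... */
}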
diff --git a/drivers/net/ethernet/sun/ldmvsw.c b/drivers/net/ethernet/sun/ldmvsw.c
index a9a6670b5ff1..6fc37ab27f7b 100644
--- a/drivers/net/ethernet/sun/ldmvsw.c
+++ b/drivers/net/ethernet/sun/ldmvsw.c
@@ -390,7 +390,7 @@ static int vsw_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
return 0;
err_out_del_timer:
- del_timer_sync(&port->clean_timer);
+ timer_delete_sync(&port->clean_timer);
list_del_rcu(&port->list);
synchronize_rcu();
netif_napi_del(&port->napi);
@@ -408,8 +408,8 @@ static void vsw_port_remove(struct vio_dev *vdev)
unsigned long flags;
if (port) {
- del_timer_sync(&port->vio.timer);
- del_timer_sync(&port->clean_timer);
+ timer_delete_sync(&port->vio.timer);
+ timer_delete_sync(&port->clean_timer);
napi_disable(&port->napi);
unregister_netdev(port->dev);
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index df6d35d41b97..26119d02a94d 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -2225,7 +2225,7 @@ static int niu_link_status(struct niu *np, int *link_up_p)
static void niu_timer(struct timer_list *t)
{
- struct niu *np = from_timer(np, t, timer);
+ struct niu *np = timer_container_of(np, t, timer);
unsigned long off;
int err, link_up;
@@ -3303,7 +3303,7 @@ static struct page *niu_find_rxpage(struct rx_ring_info *rp, u64 addr,
addr &= PAGE_MASK;
pp = &rp->rxhash[h];
for (; (p = *pp) != NULL; pp = &niu_next_page(p)) {
- if (p->index == addr) {
+ if (p->private == addr) {
*link = pp;
goto found;
}
@@ -3318,7 +3318,7 @@ static void niu_hash_page(struct rx_ring_info *rp, struct page *page, u64 base)
{
unsigned int h = niu_hash_rxaddr(rp, base);
- page->index = base;
+ page->private = base;
niu_next_page(page) = rp->rxhash[h];
rp->rxhash[h] = page;
}
@@ -3336,7 +3336,7 @@ static int niu_rbr_add_page(struct niu *np, struct rx_ring_info *rp,
addr = np->ops->map_page(np->device, page, 0,
PAGE_SIZE, DMA_FROM_DEVICE);
- if (!addr) {
+ if (np->ops->mapping_error(np->device, addr)) {
__free_page(page);
return -ENOMEM;
}
@@ -3400,11 +3400,11 @@ static int niu_rx_pkt_ignore(struct niu *np, struct rx_ring_info *rp)
rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >>
RCR_ENTRY_PKTBUFSZ_SHIFT];
- if ((page->index + PAGE_SIZE) - rcr_size == addr) {
+ if ((page->private + PAGE_SIZE) - rcr_size == addr) {
*link = niu_next_page(page);
- np->ops->unmap_page(np->device, page->index,
+ np->ops->unmap_page(np->device, page->private,
PAGE_SIZE, DMA_FROM_DEVICE);
- page->index = 0;
+ page->private = 0;
niu_next_page(page) = NULL;
__free_page(page);
rp->rbr_refill_pending++;
@@ -3469,11 +3469,11 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
append_size = append_size - skb->len;
niu_rx_skb_append(skb, page, off, append_size, rcr_size);
- if ((page->index + rp->rbr_block_size) - rcr_size == addr) {
+ if ((page->private + rp->rbr_block_size) - rcr_size == addr) {
*link = niu_next_page(page);
- np->ops->unmap_page(np->device, page->index,
+ np->ops->unmap_page(np->device, page->private,
PAGE_SIZE, DMA_FROM_DEVICE);
- page->index = 0;
+ page->private = 0;
niu_next_page(page) = NULL;
rp->rbr_refill_pending++;
} else
@@ -3538,11 +3538,11 @@ static void niu_rbr_free(struct niu *np, struct rx_ring_info *rp)
page = rp->rxhash[i];
while (page) {
struct page *next = niu_next_page(page);
- u64 base = page->index;
+ u64 base = page->private;
np->ops->unmap_page(np->device, base, PAGE_SIZE,
DMA_FROM_DEVICE);
- page->index = 0;
+ page->private = 0;
niu_next_page(page) = NULL;
__free_page(page);
@@ -6086,7 +6086,7 @@ static void niu_enable_napi(struct niu *np)
int i;
for (i = 0; i < np->num_ldg; i++)
- napi_enable(&np->ldg[i].napi);
+ napi_enable_locked(&np->ldg[i].napi);
}
static void niu_disable_napi(struct niu *np)
@@ -6116,7 +6116,9 @@ static int niu_open(struct net_device *dev)
if (err)
goto out_free_channels;
+ netdev_lock(dev);
niu_enable_napi(np);
+ netdev_unlock(dev);
spin_lock_irq(&np->lock);
@@ -6163,7 +6165,7 @@ static void niu_full_shutdown(struct niu *np, struct net_device *dev)
niu_disable_napi(np);
netif_tx_stop_all_queues(dev);
- del_timer_sync(&np->timer);
+ timer_delete_sync(&np->timer);
spin_lock_irq(&np->lock);
@@ -6460,7 +6462,7 @@ static void niu_reset_buffers(struct niu *np)
page = rp->rxhash[j];
while (page) {
struct page *next = niu_next_page(page);
- u64 base = page->index;
+ u64 base = page->private;
base = base >> RBR_DESCR_ADDR_SHIFT;
rp->rbr[k++] = cpu_to_le32(base);
page = next;
@@ -6509,7 +6511,7 @@ static void niu_reset_task(struct work_struct *work)
spin_unlock_irqrestore(&np->lock, flags);
- del_timer_sync(&np->timer);
+ timer_delete_sync(&np->timer);
niu_netif_stop(np);
@@ -6521,6 +6523,7 @@ static void niu_reset_task(struct work_struct *work)
niu_reset_buffers(np);
+ netdev_lock(np->dev);
spin_lock_irqsave(&np->lock, flags);
err = niu_init_hw(np);
@@ -6531,6 +6534,7 @@ static void niu_reset_task(struct work_struct *work)
}
spin_unlock_irqrestore(&np->lock, flags);
+ netdev_unlock(np->dev);
}
static void niu_tx_timeout(struct net_device *dev, unsigned int txqueue)
@@ -6672,6 +6676,8 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
len = skb_headlen(skb);
mapping = np->ops->map_single(np->device, skb->data,
len, DMA_TO_DEVICE);
+ if (np->ops->mapping_error(np->device, mapping))
+ goto out_drop;
prod = rp->prod;
@@ -6713,6 +6719,8 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
mapping = np->ops->map_page(np->device, skb_frag_page(frag),
skb_frag_off(frag), len,
DMA_TO_DEVICE);
+ if (np->ops->mapping_error(np->device, mapping))
+ goto out_unmap;
rp->tx_buffs[prod].skb = NULL;
rp->tx_buffs[prod].mapping = mapping;
@@ -6737,6 +6745,19 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
out:
return NETDEV_TX_OK;
+out_unmap:
+ while (i--) {
+ const skb_frag_t *frag;
+
+ prod = PREVIOUS_TX(rp, prod);
+ frag = &skb_shinfo(skb)->frags[i];
+ np->ops->unmap_page(np->device, rp->tx_buffs[prod].mapping,
+ skb_frag_size(frag), DMA_TO_DEVICE);
+ }
+
+ np->ops->unmap_single(np->device, rp->tx_buffs[rp->prod].mapping,
+ skb_headlen(skb), DMA_TO_DEVICE);
+
out_drop:
rp->tx_errors++;
kfree_skb(skb);
@@ -6761,7 +6782,9 @@ static int niu_change_mtu(struct net_device *dev, int new_mtu)
niu_free_channels(np);
+ netdev_lock(dev);
niu_enable_napi(np);
+ netdev_unlock(dev);
err = niu_alloc_channels(np);
if (err)
@@ -9058,6 +9081,8 @@ static void niu_try_msix(struct niu *np, u8 *ldg_num_map)
msi_vec[i].entry = i;
}
+ pdev->dev_flags |= PCI_DEV_FLAGS_MSIX_TOUCH_ENTRY_DATA_FIRST;
+
num_irqs = pci_enable_msix_range(pdev, msi_vec, 1, num_irqs);
if (num_irqs < 0) {
np->flags &= ~NIU_FLAGS_MSIX;
@@ -9636,6 +9661,11 @@ static void niu_pci_unmap_single(struct device *dev, u64 dma_address,
dma_unmap_single(dev, dma_address, size, direction);
}
+static int niu_pci_mapping_error(struct device *dev, u64 addr)
+{
+ return dma_mapping_error(dev, addr);
+}
+
static const struct niu_ops niu_pci_ops = {
.alloc_coherent = niu_pci_alloc_coherent,
.free_coherent = niu_pci_free_coherent,
@@ -9643,6 +9673,7 @@ static const struct niu_ops niu_pci_ops = {
.unmap_page = niu_pci_unmap_page,
.map_single = niu_pci_map_single,
.unmap_single = niu_pci_unmap_single,
+ .mapping_error = niu_pci_mapping_error,
};
static void niu_driver_version(void)
@@ -9908,7 +9939,7 @@ static int __maybe_unused niu_suspend(struct device *dev_d)
flush_work(&np->reset_task);
niu_netif_stop(np);
- del_timer_sync(&np->timer);
+ timer_delete_sync(&np->timer);
spin_lock_irqsave(&np->lock, flags);
niu_enable_interrupts(np, 0);
@@ -9937,6 +9968,7 @@ static int __maybe_unused niu_resume(struct device *dev_d)
spin_lock_irqsave(&np->lock, flags);
+ netdev_lock(dev);
err = niu_init_hw(np);
if (!err) {
np->timer.expires = jiffies + HZ;
@@ -9945,6 +9977,7 @@ static int __maybe_unused niu_resume(struct device *dev_d)
}
spin_unlock_irqrestore(&np->lock, flags);
+ netdev_unlock(dev);
return err;
}
@@ -10009,6 +10042,11 @@ static void niu_phys_unmap_single(struct device *dev, u64 dma_address,
/* Nothing to do. */
}
+static int niu_phys_mapping_error(struct device *dev, u64 dma_address)
+{
+ return false;
+}
+
static const struct niu_ops niu_phys_ops = {
.alloc_coherent = niu_phys_alloc_coherent,
.free_coherent = niu_phys_free_coherent,
@@ -10016,6 +10054,7 @@ static const struct niu_ops niu_phys_ops = {
.unmap_page = niu_phys_unmap_page,
.map_single = niu_phys_map_single,
.unmap_single = niu_phys_unmap_single,
+ .mapping_error = niu_phys_mapping_error,
};
static int niu_of_probe(struct platform_device *op)
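The niu changes add a ->mapping_error() hook to struct niu_ops so failed DMA mappings are detected, and niu_start_xmit() now unwinds any fragments it already mapped before dropping the packet. A self-contained sketch of that check-and-unwind shape using the generic DMA API (not the driver's niu_ops indirection; names hypothetical):

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

static int foo_map_skb(struct device *dev, struct sk_buff *skb,
		       dma_addr_t *head, dma_addr_t *frags)
{
	const struct skb_shared_info *sh = skb_shinfo(skb);
	int i;

	*head = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *head))
		return -ENOMEM;

	for (i = 0; i < sh->nr_frags; i++) {
		frags[i] = skb_frag_dma_map(dev, &sh->frags[i], 0,
					    skb_frag_size(&sh->frags[i]),
					    DMA_TO_DEVICE);
		if (dma_mapping_error(dev, frags[i]))
			goto unmap;
	}
	return 0;

unmap:
	/* Undo mappings for frags [0, i) and then the head */
	while (--i >= 0)
		dma_unmap_page(dev, frags[i], skb_frag_size(&sh->frags[i]),
			       DMA_TO_DEVICE);
	dma_unmap_single(dev, *head, skb_headlen(skb), DMA_TO_DEVICE);
	return -ENOMEM;
}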
diff --git a/drivers/net/ethernet/sun/niu.h b/drivers/net/ethernet/sun/niu.h
index 04c215f91fc0..0b169c08b0f2 100644
--- a/drivers/net/ethernet/sun/niu.h
+++ b/drivers/net/ethernet/sun/niu.h
@@ -2879,6 +2879,9 @@ struct tx_ring_info {
#define NEXT_TX(tp, index) \
(((index) + 1) < (tp)->pending ? ((index) + 1) : 0)
+#define PREVIOUS_TX(tp, index) \
+ (((index) - 1) >= 0 ? ((index) - 1) : (((tp)->pending) - 1))
+
static inline u32 niu_tx_avail(struct tx_ring_info *tp)
{
return (tp->pending -
@@ -3140,6 +3143,7 @@ struct niu_ops {
enum dma_data_direction direction);
void (*unmap_single)(struct device *dev, u64 dma_address,
size_t size, enum dma_data_direction direction);
+ int (*mapping_error)(struct device *dev, u64 dma_address);
};
struct niu_link_config {
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
index bbb3a6ca19ed..edb2fd3a6551 100644
--- a/drivers/net/ethernet/sun/sunbmac.c
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -526,7 +526,7 @@ static int try_next_permutation(struct bigmac *bp, void __iomem *tregs)
static void bigmac_timer(struct timer_list *t)
{
- struct bigmac *bp = from_timer(bp, t, bigmac_timer);
+ struct bigmac *bp = timer_container_of(bp, t, bigmac_timer);
void __iomem *tregs = bp->tregs;
int restart_timer = 0;
@@ -931,7 +931,7 @@ static int bigmac_close(struct net_device *dev)
{
struct bigmac *bp = netdev_priv(dev);
- del_timer(&bp->bigmac_timer);
+ timer_delete(&bp->bigmac_timer);
bp->timer_state = asleep;
bp->timer_ticks = 0;
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index 3e5f9b17c777..8e69d917d827 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -1481,7 +1481,7 @@ static int gem_mdio_link_not_up(struct gem *gp)
static void gem_link_timer(struct timer_list *t)
{
- struct gem *gp = from_timer(gp, t, link_timer);
+ struct gem *gp = timer_container_of(gp, t, link_timer);
struct net_device *dev = gp->dev;
int restart_aneg = 0;
@@ -2180,7 +2180,7 @@ static void gem_do_stop(struct net_device *dev, int wol)
gem_disable_ints(gp);
/* Stop the link timer */
- del_timer_sync(&gp->link_timer);
+ timer_delete_sync(&gp->link_timer);
/* We cannot cancel the reset task while holding the
* rtnl lock, we'd get an A->B / B->A deadlock stituation
@@ -2230,7 +2230,7 @@ static void gem_reset_task(struct work_struct *work)
}
/* Stop the link timer */
- del_timer_sync(&gp->link_timer);
+ timer_delete_sync(&gp->link_timer);
/* Stop NAPI and tx */
gem_netif_stop(gp);
@@ -2610,7 +2610,7 @@ static int gem_set_link_ksettings(struct net_device *dev,
/* Apply settings and restart link process. */
if (netif_device_present(gp->dev)) {
- del_timer_sync(&gp->link_timer);
+ timer_delete_sync(&gp->link_timer);
gem_begin_auto_negotiation(gp, cmd);
}
@@ -2626,7 +2626,7 @@ static int gem_nway_reset(struct net_device *dev)
/* Restart link process */
if (netif_device_present(gp->dev)) {
- del_timer_sync(&gp->link_timer);
+ timer_delete_sync(&gp->link_timer);
gem_begin_auto_negotiation(gp, NULL);
}
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index 50ace461a1af..4bc0e114d5ee 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -721,7 +721,7 @@ force_link:
static void happy_meal_timer(struct timer_list *t)
{
- struct happy_meal *hp = from_timer(hp, t, happy_timer);
+ struct happy_meal *hp = timer_container_of(hp, t, happy_timer);
void __iomem *tregs = hp->tcvregs;
int restart_timer = 0;
@@ -1265,7 +1265,7 @@ static int happy_meal_init(struct happy_meal *hp)
u32 regtmp, rxcfg;
/* If auto-negotiation timer is running, kill it. */
- del_timer(&hp->happy_timer);
+ timer_delete(&hp->happy_timer);
HMD("happy_flags[%08x]\n", hp->happy_flags);
if (!(hp->happy_flags & HFLAG_INIT)) {
@@ -1922,7 +1922,7 @@ static int happy_meal_close(struct net_device *dev)
happy_meal_clean_rings(hp);
/* If auto-negotiation timer is running, kill it. */
- del_timer(&hp->happy_timer);
+ timer_delete(&hp->happy_timer);
spin_unlock_irq(&hp->happy_lock);
@@ -2184,7 +2184,7 @@ static int hme_set_link_ksettings(struct net_device *dev,
/* Ok, do it to it. */
spin_lock_irq(&hp->happy_lock);
- del_timer(&hp->happy_timer);
+ timer_delete(&hp->happy_timer);
happy_meal_begin_auto_negotiation(hp, hp->tcvregs, cmd);
spin_unlock_irq(&hp->happy_lock);
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 1e887d951a04..a2a3e94da4b8 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -505,7 +505,7 @@ static void vnet_port_remove(struct vio_dev *vdev)
struct vnet_port *port = dev_get_drvdata(&vdev->dev);
if (port) {
- del_timer_sync(&port->vio.timer);
+ timer_delete_sync(&port->vio.timer);
napi_disable(&port->napi);
diff --git a/drivers/net/ethernet/sun/sunvnet_common.c b/drivers/net/ethernet/sun/sunvnet_common.c
index 1cacb2a0ee03..0212853c9430 100644
--- a/drivers/net/ethernet/sun/sunvnet_common.c
+++ b/drivers/net/ethernet/sun/sunvnet_common.c
@@ -1044,7 +1044,7 @@ static inline void vnet_free_skbs(struct sk_buff *skb)
void sunvnet_clean_timer_expire_common(struct timer_list *t)
{
- struct vnet_port *port = from_timer(port, t, clean_timer);
+ struct vnet_port *port = timer_container_of(port, t, clean_timer);
struct sk_buff *freeskbs;
unsigned pending;
@@ -1058,7 +1058,7 @@ void sunvnet_clean_timer_expire_common(struct timer_list *t)
(void)mod_timer(&port->clean_timer,
jiffies + VNET_CLEAN_TIMEOUT);
else
- del_timer(&port->clean_timer);
+ timer_delete(&port->clean_timer);
}
EXPORT_SYMBOL_GPL(sunvnet_clean_timer_expire_common);
@@ -1513,7 +1513,7 @@ out_dropped:
(void)mod_timer(&port->clean_timer,
jiffies + VNET_CLEAN_TIMEOUT);
else if (port)
- del_timer(&port->clean_timer);
+ timer_delete(&port->clean_timer);
rcu_read_unlock();
dev_kfree_skb(skb);
vnet_free_skbs(freeskbs);
@@ -1707,7 +1707,7 @@ EXPORT_SYMBOL_GPL(sunvnet_port_free_tx_bufs_common);
void vnet_port_reset(struct vnet_port *port)
{
- del_timer(&port->clean_timer);
+ timer_delete(&port->clean_timer);
sunvnet_port_free_tx_bufs_common(port);
port->rmtu = 0;
port->tso = (port->vsw == 0); /* no tso in vsw, misbehaves in bridge */
diff --git a/drivers/net/ethernet/sunplus/spl2sw_driver.c b/drivers/net/ethernet/sunplus/spl2sw_driver.c
index 721d8ed3f302..5e0e4c9ecbb0 100644
--- a/drivers/net/ethernet/sunplus/spl2sw_driver.c
+++ b/drivers/net/ethernet/sunplus/spl2sw_driver.c
@@ -199,7 +199,7 @@ static const struct net_device_ops netdev_ops = {
.ndo_start_xmit = spl2sw_ethernet_start_xmit,
.ndo_set_rx_mode = spl2sw_ethernet_set_rx_mode,
.ndo_set_mac_address = spl2sw_ethernet_set_mac_address,
- .ndo_do_ioctl = phy_do_ioctl,
+ .ndo_eth_ioctl = phy_do_ioctl,
.ndo_tx_timeout = spl2sw_ethernet_tx_timeout,
};
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
index d1793b6154c7..37101dc50d04 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
@@ -360,7 +360,8 @@ static irqreturn_t xlgmac_dma_isr(int irq, void *data)
static void xlgmac_tx_timer(struct timer_list *t)
{
- struct xlgmac_channel *channel = from_timer(channel, t, tx_timer);
+ struct xlgmac_channel *channel = timer_container_of(channel, t,
+ tx_timer);
struct xlgmac_pdata *pdata = channel->pdata;
struct napi_struct *napi;
@@ -405,7 +406,7 @@ static void xlgmac_stop_timers(struct xlgmac_pdata *pdata)
if (!channel->tx_ring)
break;
- del_timer_sync(&channel->tx_timer);
+ timer_delete_sync(&channel->tx_timer);
}
}
diff --git a/drivers/net/ethernet/tehuti/tn40.c b/drivers/net/ethernet/tehuti/tn40.c
index 259bdac24cf2..558b791a97ed 100644
--- a/drivers/net/ethernet/tehuti/tn40.c
+++ b/drivers/net/ethernet/tehuti/tn40.c
@@ -1778,7 +1778,7 @@ static int tn40_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ret = tn40_phy_register(priv);
if (ret) {
dev_err(&pdev->dev, "failed to set up PHY.\n");
- goto err_free_irq;
+ goto err_cleanup_swnodes;
}
ret = tn40_priv_init(priv);
@@ -1795,6 +1795,8 @@ static int tn40_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
err_unregister_phydev:
tn40_phy_unregister(priv);
+err_cleanup_swnodes:
+ tn40_swnodes_cleanup(priv);
err_free_irq:
pci_free_irq_vectors(pdev);
err_unset_drvdata:
@@ -1816,6 +1818,7 @@ static void tn40_remove(struct pci_dev *pdev)
unregister_netdev(ndev);
tn40_phy_unregister(priv);
+ tn40_swnodes_cleanup(priv);
pci_free_irq_vectors(priv->pdev);
pci_set_drvdata(pdev, NULL);
iounmap(priv->regs);
@@ -1832,6 +1835,10 @@ static const struct pci_device_id tn40_id_table[] = {
PCI_VENDOR_ID_ASUSTEK, 0x8709) },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_TEHUTI, 0x4022,
PCI_VENDOR_ID_EDIMAX, 0x8103) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_TEHUTI, PCI_DEVICE_ID_TEHUTI_TN9510,
+ PCI_VENDOR_ID_TEHUTI, 0x3015) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_TEHUTI, PCI_DEVICE_ID_TEHUTI_TN9510,
+ PCI_VENDOR_ID_EDIMAX, 0x8102) },
{ }
};
diff --git a/drivers/net/ethernet/tehuti/tn40.h b/drivers/net/ethernet/tehuti/tn40.h
index 490781fe5120..25da8686d469 100644
--- a/drivers/net/ethernet/tehuti/tn40.h
+++ b/drivers/net/ethernet/tehuti/tn40.h
@@ -4,10 +4,13 @@
#ifndef _TN40_H_
#define _TN40_H_
+#include <linux/property.h>
#include "tn40_regs.h"
#define TN40_DRV_NAME "tn40xx"
+#define PCI_DEVICE_ID_TEHUTI_TN9510 0x4025
+
#define TN40_MDIO_SPEED_1MHZ (1)
#define TN40_MDIO_SPEED_6MHZ (6)
@@ -102,10 +105,39 @@ struct tn40_txdb {
int size; /* Number of elements in the db */
};
+#define NODE_PROP(_NAME, _PROP) ( \
+ (const struct software_node) { \
+ .name = _NAME, \
+ .properties = _PROP, \
+ })
+
+#define NODE_PAR_PROP(_NAME, _PAR, _PROP) ( \
+ (const struct software_node) { \
+ .name = _NAME, \
+ .parent = _PAR, \
+ .properties = _PROP, \
+ })
+
+enum tn40_swnodes {
+ SWNODE_MDIO,
+ SWNODE_PHY,
+ SWNODE_MAX
+};
+
+struct tn40_nodes {
+ char phy_name[32];
+ char mdio_name[32];
+ struct property_entry phy_props[3];
+ struct software_node swnodes[SWNODE_MAX];
+ const struct software_node *group[SWNODE_MAX + 1];
+};
+
struct tn40_priv {
struct net_device *ndev;
struct pci_dev *pdev;
+ struct tn40_nodes nodes;
+
struct napi_struct napi;
/* RX FIFOs: 1 for data (full) descs, and 2 for free descs */
struct tn40_rxd_fifo rxd_fifo0;
@@ -225,6 +257,7 @@ static inline void tn40_write_reg(struct tn40_priv *priv, u32 reg, u32 val)
int tn40_set_link_speed(struct tn40_priv *priv, u32 speed);
+void tn40_swnodes_cleanup(struct tn40_priv *priv);
int tn40_mdiobus_init(struct tn40_priv *priv);
int tn40_phy_register(struct tn40_priv *priv);
diff --git a/drivers/net/ethernet/tehuti/tn40_mdio.c b/drivers/net/ethernet/tehuti/tn40_mdio.c
index af18615d64a8..fb1a4a2e4dbc 100644
--- a/drivers/net/ethernet/tehuti/tn40_mdio.c
+++ b/drivers/net/ethernet/tehuti/tn40_mdio.c
@@ -14,6 +14,8 @@
(FIELD_PREP(TN40_MDIO_PRTAD_MASK, (port))))
#define TN40_MDIO_CMD_READ BIT(15)
+#define AQR105_FIRMWARE "tehuti/aqr105-tn40xx.cld"
+
static void tn40_mdio_set_speed(struct tn40_priv *priv, u32 speed)
{
void __iomem *regs = priv->regs;
@@ -111,6 +113,56 @@ static int tn40_mdio_write_c45(struct mii_bus *mii_bus, int addr, int devnum,
return tn40_mdio_write(mii_bus->priv, addr, devnum, regnum, val);
}
+/* registers an mdio node and an aqr105 PHY at address 1
+ * tn40_mdio-%id {
+ * ethernet-phy@1 {
+ * compatible = "ethernet-phy-id03a1.b4a3";
+ * reg = <1>;
+ * firmware-name = AQR105_FIRMWARE;
+ * };
+ * };
+ */
+static int tn40_swnodes_register(struct tn40_priv *priv)
+{
+ struct tn40_nodes *nodes = &priv->nodes;
+ struct pci_dev *pdev = priv->pdev;
+ struct software_node *swnodes;
+ u32 id;
+
+ id = pci_dev_id(pdev);
+
+ snprintf(nodes->phy_name, sizeof(nodes->phy_name), "ethernet-phy@1");
+ snprintf(nodes->mdio_name, sizeof(nodes->mdio_name), "tn40_mdio-%x",
+ id);
+
+ swnodes = nodes->swnodes;
+
+ swnodes[SWNODE_MDIO] = NODE_PROP(nodes->mdio_name, NULL);
+
+ nodes->phy_props[0] = PROPERTY_ENTRY_STRING("compatible",
+ "ethernet-phy-id03a1.b4a3");
+ nodes->phy_props[1] = PROPERTY_ENTRY_U32("reg", 1);
+ nodes->phy_props[2] = PROPERTY_ENTRY_STRING("firmware-name",
+ AQR105_FIRMWARE);
+ swnodes[SWNODE_PHY] = NODE_PAR_PROP(nodes->phy_name,
+ &swnodes[SWNODE_MDIO],
+ nodes->phy_props);
+
+ nodes->group[SWNODE_PHY] = &swnodes[SWNODE_PHY];
+ nodes->group[SWNODE_MDIO] = &swnodes[SWNODE_MDIO];
+ return software_node_register_node_group(nodes->group);
+}
+
+void tn40_swnodes_cleanup(struct tn40_priv *priv)
+{
+ /* cleanup of swnodes is only needed for AQR105-based cards */
+ if (priv->pdev->device == PCI_DEVICE_ID_TEHUTI_TN9510) {
+ fwnode_handle_put(dev_fwnode(&priv->mdio->dev));
+ device_remove_software_node(&priv->mdio->dev);
+ software_node_unregister_node_group(priv->nodes.group);
+ }
+}
+
int tn40_mdiobus_init(struct tn40_priv *priv)
{
struct pci_dev *pdev = priv->pdev;
@@ -129,14 +181,40 @@ int tn40_mdiobus_init(struct tn40_priv *priv)
bus->read_c45 = tn40_mdio_read_c45;
bus->write_c45 = tn40_mdio_write_c45;
+ priv->mdio = bus;
+ /* provide swnodes for AQR105-based cards only */
+ if (pdev->device == PCI_DEVICE_ID_TEHUTI_TN9510) {
+ ret = tn40_swnodes_register(priv);
+ if (ret) {
+ pr_err("swnodes failed\n");
+ return ret;
+ }
+
+ ret = device_add_software_node(&bus->dev,
+ priv->nodes.group[SWNODE_MDIO]);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "device_add_software_node failed: %d\n", ret);
+ goto err_swnodes_unregister;
+ }
+ }
+
+ tn40_mdio_set_speed(priv, TN40_MDIO_SPEED_6MHZ);
ret = devm_mdiobus_register(&pdev->dev, bus);
if (ret) {
dev_err(&pdev->dev, "failed to register mdiobus %d %u %u\n",
ret, bus->state, MDIOBUS_UNREGISTERED);
- return ret;
+ goto err_swnodes_cleanup;
}
- tn40_mdio_set_speed(priv, TN40_MDIO_SPEED_6MHZ);
- priv->mdio = bus;
return 0;
+
+err_swnodes_unregister:
+ software_node_unregister_node_group(priv->nodes.group);
+ return ret;
+err_swnodes_cleanup:
+ tn40_swnodes_cleanup(priv);
+ return ret;
}
+
+MODULE_FIRMWARE(AQR105_FIRMWARE);
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index 0d5a862cd78a..a07c910c497a 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -99,6 +99,7 @@ config TI_K3_AM65_CPSW_NUSS
select NET_DEVLINK
select TI_DAVINCI_MDIO
select PHYLINK
+ select PAGE_POOL
select TI_K3_CPPI_DESC_POOL
imply PHY_TI_GMII_SEL
depends on TI_K3_AM65_CPTS || !TI_K3_AM65_CPTS
@@ -204,6 +205,7 @@ config TI_ICSSG_PRUETH_SR1
select PHYLIB
select TI_ICSS_IEP
select TI_K3_CPPI_DESC_POOL
+ select PAGE_POOL
depends on PRU_REMOTEPROC
depends on NET_SWITCHDEV
depends on ARCH_K3 && OF && TI_K3_UDMA_GLUE_LAYER
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index 14e1df721f2e..f20d1ff192ef 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -32,6 +32,7 @@
#include <linux/dma/ti-cppi5.h>
#include <linux/dma/k3-udma-glue.h>
#include <net/page_pool/helpers.h>
+#include <net/dsa.h>
#include <net/switchdev.h>
#include "cpsw_ale.h"
@@ -163,6 +164,7 @@
#define AM65_CPSW_CPPI_TX_PKT_TYPE 0x7
/* XDP */
+#define AM65_CPSW_XDP_TX BIT(2)
#define AM65_CPSW_XDP_CONSUMED BIT(1)
#define AM65_CPSW_XDP_REDIRECT BIT(0)
#define AM65_CPSW_XDP_PASS 0
@@ -425,7 +427,7 @@ static void am65_cpsw_nuss_ndo_host_tx_timeout(struct net_device *ndev,
if (netif_tx_queue_stopped(netif_txq)) {
/* try recover if stopped by us */
- txq_trans_update(netif_txq);
+ txq_trans_update(ndev, netif_txq);
netif_tx_wake_queue(netif_txq);
}
}
@@ -497,35 +499,62 @@ static void am65_cpsw_init_host_port_switch(struct am65_cpsw_common *common);
static void am65_cpsw_init_host_port_emac(struct am65_cpsw_common *common);
static void am65_cpsw_init_port_switch_ale(struct am65_cpsw_port *port);
static void am65_cpsw_init_port_emac_ale(struct am65_cpsw_port *port);
+static inline void am65_cpsw_put_page(struct am65_cpsw_rx_flow *flow,
+ struct page *page,
+ bool allow_direct);
+static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma);
+static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma);
-static void am65_cpsw_destroy_xdp_rxqs(struct am65_cpsw_common *common)
+static void am65_cpsw_destroy_rxq(struct am65_cpsw_common *common, int id)
{
struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
struct am65_cpsw_rx_flow *flow;
struct xdp_rxq_info *rxq;
- int id, port;
+ int port;
- for (id = 0; id < common->rx_ch_num_flows; id++) {
- flow = &rx_chn->flows[id];
+ flow = &rx_chn->flows[id];
+ napi_disable(&flow->napi_rx);
+ hrtimer_cancel(&flow->rx_hrtimer);
+ k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, id, rx_chn,
+ am65_cpsw_nuss_rx_cleanup);
- for (port = 0; port < common->port_num; port++) {
- if (!common->ports[port].ndev)
- continue;
+ for (port = 0; port < common->port_num; port++) {
+ if (!common->ports[port].ndev)
+ continue;
- rxq = &common->ports[port].xdp_rxq[id];
+ rxq = &common->ports[port].xdp_rxq[id];
- if (xdp_rxq_info_is_reg(rxq))
- xdp_rxq_info_unreg(rxq);
- }
+ if (xdp_rxq_info_is_reg(rxq))
+ xdp_rxq_info_unreg(rxq);
+ }
- if (flow->page_pool) {
- page_pool_destroy(flow->page_pool);
- flow->page_pool = NULL;
- }
+ if (flow->page_pool) {
+ page_pool_destroy(flow->page_pool);
+ flow->page_pool = NULL;
+ }
+}
+
+static void am65_cpsw_destroy_rxqs(struct am65_cpsw_common *common)
+{
+ struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
+ int id;
+
+ reinit_completion(&common->tdown_complete);
+ k3_udma_glue_tdown_rx_chn(rx_chn->rx_chn, true);
+
+ if (common->pdata.quirks & AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ) {
+ id = wait_for_completion_timeout(&common->tdown_complete, msecs_to_jiffies(1000));
+ if (!id)
+ dev_err(common->dev, "rx teardown timeout\n");
}
+
+ for (id = common->rx_ch_num_flows - 1; id >= 0; id--)
+ am65_cpsw_destroy_rxq(common, id);
+
+ k3_udma_glue_disable_rx_chn(common->rx_chns.rx_chn);
}
-static int am65_cpsw_create_xdp_rxqs(struct am65_cpsw_common *common)
+static int am65_cpsw_create_rxq(struct am65_cpsw_common *common, int id)
{
struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
struct page_pool_params pp_params = {
@@ -540,45 +569,162 @@ static int am65_cpsw_create_xdp_rxqs(struct am65_cpsw_common *common)
struct am65_cpsw_rx_flow *flow;
struct xdp_rxq_info *rxq;
struct page_pool *pool;
- int id, port, ret;
+ struct page *page;
+ int port, ret, i;
+
+ flow = &rx_chn->flows[id];
+ pp_params.napi = &flow->napi_rx;
+ pool = page_pool_create(&pp_params);
+ if (IS_ERR(pool)) {
+ ret = PTR_ERR(pool);
+ return ret;
+ }
+
+ flow->page_pool = pool;
+
+ /* using same page pool is allowed as no running rx handlers
+ * simultaneously for both ndevs
+ */
+ for (port = 0; port < common->port_num; port++) {
+ if (!common->ports[port].ndev)
+ /* FIXME should we BUG here? */
+ continue;
+
+ rxq = &common->ports[port].xdp_rxq[id];
+ ret = xdp_rxq_info_reg(rxq, common->ports[port].ndev,
+ id, flow->napi_rx.napi_id);
+ if (ret)
+ goto err;
+
+ ret = xdp_rxq_info_reg_mem_model(rxq,
+ MEM_TYPE_PAGE_POOL,
+ pool);
+ if (ret)
+ goto err;
+ }
+
+ for (i = 0; i < AM65_CPSW_MAX_RX_DESC; i++) {
+ page = page_pool_dev_alloc_pages(flow->page_pool);
+ if (!page) {
+ dev_err(common->dev, "cannot allocate page in flow %d\n",
+ id);
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = am65_cpsw_nuss_rx_push(common, page, id);
+ if (ret < 0) {
+ dev_err(common->dev,
+ "cannot submit page to rx channel flow %d, error %d\n",
+ id, ret);
+ am65_cpsw_put_page(flow, page, false);
+ goto err;
+ }
+ }
+
+ napi_enable(&flow->napi_rx);
+ return 0;
+
+err:
+ am65_cpsw_destroy_rxq(common, id);
+ return ret;
+}
+
+static int am65_cpsw_create_rxqs(struct am65_cpsw_common *common)
+{
+ int id, ret;
for (id = 0; id < common->rx_ch_num_flows; id++) {
- flow = &rx_chn->flows[id];
- pp_params.napi = &flow->napi_rx;
- pool = page_pool_create(&pp_params);
- if (IS_ERR(pool)) {
- ret = PTR_ERR(pool);
+ ret = am65_cpsw_create_rxq(common, id);
+ if (ret) {
+ dev_err(common->dev, "couldn't create rxq %d: %d\n",
+ id, ret);
goto err;
}
+ }
- flow->page_pool = pool;
+ ret = k3_udma_glue_enable_rx_chn(common->rx_chns.rx_chn);
+ if (ret) {
+ dev_err(common->dev, "couldn't enable rx chn: %d\n", ret);
+ goto err;
+ }
- /* using same page pool is allowed as no running rx handlers
- * simultaneously for both ndevs
- */
- for (port = 0; port < common->port_num; port++) {
- if (!common->ports[port].ndev)
- continue;
+ return 0;
+
+err:
+ for (--id; id >= 0; id--)
+ am65_cpsw_destroy_rxq(common, id);
+
+ return ret;
+}
+
+static void am65_cpsw_destroy_txq(struct am65_cpsw_common *common, int id)
+{
+ struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[id];
+
+ napi_disable(&tx_chn->napi_tx);
+ hrtimer_cancel(&tx_chn->tx_hrtimer);
+ k3_udma_glue_reset_tx_chn(tx_chn->tx_chn, tx_chn,
+ am65_cpsw_nuss_tx_cleanup);
+ k3_udma_glue_disable_tx_chn(tx_chn->tx_chn);
+}
+
+static void am65_cpsw_destroy_txqs(struct am65_cpsw_common *common)
+{
+ struct am65_cpsw_tx_chn *tx_chn = common->tx_chns;
+ int id;
+
+ /* shutdown tx channels */
+ atomic_set(&common->tdown_cnt, common->tx_ch_num);
+ /* ensure new tdown_cnt value is visible */
+ smp_mb__after_atomic();
+ reinit_completion(&common->tdown_complete);
+
+ for (id = 0; id < common->tx_ch_num; id++)
+ k3_udma_glue_tdown_tx_chn(tx_chn[id].tx_chn, false);
+
+ id = wait_for_completion_timeout(&common->tdown_complete,
+ msecs_to_jiffies(1000));
+ if (!id)
+ dev_err(common->dev, "tx teardown timeout\n");
+
+ for (id = common->tx_ch_num - 1; id >= 0; id--)
+ am65_cpsw_destroy_txq(common, id);
+}
+
+static int am65_cpsw_create_txq(struct am65_cpsw_common *common, int id)
+{
+ struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[id];
+ int ret;
+
+ ret = k3_udma_glue_enable_tx_chn(tx_chn->tx_chn);
+ if (ret)
+ return ret;
- rxq = &common->ports[port].xdp_rxq[id];
+ napi_enable(&tx_chn->napi_tx);
- ret = xdp_rxq_info_reg(rxq, common->ports[port].ndev,
- id, flow->napi_rx.napi_id);
- if (ret)
- goto err;
+ return 0;
+}
- ret = xdp_rxq_info_reg_mem_model(rxq,
- MEM_TYPE_PAGE_POOL,
- pool);
- if (ret)
- goto err;
+static int am65_cpsw_create_txqs(struct am65_cpsw_common *common)
+{
+ int id, ret;
+
+ for (id = 0; id < common->tx_ch_num; id++) {
+ ret = am65_cpsw_create_txq(common, id);
+ if (ret) {
+ dev_err(common->dev, "couldn't create txq %d: %d\n",
+ id, ret);
+ goto err;
}
}
return 0;
err:
- am65_cpsw_destroy_xdp_rxqs(common);
+ for (--id; id >= 0; id--)
+ am65_cpsw_destroy_txq(common, id);
+
return ret;
}
@@ -642,7 +788,6 @@ static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma)
k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
-
am65_cpsw_put_page(&rx_chn->flows[flow_id], page, false);
}
@@ -684,21 +829,30 @@ static void am65_cpsw_nuss_xmit_free(struct am65_cpsw_tx_chn *tx_chn,
static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma)
{
struct am65_cpsw_tx_chn *tx_chn = data;
+ enum am65_cpsw_tx_buf_type buf_type;
+ struct am65_cpsw_tx_swdata *swdata;
struct cppi5_host_desc_t *desc_tx;
+ struct xdp_frame *xdpf;
struct sk_buff *skb;
- void **swdata;
desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
swdata = cppi5_hdesc_get_swdata(desc_tx);
- skb = *(swdata);
- am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
+ buf_type = am65_cpsw_nuss_buf_type(tx_chn, desc_dma);
+ if (buf_type == AM65_CPSW_TX_BUF_TYPE_SKB) {
+ skb = swdata->skb;
+ dev_kfree_skb_any(skb);
+ } else {
+ xdpf = swdata->xdpf;
+ xdp_return_frame(xdpf);
+ }
- dev_kfree_skb_any(skb);
+ am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
}
static struct sk_buff *am65_cpsw_build_skb(void *page_addr,
struct net_device *ndev,
- unsigned int len)
+ unsigned int len,
+ unsigned int headroom)
{
struct sk_buff *skb;
@@ -708,7 +862,7 @@ static struct sk_buff *am65_cpsw_build_skb(void *page_addr,
if (unlikely(!skb))
return NULL;
- skb_reserve(skb, AM65_CPSW_HEADROOM);
+ skb_reserve(skb, headroom);
skb->dev = ndev;
return skb;
@@ -717,12 +871,8 @@ static struct sk_buff *am65_cpsw_build_skb(void *page_addr,
static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
{
struct am65_cpsw_host *host_p = am65_common_get_host(common);
- struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
- struct am65_cpsw_tx_chn *tx_chn = common->tx_chns;
- int port_idx, i, ret, tx, flow_idx;
- struct am65_cpsw_rx_flow *flow;
u32 val, port_mask;
- struct page *page;
+ int port_idx, ret;
if (common->usage_count)
return 0;
@@ -762,7 +912,7 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
ALE_DEFAULT_THREAD_ID, 0);
cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
ALE_DEFAULT_THREAD_ENABLE, 1);
- /* switch to vlan unaware mode */
+ /* switch to vlan aware mode */
cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_VLAN_AWARE, 1);
cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
@@ -782,151 +932,38 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
am65_cpsw_qos_tx_p0_rate_init(common);
- ret = am65_cpsw_create_xdp_rxqs(common);
- if (ret) {
- dev_err(common->dev, "Failed to create XDP rx queues\n");
+ ret = am65_cpsw_create_rxqs(common);
+ if (ret)
return ret;
- }
-
- for (flow_idx = 0; flow_idx < common->rx_ch_num_flows; flow_idx++) {
- flow = &rx_chn->flows[flow_idx];
- for (i = 0; i < AM65_CPSW_MAX_RX_DESC; i++) {
- page = page_pool_dev_alloc_pages(flow->page_pool);
- if (!page) {
- dev_err(common->dev, "cannot allocate page in flow %d\n",
- flow_idx);
- ret = -ENOMEM;
- goto fail_rx;
- }
-
- ret = am65_cpsw_nuss_rx_push(common, page, flow_idx);
- if (ret < 0) {
- dev_err(common->dev,
- "cannot submit page to rx channel flow %d, error %d\n",
- flow_idx, ret);
- am65_cpsw_put_page(flow, page, false);
- goto fail_rx;
- }
- }
- }
- ret = k3_udma_glue_enable_rx_chn(rx_chn->rx_chn);
- if (ret) {
- dev_err(common->dev, "couldn't enable rx chn: %d\n", ret);
- goto fail_rx;
- }
-
- for (i = 0; i < common->rx_ch_num_flows ; i++) {
- napi_enable(&rx_chn->flows[i].napi_rx);
- if (rx_chn->flows[i].irq_disabled) {
- rx_chn->flows[i].irq_disabled = false;
- enable_irq(rx_chn->flows[i].irq);
- }
- }
-
- for (tx = 0; tx < common->tx_ch_num; tx++) {
- ret = k3_udma_glue_enable_tx_chn(tx_chn[tx].tx_chn);
- if (ret) {
- dev_err(common->dev, "couldn't enable tx chn %d: %d\n",
- tx, ret);
- tx--;
- goto fail_tx;
- }
- napi_enable(&tx_chn[tx].napi_tx);
- }
+ ret = am65_cpsw_create_txqs(common);
+ if (ret)
+ goto cleanup_rx;
dev_dbg(common->dev, "cpsw_nuss started\n");
return 0;
-fail_tx:
- while (tx >= 0) {
- napi_disable(&tx_chn[tx].napi_tx);
- k3_udma_glue_disable_tx_chn(tx_chn[tx].tx_chn);
- tx--;
- }
-
- for (flow_idx = 0; i < common->rx_ch_num_flows; flow_idx++) {
- flow = &rx_chn->flows[flow_idx];
- if (!flow->irq_disabled) {
- disable_irq(flow->irq);
- flow->irq_disabled = true;
- }
- napi_disable(&flow->napi_rx);
- }
-
- k3_udma_glue_disable_rx_chn(rx_chn->rx_chn);
-
-fail_rx:
- for (i = 0; i < common->rx_ch_num_flows; i++)
- k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, rx_chn,
- am65_cpsw_nuss_rx_cleanup, !!i);
-
- am65_cpsw_destroy_xdp_rxqs(common);
+cleanup_rx:
+ am65_cpsw_destroy_rxqs(common);
return ret;
}
static int am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common)
{
- struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
- struct am65_cpsw_tx_chn *tx_chn = common->tx_chns;
- int i;
-
if (common->usage_count != 1)
return 0;
cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
- /* shutdown tx channels */
- atomic_set(&common->tdown_cnt, common->tx_ch_num);
- /* ensure new tdown_cnt value is visible */
- smp_mb__after_atomic();
- reinit_completion(&common->tdown_complete);
-
- for (i = 0; i < common->tx_ch_num; i++)
- k3_udma_glue_tdown_tx_chn(tx_chn[i].tx_chn, false);
-
- i = wait_for_completion_timeout(&common->tdown_complete,
- msecs_to_jiffies(1000));
- if (!i)
- dev_err(common->dev, "tx timeout\n");
- for (i = 0; i < common->tx_ch_num; i++) {
- napi_disable(&tx_chn[i].napi_tx);
- hrtimer_cancel(&tx_chn[i].tx_hrtimer);
- }
-
- for (i = 0; i < common->tx_ch_num; i++) {
- k3_udma_glue_reset_tx_chn(tx_chn[i].tx_chn, &tx_chn[i],
- am65_cpsw_nuss_tx_cleanup);
- k3_udma_glue_disable_tx_chn(tx_chn[i].tx_chn);
- }
-
- reinit_completion(&common->tdown_complete);
- k3_udma_glue_tdown_rx_chn(rx_chn->rx_chn, true);
-
- if (common->pdata.quirks & AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ) {
- i = wait_for_completion_timeout(&common->tdown_complete, msecs_to_jiffies(1000));
- if (!i)
- dev_err(common->dev, "rx teardown timeout\n");
- }
-
- for (i = common->rx_ch_num_flows - 1; i >= 0; i--) {
- napi_disable(&rx_chn->flows[i].napi_rx);
- hrtimer_cancel(&rx_chn->flows[i].rx_hrtimer);
- k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, rx_chn,
- am65_cpsw_nuss_rx_cleanup, !!i);
- }
-
- k3_udma_glue_disable_rx_chn(rx_chn->rx_chn);
-
+ am65_cpsw_destroy_txqs(common);
+ am65_cpsw_destroy_rxqs(common);
cpsw_ale_stop(common->ale);
writel(0, common->cpsw_base + AM65_CPSW_REG_CTL);
writel(0, common->cpsw_base + AM65_CPSW_REG_STAT_PORT_EN);
- am65_cpsw_destroy_xdp_rxqs(common);
-
dev_dbg(common->dev, "cpsw_nuss stopped\n");
return 0;
}
@@ -1014,6 +1051,15 @@ static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev)
common->usage_count++;
+ /* VLAN aware CPSW mode is incompatible with some DSA tagging schemes.
+ * Therefore disable VLAN_AWARE mode if any of the ports is a DSA Port.
+ */
+ if (netdev_uses_dsa(ndev)) {
+ reg = readl(common->cpsw_base + AM65_CPSW_REG_CTL);
+ reg &= ~AM65_CPSW_CTL_VLAN_AWARE;
+ writel(reg, common->cpsw_base + AM65_CPSW_REG_CTL);
+ }
+
am65_cpsw_port_set_sl_mac(port, ndev->dev_addr);
am65_cpsw_port_enable_dscp_map(port);
@@ -1053,10 +1099,10 @@ static int am65_cpsw_xdp_tx_frame(struct net_device *ndev,
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
struct cppi5_host_desc_t *host_desc;
+ struct am65_cpsw_tx_swdata *swdata;
struct netdev_queue *netif_txq;
dma_addr_t dma_desc, dma_buf;
u32 pkt_len = xdpf->len;
- void **swdata;
int ret;
host_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
@@ -1086,7 +1132,8 @@ static int am65_cpsw_xdp_tx_frame(struct net_device *ndev,
cppi5_hdesc_attach_buf(host_desc, dma_buf, pkt_len, dma_buf, pkt_len);
swdata = cppi5_hdesc_get_swdata(host_desc);
- *(swdata) = xdpf;
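+ /* record the owning ndev with the frame so TX completion can account it to the right port without decoding descriptor tags */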
+ swdata->ndev = ndev;
+ swdata->xdpf = xdpf;
/* Report BQL before sending the packet */
netif_txq = netdev_get_tx_queue(ndev, tx_chn->id);
@@ -1122,20 +1169,21 @@ pool_free:
static int am65_cpsw_run_xdp(struct am65_cpsw_rx_flow *flow,
struct am65_cpsw_port *port,
- struct xdp_buff *xdp,
- int cpu, int *len)
+ struct xdp_buff *xdp, int *len)
{
struct am65_cpsw_common *common = flow->common;
struct net_device *ndev = port->ndev;
int ret = AM65_CPSW_XDP_CONSUMED;
struct am65_cpsw_tx_chn *tx_chn;
struct netdev_queue *netif_txq;
+ int cpu = smp_processor_id();
struct xdp_frame *xdpf;
struct bpf_prog *prog;
- struct page *page;
+ int pkt_len;
u32 act;
int err;
+ pkt_len = *len;
prog = READ_ONCE(port->xdp_prog);
if (!prog)
return AM65_CPSW_XDP_PASS;
@@ -1146,15 +1194,16 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_rx_flow *flow,
switch (act) {
case XDP_PASS:
- ret = AM65_CPSW_XDP_PASS;
- goto out;
+ return AM65_CPSW_XDP_PASS;
case XDP_TX:
tx_chn = &common->tx_chns[cpu % AM65_CPSW_MAX_QUEUES];
netif_txq = netdev_get_tx_queue(ndev, tx_chn->id);
xdpf = xdp_convert_buff_to_frame(xdp);
- if (unlikely(!xdpf))
+ if (unlikely(!xdpf)) {
+ ndev->stats.tx_dropped++;
goto drop;
+ }
__netif_tx_lock(netif_txq, cpu);
err = am65_cpsw_xdp_tx_frame(ndev, tx_chn, xdpf,
@@ -1163,16 +1212,14 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_rx_flow *flow,
if (err)
goto drop;
- dev_sw_netstats_tx_add(ndev, 1, *len);
- ret = AM65_CPSW_XDP_CONSUMED;
- goto out;
+ dev_sw_netstats_rx_add(ndev, pkt_len);
+ return AM65_CPSW_XDP_TX;
case XDP_REDIRECT:
if (unlikely(xdp_do_redirect(ndev, xdp, prog)))
goto drop;
- dev_sw_netstats_rx_add(ndev, *len);
- ret = AM65_CPSW_XDP_REDIRECT;
- goto out;
+ dev_sw_netstats_rx_add(ndev, pkt_len);
+ return AM65_CPSW_XDP_REDIRECT;
default:
bpf_warn_invalid_xdp_action(ndev, prog, act);
fallthrough;
@@ -1184,10 +1231,6 @@ drop:
ndev->stats.rx_dropped++;
}
- page = virt_to_head_page(xdp->data);
- am65_cpsw_put_page(flow, page, true);
-
-out:
return ret;
}
@@ -1225,7 +1268,7 @@ static void am65_cpsw_nuss_rx_csum(struct sk_buff *skb, u32 csum_info)
}
static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow,
- int cpu, int *xdp_state)
+ int *xdp_state)
{
struct am65_cpsw_rx_chn *rx_chn = &flow->common->rx_chns;
u32 buf_dma_len, pkt_len, port_id = 0, csum_info;
@@ -1279,28 +1322,32 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow,
dev_dbg(dev, "%s rx csum_info:%#x\n", __func__, csum_info);
dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
-
k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
- skb = am65_cpsw_build_skb(page_addr, ndev,
- AM65_CPSW_MAX_PACKET_SIZE);
- if (unlikely(!skb)) {
- new_page = page;
- goto requeue;
- }
-
if (port->xdp_prog) {
xdp_init_buff(&xdp, PAGE_SIZE, &port->xdp_rxq[flow->id]);
xdp_prepare_buff(&xdp, page_addr, AM65_CPSW_HEADROOM,
pkt_len, false);
- *xdp_state = am65_cpsw_run_xdp(flow, port, &xdp,
- cpu, &pkt_len);
+ *xdp_state = am65_cpsw_run_xdp(flow, port, &xdp, &pkt_len);
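+ /* CONSUMED means the packet was dropped (XDP_DROP/ABORTED or a failed XDP_TX); the buffer page is still ours, so recycle it before refilling */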
+ if (*xdp_state == AM65_CPSW_XDP_CONSUMED) {
+ page = virt_to_head_page(xdp.data);
+ am65_cpsw_put_page(flow, page, true);
+ goto allocate;
+ }
+
if (*xdp_state != AM65_CPSW_XDP_PASS)
goto allocate;
- /* Compute additional headroom to be reserved */
- headroom = (xdp.data - xdp.data_hard_start) - skb_headroom(skb);
- skb_reserve(skb, headroom);
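+ /* the XDP program may have moved xdp.data, so recompute the headroom used for the skb built below */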
+ headroom = xdp.data - xdp.data_hard_start;
+ } else {
+ headroom = AM65_CPSW_HEADROOM;
+ }
+
+ skb = am65_cpsw_build_skb(page_addr, ndev,
+ AM65_CPSW_MAX_PACKET_SIZE, headroom);
+ if (unlikely(!skb)) {
+ new_page = page;
+ goto requeue;
}
ndev_priv = netdev_priv(ndev);
@@ -1353,7 +1400,6 @@ static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget)
{
struct am65_cpsw_rx_flow *flow = am65_cpsw_napi_to_rx_flow(napi_rx);
struct am65_cpsw_common *common = flow->common;
- int cpu = smp_processor_id();
int xdp_state_or = 0;
int cur_budget, ret;
int xdp_state;
@@ -1362,7 +1408,7 @@ static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget)
/* process only this flow */
cur_budget = budget;
while (cur_budget--) {
- ret = am65_cpsw_nuss_rx_packets(flow, cpu, &xdp_state);
+ ret = am65_cpsw_nuss_rx_packets(flow, &xdp_state);
xdp_state_or |= xdp_state;
if (ret)
break;
@@ -1390,52 +1436,6 @@ static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget)
return num_rx;
}
-static struct sk_buff *
-am65_cpsw_nuss_tx_compl_packet_skb(struct am65_cpsw_tx_chn *tx_chn,
- dma_addr_t desc_dma)
-{
- struct cppi5_host_desc_t *desc_tx;
- struct sk_buff *skb;
- void **swdata;
-
- desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
- desc_dma);
- swdata = cppi5_hdesc_get_swdata(desc_tx);
- skb = *(swdata);
- am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
-
- am65_cpts_tx_timestamp(tx_chn->common->cpts, skb);
-
- dev_sw_netstats_tx_add(skb->dev, 1, skb->len);
-
- return skb;
-}
-
-static struct xdp_frame *
-am65_cpsw_nuss_tx_compl_packet_xdp(struct am65_cpsw_common *common,
- struct am65_cpsw_tx_chn *tx_chn,
- dma_addr_t desc_dma,
- struct net_device **ndev)
-{
- struct cppi5_host_desc_t *desc_tx;
- struct am65_cpsw_port *port;
- struct xdp_frame *xdpf;
- u32 port_id = 0;
- void **swdata;
-
- desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
- cppi5_desc_get_tags_ids(&desc_tx->hdr, NULL, &port_id);
- swdata = cppi5_hdesc_get_swdata(desc_tx);
- xdpf = *(swdata);
- am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
-
- port = am65_common_get_port(common, port_id);
- dev_sw_netstats_tx_add(port->ndev, 1, xdpf->len);
- *ndev = port->ndev;
-
- return xdpf;
-}
-
static void am65_cpsw_nuss_tx_wake(struct am65_cpsw_tx_chn *tx_chn, struct net_device *ndev,
struct netdev_queue *netif_txq)
{
@@ -1456,13 +1456,17 @@ static void am65_cpsw_nuss_tx_wake(struct am65_cpsw_tx_chn *tx_chn, struct net_d
static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
int chn, unsigned int budget, bool *tdown)
{
+ bool single_port = AM65_CPSW_IS_CPSW2G(common);
enum am65_cpsw_tx_buf_type buf_type;
+ struct am65_cpsw_tx_swdata *swdata;
+ struct cppi5_host_desc_t *desc_tx;
struct device *dev = common->dev;
struct am65_cpsw_tx_chn *tx_chn;
struct netdev_queue *netif_txq;
unsigned int total_bytes = 0;
struct net_device *ndev;
struct xdp_frame *xdpf;
+ unsigned int pkt_len;
struct sk_buff *skb;
dma_addr_t desc_dma;
int res, num_tx = 0;
@@ -1470,9 +1474,12 @@ static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
tx_chn = &common->tx_chns[chn];
while (true) {
- spin_lock(&tx_chn->lock);
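+ /* single_port (CPSW2G): pop the channel without the per-channel lock and report BQL once per batch below */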
+ if (!single_port)
+ spin_lock(&tx_chn->lock);
res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma);
- spin_unlock(&tx_chn->lock);
+ if (!single_port)
+ spin_unlock(&tx_chn->lock);
+
if (res == -ENODATA)
break;
@@ -1483,27 +1490,43 @@ static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
break;
}
+ desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
+ desc_dma);
+ swdata = cppi5_hdesc_get_swdata(desc_tx);
+ ndev = swdata->ndev;
buf_type = am65_cpsw_nuss_buf_type(tx_chn, desc_dma);
if (buf_type == AM65_CPSW_TX_BUF_TYPE_SKB) {
- skb = am65_cpsw_nuss_tx_compl_packet_skb(tx_chn, desc_dma);
- ndev = skb->dev;
- total_bytes = skb->len;
+ skb = swdata->skb;
+ am65_cpts_tx_timestamp(tx_chn->common->cpts, skb);
+ pkt_len = skb->len;
napi_consume_skb(skb, budget);
} else {
- xdpf = am65_cpsw_nuss_tx_compl_packet_xdp(common, tx_chn,
- desc_dma, &ndev);
- total_bytes = xdpf->len;
+ xdpf = swdata->xdpf;
+ pkt_len = xdpf->len;
if (buf_type == AM65_CPSW_TX_BUF_TYPE_XDP_TX)
xdp_return_frame_rx_napi(xdpf);
else
xdp_return_frame(xdpf);
}
+
+ total_bytes += pkt_len;
num_tx++;
+ am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
+ dev_sw_netstats_tx_add(ndev, 1, pkt_len);
+ if (!single_port) {
+ /* as packets from multiple ports can be interleaved
+ * on the same channel, we have to figure out the
+ * port/queue for every packet and report it/wake the queue.
+ */
+ netif_txq = netdev_get_tx_queue(ndev, chn);
+ netdev_tx_completed_queue(netif_txq, 1, pkt_len);
+ am65_cpsw_nuss_tx_wake(tx_chn, ndev, netif_txq);
+ }
+ }
+ if (single_port) {
netif_txq = netdev_get_tx_queue(ndev, chn);
-
netdev_tx_completed_queue(netif_txq, num_tx, total_bytes);
-
am65_cpsw_nuss_tx_wake(tx_chn, ndev, netif_txq);
}
@@ -1512,66 +1535,6 @@ static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
return num_tx;
}
-static int am65_cpsw_nuss_tx_compl_packets_2g(struct am65_cpsw_common *common,
- int chn, unsigned int budget, bool *tdown)
-{
- enum am65_cpsw_tx_buf_type buf_type;
- struct device *dev = common->dev;
- struct am65_cpsw_tx_chn *tx_chn;
- struct netdev_queue *netif_txq;
- unsigned int total_bytes = 0;
- struct net_device *ndev;
- struct xdp_frame *xdpf;
- struct sk_buff *skb;
- dma_addr_t desc_dma;
- int res, num_tx = 0;
-
- tx_chn = &common->tx_chns[chn];
-
- while (true) {
- res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma);
- if (res == -ENODATA)
- break;
-
- if (cppi5_desc_is_tdcm(desc_dma)) {
- if (atomic_dec_and_test(&common->tdown_cnt))
- complete(&common->tdown_complete);
- *tdown = true;
- break;
- }
-
- buf_type = am65_cpsw_nuss_buf_type(tx_chn, desc_dma);
- if (buf_type == AM65_CPSW_TX_BUF_TYPE_SKB) {
- skb = am65_cpsw_nuss_tx_compl_packet_skb(tx_chn, desc_dma);
- ndev = skb->dev;
- total_bytes += skb->len;
- napi_consume_skb(skb, budget);
- } else {
- xdpf = am65_cpsw_nuss_tx_compl_packet_xdp(common, tx_chn,
- desc_dma, &ndev);
- total_bytes += xdpf->len;
- if (buf_type == AM65_CPSW_TX_BUF_TYPE_XDP_TX)
- xdp_return_frame_rx_napi(xdpf);
- else
- xdp_return_frame(xdpf);
- }
- num_tx++;
- }
-
- if (!num_tx)
- return 0;
-
- netif_txq = netdev_get_tx_queue(ndev, chn);
-
- netdev_tx_completed_queue(netif_txq, num_tx, total_bytes);
-
- am65_cpsw_nuss_tx_wake(tx_chn, ndev, netif_txq);
-
- dev_dbg(dev, "%s:%u pkt:%d\n", __func__, chn, num_tx);
-
- return num_tx;
-}
-
static enum hrtimer_restart am65_cpsw_nuss_tx_timer_callback(struct hrtimer *timer)
{
struct am65_cpsw_tx_chn *tx_chns =
@@ -1587,13 +1550,8 @@ static int am65_cpsw_nuss_tx_poll(struct napi_struct *napi_tx, int budget)
bool tdown = false;
int num_tx;
- if (AM65_CPSW_IS_CPSW2G(tx_chn->common))
- num_tx = am65_cpsw_nuss_tx_compl_packets_2g(tx_chn->common, tx_chn->id,
- budget, &tdown);
- else
- num_tx = am65_cpsw_nuss_tx_compl_packets(tx_chn->common,
- tx_chn->id, budget, &tdown);
-
+ num_tx = am65_cpsw_nuss_tx_compl_packets(tx_chn->common,
+ tx_chn->id, budget, &tdown);
if (num_tx >= budget)
return budget;
@@ -1637,12 +1595,12 @@ static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb,
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
struct cppi5_host_desc_t *first_desc, *next_desc, *cur_desc;
struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct am65_cpsw_tx_swdata *swdata;
struct device *dev = common->dev;
struct am65_cpsw_tx_chn *tx_chn;
struct netdev_queue *netif_txq;
dma_addr_t desc_dma, buf_dma;
int ret, q_idx, i;
- void **swdata;
u32 *psdata;
u32 pkt_len;
@@ -1688,7 +1646,8 @@ static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb,
k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len);
swdata = cppi5_hdesc_get_swdata(first_desc);
- *(swdata) = skb;
+ swdata->ndev = ndev;
+ swdata->skb = skb;
psdata = cppi5_hdesc_get_psdata(first_desc);
/* HW csum offload if enabled */
@@ -2242,13 +2201,11 @@ static void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common)
struct device *dev = common->dev;
int i;
- devm_remove_action(dev, am65_cpsw_nuss_free_tx_chns, common);
-
common->tx_ch_rate_msk = 0;
for (i = 0; i < common->tx_ch_num; i++) {
struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
- if (tx_chn->irq)
+ if (tx_chn->irq > 0)
devm_free_irq(dev, tx_chn->irq, tx_chn);
netif_napi_del(&tx_chn->napi_tx);
@@ -2260,15 +2217,17 @@ static void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common)
static int am65_cpsw_nuss_ndev_add_tx_napi(struct am65_cpsw_common *common)
{
struct device *dev = common->dev;
+ struct am65_cpsw_tx_chn *tx_chn;
int i, ret = 0;
for (i = 0; i < common->tx_ch_num; i++) {
- struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
+ tx_chn = &common->tx_chns[i];
+
+ hrtimer_setup(&tx_chn->tx_hrtimer, &am65_cpsw_nuss_tx_timer_callback,
+ CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
netif_napi_add_tx(common->dma_ndev, &tx_chn->napi_tx,
am65_cpsw_nuss_tx_poll);
- hrtimer_init(&tx_chn->tx_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
- tx_chn->tx_hrtimer.function = &am65_cpsw_nuss_tx_timer_callback;
ret = devm_request_irq(dev, tx_chn->irq,
am65_cpsw_nuss_tx_irq,
@@ -2281,7 +2240,16 @@ static int am65_cpsw_nuss_ndev_add_tx_napi(struct am65_cpsw_common *common)
}
}
+ return 0;
+
err:
+ netif_napi_del(&tx_chn->napi_tx);
+ for (--i; i >= 0; i--) {
+ tx_chn = &common->tx_chns[i];
+ devm_free_irq(dev, tx_chn->irq, tx_chn);
+ netif_napi_del(&tx_chn->napi_tx);
+ }
+
return ret;
}
@@ -2362,12 +2330,10 @@ static int am65_cpsw_nuss_init_tx_chns(struct am65_cpsw_common *common)
goto err;
}
+ return 0;
+
err:
- i = devm_add_action(dev, am65_cpsw_nuss_free_tx_chns, common);
- if (i) {
- dev_err(dev, "Failed to add free_tx_chns action %d\n", i);
- return i;
- }
+ am65_cpsw_nuss_free_tx_chns(common);
return ret;
}
@@ -2395,7 +2361,6 @@ static void am65_cpsw_nuss_remove_rx_chns(struct am65_cpsw_common *common)
rx_chn = &common->rx_chns;
flows = rx_chn->flows;
- devm_remove_action(dev, am65_cpsw_nuss_free_rx_chns, common);
for (i = 0; i < common->rx_ch_num_flows; i++) {
if (!(flows[i].irq < 0))
@@ -2494,7 +2459,7 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
i, &rx_flow_cfg);
if (ret) {
dev_err(dev, "Failed to init rx flow%d %d\n", i, ret);
- goto err;
+ goto err_flow;
}
if (!i)
fdqring_id =
@@ -2506,17 +2471,17 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
dev_err(dev, "Failed to get rx dma irq %d\n",
flow->irq);
ret = flow->irq;
- goto err;
+ goto err_flow;
}
snprintf(flow->name,
sizeof(flow->name), "%s-rx%d",
dev_name(dev), i);
+ hrtimer_setup(&flow->rx_hrtimer, &am65_cpsw_nuss_rx_timer_callback, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED);
+
netif_napi_add(common->dma_ndev, &flow->napi_rx,
am65_cpsw_nuss_rx_poll);
- hrtimer_init(&flow->rx_hrtimer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL_PINNED);
- flow->rx_hrtimer.function = &am65_cpsw_nuss_rx_timer_callback;
ret = devm_request_irq(dev, flow->irq,
am65_cpsw_nuss_rx_irq,
@@ -2526,20 +2491,28 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
dev_err(dev, "failure requesting rx %d irq %u, %d\n",
i, flow->irq, ret);
flow->irq = -EINVAL;
- goto err;
+ goto err_request_irq;
}
}
/* setup classifier to route priorities to flows */
cpsw_ale_classifier_setup_default(common->ale, common->rx_ch_num_flows);
-err:
- i = devm_add_action(dev, am65_cpsw_nuss_free_rx_chns, common);
- if (i) {
- dev_err(dev, "Failed to add free_rx_chns action %d\n", i);
- return i;
+ return 0;
+
+err_request_irq:
+ netif_napi_del(&flow->napi_rx);
+
+err_flow:
+ for (--i; i >= 0; i--) {
+ flow = &rx_chn->flows[i];
+ devm_free_irq(dev, flow->irq, flow);
+ netif_napi_del(&flow->napi_rx);
}
+err:
+ am65_cpsw_nuss_free_rx_chns(common);
+
return ret;
}
@@ -2559,20 +2532,15 @@ static int am65_cpsw_am654_get_efuse_macid(struct device_node *of_node,
{
u32 mac_lo, mac_hi, offset;
struct regmap *syscon;
- int ret;
- syscon = syscon_regmap_lookup_by_phandle(of_node, "ti,syscon-efuse");
+ syscon = syscon_regmap_lookup_by_phandle_args(of_node, "ti,syscon-efuse",
+ 1, &offset);
if (IS_ERR(syscon)) {
if (PTR_ERR(syscon) == -ENODEV)
return 0;
return PTR_ERR(syscon);
}
- ret = of_property_read_u32_index(of_node, "ti,syscon-efuse", 1,
- &offset);
- if (ret)
- return ret;
-
regmap_read(syscon, offset, &mac_lo);
regmap_read(syscon, offset + 4, &mac_hi);
@@ -2698,7 +2666,7 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
of_property_read_bool(port_np, "ti,mac-only");
/* get phy/link info */
- port->slave.port_np = port_np;
+ port->slave.port_np = of_node_get(port_np);
ret = of_get_phy_mode(port_np, &port->slave.phy_if);
if (ret) {
dev_err(dev, "%pOF read phy-mode err %d\n",
@@ -2711,13 +2679,15 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
goto of_node_put;
ret = of_get_mac_address(port_np, port->slave.mac_addr);
- if (ret) {
+ if (ret == -EPROBE_DEFER) {
+ goto of_node_put;
+ } else if (ret) {
am65_cpsw_am654_get_efuse_macid(port_np,
port->port_id,
port->slave.mac_addr);
if (!is_valid_ether_addr(port->slave.mac_addr)) {
eth_random_addr(port->slave.mac_addr);
- dev_err(dev, "Use random MAC address\n");
+ dev_info(dev, "Use random MAC address\n");
}
}
@@ -2752,6 +2722,17 @@ static void am65_cpsw_nuss_phylink_cleanup(struct am65_cpsw_common *common)
}
}
+static void am65_cpsw_remove_dt(struct am65_cpsw_common *common)
+{
+ struct am65_cpsw_port *port;
+ int i;
+
+ for (i = 0; i < common->port_num; i++) {
+ port = &common->ports[i];
+ of_node_put(port->slave.port_np);
+ }
+}
+
static int
am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
{
@@ -2781,7 +2762,7 @@ am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
mutex_init(&ndev_priv->mm_lock);
port->qos.link_speed = SPEED_UNKNOWN;
SET_NETDEV_DEV(port->ndev, dev);
- port->ndev->dev.of_node = port->slave.port_np;
+ device_set_node(&port->ndev->dev, of_fwnode_handle(port->slave.port_np));
eth_hw_addr_set(port->ndev, port->slave.mac_addr);
@@ -3349,7 +3330,7 @@ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
return ret;
ret = am65_cpsw_nuss_init_rx_chns(common);
if (ret)
- return ret;
+ goto err_remove_tx;
/* The DMA Channels are not guaranteed to be in a clean state.
* Reset and disable them to ensure that they are back to the
@@ -3364,13 +3345,13 @@ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
for (i = 0; i < common->rx_ch_num_flows; i++)
k3_udma_glue_reset_rx_chn(rx_chan->rx_chn, i,
rx_chan,
- am65_cpsw_nuss_rx_cleanup, !!i);
+ am65_cpsw_nuss_rx_cleanup);
k3_udma_glue_disable_rx_chn(rx_chan->rx_chn);
ret = am65_cpsw_nuss_register_devlink(common);
if (ret)
- return ret;
+ goto err_remove_rx;
for (i = 0; i < common->port_num; i++) {
port = &common->ports[i];
@@ -3401,6 +3382,10 @@ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
err_cleanup_ndev:
am65_cpsw_nuss_cleanup_ndev(common);
am65_cpsw_unregister_devlink(common);
+err_remove_rx:
+ am65_cpsw_nuss_remove_rx_chns(common);
+err_remove_tx:
+ am65_cpsw_nuss_remove_tx_chns(common);
return ret;
}
@@ -3420,6 +3405,8 @@ int am65_cpsw_nuss_update_tx_rx_chns(struct am65_cpsw_common *common,
return ret;
ret = am65_cpsw_nuss_init_rx_chns(common);
+ if (ret)
+ am65_cpsw_nuss_remove_tx_chns(common);
return ret;
}
@@ -3518,6 +3505,10 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
__be64 id_temp;
int ret, i;
+ BUILD_BUG_ON_MSG(sizeof(struct am65_cpsw_tx_swdata) > AM65_CPSW_NAV_SW_DATA_SIZE,
+ "TX SW_DATA size exceeds AM65_CPSW_NAV_SW_DATA_SIZE");
+ BUILD_BUG_ON_MSG(sizeof(struct am65_cpsw_swdata) > AM65_CPSW_NAV_SW_DATA_SIZE,
+ "SW_DATA size exceeds AM65_CPSW_NAV_SW_DATA_SIZE");
common = devm_kzalloc(dev, sizeof(struct am65_cpsw_common), GFP_KERNEL);
if (!common)
return -ENOMEM;
@@ -3551,7 +3542,7 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
init_completion(&common->tdown_complete);
common->tx_ch_num = AM65_CPSW_DEFAULT_TX_CHNS;
common->rx_ch_num_flows = AM65_CPSW_DEFAULT_RX_CHN_FLOWS;
- common->pf_p0_rx_ptype_rrobin = false;
+ common->pf_p0_rx_ptype_rrobin = true;
common->default_vlan = 1;
common->ports = devm_kcalloc(dev, common->port_num,
@@ -3572,6 +3563,16 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
return ret;
}
+ am65_cpsw_nuss_get_ver(common);
+
+ ret = am65_cpsw_nuss_init_host_p(common);
+ if (ret)
+ goto err_pm_clear;
+
+ ret = am65_cpsw_nuss_init_slave_ports(common);
+ if (ret)
+ goto err_pm_clear;
+
node = of_get_child_by_name(dev->of_node, "mdio");
if (!node) {
dev_warn(dev, "MDIO node not found\n");
@@ -3588,16 +3589,6 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
}
of_node_put(node);
- am65_cpsw_nuss_get_ver(common);
-
- ret = am65_cpsw_nuss_init_host_p(common);
- if (ret)
- goto err_of_clear;
-
- ret = am65_cpsw_nuss_init_slave_ports(common);
- if (ret)
- goto err_of_clear;
-
/* init common data */
ale_params.dev = dev;
ale_params.ale_ageout = AM65_CPSW_ALE_AGEOUT_DEFAULT;
@@ -3644,6 +3635,7 @@ err_ndevs_clear:
am65_cpsw_nuss_cleanup_ndev(common);
am65_cpsw_nuss_phylink_cleanup(common);
am65_cpts_release(common->cpts);
+ am65_cpsw_remove_dt(common);
err_of_clear:
if (common->mdio_dev)
of_platform_device_destroy(common->mdio_dev, NULL);
@@ -3678,9 +3670,12 @@ static void am65_cpsw_nuss_remove(struct platform_device *pdev)
*/
am65_cpsw_nuss_cleanup_ndev(common);
am65_cpsw_unregister_devlink(common);
+ am65_cpsw_nuss_remove_rx_chns(common);
+ am65_cpsw_nuss_remove_tx_chns(common);
am65_cpsw_nuss_phylink_cleanup(common);
am65_cpts_release(common->cpts);
am65_cpsw_disable_serdes_phy(common);
+ am65_cpsw_remove_dt(common);
if (common->mdio_dev)
of_platform_device_destroy(common->mdio_dev, NULL);
@@ -3739,8 +3734,10 @@ static int am65_cpsw_nuss_resume(struct device *dev)
if (ret)
return ret;
ret = am65_cpsw_nuss_init_rx_chns(common);
- if (ret)
+ if (ret) {
+ am65_cpsw_nuss_remove_tx_chns(common);
return ret;
+ }
/* If RX IRQ was disabled before suspend, keep it disabled */
for (i = 0; i < common->rx_ch_num_flows; i++) {
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.h b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
index e7832a5cf3cc..917c37e4e89b 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.h
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
@@ -104,6 +104,14 @@ struct am65_cpsw_rx_flow {
char name[32];
};
+struct am65_cpsw_tx_swdata {
+ struct net_device *ndev;
+ union {
+ struct sk_buff *skb;
+ struct xdp_frame *xdpf;
+ };
+};
+
struct am65_cpsw_swdata {
u32 flow_id;
struct page *page;
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 4ef8cf6ea135..54c24cd3d3be 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -351,6 +351,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
int ret = 0, port, ch = xmeta->ch;
int headroom = CPSW_HEADROOM_NA;
struct net_device *ndev = xmeta->ndev;
+ u32 metasize = 0;
struct cpsw_priv *priv;
struct page_pool *pool;
struct sk_buff *skb;
@@ -400,7 +401,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
size -= CPSW_RX_VLAN_ENCAP_HDR_SIZE;
}
- xdp_prepare_buff(&xdp, pa, headroom, size, false);
+ xdp_prepare_buff(&xdp, pa, headroom, size, true);
port = priv->emac_port + cpsw->data.dual_emac;
ret = cpsw_run_xdp(priv, ch, &xdp, page, port, &len);
@@ -408,6 +409,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
goto requeue;
headroom = xdp.data - xdp.data_hard_start;
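+ /* pick up any metadata the XDP program prepended via bpf_xdp_adjust_meta() */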
+ metasize = xdp.data - xdp.data_meta;
/* XDP prog can modify vlan tag, so can't use encap header */
status &= ~CPDMA_RX_VLAN_ENCAP;
@@ -423,6 +425,8 @@ static void cpsw_rx_handler(void *token, int len, int status)
skb_reserve(skb, headroom);
skb_put(skb, len);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
skb->dev = ndev;
if (status & CPDMA_RX_VLAN_ENCAP)
cpsw_rx_vlan_encap(skb);
@@ -635,6 +639,8 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
slave->phy = phy;
+ phy_disable_eee(slave->phy);
+
phy_attached_info(slave->phy);
phy_start(slave->phy);
@@ -684,7 +690,7 @@ static void cpsw_init_host_port(struct cpsw_priv *priv)
soft_reset("cpsw", &cpsw->regs->soft_reset);
cpsw_ale_start(cpsw->ale);
- /* switch to vlan unaware mode */
+ /* switch to vlan aware mode */
cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_VLAN_AWARE,
CPSW_ALE_VLAN_AWARE);
control_reg = readl(&cpsw->regs->control);
@@ -1150,6 +1156,27 @@ static void cpsw_ndo_poll_controller(struct net_device *ndev)
}
#endif
+/* We need a custom implementation of phy_do_ioctl_running() because in switch
+ * mode, dev->phydev may be different than the phy of the active_slave. We need
+ * to operate on the locally saved phy instead.
+ */
+static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
+{
+ struct cpsw_priv *priv = netdev_priv(dev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int slave_no = cpsw_slave_index(cpsw, priv);
+ struct phy_device *phy;
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ phy = cpsw->slaves[slave_no].phy;
+ if (phy)
+ return phy_mii_ioctl(phy, req, cmd);
+
+ return -EOPNOTSUPP;
+}
+
static const struct net_device_ops cpsw_netdev_ops = {
.ndo_open = cpsw_ndo_open,
.ndo_stop = cpsw_ndo_stop,
@@ -1168,6 +1195,8 @@ static const struct net_device_ops cpsw_netdev_ops = {
.ndo_setup_tc = cpsw_ndo_setup_tc,
.ndo_bpf = cpsw_ndo_bpf,
.ndo_xdp_xmit = cpsw_ndo_xdp_xmit,
+ .ndo_hwtstamp_get = cpsw_hwtstamp_get,
+ .ndo_hwtstamp_set = cpsw_hwtstamp_set,
};
static void cpsw_get_drvinfo(struct net_device *ndev,
@@ -1225,7 +1254,6 @@ static const struct ethtool_ops cpsw_ethtool_ops = {
.get_link_ksettings = cpsw_get_link_ksettings,
.set_link_ksettings = cpsw_set_link_ksettings,
.get_eee = cpsw_get_eee,
- .set_eee = cpsw_set_eee,
.nway_reset = cpsw_nway_reset,
.get_ringparam = cpsw_get_ringparam,
.set_ringparam = cpsw_set_ringparam,
@@ -1641,6 +1669,9 @@ static int cpsw_probe(struct platform_device *pdev)
ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX;
ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
NETDEV_XDP_ACT_NDO_XMIT;
+ /* Hijack PHY timestamping requests in order to block them */
+ if (!cpsw->data.dual_emac)
+ ndev->see_all_hwtstamp_requests = true;
ndev->netdev_ops = &cpsw_netdev_ops;
ndev->ethtool_ops = &cpsw_ethtool_ops;
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index 52e4e350b734..fbe35af615a6 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -127,15 +127,15 @@ struct cpsw_ale_dev_id {
static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits)
{
- int idx, idx2;
+ int idx, idx2, index;
u32 hi_val = 0;
idx = start / 32;
idx2 = (start + bits - 1) / 32;
/* Check if bits to be fetched exceed a word */
if (idx != idx2) {
- idx2 = 2 - idx2; /* flip */
- hi_val = ale_entry[idx2] << ((idx2 * 32) - start);
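+ /* flip only a local copy so the shift below still uses the original idx2 */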
+ index = 2 - idx2; /* flip */
+ hi_val = ale_entry[index] << ((idx2 * 32) - start);
}
start -= idx * 32;
idx = 2 - idx; /* flip */
@@ -145,16 +145,16 @@ static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits)
static inline void cpsw_ale_set_field(u32 *ale_entry, u32 start, u32 bits,
u32 value)
{
- int idx, idx2;
+ int idx, idx2, index;
value &= BITMASK(bits);
idx = start / 32;
idx2 = (start + bits - 1) / 32;
/* Check if bits to be set exceed a word */
if (idx != idx2) {
- idx2 = 2 - idx2; /* flip */
- ale_entry[idx2] &= ~(BITMASK(bits + start - (idx2 * 32)));
- ale_entry[idx2] |= (value >> ((idx2 * 32) - start));
+ index = 2 - idx2; /* flip */
+ ale_entry[index] &= ~(BITMASK(bits + start - (idx2 * 32)));
+ ale_entry[index] |= (value >> ((idx2 * 32) - start));
}
start -= idx * 32;
idx = 2 - idx; /* flip */
@@ -1231,7 +1231,7 @@ int cpsw_ale_rx_ratelimit_bc(struct cpsw_ale *ale, int port, unsigned int rateli
static void cpsw_ale_timer(struct timer_list *t)
{
- struct cpsw_ale *ale = from_timer(ale, t, timer);
+ struct cpsw_ale *ale = timer_container_of(ale, t, timer);
cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
@@ -1287,7 +1287,7 @@ static void cpsw_ale_aging_stop(struct cpsw_ale *ale)
return;
}
- del_timer_sync(&ale->timer);
+ timer_delete_sync(&ale->timer);
}
void cpsw_ale_start(struct cpsw_ale *ale)
diff --git a/drivers/net/ethernet/ti/cpsw_ethtool.c b/drivers/net/ethernet/ti/cpsw_ethtool.c
index 21d55a180ef6..bdc4db0d169c 100644
--- a/drivers/net/ethernet/ti/cpsw_ethtool.c
+++ b/drivers/net/ethernet/ti/cpsw_ethtool.c
@@ -434,18 +434,6 @@ int cpsw_get_eee(struct net_device *ndev, struct ethtool_keee *edata)
return -EOPNOTSUPP;
}
-int cpsw_set_eee(struct net_device *ndev, struct ethtool_keee *edata)
-{
- struct cpsw_priv *priv = netdev_priv(ndev);
- struct cpsw_common *cpsw = priv->cpsw;
- int slave_no = cpsw_slave_index(cpsw, priv);
-
- if (cpsw->slaves[slave_no].phy)
- return phy_ethtool_set_eee(cpsw->slaves[slave_no].phy, edata);
- else
- return -EOPNOTSUPP;
-}
-
int cpsw_nway_reset(struct net_device *ndev)
{
struct cpsw_priv *priv = netdev_priv(ndev);
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index a98bcc5eb566..8b9e2078c602 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -293,6 +293,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
struct page_pool *pool;
struct sk_buff *skb;
struct xdp_buff xdp;
+ u32 metasize = 0;
int ret = 0;
dma_addr_t dma;
@@ -345,13 +346,14 @@ static void cpsw_rx_handler(void *token, int len, int status)
size -= CPSW_RX_VLAN_ENCAP_HDR_SIZE;
}
- xdp_prepare_buff(&xdp, pa, headroom, size, false);
+ xdp_prepare_buff(&xdp, pa, headroom, size, true);
ret = cpsw_run_xdp(priv, ch, &xdp, page, priv->emac_port, &len);
if (ret != CPSW_XDP_PASS)
goto requeue;
headroom = xdp.data - xdp.data_hard_start;
+ metasize = xdp.data - xdp.data_meta;
/* XDP prog can modify vlan tag, so can't use encap header */
status &= ~CPDMA_RX_VLAN_ENCAP;
@@ -368,6 +370,8 @@ static void cpsw_rx_handler(void *token, int len, int status)
skb->offload_fwd_mark = priv->offload_fwd_mark;
skb_reserve(skb, headroom);
skb_put(skb, len);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
skb->dev = ndev;
if (status & CPDMA_RX_VLAN_ENCAP)
cpsw_rx_vlan_encap(skb);
@@ -554,7 +558,7 @@ static void cpsw_init_host_port(struct cpsw_priv *priv)
soft_reset("cpsw", &cpsw->regs->soft_reset);
cpsw_ale_start(cpsw->ale);
- /* switch to vlan unaware mode */
+ /* switch to vlan aware mode */
cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_VLAN_AWARE,
CPSW_ALE_VLAN_AWARE);
control_reg = readl(&cpsw->regs->control);
@@ -778,6 +782,8 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
slave->phy = phy;
+ phy_disable_eee(slave->phy);
+
phy_attached_info(slave->phy);
phy_start(slave->phy);
@@ -1126,7 +1132,7 @@ static const struct net_device_ops cpsw_netdev_ops = {
.ndo_stop = cpsw_ndo_stop,
.ndo_start_xmit = cpsw_ndo_start_xmit,
.ndo_set_mac_address = cpsw_ndo_set_mac_address,
- .ndo_eth_ioctl = cpsw_ndo_ioctl,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_validate_addr = eth_validate_addr,
.ndo_tx_timeout = cpsw_ndo_tx_timeout,
.ndo_set_rx_mode = cpsw_ndo_set_rx_mode,
@@ -1141,6 +1147,8 @@ static const struct net_device_ops cpsw_netdev_ops = {
.ndo_bpf = cpsw_ndo_bpf,
.ndo_xdp_xmit = cpsw_ndo_xdp_xmit,
.ndo_get_port_parent_id = cpsw_get_port_parent_id,
+ .ndo_hwtstamp_get = cpsw_hwtstamp_get,
+ .ndo_hwtstamp_set = cpsw_hwtstamp_set,
};
static void cpsw_get_drvinfo(struct net_device *ndev,
@@ -1209,7 +1217,6 @@ static const struct ethtool_ops cpsw_ethtool_ops = {
.get_link_ksettings = cpsw_get_link_ksettings,
.set_link_ksettings = cpsw_set_link_ksettings,
.get_eee = cpsw_get_eee,
- .set_eee = cpsw_set_eee,
.nway_reset = cpsw_nway_reset,
.get_ringparam = cpsw_get_ringparam,
.set_ringparam = cpsw_set_ringparam,
@@ -1408,7 +1415,7 @@ static int cpsw_create_ports(struct cpsw_common *cpsw)
ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_TC;
- ndev->netns_local = true;
+ ndev->netns_immutable = true;
ndev->xdp_features = NETDEV_XDP_ACT_BASIC |
NETDEV_XDP_ACT_REDIRECT |
@@ -1417,6 +1424,7 @@ static int cpsw_create_ports(struct cpsw_common *cpsw)
ndev->netdev_ops = &cpsw_netdev_ops;
ndev->ethtool_ops = &cpsw_ethtool_ops;
SET_NETDEV_DEV(ndev, dev);
+ ndev->dev.of_node = slave_data->slave_node;
if (!napi_ndev) {
/* CPSW Host port CPDMA interface is shared between
diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c
index 6fe4edabba44..bc4fdf17a99e 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.c
+++ b/drivers/net/ethernet/ti/cpsw_priv.c
@@ -614,24 +614,29 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
writel_relaxed(ETH_P_8021Q, &cpsw->regs->vlan_ltype);
}
-static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
+int cpsw_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
{
struct cpsw_priv *priv = netdev_priv(dev);
struct cpsw_common *cpsw = priv->cpsw;
- struct hwtstamp_config cfg;
+
+ /* This will only execute if dev->see_all_hwtstamp_requests is set */
+ if (cfg->source != HWTSTAMP_SOURCE_NETDEV) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Switch mode only supports MAC timestamping");
+ return -EOPNOTSUPP;
+ }
if (cpsw->version != CPSW_VERSION_1 &&
cpsw->version != CPSW_VERSION_2 &&
cpsw->version != CPSW_VERSION_3)
return -EOPNOTSUPP;
- if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
- return -EFAULT;
-
- if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON)
+ if (cfg->tx_type != HWTSTAMP_TX_OFF && cfg->tx_type != HWTSTAMP_TX_ON)
return -ERANGE;
- switch (cfg.rx_filter) {
+ switch (cfg->rx_filter) {
case HWTSTAMP_FILTER_NONE:
priv->rx_ts_enabled = 0;
break;
@@ -651,13 +656,13 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V2_EVENT;
- cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ cfg->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
break;
default:
return -ERANGE;
}
- priv->tx_ts_enabled = cfg.tx_type == HWTSTAMP_TX_ON;
+ priv->tx_ts_enabled = cfg->tx_type == HWTSTAMP_TX_ON;
switch (cpsw->version) {
case CPSW_VERSION_1:
@@ -671,65 +676,40 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
WARN_ON(1);
}
- return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+ return 0;
}
-static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+int cpsw_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg)
{
struct cpsw_common *cpsw = ndev_to_cpsw(dev);
struct cpsw_priv *priv = netdev_priv(dev);
- struct hwtstamp_config cfg;
if (cpsw->version != CPSW_VERSION_1 &&
cpsw->version != CPSW_VERSION_2 &&
cpsw->version != CPSW_VERSION_3)
return -EOPNOTSUPP;
- cfg.flags = 0;
- cfg.tx_type = priv->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
- cfg.rx_filter = priv->rx_ts_enabled;
+ cfg->tx_type = priv->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ cfg->rx_filter = priv->rx_ts_enabled;
- return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+ return 0;
}
#else
-static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+int cpsw_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg)
{
return -EOPNOTSUPP;
}
-static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
+int cpsw_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
{
return -EOPNOTSUPP;
}
#endif /*CONFIG_TI_CPTS*/
-int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
-{
- struct cpsw_priv *priv = netdev_priv(dev);
- struct cpsw_common *cpsw = priv->cpsw;
- int slave_no = cpsw_slave_index(cpsw, priv);
- struct phy_device *phy;
-
- if (!netif_running(dev))
- return -EINVAL;
-
- phy = cpsw->slaves[slave_no].phy;
-
- if (!phy_has_hwtstamp(phy)) {
- switch (cmd) {
- case SIOCSHWTSTAMP:
- return cpsw_hwtstamp_set(dev, req);
- case SIOCGHWTSTAMP:
- return cpsw_hwtstamp_get(dev, req);
- }
- }
-
- if (phy)
- return phy_mii_ioctl(phy, req, cmd);
-
- return -EOPNOTSUPP;
-}
-
int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate)
{
struct cpsw_priv *priv = netdev_priv(ndev);
diff --git a/drivers/net/ethernet/ti/cpsw_priv.h b/drivers/net/ethernet/ti/cpsw_priv.h
index 1f448290b9f4..91add8925e23 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.h
+++ b/drivers/net/ethernet/ti/cpsw_priv.h
@@ -461,7 +461,6 @@ void soft_reset(const char *module, void __iomem *reg);
void cpsw_set_slave_mac(struct cpsw_slave *slave, struct cpsw_priv *priv);
void cpsw_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue);
int cpsw_need_resplit(struct cpsw_common *cpsw);
-int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd);
int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate);
int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
void *type_data);
@@ -469,6 +468,11 @@ bool cpsw_shp_is_off(struct cpsw_priv *priv);
void cpsw_cbs_resume(struct cpsw_slave *slave, struct cpsw_priv *priv);
void cpsw_mqprio_resume(struct cpsw_slave *slave, struct cpsw_priv *priv);
void cpsw_qos_clsflower_resume(struct cpsw_priv *priv);
+int cpsw_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg);
+int cpsw_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack);
/* ethtool */
u32 cpsw_get_msglevel(struct net_device *ndev);
@@ -497,7 +501,6 @@ int cpsw_get_link_ksettings(struct net_device *ndev,
int cpsw_set_link_ksettings(struct net_device *ndev,
const struct ethtool_link_ksettings *ecmd);
int cpsw_get_eee(struct net_device *ndev, struct ethtool_keee *edata);
-int cpsw_set_eee(struct net_device *ndev, struct ethtool_keee *edata);
int cpsw_nway_reset(struct net_device *ndev);
void cpsw_get_ringparam(struct net_device *ndev,
struct ethtool_ringparam *ering,
diff --git a/drivers/net/ethernet/ti/icssg/icss_iep.c b/drivers/net/ethernet/ti/icssg/icss_iep.c
index 5d6d1cf78e93..2a1c43316f46 100644
--- a/drivers/net/ethernet/ti/icssg/icss_iep.c
+++ b/drivers/net/ethernet/ti/icssg/icss_iep.c
@@ -215,6 +215,9 @@ static void icss_iep_enable_shadow_mode(struct icss_iep *iep)
for (cmp = IEP_MIN_CMP; cmp < IEP_MAX_CMP; cmp++) {
regmap_update_bits(iep->map, ICSS_IEP_CMP_STAT_REG,
IEP_CMP_STATUS(cmp), IEP_CMP_STATUS(cmp));
+
+ regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
+ IEP_CMP_CFG_CMP_EN(cmp), 0);
}
/* enable reset counter on CMP0 event */
@@ -403,66 +406,79 @@ static void icss_iep_update_to_next_boundary(struct icss_iep *iep, u64 start_ns)
static int icss_iep_perout_enable_hw(struct icss_iep *iep,
struct ptp_perout_request *req, int on)
{
+ struct timespec64 ts;
+ u64 ns_start;
+ u64 ns_width;
int ret;
u64 cmp;
+ if (!on) {
+ /* Disable CMP 1 */
+ regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
+ IEP_CMP_CFG_CMP_EN(1), 0);
+
+ /* clear CMP regs */
+ regmap_write(iep->map, ICSS_IEP_CMP1_REG0, 0);
+ if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
+ regmap_write(iep->map, ICSS_IEP_CMP1_REG1, 0);
+
+ /* Disable sync */
+ regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG, 0);
+
+ return 0;
+ }
+
+ /* Calculate width of the signal for PPS/PEROUT handling */
+ ts.tv_sec = req->on.sec;
+ ts.tv_nsec = req->on.nsec;
+ ns_width = timespec64_to_ns(&ts);
+
+ if (req->flags & PTP_PEROUT_PHASE) {
+ ts.tv_sec = req->phase.sec;
+ ts.tv_nsec = req->phase.nsec;
+ ns_start = timespec64_to_ns(&ts);
+ } else {
+ ns_start = 0;
+ }
+
if (iep->ops && iep->ops->perout_enable) {
ret = iep->ops->perout_enable(iep->clockops_data, req, on, &cmp);
if (ret)
return ret;
- if (on) {
- /* Configure CMP */
- regmap_write(iep->map, ICSS_IEP_CMP1_REG0, lower_32_bits(cmp));
- if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
- regmap_write(iep->map, ICSS_IEP_CMP1_REG1, upper_32_bits(cmp));
- /* Configure SYNC, 1ms pulse width */
- regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG, 1000000);
- regmap_write(iep->map, ICSS_IEP_SYNC0_PERIOD_REG, 0);
- regmap_write(iep->map, ICSS_IEP_SYNC_START_REG, 0);
- regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG, 0); /* one-shot mode */
- /* Enable CMP 1 */
- regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
- IEP_CMP_CFG_CMP_EN(1), IEP_CMP_CFG_CMP_EN(1));
- } else {
- /* Disable CMP 1 */
- regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
- IEP_CMP_CFG_CMP_EN(1), 0);
-
- /* clear regs */
- regmap_write(iep->map, ICSS_IEP_CMP1_REG0, 0);
- if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
- regmap_write(iep->map, ICSS_IEP_CMP1_REG1, 0);
- }
+ /* Configure CMP */
+ regmap_write(iep->map, ICSS_IEP_CMP1_REG0, lower_32_bits(cmp));
+ if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
+ regmap_write(iep->map, ICSS_IEP_CMP1_REG1, upper_32_bits(cmp));
+ /* Configure SYNC pulse width from the requested 'on' time */
+ regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG,
+ div_u64(ns_width, iep->def_inc));
+ regmap_write(iep->map, ICSS_IEP_SYNC0_PERIOD_REG, 0);
+ regmap_write(iep->map, ICSS_IEP_SYNC_START_REG,
+ div_u64(ns_start, iep->def_inc));
+ regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG, 0); /* one-shot mode */
+ /* Enable CMP 1 */
+ regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
+ IEP_CMP_CFG_CMP_EN(1), IEP_CMP_CFG_CMP_EN(1));
} else {
- if (on) {
- u64 start_ns;
-
- iep->period = ((u64)req->period.sec * NSEC_PER_SEC) +
- req->period.nsec;
- start_ns = ((u64)req->period.sec * NSEC_PER_SEC)
- + req->period.nsec;
- icss_iep_update_to_next_boundary(iep, start_ns);
-
- /* Enable Sync in single shot mode */
- regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG,
- IEP_SYNC_CTRL_SYNC_N_EN(0) | IEP_SYNC_CTRL_SYNC_EN);
- /* Enable CMP 1 */
- regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
- IEP_CMP_CFG_CMP_EN(1), IEP_CMP_CFG_CMP_EN(1));
- } else {
- /* Disable CMP 1 */
- regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
- IEP_CMP_CFG_CMP_EN(1), 0);
-
- /* clear CMP regs */
- regmap_write(iep->map, ICSS_IEP_CMP1_REG0, 0);
- if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
- regmap_write(iep->map, ICSS_IEP_CMP1_REG1, 0);
-
- /* Disable sync */
- regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG, 0);
- }
+ u64 start_ns;
+
+ iep->period = ((u64)req->period.sec * NSEC_PER_SEC) +
+ req->period.nsec;
+ start_ns = ((u64)req->period.sec * NSEC_PER_SEC)
+ + req->period.nsec;
+ icss_iep_update_to_next_boundary(iep, start_ns);
+
+ regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG,
+ div_u64(ns_width, iep->def_inc));
+ regmap_write(iep->map, ICSS_IEP_SYNC_START_REG,
+ div_u64(ns_start, iep->def_inc));
+ /* Enable Sync in single shot mode */
+ regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG,
+ IEP_SYNC_CTRL_SYNC_N_EN(0) | IEP_SYNC_CTRL_SYNC_EN);
+ /* Enable CMP 1 */
+ regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
+ IEP_CMP_CFG_CMP_EN(1), IEP_CMP_CFG_CMP_EN(1));
}
return 0;
@@ -473,6 +489,21 @@ static int icss_iep_perout_enable(struct icss_iep *iep,
{
int ret = 0;
+ if (!on)
+ goto disable;
+
+ /* Reject requests with unsupported flags */
+ if (req->flags & ~(PTP_PEROUT_DUTY_CYCLE |
+ PTP_PEROUT_PHASE))
+ return -EOPNOTSUPP;
+
+ /* Set default "on" time (1ms) for the signal if not passed by the app */
+ if (!(req->flags & PTP_PEROUT_DUTY_CYCLE)) {
+ req->on.sec = 0;
+ req->on.nsec = NSEC_PER_MSEC;
+ }
+
+disable:
mutex_lock(&iep->ptp_clk_mutex);
if (iep->pps_enabled) {
@@ -565,10 +596,13 @@ static int icss_iep_pps_enable(struct icss_iep *iep, int on)
if (on) {
ns = icss_iep_gettime(iep, NULL);
ts = ns_to_timespec64(ns);
+ rq.perout.flags = 0;
rq.perout.period.sec = 1;
rq.perout.period.nsec = 0;
rq.perout.start.sec = ts.tv_sec + 2;
rq.perout.start.nsec = 0;
+ rq.perout.on.sec = 0;
+ rq.perout.on.nsec = NSEC_PER_MSEC;
ret = icss_iep_perout_enable_hw(iep, &rq.perout, on);
} else {
ret = icss_iep_perout_enable_hw(iep, &rq.perout, on);
@@ -780,6 +814,11 @@ int icss_iep_exit(struct icss_iep *iep)
}
icss_iep_disable(iep);
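+ /* stop any PPS or periodic output that is still active before the IEP is released */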
+ if (iep->pps_enabled)
+ icss_iep_pps_enable(iep, false);
+ else if (iep->perout_enabled)
+ icss_iep_perout_enable(iep, NULL, false);
+
return 0;
}
EXPORT_SYMBOL_GPL(icss_iep_exit);
diff --git a/drivers/net/ethernet/ti/icssg/icssg_common.c b/drivers/net/ethernet/ti/icssg/icssg_common.c
index fdebeb2f84e0..12f25cec6255 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_common.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_common.c
@@ -45,6 +45,11 @@ void prueth_cleanup_rx_chns(struct prueth_emac *emac,
struct prueth_rx_chn *rx_chn,
int max_rflows)
{
+ if (rx_chn->pg_pool) {
+ page_pool_destroy(rx_chn->pg_pool);
+ rx_chn->pg_pool = NULL;
+ }
+
if (rx_chn->desc_pool)
k3_cppi_desc_pool_destroy(rx_chn->desc_pool);
@@ -131,12 +136,13 @@ int emac_tx_complete_packets(struct prueth_emac *emac, int chn,
struct net_device *ndev = emac->ndev;
struct cppi5_host_desc_t *desc_tx;
struct netdev_queue *netif_txq;
+ struct prueth_swdata *swdata;
struct prueth_tx_chn *tx_chn;
unsigned int total_bytes = 0;
+ struct xdp_frame *xdpf;
struct sk_buff *skb;
dma_addr_t desc_dma;
int res, num_tx = 0;
- void **swdata;
tx_chn = &emac->tx_chns[chn];
@@ -157,20 +163,26 @@ int emac_tx_complete_packets(struct prueth_emac *emac, int chn,
desc_dma);
swdata = cppi5_hdesc_get_swdata(desc_tx);
- /* was this command's TX complete? */
- if (emac->is_sr1 && *(swdata) == emac->cmd_data) {
+ switch (swdata->type) {
+ case PRUETH_SWDATA_SKB:
+ skb = swdata->data.skb;
+ dev_sw_netstats_tx_add(skb->dev, 1, skb->len);
+ total_bytes += skb->len;
+ napi_consume_skb(skb, budget);
+ break;
+ case PRUETH_SWDATA_XDPF:
+ xdpf = swdata->data.xdpf;
+ dev_sw_netstats_tx_add(ndev, 1, xdpf->len);
+ total_bytes += xdpf->len;
+ xdp_return_frame(xdpf);
+ break;
+ default:
prueth_xmit_free(tx_chn, desc_tx);
+ ndev->stats.tx_dropped++;
continue;
}
- skb = *(swdata);
prueth_xmit_free(tx_chn, desc_tx);
-
- ndev = skb->dev;
- ndev->stats.tx_packets++;
- ndev->stats.tx_bytes += skb->len;
- total_bytes += skb->len;
- napi_consume_skb(skb, budget);
num_tx++;
}
@@ -249,9 +261,8 @@ int prueth_ndev_add_tx_napi(struct prueth_emac *emac)
struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];
netif_napi_add_tx(emac->ndev, &tx_chn->napi_tx, emac_napi_tx_poll);
- hrtimer_init(&tx_chn->tx_hrtimer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL_PINNED);
- tx_chn->tx_hrtimer.function = &emac_tx_timer_callback;
+ hrtimer_setup(&tx_chn->tx_hrtimer, &emac_tx_timer_callback, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED);
ret = request_irq(tx_chn->irq, prueth_tx_irq,
IRQF_TRIGGER_HIGH, tx_chn->name,
tx_chn);
@@ -461,17 +472,17 @@ fail:
}
EXPORT_SYMBOL_GPL(prueth_init_rx_chns);
-int prueth_dma_rx_push(struct prueth_emac *emac,
- struct sk_buff *skb,
- struct prueth_rx_chn *rx_chn)
+int prueth_dma_rx_push_mapped(struct prueth_emac *emac,
+ struct prueth_rx_chn *rx_chn,
+ struct page *page, u32 buf_len)
{
struct net_device *ndev = emac->ndev;
struct cppi5_host_desc_t *desc_rx;
- u32 pkt_len = skb_tailroom(skb);
+ struct prueth_swdata *swdata;
dma_addr_t desc_dma;
dma_addr_t buf_dma;
- void **swdata;
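+ /* the page pool supplies the DMA address; received data lands after the reserved headroom */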
+ buf_dma = page_pool_get_dma_addr(page) + PRUETH_HEADROOM;
desc_rx = k3_cppi_desc_pool_alloc(rx_chn->desc_pool);
if (!desc_rx) {
netdev_err(ndev, "rx push: failed to allocate descriptor\n");
@@ -479,25 +490,19 @@ int prueth_dma_rx_push(struct prueth_emac *emac,
}
desc_dma = k3_cppi_desc_pool_virt2dma(rx_chn->desc_pool, desc_rx);
- buf_dma = dma_map_single(rx_chn->dma_dev, skb->data, pkt_len, DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(rx_chn->dma_dev, buf_dma))) {
- k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
- netdev_err(ndev, "rx push: failed to map rx pkt buffer\n");
- return -EINVAL;
- }
-
cppi5_hdesc_init(desc_rx, CPPI5_INFO0_HDESC_EPIB_PRESENT,
PRUETH_NAV_PS_DATA_SIZE);
k3_udma_glue_rx_dma_to_cppi5_addr(rx_chn->rx_chn, &buf_dma);
- cppi5_hdesc_attach_buf(desc_rx, buf_dma, skb_tailroom(skb), buf_dma, skb_tailroom(skb));
+ cppi5_hdesc_attach_buf(desc_rx, buf_dma, buf_len, buf_dma, buf_len);
swdata = cppi5_hdesc_get_swdata(desc_rx);
- *swdata = skb;
+ swdata->type = PRUETH_SWDATA_PAGE;
+ swdata->data.page = page;
- return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, 0,
+ return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, PRUETH_RX_FLOW_DATA,
desc_rx, desc_dma);
}
-EXPORT_SYMBOL_GPL(prueth_dma_rx_push);
+EXPORT_SYMBOL_GPL(prueth_dma_rx_push_mapped);
u64 icssg_ts_to_ns(u32 hi_sw, u32 hi, u32 lo, u32 cycle_time_ns)
{
@@ -535,18 +540,178 @@ void emac_rx_timestamp(struct prueth_emac *emac,
ssh->hwtstamp = ns_to_ktime(ns);
}
-static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id)
+/**
+ * emac_xmit_xdp_frame - transmits an XDP frame
+ * @emac: emac device
+ * @xdpf: data to transmit
+ * @page: page from page pool if already DMA mapped
+ * @q_idx: queue id
+ *
+ * Return: XDP state
+ */
+u32 emac_xmit_xdp_frame(struct prueth_emac *emac,
+ struct xdp_frame *xdpf,
+ struct page *page,
+ unsigned int q_idx)
+{
+ struct cppi5_host_desc_t *first_desc;
+ struct net_device *ndev = emac->ndev;
+ struct netdev_queue *netif_txq;
+ struct prueth_tx_chn *tx_chn;
+ dma_addr_t desc_dma, buf_dma;
+ struct prueth_swdata *swdata;
+ u32 *epib;
+ int ret;
+
+ if (q_idx >= PRUETH_MAX_TX_QUEUES) {
+ netdev_err(ndev, "xdp tx: invalid q_id %d\n", q_idx);
+ return ICSSG_XDP_CONSUMED; /* drop */
+ }
+
+ tx_chn = &emac->tx_chns[q_idx];
+
+ first_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
+ if (!first_desc) {
+ netdev_dbg(ndev, "xdp tx: failed to allocate descriptor\n");
+ return ICSSG_XDP_CONSUMED; /* drop */
+ }
+
+ if (page) { /* already DMA mapped by page_pool */
+ buf_dma = page_pool_get_dma_addr(page);
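+ /* xdp_convert_buff_to_frame() places struct xdp_frame at the start of the buffer, so the payload begins after it plus the frame's headroom */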
+ buf_dma += xdpf->headroom + sizeof(struct xdp_frame);
+ } else { /* Map the linear buffer */
+ buf_dma = dma_map_single(tx_chn->dma_dev, xdpf->data, xdpf->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(tx_chn->dma_dev, buf_dma)) {
+ netdev_err(ndev, "xdp tx: failed to map data buffer\n");
+ goto drop_free_descs; /* drop */
+ }
+ }
+
+ cppi5_hdesc_init(first_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
+ PRUETH_NAV_PS_DATA_SIZE);
+ cppi5_hdesc_set_pkttype(first_desc, 0);
+ epib = first_desc->epib;
+ epib[0] = 0;
+ epib[1] = 0;
+
+ /* set dst tag to indicate the internal queue id at the firmware, which is
+ * at bit8..bit15. bit0..bit7 indicate the port number for directed
+ * packets in switch mode operation
+ */
+ cppi5_desc_set_tags_ids(&first_desc->hdr, 0, (emac->port_id | (q_idx << 8)));
+ k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
+ cppi5_hdesc_attach_buf(first_desc, buf_dma, xdpf->len, buf_dma, xdpf->len);
+ swdata = cppi5_hdesc_get_swdata(first_desc);
+ swdata->type = PRUETH_SWDATA_XDPF;
+ swdata->data.xdpf = xdpf;
+
+ /* Report BQL before sending the packet */
+ netif_txq = netdev_get_tx_queue(ndev, tx_chn->id);
+ netdev_tx_sent_queue(netif_txq, xdpf->len);
+
+ cppi5_hdesc_set_pktlen(first_desc, xdpf->len);
+ desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc);
+
+ ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
+ if (ret) {
+ netdev_err(ndev, "xdp tx: push failed: %d\n", ret);
+ netdev_tx_completed_queue(netif_txq, 1, xdpf->len);
+ goto drop_free_descs;
+ }
+
+ return ICSSG_XDP_TX;
+
+drop_free_descs:
+ prueth_xmit_free(tx_chn, first_desc);
+ return ICSSG_XDP_CONSUMED;
+}
+EXPORT_SYMBOL_GPL(emac_xmit_xdp_frame);
+
+/**
+ * emac_run_xdp - run an XDP program
+ * @emac: emac device
+ * @xdp: XDP buffer containing the frame
+ * @page: page with RX data if already DMA mapped
+ * @len: Rx descriptor packet length
+ *
+ * Return: XDP state
+ */
+static u32 emac_run_xdp(struct prueth_emac *emac, struct xdp_buff *xdp,
+ struct page *page, u32 *len)
+{
+ struct net_device *ndev = emac->ndev;
+ struct netdev_queue *netif_txq;
+ int cpu = smp_processor_id();
+ struct bpf_prog *xdp_prog;
+ struct xdp_frame *xdpf;
+ u32 pkt_len = *len;
+ u32 act, result;
+ int q_idx, err;
+
+ xdp_prog = READ_ONCE(emac->xdp_prog);
+ act = bpf_prog_run_xdp(xdp_prog, xdp);
+ switch (act) {
+ case XDP_PASS:
+ return ICSSG_XDP_PASS;
+ case XDP_TX:
+ /* Send packet to TX ring for immediate transmission */
+ xdpf = xdp_convert_buff_to_frame(xdp);
+ if (unlikely(!xdpf)) {
+ ndev->stats.tx_dropped++;
+ goto drop;
+ }
+
+ q_idx = cpu % emac->tx_ch_num;
+ netif_txq = netdev_get_tx_queue(ndev, q_idx);
+ __netif_tx_lock(netif_txq, cpu);
+ result = emac_xmit_xdp_frame(emac, xdpf, page, q_idx);
+ __netif_tx_unlock(netif_txq);
+ if (result == ICSSG_XDP_CONSUMED) {
+ ndev->stats.tx_dropped++;
+ goto drop;
+ }
+
+ dev_sw_netstats_rx_add(ndev, xdpf->len);
+ return result;
+ case XDP_REDIRECT:
+ err = xdp_do_redirect(emac->ndev, xdp, xdp_prog);
+ if (err)
+ goto drop;
+
+ dev_sw_netstats_rx_add(ndev, pkt_len);
+ return ICSSG_XDP_REDIR;
+ default:
+ bpf_warn_invalid_xdp_action(emac->ndev, xdp_prog, act);
+ fallthrough;
+ case XDP_ABORTED:
+drop:
+ trace_xdp_exception(emac->ndev, xdp_prog, act);
+ fallthrough; /* handle aborts by dropping packet */
+ case XDP_DROP:
+ ndev->stats.rx_dropped++;
+ page_pool_recycle_direct(emac->rx_chns.pg_pool, page);
+ return ICSSG_XDP_CONSUMED;
+ }
+}
+
+static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id, u32 *xdp_state)
{
struct prueth_rx_chn *rx_chn = &emac->rx_chns;
u32 buf_dma_len, pkt_len, port_id = 0;
struct net_device *ndev = emac->ndev;
struct cppi5_host_desc_t *desc_rx;
- struct sk_buff *skb, *new_skb;
+ struct prueth_swdata *swdata;
dma_addr_t desc_dma, buf_dma;
- void **swdata;
+ struct page *page, *new_page;
+ struct page_pool *pool;
+ struct sk_buff *skb;
+ struct xdp_buff xdp;
u32 *psdata;
+ void *pa;
int ret;
+ *xdp_state = 0;
+ pool = rx_chn->pg_pool;
ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_id, &desc_dma);
if (ret) {
if (ret != -ENODATA)
@@ -558,15 +723,15 @@ static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id)
return 0;
desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
-
swdata = cppi5_hdesc_get_swdata(desc_rx);
- skb = *swdata;
-
- psdata = cppi5_hdesc_get_psdata(desc_rx);
- /* RX HW timestamp */
- if (emac->rx_ts_enabled)
- emac_rx_timestamp(emac, skb, psdata);
+ if (swdata->type != PRUETH_SWDATA_PAGE) {
+ netdev_err(ndev, "rx_pkt: invalid swdata->type %d\n", swdata->type);
+ k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
+ return 0;
+ }
+ page = swdata->data.page;
+ page_pool_dma_sync_for_cpu(pool, page, 0, PAGE_SIZE);
cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
@@ -574,32 +739,63 @@ static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id)
pkt_len -= 4;
cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);
- dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
- skb->dev = ndev;
- new_skb = netdev_alloc_skb_ip_align(ndev, PRUETH_MAX_PKT_SIZE);
/* if allocation fails we drop the packet but push the
- * descriptor back to the ring with old skb to prevent a stall
+ * descriptor back to the ring with old page to prevent a stall
*/
- if (!new_skb) {
+ new_page = page_pool_dev_alloc_pages(pool);
+ if (unlikely(!new_page)) {
+ new_page = page;
ndev->stats.rx_dropped++;
- new_skb = skb;
+ goto requeue;
+ }
+
+ pa = page_address(page);
+ if (emac->xdp_prog) {
+ xdp_init_buff(&xdp, PAGE_SIZE, &rx_chn->xdp_rxq);
+ xdp_prepare_buff(&xdp, pa, PRUETH_HEADROOM, pkt_len, false);
+
+ *xdp_state = emac_run_xdp(emac, &xdp, page, &pkt_len);
+ if (*xdp_state == ICSSG_XDP_PASS)
+ skb = xdp_build_skb_from_buff(&xdp);
+ else
+ goto requeue;
} else {
- /* send the filled skb up the n/w stack */
- skb_put(skb, pkt_len);
- if (emac->prueth->is_switch_mode)
- skb->offload_fwd_mark = emac->offload_fwd_mark;
- skb->protocol = eth_type_trans(skb, ndev);
- napi_gro_receive(&emac->napi_rx, skb);
- ndev->stats.rx_bytes += pkt_len;
- ndev->stats.rx_packets++;
+ /* prepare skb and send to n/w stack */
+ skb = napi_build_skb(pa, PAGE_SIZE);
}
+ if (!skb) {
+ ndev->stats.rx_dropped++;
+ page_pool_recycle_direct(pool, page);
+ goto requeue;
+ }
+
+ skb_reserve(skb, PRUETH_HEADROOM);
+ skb_put(skb, pkt_len);
+ skb->dev = ndev;
+
+ psdata = cppi5_hdesc_get_psdata(desc_rx);
+ /* RX HW timestamp */
+ if (emac->rx_ts_enabled)
+ emac_rx_timestamp(emac, skb, psdata);
+
+ if (emac->prueth->is_switch_mode)
+ skb->offload_fwd_mark = emac->offload_fwd_mark;
+ skb->protocol = eth_type_trans(skb, ndev);
+
+ skb_mark_for_recycle(skb);
+ napi_gro_receive(&emac->napi_rx, skb);
+ ndev->stats.rx_bytes += pkt_len;
+ ndev->stats.rx_packets++;
+
+requeue:
/* queue another RX DMA */
- ret = prueth_dma_rx_push(emac, new_skb, &emac->rx_chns);
+ ret = prueth_dma_rx_push_mapped(emac, &emac->rx_chns, new_page,
+ PRUETH_MAX_PKT_SIZE);
if (WARN_ON(ret < 0)) {
- dev_kfree_skb_any(new_skb);
+ page_pool_recycle_direct(pool, new_page);
ndev->stats.rx_errors++;
ndev->stats.rx_dropped++;
}
@@ -611,22 +807,19 @@ static void prueth_rx_cleanup(void *data, dma_addr_t desc_dma)
{
struct prueth_rx_chn *rx_chn = data;
struct cppi5_host_desc_t *desc_rx;
- struct sk_buff *skb;
- dma_addr_t buf_dma;
- u32 buf_dma_len;
- void **swdata;
+ struct prueth_swdata *swdata;
+ struct page_pool *pool;
+ struct page *page;
+ pool = rx_chn->pg_pool;
desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
swdata = cppi5_hdesc_get_swdata(desc_rx);
- skb = *swdata;
- cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
- k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
+ if (swdata->type == PRUETH_SWDATA_PAGE) {
+ page = swdata->data.page;
+ page_pool_recycle_direct(pool, page);
+ }
- dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len,
- DMA_FROM_DEVICE);
k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
-
- dev_kfree_skb_any(skb);
}
static int prueth_tx_ts_cookie_get(struct prueth_emac *emac)
@@ -662,13 +855,13 @@ enum netdev_tx icssg_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev
struct prueth_emac *emac = netdev_priv(ndev);
struct prueth *prueth = emac->prueth;
struct netdev_queue *netif_txq;
+ struct prueth_swdata *swdata;
struct prueth_tx_chn *tx_chn;
dma_addr_t desc_dma, buf_dma;
u32 pkt_len, dst_tag_id;
int i, ret = 0, q_idx;
bool in_tx_ts = 0;
int tx_ts_cookie;
- void **swdata;
u32 *epib;
pkt_len = skb_headlen(skb);
@@ -730,7 +923,8 @@ enum netdev_tx icssg_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev
k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len);
swdata = cppi5_hdesc_get_swdata(first_desc);
- *swdata = skb;
+ swdata->type = PRUETH_SWDATA_SKB;
+ swdata->data.skb = skb;
/* Handle the case where skb is fragmented in pages */
cur_desc = first_desc;
@@ -780,6 +974,7 @@ enum netdev_tx icssg_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev
ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
if (ret) {
netdev_err(ndev, "tx: push failed: %d\n", ret);
+ netdev_tx_completed_queue(netif_txq, 1, pkt_len);
goto drop_free_descs;
}
@@ -833,15 +1028,27 @@ static void prueth_tx_cleanup(void *data, dma_addr_t desc_dma)
{
struct prueth_tx_chn *tx_chn = data;
struct cppi5_host_desc_t *desc_tx;
+ struct prueth_swdata *swdata;
+ struct xdp_frame *xdpf;
struct sk_buff *skb;
- void **swdata;
desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
swdata = cppi5_hdesc_get_swdata(desc_tx);
- skb = *(swdata);
- prueth_xmit_free(tx_chn, desc_tx);
- dev_kfree_skb_any(skb);
+ switch (swdata->type) {
+ case PRUETH_SWDATA_SKB:
+ skb = swdata->data.skb;
+ dev_kfree_skb_any(skb);
+ break;
+ case PRUETH_SWDATA_XDPF:
+ xdpf = swdata->data.xdpf;
+ xdp_return_frame(xdpf);
+ break;
+ default:
+ break;
+ }
+
+ prueth_xmit_free(tx_chn, desc_tx);
}
irqreturn_t prueth_rx_irq(int irq, void *dev_id)
@@ -855,31 +1062,6 @@ irqreturn_t prueth_rx_irq(int irq, void *dev_id)
}
EXPORT_SYMBOL_GPL(prueth_rx_irq);
-void prueth_emac_stop(struct prueth_emac *emac)
-{
- struct prueth *prueth = emac->prueth;
- int slice;
-
- switch (emac->port_id) {
- case PRUETH_PORT_MII0:
- slice = ICSS_SLICE0;
- break;
- case PRUETH_PORT_MII1:
- slice = ICSS_SLICE1;
- break;
- default:
- netdev_err(emac->ndev, "invalid port\n");
- return;
- }
-
- emac->fw_running = 0;
- if (!emac->is_sr1)
- rproc_shutdown(prueth->txpru[slice]);
- rproc_shutdown(prueth->rtu[slice]);
- rproc_shutdown(prueth->pru[slice]);
-}
-EXPORT_SYMBOL_GPL(prueth_emac_stop);
-
void prueth_cleanup_tx_ts(struct prueth_emac *emac)
{
int i;
@@ -900,15 +1082,18 @@ int icssg_napi_rx_poll(struct napi_struct *napi_rx, int budget)
PRUETH_RX_FLOW_DATA_SR1 : PRUETH_RX_FLOW_DATA;
int flow = emac->is_sr1 ?
PRUETH_MAX_RX_FLOWS_SR1 : PRUETH_MAX_RX_FLOWS;
+ int xdp_state_or = 0;
int num_rx = 0;
int cur_budget;
+ u32 xdp_state;
int ret;
while (flow--) {
cur_budget = budget - num_rx;
while (cur_budget--) {
- ret = emac_rx_packet(emac, flow);
+ ret = emac_rx_packet(emac, flow, &xdp_state);
+ xdp_state_or |= xdp_state;
if (ret)
break;
num_rx++;
@@ -918,6 +1103,9 @@ int icssg_napi_rx_poll(struct napi_struct *napi_rx, int budget)
break;
}
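+ /* Flush any pending XDP_REDIRECT work once per NAPI poll */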
+ if (xdp_state_or & ICSSG_XDP_REDIR)
+ xdp_do_flush();
+
if (num_rx < budget && napi_complete_done(napi_rx, num_rx)) {
if (unlikely(emac->rx_pace_timeout_ns)) {
hrtimer_start(&emac->rx_hrtimer,
@@ -932,29 +1120,71 @@ int icssg_napi_rx_poll(struct napi_struct *napi_rx, int budget)
}
EXPORT_SYMBOL_GPL(icssg_napi_rx_poll);
+static struct page_pool *prueth_create_page_pool(struct prueth_emac *emac,
+ struct device *dma_dev,
+ int size)
+{
+ struct page_pool_params pp_params = { 0 };
+ struct page_pool *pool;
+
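+ /* Pool DMA-maps pages at allocation and syncs them for the device on recycle */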
+ pp_params.order = 0;
+ pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
+ pp_params.pool_size = size;
+ pp_params.nid = dev_to_node(emac->prueth->dev);
+ pp_params.dma_dir = DMA_BIDIRECTIONAL;
+ pp_params.dev = dma_dev;
+ pp_params.napi = &emac->napi_rx;
+ pp_params.max_len = PAGE_SIZE;
+
+ pool = page_pool_create(&pp_params);
+ if (IS_ERR(pool))
+ netdev_err(emac->ndev, "cannot create rx page pool\n");
+
+ return pool;
+}
+
int prueth_prepare_rx_chan(struct prueth_emac *emac,
struct prueth_rx_chn *chn,
int buf_size)
{
- struct sk_buff *skb;
+ struct page_pool *pool;
+ struct page *page;
int i, ret;
+ pool = prueth_create_page_pool(emac, chn->dma_dev, chn->descs_num);
+ if (IS_ERR(pool))
+ return PTR_ERR(pool);
+
+ chn->pg_pool = pool;
+
for (i = 0; i < chn->descs_num; i++) {
- skb = __netdev_alloc_skb_ip_align(NULL, buf_size, GFP_KERNEL);
- if (!skb)
- return -ENOMEM;
+ /* NOTE: memory is not used efficiently here: one full page
+ * (typically 4KB) is allocated per buffer instead of
+ * PRUETH_MAX_PKT_SIZE (~1.5KB).
+ */
+ page = page_pool_dev_alloc_pages(pool);
+ if (!page) {
+ netdev_err(emac->ndev, "couldn't allocate rx page\n");
+ ret = -ENOMEM;
+ goto recycle_alloc_pg;
+ }
- ret = prueth_dma_rx_push(emac, skb, chn);
+ ret = prueth_dma_rx_push_mapped(emac, chn, page, buf_size);
if (ret < 0) {
netdev_err(emac->ndev,
- "cannot submit skb for rx chan %s ret %d\n",
+ "cannot submit page for rx chan %s ret %d\n",
chn->name, ret);
- kfree_skb(skb);
- return ret;
+ page_pool_recycle_direct(pool, page);
+ goto recycle_alloc_pg;
}
}
return 0;
+
+recycle_alloc_pg:
+ prueth_reset_rx_chan(&emac->rx_chns, PRUETH_MAX_RX_FLOWS, false);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(prueth_prepare_rx_chan);
@@ -980,7 +1210,7 @@ void prueth_reset_rx_chan(struct prueth_rx_chn *chn,
for (i = 0; i < num_flows; i++)
k3_udma_glue_reset_rx_chn(chn->rx_chn, i, chn,
- prueth_rx_cleanup, !!i);
+ prueth_rx_cleanup);
if (disable)
k3_udma_glue_disable_rx_chn(chn->rx_chn);
}
@@ -1084,10 +1314,28 @@ void icssg_ndo_get_stats64(struct net_device *ndev,
stats->rx_over_errors = emac_get_stat_by_name(emac, "rx_over_errors");
stats->multicast = emac_get_stat_by_name(emac, "rx_multicast_frames");
- stats->rx_errors = ndev->stats.rx_errors;
- stats->rx_dropped = ndev->stats.rx_dropped;
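+ /* Fold firmware-maintained error and drop counters into the netdev stats */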
+ stats->rx_errors = ndev->stats.rx_errors +
+ emac_get_stat_by_name(emac, "FW_RX_ERROR") +
+ emac_get_stat_by_name(emac, "FW_RX_EOF_SHORT_FRMERR") +
+ emac_get_stat_by_name(emac, "FW_RX_B0_DROP_EARLY_EOF") +
+ emac_get_stat_by_name(emac, "FW_RX_EXP_FRAG_Q_DROP") +
+ emac_get_stat_by_name(emac, "FW_RX_FIFO_OVERRUN");
+ stats->rx_dropped = ndev->stats.rx_dropped +
+ emac_get_stat_by_name(emac, "FW_DROPPED_PKT") +
+ emac_get_stat_by_name(emac, "FW_INF_PORT_DISABLED") +
+ emac_get_stat_by_name(emac, "FW_INF_SAV") +
+ emac_get_stat_by_name(emac, "FW_INF_SA_DL") +
+ emac_get_stat_by_name(emac, "FW_INF_PORT_BLOCKED") +
+ emac_get_stat_by_name(emac, "FW_INF_DROP_TAGGED") +
+ emac_get_stat_by_name(emac, "FW_INF_DROP_PRIOTAGGED") +
+ emac_get_stat_by_name(emac, "FW_INF_DROP_NOTAG") +
+ emac_get_stat_by_name(emac, "FW_INF_DROP_NOTMEMBER");
stats->tx_errors = ndev->stats.tx_errors;
- stats->tx_dropped = ndev->stats.tx_dropped;
+ stats->tx_dropped = ndev->stats.tx_dropped +
+ emac_get_stat_by_name(emac, "FW_RTU_PKT_DROP") +
+ emac_get_stat_by_name(emac, "FW_TX_DROPPED_PACKET") +
+ emac_get_stat_by_name(emac, "FW_TX_TS_DROPPED_PACKET") +
+ emac_get_stat_by_name(emac, "FW_TX_JUMBO_FRM_CUTOFF");
}
EXPORT_SYMBOL_GPL(icssg_ndo_get_stats64);
diff --git a/drivers/net/ethernet/ti/icssg/icssg_config.c b/drivers/net/ethernet/ti/icssg/icssg_config.c
index 5d2491c2943a..ddfd1c02a885 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_config.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_config.c
@@ -397,7 +397,7 @@ static int prueth_emac_buffer_setup(struct prueth_emac *emac)
return 0;
}
-static void icssg_init_emac_mode(struct prueth *prueth)
+void icssg_init_emac_mode(struct prueth *prueth)
{
/* When the device is configured as a bridge and it is being brought
* back to the emac mode, the host mac address has to be set as 0.
@@ -406,9 +406,6 @@ static void icssg_init_emac_mode(struct prueth *prueth)
int i;
u8 mac[ETH_ALEN] = { 0 };
- if (prueth->emacs_initialized)
- return;
-
/* Set VLAN TABLE address base */
regmap_update_bits(prueth->miig_rt, FDB_GEN_CFG1, SMEM_VLAN_OFFSET_MASK,
addr << SMEM_VLAN_OFFSET);
@@ -423,15 +420,13 @@ static void icssg_init_emac_mode(struct prueth *prueth)
/* Clear host MAC address */
icssg_class_set_host_mac_addr(prueth->miig_rt, mac);
}
+EXPORT_SYMBOL_GPL(icssg_init_emac_mode);
-static void icssg_init_fw_offload_mode(struct prueth *prueth)
+void icssg_init_fw_offload_mode(struct prueth *prueth)
{
u32 addr = prueth->shram.pa + EMAC_ICSSG_SWITCH_DEFAULT_VLAN_TABLE_OFFSET;
int i;
- if (prueth->emacs_initialized)
- return;
-
/* Set VLAN TABLE address base */
regmap_update_bits(prueth->miig_rt, FDB_GEN_CFG1, SMEM_VLAN_OFFSET_MASK,
addr << SMEM_VLAN_OFFSET);
@@ -448,6 +443,7 @@ static void icssg_init_fw_offload_mode(struct prueth *prueth)
icssg_class_set_host_mac_addr(prueth->miig_rt, prueth->hw_bridge_dev->dev_addr);
icssg_set_pvid(prueth, prueth->default_vlan, PRUETH_PORT_HOST);
}
+EXPORT_SYMBOL_GPL(icssg_init_fw_offload_mode);
int icssg_config(struct prueth *prueth, struct prueth_emac *emac, int slice)
{
@@ -455,11 +451,6 @@ int icssg_config(struct prueth *prueth, struct prueth_emac *emac, int slice)
struct icssg_flow_cfg __iomem *flow_cfg;
int ret;
- if (prueth->is_switch_mode || prueth->is_hsr_offload_mode)
- icssg_init_fw_offload_mode(prueth);
- else
- icssg_init_emac_mode(prueth);
-
memset_io(config, 0, TAS_GATE_MASK_LIST0);
icssg_miig_queues_init(prueth, slice);
@@ -786,3 +777,27 @@ void icssg_set_pvid(struct prueth *prueth, u8 vid, u8 port)
writel(pvid, prueth->shram.va + EMAC_ICSSG_SWITCH_PORT0_DEFAULT_VLAN_OFFSET);
}
EXPORT_SYMBOL_GPL(icssg_set_pvid);
+
+int emac_fdb_flow_id_updated(struct prueth_emac *emac)
+{
+ struct mgmt_cmd_rsp fdb_cmd_rsp = { 0 };
+ int slice = prueth_emac_slice(emac);
+ struct mgmt_cmd fdb_cmd = { 0 };
+ int ret;
+
+ fdb_cmd.header = ICSSG_FW_MGMT_CMD_HEADER;
+ fdb_cmd.type = ICSSG_FW_MGMT_FDB_CMD_TYPE_RX_FLOW;
+ fdb_cmd.seqnum = ++(emac->prueth->icssg_hwcmdseq);
+ fdb_cmd.param = 0;
+
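+ /* Encode the ICSSG slice number in the command parameter */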
+ fdb_cmd.param |= (slice << 4);
+ fdb_cmd.cmd_args[0] = 0;
+
+ ret = icssg_send_fdb_msg(emac, &fdb_cmd, &fdb_cmd_rsp);
+ if (ret)
+ return ret;
+
+ WARN_ON(fdb_cmd.seqnum != fdb_cmd_rsp.seqnum);
+ return fdb_cmd_rsp.status == 1 ? 0 : -EINVAL;
+}
+EXPORT_SYMBOL_GPL(emac_fdb_flow_id_updated);
diff --git a/drivers/net/ethernet/ti/icssg/icssg_config.h b/drivers/net/ethernet/ti/icssg/icssg_config.h
index 92c2deaa3068..c884e9fa099e 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_config.h
+++ b/drivers/net/ethernet/ti/icssg/icssg_config.h
@@ -55,6 +55,7 @@ struct icssg_rxq_ctx {
#define ICSSG_FW_MGMT_FDB_CMD_TYPE 0x03
#define ICSSG_FW_MGMT_CMD_TYPE 0x04
#define ICSSG_FW_MGMT_PKT 0x80000000
+#define ICSSG_FW_MGMT_FDB_CMD_TYPE_RX_FLOW 0x05
struct icssg_r30_cmd {
u32 cmd[4];
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
index c568c84a032b..86fc1278127c 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
@@ -164,11 +164,26 @@ static struct icssg_firmwares icssg_emac_firmwares[] = {
}
};
-static int prueth_emac_start(struct prueth *prueth, struct prueth_emac *emac)
+static int prueth_start(struct rproc *rproc, const char *fw_name)
+{
+ int ret;
+
+ ret = rproc_set_firmware(rproc, fw_name);
+ if (ret)
+ return ret;
+ return rproc_boot(rproc);
+}
+
+static void prueth_shutdown(struct rproc *rproc)
+{
+ rproc_shutdown(rproc);
+}
+
+static int prueth_emac_start(struct prueth *prueth)
{
struct icssg_firmwares *firmwares;
struct device *dev = prueth->dev;
- int slice, ret;
+ int ret, slice;
if (prueth->is_switch_mode)
firmwares = icssg_switch_firmwares;
@@ -177,49 +192,126 @@ static int prueth_emac_start(struct prueth *prueth, struct prueth_emac *emac)
else
firmwares = icssg_emac_firmwares;
- slice = prueth_emac_slice(emac);
- if (slice < 0) {
- netdev_err(emac->ndev, "invalid port\n");
- return -EINVAL;
+ for (slice = 0; slice < PRUETH_NUM_MACS; slice++) {
+ ret = prueth_start(prueth->pru[slice], firmwares[slice].pru);
+ if (ret) {
+ dev_err(dev, "failed to boot PRU%d: %d\n", slice, ret);
+ goto unwind_slices;
+ }
+
+ ret = prueth_start(prueth->rtu[slice], firmwares[slice].rtu);
+ if (ret) {
+ dev_err(dev, "failed to boot RTU%d: %d\n", slice, ret);
+ rproc_shutdown(prueth->pru[slice]);
+ goto unwind_slices;
+ }
+
+ ret = prueth_start(prueth->txpru[slice], firmwares[slice].txpru);
+ if (ret) {
+ dev_err(dev, "failed to boot TX_PRU%d: %d\n", slice, ret);
+ rproc_shutdown(prueth->rtu[slice]);
+ rproc_shutdown(prueth->pru[slice]);
+ goto unwind_slices;
+ }
}
- ret = icssg_config(prueth, emac, slice);
- if (ret)
- return ret;
+ return 0;
- ret = rproc_set_firmware(prueth->pru[slice], firmwares[slice].pru);
- ret = rproc_boot(prueth->pru[slice]);
- if (ret) {
- dev_err(dev, "failed to boot PRU%d: %d\n", slice, ret);
- return -EINVAL;
+unwind_slices:
+ while (--slice >= 0) {
+ prueth_shutdown(prueth->txpru[slice]);
+ prueth_shutdown(prueth->rtu[slice]);
+ prueth_shutdown(prueth->pru[slice]);
}
- ret = rproc_set_firmware(prueth->rtu[slice], firmwares[slice].rtu);
- ret = rproc_boot(prueth->rtu[slice]);
- if (ret) {
- dev_err(dev, "failed to boot RTU%d: %d\n", slice, ret);
- goto halt_pru;
+ return ret;
+}
+
+static void prueth_emac_stop(struct prueth *prueth)
+{
+ int slice;
+
+ for (slice = 0; slice < PRUETH_NUM_MACS; slice++) {
+ prueth_shutdown(prueth->txpru[slice]);
+ prueth_shutdown(prueth->rtu[slice]);
+ prueth_shutdown(prueth->pru[slice]);
}
+}
+
+static int prueth_emac_common_start(struct prueth *prueth)
+{
+ struct prueth_emac *emac;
+ int ret = 0;
+ int slice;
+
+ if (!prueth->emac[ICSS_SLICE0] && !prueth->emac[ICSS_SLICE1])
+ return -EINVAL;
- ret = rproc_set_firmware(prueth->txpru[slice], firmwares[slice].txpru);
- ret = rproc_boot(prueth->txpru[slice]);
+ /* clear SMEM and MSMC settings for all slices */
+ memset_io(prueth->msmcram.va, 0, prueth->msmcram.size);
+ memset_io(prueth->shram.va, 0, ICSSG_CONFIG_OFFSET_SLICE1 * PRUETH_NUM_MACS);
+
+ icssg_class_default(prueth->miig_rt, ICSS_SLICE0, 0, false);
+ icssg_class_default(prueth->miig_rt, ICSS_SLICE1, 0, false);
+
+ if (prueth->is_switch_mode || prueth->is_hsr_offload_mode)
+ icssg_init_fw_offload_mode(prueth);
+ else
+ icssg_init_emac_mode(prueth);
+
+ for (slice = 0; slice < PRUETH_NUM_MACS; slice++) {
+ emac = prueth->emac[slice];
+ if (!emac)
+ continue;
+ ret = icssg_config(prueth, emac, slice);
+ if (ret)
+ goto disable_class;
+ }
+
+ ret = prueth_emac_start(prueth);
+ if (ret)
+ goto disable_class;
+
+ emac = prueth->emac[ICSS_SLICE0] ? prueth->emac[ICSS_SLICE0] :
+ prueth->emac[ICSS_SLICE1];
+ ret = icss_iep_init(emac->iep, &prueth_iep_clockops,
+ emac, IEP_DEFAULT_CYCLE_TIME_NS);
if (ret) {
- dev_err(dev, "failed to boot TX_PRU%d: %d\n", slice, ret);
- goto halt_rtu;
+ dev_err(prueth->dev, "Failed to initialize IEP module\n");
+ goto stop_pruss;
}
- emac->fw_running = 1;
return 0;
-halt_rtu:
- rproc_shutdown(prueth->rtu[slice]);
+stop_pruss:
+ prueth_emac_stop(prueth);
-halt_pru:
- rproc_shutdown(prueth->pru[slice]);
+disable_class:
+ icssg_class_disable(prueth->miig_rt, ICSS_SLICE0);
+ icssg_class_disable(prueth->miig_rt, ICSS_SLICE1);
return ret;
}
+static int prueth_emac_common_stop(struct prueth *prueth)
+{
+ struct prueth_emac *emac;
+
+ if (!prueth->emac[ICSS_SLICE0] && !prueth->emac[ICSS_SLICE1])
+ return -EINVAL;
+
+ icssg_class_disable(prueth->miig_rt, ICSS_SLICE0);
+ icssg_class_disable(prueth->miig_rt, ICSS_SLICE1);
+
+ prueth_emac_stop(prueth);
+
+ emac = prueth->emac[ICSS_SLICE0] ? prueth->emac[ICSS_SLICE0] :
+ prueth->emac[ICSS_SLICE1];
+ icss_iep_exit(emac->iep);
+
+ return 0;
+}
+
/* called back by PHY layer if there is change in link state of hw port*/
static void emac_adjust_link(struct net_device *ndev)
{
@@ -374,9 +466,6 @@ static void prueth_iep_settime(void *clockops_data, u64 ns)
u32 cycletime;
int timeout;
- if (!emac->fw_running)
- return;
-
sc_descp = emac->prueth->shram.va + TIMESYNC_FW_WC_SETCLOCK_DESC_OFFSET;
cycletime = IEP_DEFAULT_CYCLE_TIME_NS;
@@ -470,63 +559,163 @@ const struct icss_iep_clockops prueth_iep_clockops = {
.perout_enable = prueth_perout_enable,
};
+static int prueth_create_xdp_rxqs(struct prueth_emac *emac)
+{
+ struct xdp_rxq_info *rxq = &emac->rx_chns.xdp_rxq;
+ struct page_pool *pool = emac->rx_chns.pg_pool;
+ int ret;
+
+ ret = xdp_rxq_info_reg(rxq, emac->ndev, 0, emac->napi_rx.napi_id);
+ if (ret)
+ return ret;
+
+ ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool);
+ if (ret)
+ xdp_rxq_info_unreg(rxq);
+
+ return ret;
+}
+
+static void prueth_destroy_xdp_rxqs(struct prueth_emac *emac)
+{
+ struct xdp_rxq_info *rxq = &emac->rx_chns.xdp_rxq;
+
+ if (!xdp_rxq_info_is_reg(rxq))
+ return;
+
+ xdp_rxq_info_unreg(rxq);
+}
+
static int icssg_prueth_add_mcast(struct net_device *ndev, const u8 *addr)
{
- struct prueth_emac *emac = netdev_priv(ndev);
- int port_mask = BIT(emac->port_id);
+ struct net_device *real_dev;
+ struct prueth_emac *emac;
+ int port_mask;
+ u8 vlan_id;
- port_mask |= icssg_fdb_lookup(emac, addr, 0);
- icssg_fdb_add_del(emac, addr, 0, port_mask, true);
- icssg_vtbl_modify(emac, 0, port_mask, port_mask, true);
+ vlan_id = is_vlan_dev(ndev) ? vlan_dev_vlan_id(ndev) : PRUETH_DFLT_VLAN_MAC;
+ real_dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;
+ emac = netdev_priv(real_dev);
+
+ port_mask = BIT(emac->port_id) | icssg_fdb_lookup(emac, addr, vlan_id);
+ icssg_fdb_add_del(emac, addr, vlan_id, port_mask, true);
+ icssg_vtbl_modify(emac, vlan_id, port_mask, port_mask, true);
return 0;
}
static int icssg_prueth_del_mcast(struct net_device *ndev, const u8 *addr)
{
- struct prueth_emac *emac = netdev_priv(ndev);
- int port_mask = BIT(emac->port_id);
+ struct net_device *real_dev;
+ struct prueth_emac *emac;
int other_port_mask;
+ int port_mask;
+ u8 vlan_id;
+
+ vlan_id = is_vlan_dev(ndev) ? vlan_dev_vlan_id(ndev) : PRUETH_DFLT_VLAN_MAC;
+ real_dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;
+ emac = netdev_priv(real_dev);
- other_port_mask = port_mask ^ icssg_fdb_lookup(emac, addr, 0);
+ port_mask = BIT(emac->port_id);
+ other_port_mask = port_mask ^ icssg_fdb_lookup(emac, addr, vlan_id);
- icssg_fdb_add_del(emac, addr, 0, port_mask, false);
- icssg_vtbl_modify(emac, 0, port_mask, port_mask, false);
+ icssg_fdb_add_del(emac, addr, vlan_id, port_mask, false);
+ icssg_vtbl_modify(emac, vlan_id, port_mask, port_mask, false);
if (other_port_mask) {
- icssg_fdb_add_del(emac, addr, 0, other_port_mask, true);
- icssg_vtbl_modify(emac, 0, other_port_mask, other_port_mask, true);
+ icssg_fdb_add_del(emac, addr, vlan_id, other_port_mask, true);
+ icssg_vtbl_modify(emac, vlan_id, other_port_mask,
+ other_port_mask, true);
}
return 0;
}
-static int icssg_prueth_hsr_add_mcast(struct net_device *ndev, const u8 *addr)
+static void icssg_prueth_hsr_fdb_add_del(struct prueth_emac *emac,
+ const u8 *addr, u8 vid, bool add)
{
- struct prueth_emac *emac = netdev_priv(ndev);
- struct prueth *prueth = emac->prueth;
-
- icssg_fdb_add_del(emac, addr, prueth->default_vlan,
+ icssg_fdb_add_del(emac, addr, vid,
ICSSG_FDB_ENTRY_P0_MEMBERSHIP |
ICSSG_FDB_ENTRY_P1_MEMBERSHIP |
ICSSG_FDB_ENTRY_P2_MEMBERSHIP |
- ICSSG_FDB_ENTRY_BLOCK, true);
+ ICSSG_FDB_ENTRY_BLOCK, add);
+
+ if (add)
+ icssg_vtbl_modify(emac, vid, BIT(emac->port_id),
+ BIT(emac->port_id), add);
+}
+
+static int icssg_prueth_hsr_add_mcast(struct net_device *ndev, const u8 *addr)
+{
+ struct net_device *real_dev;
+ struct prueth_emac *emac;
+ u8 vlan_id, i;
+
+ vlan_id = is_vlan_dev(ndev) ? vlan_dev_vlan_id(ndev) : PRUETH_DFLT_VLAN_HSR;
+ real_dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;
+
+ if (is_hsr_master(real_dev)) {
+ for (i = HSR_PT_SLAVE_A; i < HSR_PT_INTERLINK; i++) {
+ emac = netdev_priv(hsr_get_port_ndev(real_dev, i));
+ if (!emac)
+ return -EINVAL;
+ icssg_prueth_hsr_fdb_add_del(emac, addr, vlan_id,
+ true);
+ }
+ } else {
+ emac = netdev_priv(real_dev);
+ icssg_prueth_hsr_fdb_add_del(emac, addr, vlan_id, true);
+ }
- icssg_vtbl_modify(emac, emac->port_vlan, BIT(emac->port_id),
- BIT(emac->port_id), true);
return 0;
}
static int icssg_prueth_hsr_del_mcast(struct net_device *ndev, const u8 *addr)
{
- struct prueth_emac *emac = netdev_priv(ndev);
- struct prueth *prueth = emac->prueth;
+ struct net_device *real_dev;
+ struct prueth_emac *emac;
+ u8 vlan_id, i;
+
+ vlan_id = is_vlan_dev(ndev) ? vlan_dev_vlan_id(ndev) : PRUETH_DFLT_VLAN_HSR;
+ real_dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;
+
+ if (is_hsr_master(real_dev)) {
+ for (i = HSR_PT_SLAVE_A; i < HSR_PT_INTERLINK; i++) {
+ emac = netdev_priv(hsr_get_port_ndev(real_dev, i));
+ if (!emac)
+ return -EINVAL;
+ icssg_prueth_hsr_fdb_add_del(emac, addr, vlan_id,
+ false);
+ }
+ } else {
+ emac = netdev_priv(real_dev);
+ icssg_prueth_hsr_fdb_add_del(emac, addr, vlan_id, false);
+ }
- icssg_fdb_add_del(emac, addr, prueth->default_vlan,
- ICSSG_FDB_ENTRY_P0_MEMBERSHIP |
- ICSSG_FDB_ENTRY_P1_MEMBERSHIP |
- ICSSG_FDB_ENTRY_P2_MEMBERSHIP |
- ICSSG_FDB_ENTRY_BLOCK, false);
+ return 0;
+}
+
+static int icssg_update_vlan_mcast(struct net_device *vdev, int vid,
+ void *args)
+{
+ struct prueth_emac *emac = args;
+
+ if (!vdev || !vid)
+ return 0;
+
+ netif_addr_lock_bh(vdev);
+ __hw_addr_sync_multiple(&emac->vlan_mcast_list[vid], &vdev->mc,
+ vdev->addr_len);
+ netif_addr_unlock_bh(vdev);
+
+ if (emac->prueth->is_hsr_offload_mode)
+ __hw_addr_sync_dev(&emac->vlan_mcast_list[vid], vdev,
+ icssg_prueth_hsr_add_mcast,
+ icssg_prueth_hsr_del_mcast);
+ else
+ __hw_addr_sync_dev(&emac->vlan_mcast_list[vid], vdev,
+ icssg_prueth_add_mcast,
+ icssg_prueth_del_mcast);
return 0;
}
@@ -543,23 +732,17 @@ static int emac_ndo_open(struct net_device *ndev)
{
struct prueth_emac *emac = netdev_priv(ndev);
int ret, i, num_data_chn = emac->tx_ch_num;
+ struct icssg_flow_cfg __iomem *flow_cfg;
struct prueth *prueth = emac->prueth;
int slice = prueth_emac_slice(emac);
struct device *dev = prueth->dev;
int max_rx_flows;
int rx_flow;
- /* clear SMEM and MSMC settings for all slices */
- if (!prueth->emacs_initialized) {
- memset_io(prueth->msmcram.va, 0, prueth->msmcram.size);
- memset_io(prueth->shram.va, 0, ICSSG_CONFIG_OFFSET_SLICE1 * PRUETH_NUM_MACS);
- }
-
/* set h/w MAC as user might have re-configured */
ether_addr_copy(emac->mac_addr, ndev->dev_addr);
icssg_class_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr);
- icssg_class_default(prueth->miig_rt, slice, 0, false);
icssg_ft1_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr);
/* Notify the stack of the actual queue counts. */
@@ -597,18 +780,23 @@ static int emac_ndo_open(struct net_device *ndev)
goto cleanup_napi;
}
- /* reset and start PRU firmware */
- ret = prueth_emac_start(prueth, emac);
- if (ret)
- goto free_rx_irq;
+ if (!prueth->emacs_initialized) {
+ ret = prueth_emac_common_start(prueth);
+ if (ret)
+ goto free_rx_irq;
+ }
- icssg_mii_update_mtu(prueth->mii_rt, slice, ndev->max_mtu);
+ flow_cfg = emac->dram.va + ICSSG_CONFIG_OFFSET + PSI_L_REGULAR_FLOW_ID_BASE_OFFSET;
+ writew(emac->rx_flow_id_base, &flow_cfg->rx_base_flow);
+ ret = emac_fdb_flow_id_updated(emac);
- if (!prueth->emacs_initialized) {
- ret = icss_iep_init(emac->iep, &prueth_iep_clockops,
- emac, IEP_DEFAULT_CYCLE_TIME_NS);
+ if (ret) {
+ netdev_err(ndev, "Failed to update Rx Flow ID %d", ret);
+ goto stop;
}
+ icssg_mii_update_mtu(prueth->mii_rt, slice, ndev->max_mtu);
+
ret = request_threaded_irq(emac->tx_ts_irq, NULL, prueth_tx_ts_irq,
IRQF_ONESHOT, dev_name(dev), emac);
if (ret)
@@ -619,10 +807,14 @@ static int emac_ndo_open(struct net_device *ndev)
if (ret)
goto free_tx_ts_irq;
- ret = k3_udma_glue_enable_rx_chn(emac->rx_chns.rx_chn);
+ ret = prueth_create_xdp_rxqs(emac);
if (ret)
goto reset_rx_chn;
+ ret = k3_udma_glue_enable_rx_chn(emac->rx_chns.rx_chn);
+ if (ret)
+ goto destroy_xdp_rxqs;
+
for (i = 0; i < emac->tx_ch_num; i++) {
ret = k3_udma_glue_enable_tx_chn(emac->tx_chns[i].tx_chn);
if (ret)
@@ -648,12 +840,15 @@ reset_tx_chan:
* any SKB for completion. So set false to free_skb
*/
prueth_reset_tx_chan(emac, i, false);
+destroy_xdp_rxqs:
+ prueth_destroy_xdp_rxqs(emac);
reset_rx_chn:
prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, false);
free_tx_ts_irq:
free_irq(emac->tx_ts_irq, emac);
stop:
- prueth_emac_stop(emac);
+ if (!prueth->emacs_initialized)
+ prueth_emac_common_stop(prueth);
free_rx_irq:
free_irq(emac->rx_chns.irq[rx_flow], emac);
cleanup_napi:
@@ -689,8 +884,6 @@ static int emac_ndo_stop(struct net_device *ndev)
if (ndev->phydev)
phy_stop(ndev->phydev);
- icssg_class_disable(prueth->miig_rt, prueth_emac_slice(emac));
-
if (emac->prueth->is_hsr_offload_mode)
__dev_mc_unsync(ndev, icssg_prueth_hsr_del_mcast);
else
@@ -719,7 +912,7 @@ static int emac_ndo_stop(struct net_device *ndev)
k3_udma_glue_tdown_rx_chn(emac->rx_chns.rx_chn, true);
prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, true);
-
+ prueth_destroy_xdp_rxqs(emac);
napi_disable(&emac->napi_rx);
hrtimer_cancel(&emac->rx_hrtimer);
@@ -728,11 +921,9 @@ static int emac_ndo_stop(struct net_device *ndev)
/* Destroying the queued work in ndo_stop() */
cancel_delayed_work_sync(&emac->stats_work);
- if (prueth->emacs_initialized == 1)
- icss_iep_exit(emac->iep);
-
/* stop PRUs */
- prueth_emac_stop(emac);
+ if (prueth->emacs_initialized == 1)
+ prueth_emac_common_stop(prueth);
free_irq(emac->tx_ts_irq, emac);
@@ -772,12 +963,22 @@ static void emac_ndo_set_rx_mode_work(struct work_struct *work)
return;
}
- if (emac->prueth->is_hsr_offload_mode)
+ if (emac->prueth->is_hsr_offload_mode) {
__dev_mc_sync(ndev, icssg_prueth_hsr_add_mcast,
icssg_prueth_hsr_del_mcast);
- else
+ if (rtnl_trylock()) {
+ vlan_for_each(emac->prueth->hsr_dev,
+ icssg_update_vlan_mcast, emac);
+ rtnl_unlock();
+ }
+ } else {
__dev_mc_sync(ndev, icssg_prueth_add_mcast,
icssg_prueth_del_mcast);
+ if (rtnl_trylock()) {
+ vlan_for_each(ndev, icssg_update_vlan_mcast, emac);
+ rtnl_unlock();
+ }
+ }
}
/**
@@ -822,19 +1023,19 @@ static int emac_ndo_vlan_rx_add_vid(struct net_device *ndev,
{
struct prueth_emac *emac = netdev_priv(ndev);
struct prueth *prueth = emac->prueth;
+ int port_mask = BIT(emac->port_id);
int untag_mask = 0;
- int port_mask;
- if (prueth->is_hsr_offload_mode) {
- port_mask = BIT(PRUETH_PORT_HOST) | BIT(emac->port_id);
- untag_mask = 0;
+ if (prueth->is_hsr_offload_mode)
+ port_mask |= BIT(PRUETH_PORT_HOST);
- netdev_dbg(emac->ndev, "VID add vid:%u port_mask:%X untag_mask %X\n",
- vid, port_mask, untag_mask);
+ __hw_addr_init(&emac->vlan_mcast_list[vid]);
+ netdev_dbg(emac->ndev, "VID add vid:%u port_mask:%X untag_mask %X\n",
+ vid, port_mask, untag_mask);
+
+ icssg_vtbl_modify(emac, vid, port_mask, untag_mask, true);
+ icssg_set_pvid(emac->prueth, vid, emac->port_id);
- icssg_vtbl_modify(emac, vid, port_mask, untag_mask, true);
- icssg_set_pvid(emac->prueth, vid, emac->port_id);
- }
return 0;
}
@@ -843,21 +1044,106 @@ static int emac_ndo_vlan_rx_del_vid(struct net_device *ndev,
{
struct prueth_emac *emac = netdev_priv(ndev);
struct prueth *prueth = emac->prueth;
+ int port_mask = BIT(emac->port_id);
int untag_mask = 0;
- int port_mask;
- if (prueth->is_hsr_offload_mode) {
+ if (prueth->is_hsr_offload_mode)
port_mask = BIT(PRUETH_PORT_HOST);
- untag_mask = 0;
- netdev_dbg(emac->ndev, "VID del vid:%u port_mask:%X untag_mask %X\n",
- vid, port_mask, untag_mask);
+ netdev_dbg(emac->ndev, "VID del vid:%u port_mask:%X untag_mask %X\n",
+ vid, port_mask, untag_mask);
+ icssg_vtbl_modify(emac, vid, port_mask, untag_mask, false);
- icssg_vtbl_modify(emac, vid, port_mask, untag_mask, false);
+ return 0;
+}
+
+/**
+ * emac_xdp_xmit - Implements ndo_xdp_xmit
+ * @dev: netdev
+ * @n: number of frames
+ * @frames: array of XDP frame pointers
+ * @flags: XDP extra info
+ *
+ * Return: number of frames successfully sent. Failed frames
+ * will be freed by the XDP core.
+ *
+ * For error cases, a negative errno code is returned and no frames
+ * are transmitted (the caller must handle freeing the frames).
+ **/
+static int emac_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
+ u32 flags)
+{
+ struct prueth_emac *emac = netdev_priv(dev);
+ struct net_device *ndev = emac->ndev;
+ struct netdev_queue *netif_txq;
+ int cpu = smp_processor_id();
+ struct xdp_frame *xdpf;
+ unsigned int q_idx;
+ int nxmit = 0;
+ u32 err;
+ int i;
+
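+ /* Select the TX channel for this CPU, mirroring the XDP_TX path */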
+ q_idx = cpu % emac->tx_ch_num;
+ netif_txq = netdev_get_tx_queue(ndev, q_idx);
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ __netif_tx_lock(netif_txq, cpu);
+ for (i = 0; i < n; i++) {
+ xdpf = frames[i];
+ err = emac_xmit_xdp_frame(emac, xdpf, NULL, q_idx);
+ if (err != ICSSG_XDP_TX) {
+ ndev->stats.tx_dropped++;
+ break;
+ }
+ nxmit++;
}
+ __netif_tx_unlock(netif_txq);
+
+ return nxmit;
+}
+
+/**
+ * emac_xdp_setup - add/remove an XDP program
+ * @emac: emac device
+ * @bpf: XDP program
+ *
+ * Return: Always 0 (Success)
+ **/
+static int emac_xdp_setup(struct prueth_emac *emac, struct netdev_bpf *bpf)
+{
+ struct bpf_prog *prog = bpf->prog;
+
+ if (!emac->xdpi.prog && !prog)
+ return 0;
+
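+ /* Publish the new program; the RX hot path reads it with READ_ONCE() */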
+ WRITE_ONCE(emac->xdp_prog, prog);
+
+ xdp_attachment_setup(&emac->xdpi, bpf);
+
return 0;
}
+/**
+ * emac_ndo_bpf - implements ndo_bpf for icssg_prueth
+ * @ndev: network adapter device
+ * @bpf: XDP program
+ *
+ * Return: 0 on success, error code on failure.
+ **/
+static int emac_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+
+ switch (bpf->command) {
+ case XDP_SETUP_PROG:
+ return emac_xdp_setup(emac, bpf);
+ default:
+ return -EINVAL;
+ }
+}
+
static const struct net_device_ops emac_netdev_ops = {
.ndo_open = emac_ndo_open,
.ndo_stop = emac_ndo_stop,
@@ -872,6 +1158,8 @@ static const struct net_device_ops emac_netdev_ops = {
.ndo_fix_features = emac_ndo_fix_features,
.ndo_vlan_rx_add_vid = emac_ndo_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = emac_ndo_vlan_rx_del_vid,
+ .ndo_bpf = emac_ndo_bpf,
+ .ndo_xdp_xmit = emac_xdp_xmit,
};
static int prueth_netdev_init(struct prueth *prueth,
@@ -900,6 +1188,8 @@ static int prueth_netdev_init(struct prueth *prueth,
emac->prueth = prueth;
emac->ndev = ndev;
emac->port_id = port;
+ emac->xdp_prog = NULL;
+ emac->ndev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
emac->cmd_wq = create_singlethread_workqueue("icssg_cmd_wq");
if (!emac->cmd_wq) {
ret = -ENOMEM;
@@ -1001,11 +1291,14 @@ static int prueth_netdev_init(struct prueth *prueth,
ndev->hw_features = NETIF_F_SG;
ndev->features = ndev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
ndev->hw_features |= NETIF_PRUETH_HSR_OFFLOAD_FEATURES;
+ xdp_set_features_flag(ndev,
+ NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT);
netif_napi_add(ndev, &emac->napi_rx, icssg_napi_rx_poll);
- hrtimer_init(&emac->rx_hrtimer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL_PINNED);
- emac->rx_hrtimer.function = &emac_rx_timer_callback;
+ hrtimer_setup(&emac->rx_hrtimer, &emac_rx_timer_callback, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED);
prueth->emac[mac] = emac;
return 0;
@@ -1053,10 +1346,11 @@ static void prueth_offload_fwd_mark_update(struct prueth *prueth)
}
}
-static void prueth_emac_restart(struct prueth *prueth)
+static int prueth_emac_restart(struct prueth *prueth)
{
struct prueth_emac *emac0 = prueth->emac[PRUETH_MAC0];
struct prueth_emac *emac1 = prueth->emac[PRUETH_MAC1];
+ int ret;
/* Detach the net_device for both PRUeth ports*/
if (netif_running(emac0->ndev))
@@ -1065,36 +1359,46 @@ static void prueth_emac_restart(struct prueth *prueth)
netif_device_detach(emac1->ndev);
/* Disable both PRUeth ports */
- icssg_set_port_state(emac0, ICSSG_EMAC_PORT_DISABLE);
- icssg_set_port_state(emac1, ICSSG_EMAC_PORT_DISABLE);
+ ret = icssg_set_port_state(emac0, ICSSG_EMAC_PORT_DISABLE);
+ ret |= icssg_set_port_state(emac1, ICSSG_EMAC_PORT_DISABLE);
+ if (ret)
+ return ret;
/* Stop both pru cores for both PRUeth ports*/
- prueth_emac_stop(emac0);
- prueth->emacs_initialized--;
- prueth_emac_stop(emac1);
- prueth->emacs_initialized--;
+ ret = prueth_emac_common_stop(prueth);
+ if (ret) {
+ dev_err(prueth->dev, "Failed to stop the firmwares");
+ return ret;
+ }
/* Start both pru cores for both PRUeth ports */
- prueth_emac_start(prueth, emac0);
- prueth->emacs_initialized++;
- prueth_emac_start(prueth, emac1);
- prueth->emacs_initialized++;
+ ret = prueth_emac_common_start(prueth);
+ if (ret) {
+ dev_err(prueth->dev, "Failed to start the firmwares");
+ return ret;
+ }
/* Enable forwarding for both PRUeth ports */
- icssg_set_port_state(emac0, ICSSG_EMAC_PORT_FORWARD);
- icssg_set_port_state(emac1, ICSSG_EMAC_PORT_FORWARD);
+ ret = icssg_set_port_state(emac0, ICSSG_EMAC_PORT_FORWARD);
+ ret |= icssg_set_port_state(emac1, ICSSG_EMAC_PORT_FORWARD);
/* Attach net_device for both PRUeth ports */
netif_device_attach(emac0->ndev);
netif_device_attach(emac1->ndev);
+
+ return ret;
}
static void icssg_change_mode(struct prueth *prueth)
{
struct prueth_emac *emac;
- int mac;
+ int mac, ret;
- prueth_emac_restart(prueth);
+ ret = prueth_emac_restart(prueth);
+ if (ret) {
+ dev_err(prueth->dev, "Failed to restart the firmwares, aborting the process");
+ return;
+ }
for (mac = PRUETH_MAC0; mac < PRUETH_NUM_MACS; mac++) {
emac = prueth->emac[mac];
@@ -1158,7 +1462,7 @@ static int prueth_netdevice_port_link(struct net_device *ndev,
if (prueth->br_members & BIT(PRUETH_PORT_MII0) &&
prueth->br_members & BIT(PRUETH_PORT_MII1)) {
prueth->is_switch_mode = true;
- prueth->default_vlan = 1;
+ prueth->default_vlan = PRUETH_DFLT_VLAN_SW;
emac->port_vlan = prueth->default_vlan;
icssg_change_mode(prueth);
}
@@ -1173,13 +1477,18 @@ static void prueth_netdevice_port_unlink(struct net_device *ndev)
{
struct prueth_emac *emac = netdev_priv(ndev);
struct prueth *prueth = emac->prueth;
+ int ret;
prueth->br_members &= ~BIT(emac->port_id);
if (prueth->is_switch_mode) {
prueth->is_switch_mode = false;
emac->port_vlan = 0;
- prueth_emac_restart(prueth);
+ ret = prueth_emac_restart(prueth);
+ if (ret) {
+ dev_err(prueth->dev, "Failed to restart the firmwares, aborting the process");
+ return;
+ }
}
prueth_offload_fwd_mark_update(prueth);
@@ -1211,7 +1520,7 @@ static int prueth_hsr_port_link(struct net_device *ndev)
NETIF_PRUETH_HSR_OFFLOAD_FEATURES))
return -EOPNOTSUPP;
prueth->is_hsr_offload_mode = true;
- prueth->default_vlan = 1;
+ prueth->default_vlan = PRUETH_DFLT_VLAN_HSR;
emac0->port_vlan = prueth->default_vlan;
emac1->port_vlan = prueth->default_vlan;
icssg_change_mode(prueth);
@@ -1228,6 +1537,7 @@ static void prueth_hsr_port_unlink(struct net_device *ndev)
struct prueth *prueth = emac->prueth;
struct prueth_emac *emac0;
struct prueth_emac *emac1;
+ int ret;
emac0 = prueth->emac[PRUETH_MAC0];
emac1 = prueth->emac[PRUETH_MAC1];
@@ -1238,7 +1548,11 @@ static void prueth_hsr_port_unlink(struct net_device *ndev)
emac0->port_vlan = 0;
emac1->port_vlan = 0;
prueth->hsr_dev = NULL;
- prueth_emac_restart(prueth);
+ ret = prueth_emac_restart(prueth);
+ if (ret) {
+ dev_err(prueth->dev, "Failed to restart the firmwares, aborting the process");
+ return;
+ }
netdev_dbg(ndev, "Disabling HSR Offload mode\n");
}
}
@@ -1335,6 +1649,9 @@ static int prueth_probe(struct platform_device *pdev)
np = dev->of_node;
+ BUILD_BUG_ON_MSG((sizeof(struct prueth_swdata) > PRUETH_NAV_SW_DATA_SIZE),
+ "insufficient SW_DATA size");
+
prueth = devm_kzalloc(dev, sizeof(*prueth), GFP_KERNEL);
if (!prueth)
return -ENOMEM;
@@ -1413,13 +1730,10 @@ static int prueth_probe(struct platform_device *pdev)
prueth->pa_stats = NULL;
}
- if (eth0_node) {
+ if (eth0_node || eth1_node) {
ret = prueth_get_cores(prueth, ICSS_SLICE0, false);
if (ret)
goto put_cores;
- }
-
- if (eth1_node) {
ret = prueth_get_cores(prueth, ICSS_SLICE1, false);
if (ret)
goto put_cores;
@@ -1495,6 +1809,7 @@ static int prueth_probe(struct platform_device *pdev)
}
spin_lock_init(&prueth->vtbl_lock);
+ spin_lock_init(&prueth->stats_lock);
/* setup netdev interfaces */
if (eth0_node) {
ret = prueth_netdev_init(prueth, eth0_node);
@@ -1618,14 +1933,12 @@ put_pruss:
pruss_put(prueth->pruss);
put_cores:
- if (eth1_node) {
- prueth_put_cores(prueth, ICSS_SLICE1);
- of_node_put(eth1_node);
- }
-
- if (eth0_node) {
+ if (eth0_node || eth1_node) {
prueth_put_cores(prueth, ICSS_SLICE0);
of_node_put(eth0_node);
+
+ prueth_put_cores(prueth, ICSS_SLICE1);
+ of_node_put(eth1_node);
}
return ret;
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.h b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
index f5c1d473e9f9..23c465f1ce7f 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.h
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
@@ -8,6 +8,8 @@
#ifndef __NET_TI_ICSSG_PRUETH_H
#define __NET_TI_ICSSG_PRUETH_H
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
#include <linux/etherdevice.h>
#include <linux/genalloc.h>
#include <linux/if_vlan.h>
@@ -33,6 +35,8 @@
#include <linux/dma/k3-udma-glue.h>
#include <net/devlink.h>
+#include <net/xdp.h>
+#include <net/page_pool/helpers.h>
#include "icssg_config.h"
#include "icss_iep.h"
@@ -50,7 +54,7 @@
#define ICSSG_MAX_RFLOWS 8 /* per slice */
-#define ICSSG_NUM_PA_STATS 4
+#define ICSSG_NUM_PA_STATS 32
#define ICSSG_NUM_MIIG_STATS 60
/* Number of ICSSG related stats */
#define ICSSG_NUM_STATS (ICSSG_NUM_MIIG_STATS + ICSSG_NUM_PA_STATS)
@@ -83,6 +87,12 @@
#define ICSS_CMD_ADD_FILTER 0x7
#define ICSS_CMD_ADD_MAC 0x8
+/* VLAN Filtering Related MACROs */
+#define PRUETH_DFLT_VLAN_HSR 1
+#define PRUETH_DFLT_VLAN_SW 1
+#define PRUETH_DFLT_VLAN_MAC 0
+#define MAX_VLAN_ID 256
+
/* In switch mode there are 3 real ports i.e. 3 mac addrs.
* however Linux sees only the host side port. The other 2 ports
* are the switch ports.
@@ -125,6 +135,26 @@ struct prueth_rx_chn {
u32 descs_num;
unsigned int irq[ICSSG_MAX_RFLOWS]; /* separate irq per flow */
char name[32];
+ struct page_pool *pg_pool;
+ struct xdp_rxq_info xdp_rxq;
+};
+
+enum prueth_swdata_type {
+ PRUETH_SWDATA_INVALID = 0,
+ PRUETH_SWDATA_SKB,
+ PRUETH_SWDATA_PAGE,
+ PRUETH_SWDATA_CMD,
+ PRUETH_SWDATA_XDPF,
+};
+
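+/* Per-descriptor SW data: tagged union recording the attached buffer type */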
+struct prueth_swdata {
+ enum prueth_swdata_type type;
+ union prueth_data {
+ struct sk_buff *skb;
+ struct page *page;
+ u32 cmd;
+ struct xdp_frame *xdpf;
+ } data;
};
/* There are 4 Tx DMA channels, but the highest priority is CH3 (thread 3)
@@ -134,13 +164,18 @@ struct prueth_rx_chn {
#define PRUETH_MAX_TX_TS_REQUESTS 50 /* Max simultaneous TX_TS requests */
+/* XDP BPF state */
+#define ICSSG_XDP_PASS 0
+#define ICSSG_XDP_CONSUMED BIT(0)
+#define ICSSG_XDP_TX BIT(1)
+#define ICSSG_XDP_REDIR BIT(2)
+
/* Minimum coalesce time in usecs for both Tx and Rx */
#define ICSSG_MIN_COALESCE_USECS 20
/* data for each emac port */
struct prueth_emac {
bool is_sr1;
- bool fw_running;
struct prueth *prueth;
struct net_device *ndev;
u8 mac_addr[6];
@@ -201,8 +236,16 @@ struct prueth_emac {
/* RX IRQ Coalescing Related */
struct hrtimer rx_hrtimer;
unsigned long rx_pace_timeout_ns;
+
+ struct netdev_hw_addr_list vlan_mcast_list[MAX_VLAN_ID];
+ struct bpf_prog *xdp_prog;
+ struct xdp_attachment_info xdpi;
};
+/* The buf includes headroom compatible with both skb and xdpf */
+#define PRUETH_HEADROOM_NA (max(XDP_PACKET_HEADROOM, NET_SKB_PAD) + NET_IP_ALIGN)
+#define PRUETH_HEADROOM ALIGN(PRUETH_HEADROOM_NA, sizeof(long))
+
/**
* struct prueth_pdata - PRUeth platform data
* @fdqring_mode: Free desc queue mode
@@ -298,6 +341,8 @@ struct prueth {
int default_vlan;
/** @vtbl_lock: Lock for vtbl in shared memory */
spinlock_t vtbl_lock;
+ /** @stats_lock: Lock for reading icssg stats */
+ spinlock_t stats_lock;
};
struct emac_tx_ts_response {
@@ -361,6 +406,8 @@ int icssg_set_port_state(struct prueth_emac *emac,
enum icssg_port_state_cmd state);
void icssg_config_set_speed(struct prueth_emac *emac);
void icssg_config_half_duplex(struct prueth_emac *emac);
+void icssg_init_emac_mode(struct prueth *prueth);
+void icssg_init_fw_offload_mode(struct prueth *prueth);
/* Buffer queue helpers */
int icssg_queue_pop(struct prueth *prueth, u8 queue);
@@ -377,6 +424,7 @@ void icssg_vtbl_modify(struct prueth_emac *emac, u8 vid, u8 port_mask,
u8 untag_mask, bool add);
u16 icssg_get_pvid(struct prueth_emac *emac);
void icssg_set_pvid(struct prueth *prueth, u8 vid, u8 port);
+int emac_fdb_flow_id_updated(struct prueth_emac *emac);
#define prueth_napi_to_tx_chn(pnapi) \
container_of(pnapi, struct prueth_tx_chn, napi_tx)
@@ -400,14 +448,14 @@ int prueth_init_rx_chns(struct prueth_emac *emac,
struct prueth_rx_chn *rx_chn,
char *name, u32 max_rflows,
u32 max_desc_num);
-int prueth_dma_rx_push(struct prueth_emac *emac,
- struct sk_buff *skb,
- struct prueth_rx_chn *rx_chn);
+int prueth_dma_rx_push_mapped(struct prueth_emac *emac,
+ struct prueth_rx_chn *rx_chn,
+ struct page *page, u32 buf_len);
+unsigned int prueth_rxbuf_total_len(unsigned int len);
void emac_rx_timestamp(struct prueth_emac *emac,
struct sk_buff *skb, u32 *psdata);
enum netdev_tx icssg_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev);
irqreturn_t prueth_rx_irq(int irq, void *dev_id);
-void prueth_emac_stop(struct prueth_emac *emac);
void prueth_cleanup_tx_ts(struct prueth_emac *emac);
int icssg_napi_rx_poll(struct napi_struct *napi_rx, int budget);
int prueth_prepare_rx_chan(struct prueth_emac *emac,
@@ -432,5 +480,9 @@ void prueth_put_cores(struct prueth *prueth, int slice);
/* Revision specific helper */
u64 icssg_ts_to_ns(u32 hi_sw, u32 hi, u32 lo, u32 cycle_time_ns);
+u32 emac_xmit_xdp_frame(struct prueth_emac *emac,
+ struct xdp_frame *xdpf,
+ struct page *page,
+ unsigned int q_idx);
#endif /* __NET_TI_ICSSG_PRUETH_H */
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c b/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
index 5024f0647a0d..ff5f41bf499e 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
@@ -84,7 +84,7 @@ static int emac_send_command_sr1(struct prueth_emac *emac, u32 cmd)
__le32 *data = emac->cmd_data;
dma_addr_t desc_dma, buf_dma;
struct prueth_tx_chn *tx_chn;
- void **swdata;
+ struct prueth_swdata *swdata;
int ret = 0;
u32 *epib;
@@ -122,7 +122,8 @@ static int emac_send_command_sr1(struct prueth_emac *emac, u32 cmd)
cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len);
swdata = cppi5_hdesc_get_swdata(first_desc);
- *swdata = data;
+ swdata->type = PRUETH_SWDATA_CMD;
+ swdata->data.cmd = le32_to_cpu(data[0]);
cppi5_hdesc_set_pktlen(first_desc, pkt_len);
desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc);
@@ -268,16 +269,16 @@ static int emac_phy_connect(struct prueth_emac *emac)
* Returns skb pointer if packet found else NULL
* Caller must free the returned skb.
*/
-static struct sk_buff *prueth_process_rx_mgm(struct prueth_emac *emac,
- u32 flow_id)
+static struct page *prueth_process_rx_mgm(struct prueth_emac *emac,
+ u32 flow_id)
{
struct prueth_rx_chn *rx_chn = &emac->rx_mgm_chn;
struct net_device *ndev = emac->ndev;
struct cppi5_host_desc_t *desc_rx;
- struct sk_buff *skb, *new_skb;
+ struct page *page, *new_page;
+ struct prueth_swdata *swdata;
dma_addr_t desc_dma, buf_dma;
- u32 buf_dma_len, pkt_len;
- void **swdata;
+ u32 buf_dma_len;
int ret;
ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_id, &desc_dma);
@@ -299,34 +300,31 @@ static struct sk_buff *prueth_process_rx_mgm(struct prueth_emac *emac,
}
swdata = cppi5_hdesc_get_swdata(desc_rx);
- skb = *swdata;
+ page = swdata->data.page;
cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
- pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
- new_skb = netdev_alloc_skb_ip_align(ndev, PRUETH_MAX_PKT_SIZE);
+ new_page = page_pool_dev_alloc_pages(rx_chn->pg_pool);
/* if allocation fails we drop the packet but push the
* descriptor back to the ring with old skb to prevent a stall
*/
- if (!new_skb) {
+ if (!new_page) {
netdev_err(ndev,
- "skb alloc failed, dropped mgm pkt from flow %d\n",
+ "page alloc failed, dropped mgm pkt from flow %d\n",
flow_id);
- new_skb = skb;
- skb = NULL; /* return NULL */
- } else {
- /* return the filled skb */
- skb_put(skb, pkt_len);
+ new_page = page;
+ page = NULL; /* return NULL */
}
/* queue another DMA */
- ret = prueth_dma_rx_push(emac, new_skb, &emac->rx_mgm_chn);
+ ret = prueth_dma_rx_push_mapped(emac, &emac->rx_chns, new_page,
+ PRUETH_MAX_PKT_SIZE);
if (WARN_ON(ret < 0))
- dev_kfree_skb_any(new_skb);
+ page_pool_recycle_direct(rx_chn->pg_pool, new_page);
- return skb;
+ return page;
}
static void prueth_tx_ts_sr1(struct prueth_emac *emac,
@@ -362,14 +360,14 @@ static void prueth_tx_ts_sr1(struct prueth_emac *emac,
static irqreturn_t prueth_rx_mgm_ts_thread_sr1(int irq, void *dev_id)
{
struct prueth_emac *emac = dev_id;
- struct sk_buff *skb;
+ struct page *page;
- skb = prueth_process_rx_mgm(emac, PRUETH_RX_MGM_FLOW_TIMESTAMP_SR1);
- if (!skb)
+ page = prueth_process_rx_mgm(emac, PRUETH_RX_MGM_FLOW_TIMESTAMP_SR1);
+ if (!page)
return IRQ_NONE;
- prueth_tx_ts_sr1(emac, (void *)skb->data);
- dev_kfree_skb_any(skb);
+ prueth_tx_ts_sr1(emac, (void *)page_address(page));
+ page_pool_recycle_direct(page->pp, page);
return IRQ_HANDLED;
}
@@ -377,15 +375,15 @@ static irqreturn_t prueth_rx_mgm_ts_thread_sr1(int irq, void *dev_id)
static irqreturn_t prueth_rx_mgm_rsp_thread(int irq, void *dev_id)
{
struct prueth_emac *emac = dev_id;
- struct sk_buff *skb;
+ struct page *page;
u32 rsp;
- skb = prueth_process_rx_mgm(emac, PRUETH_RX_MGM_FLOW_RESPONSE_SR1);
- if (!skb)
+ page = prueth_process_rx_mgm(emac, PRUETH_RX_MGM_FLOW_RESPONSE_SR1);
+ if (!page)
return IRQ_NONE;
/* Process command response */
- rsp = le32_to_cpu(*(__le32 *)skb->data) & 0xffff0000;
+ rsp = le32_to_cpu(*(__le32 *)page_address(page)) & 0xffff0000;
if (rsp == ICSSG_SHUTDOWN_CMD_SR1) {
netdev_dbg(emac->ndev, "f/w Shutdown cmd resp %x\n", rsp);
complete(&emac->cmd_complete);
@@ -394,7 +392,7 @@ static irqreturn_t prueth_rx_mgm_rsp_thread(int irq, void *dev_id)
complete(&emac->cmd_complete);
}
- dev_kfree_skb_any(skb);
+ page_pool_recycle_direct(page->pp, page);
return IRQ_HANDLED;
}
@@ -440,7 +438,6 @@ static int prueth_emac_start(struct prueth *prueth, struct prueth_emac *emac)
goto halt_pru;
}
- emac->fw_running = 1;
return 0;
halt_pru:
@@ -449,6 +446,29 @@ halt_pru:
return ret;
}
+static void prueth_emac_stop(struct prueth_emac *emac)
+{
+ struct prueth *prueth = emac->prueth;
+ int slice;
+
+ switch (emac->port_id) {
+ case PRUETH_PORT_MII0:
+ slice = ICSS_SLICE0;
+ break;
+ case PRUETH_PORT_MII1:
+ slice = ICSS_SLICE1;
+ break;
+ default:
+ netdev_err(emac->ndev, "invalid port\n");
+ return;
+ }
+
+ if (!emac->is_sr1)
+ rproc_shutdown(prueth->txpru[slice]);
+ rproc_shutdown(prueth->rtu[slice]);
+ rproc_shutdown(prueth->pru[slice]);
+}
+
/**
* emac_ndo_open - EMAC device open
* @ndev: network adapter device
@@ -1009,8 +1029,6 @@ static int prueth_probe(struct platform_device *pdev)
(unsigned long)prueth->msmcram.va);
prueth->msmcram.size = msmc_ram_size;
memset_io(prueth->msmcram.va, 0, msmc_ram_size);
- dev_dbg(dev, "sram: pa %llx va %p size %zx\n", prueth->msmcram.pa,
- prueth->msmcram.va, prueth->msmcram.size);
prueth->iep0 = icss_iep_get_idx(np, 0);
if (IS_ERR(prueth->iep0)) {
diff --git a/drivers/net/ethernet/ti/icssg/icssg_stats.c b/drivers/net/ethernet/ti/icssg/icssg_stats.c
index 8800bd3a8d07..7159baa0155c 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_stats.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_stats.c
@@ -11,7 +11,6 @@
#define ICSSG_TX_PACKET_OFFSET 0xA0
#define ICSSG_TX_BYTE_OFFSET 0xEC
-#define ICSSG_FW_STATS_BASE 0x0248
static u32 stats_base[] = { 0x54c, /* Slice 0 stats start */
0xb18, /* Slice 1 stats start */
@@ -26,7 +25,17 @@ void emac_update_hardware_stats(struct prueth_emac *emac)
u32 val, reg;
int i;
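+ /* Guard against concurrent readers and updaters of the accumulated stats */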
+ spin_lock(&prueth->stats_lock);
+
for (i = 0; i < ARRAY_SIZE(icssg_all_miig_stats); i++) {
+ /* In MII mode TX lines are swapped inside ICSSG, so read Tx stats
+ * from slice1 for port0 and slice0 for port1 to get accurate Tx
+ * stats for a given port
+ */
+ if (emac->phy_if == PHY_INTERFACE_MODE_MII &&
+ icssg_all_miig_stats[i].offset >= ICSSG_TX_PACKET_OFFSET &&
+ icssg_all_miig_stats[i].offset <= ICSSG_TX_BYTE_OFFSET)
+ base = stats_base[slice ^ 1];
regmap_read(prueth->miig_rt,
base + icssg_all_miig_stats[i].offset,
&val);
@@ -44,13 +53,14 @@ void emac_update_hardware_stats(struct prueth_emac *emac)
if (prueth->pa_stats) {
for (i = 0; i < ARRAY_SIZE(icssg_all_pa_stats); i++) {
- reg = ICSSG_FW_STATS_BASE +
- icssg_all_pa_stats[i].offset *
- PRUETH_NUM_MACS + slice * sizeof(u32);
+ reg = icssg_all_pa_stats[i].offset +
+ slice * sizeof(u32);
regmap_read(prueth->pa_stats, reg, &val);
emac->pa_stats[i] += val;
}
}
+
+ spin_unlock(&prueth->stats_lock);
}
void icssg_stats_work_handler(struct work_struct *work)
@@ -76,7 +86,7 @@ int emac_get_stat_by_name(struct prueth_emac *emac, char *stat_name)
if (emac->prueth->pa_stats) {
for (i = 0; i < ARRAY_SIZE(icssg_all_pa_stats); i++) {
if (!strcmp(icssg_all_pa_stats[i].name, stat_name))
- return emac->pa_stats[icssg_all_pa_stats[i].offset / sizeof(u32)];
+ return emac->pa_stats[i];
}
}
diff --git a/drivers/net/ethernet/ti/icssg/icssg_stats.h b/drivers/net/ethernet/ti/icssg/icssg_stats.h
index e88b919f532c..5ec0b38e0c67 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_stats.h
+++ b/drivers/net/ethernet/ti/icssg/icssg_stats.h
@@ -155,24 +155,10 @@ static const struct icssg_miig_stats icssg_all_miig_stats[] = {
ICSSG_MIIG_STATS(tx_bytes, true),
};
-/**
- * struct pa_stats_regs - ICSSG Firmware maintained PA Stats register
- * @fw_rx_cnt: Number of valid packets sent by Rx PRU to Host on PSI
- * @fw_tx_cnt: Number of valid packets copied by RTU0 to Tx queues
- * @fw_tx_pre_overflow: Host Egress Q (Pre-emptible) Overflow Counter
- * @fw_tx_exp_overflow: Host Egress Q (Express) Overflow Counter
- */
-struct pa_stats_regs {
- u32 fw_rx_cnt;
- u32 fw_tx_cnt;
- u32 fw_tx_pre_overflow;
- u32 fw_tx_exp_overflow;
-};
-
-#define ICSSG_PA_STATS(field) \
-{ \
- #field, \
- offsetof(struct pa_stats_regs, field), \
+#define ICSSG_PA_STATS(field) \
+{ \
+ #field, \
+ field, \
}
struct icssg_pa_stats {
@@ -181,10 +167,38 @@ struct icssg_pa_stats {
};
static const struct icssg_pa_stats icssg_all_pa_stats[] = {
- ICSSG_PA_STATS(fw_rx_cnt),
- ICSSG_PA_STATS(fw_tx_cnt),
- ICSSG_PA_STATS(fw_tx_pre_overflow),
- ICSSG_PA_STATS(fw_tx_exp_overflow),
+ ICSSG_PA_STATS(FW_RTU_PKT_DROP),
+ ICSSG_PA_STATS(FW_Q0_OVERFLOW),
+ ICSSG_PA_STATS(FW_Q1_OVERFLOW),
+ ICSSG_PA_STATS(FW_Q2_OVERFLOW),
+ ICSSG_PA_STATS(FW_Q3_OVERFLOW),
+ ICSSG_PA_STATS(FW_Q4_OVERFLOW),
+ ICSSG_PA_STATS(FW_Q5_OVERFLOW),
+ ICSSG_PA_STATS(FW_Q6_OVERFLOW),
+ ICSSG_PA_STATS(FW_Q7_OVERFLOW),
+ ICSSG_PA_STATS(FW_DROPPED_PKT),
+ ICSSG_PA_STATS(FW_RX_ERROR),
+ ICSSG_PA_STATS(FW_RX_DS_INVALID),
+ ICSSG_PA_STATS(FW_TX_DROPPED_PACKET),
+ ICSSG_PA_STATS(FW_TX_TS_DROPPED_PACKET),
+ ICSSG_PA_STATS(FW_INF_PORT_DISABLED),
+ ICSSG_PA_STATS(FW_INF_SAV),
+ ICSSG_PA_STATS(FW_INF_SA_DL),
+ ICSSG_PA_STATS(FW_INF_PORT_BLOCKED),
+ ICSSG_PA_STATS(FW_INF_DROP_TAGGED),
+ ICSSG_PA_STATS(FW_INF_DROP_PRIOTAGGED),
+ ICSSG_PA_STATS(FW_INF_DROP_NOTAG),
+ ICSSG_PA_STATS(FW_INF_DROP_NOTMEMBER),
+ ICSSG_PA_STATS(FW_RX_EOF_SHORT_FRMERR),
+ ICSSG_PA_STATS(FW_RX_B0_DROP_EARLY_EOF),
+ ICSSG_PA_STATS(FW_TX_JUMBO_FRM_CUTOFF),
+ ICSSG_PA_STATS(FW_RX_EXP_FRAG_Q_DROP),
+ ICSSG_PA_STATS(FW_RX_FIFO_OVERRUN),
+ ICSSG_PA_STATS(FW_CUT_THR_PKT),
+ ICSSG_PA_STATS(FW_HOST_RX_PKT_CNT),
+ ICSSG_PA_STATS(FW_HOST_TX_PKT_CNT),
+ ICSSG_PA_STATS(FW_HOST_EGRESS_Q_PRE_OVERFLOW),
+ ICSSG_PA_STATS(FW_HOST_EGRESS_Q_EXP_OVERFLOW),
};
#endif /* __NET_TI_ICSSG_STATS_H */
diff --git a/drivers/net/ethernet/ti/icssg/icssg_switch_map.h b/drivers/net/ethernet/ti/icssg/icssg_switch_map.h
index 424a7e945ea8..490a9cc06fb0 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_switch_map.h
+++ b/drivers/net/ethernet/ti/icssg/icssg_switch_map.h
@@ -231,4 +231,37 @@
/* Start of 32 bits PA_STAT counters */
#define PA_STAT_32b_START_OFFSET 0x0080
+#define FW_RTU_PKT_DROP 0x0088
+#define FW_Q0_OVERFLOW 0x0090
+#define FW_Q1_OVERFLOW 0x0098
+#define FW_Q2_OVERFLOW 0x00A0
+#define FW_Q3_OVERFLOW 0x00A8
+#define FW_Q4_OVERFLOW 0x00B0
+#define FW_Q5_OVERFLOW 0x00B8
+#define FW_Q6_OVERFLOW 0x00C0
+#define FW_Q7_OVERFLOW 0x00C8
+#define FW_DROPPED_PKT 0x00F8
+#define FW_RX_ERROR 0x0100
+#define FW_RX_DS_INVALID 0x0108
+#define FW_TX_DROPPED_PACKET 0x0110
+#define FW_TX_TS_DROPPED_PACKET 0x0118
+#define FW_INF_PORT_DISABLED 0x0120
+#define FW_INF_SAV 0x0128
+#define FW_INF_SA_DL 0x0130
+#define FW_INF_PORT_BLOCKED 0x0138
+#define FW_INF_DROP_TAGGED 0x0140
+#define FW_INF_DROP_PRIOTAGGED 0x0148
+#define FW_INF_DROP_NOTAG 0x0150
+#define FW_INF_DROP_NOTMEMBER 0x0158
+#define FW_RX_EOF_SHORT_FRMERR 0x0188
+#define FW_RX_B0_DROP_EARLY_EOF 0x0190
+#define FW_TX_JUMBO_FRM_CUTOFF 0x0198
+#define FW_RX_EXP_FRAG_Q_DROP 0x01A0
+#define FW_RX_FIFO_OVERRUN 0x01A8
+#define FW_CUT_THR_PKT 0x01B0
+#define FW_HOST_RX_PKT_CNT 0x0248
+#define FW_HOST_TX_PKT_CNT 0x0250
+#define FW_HOST_EGRESS_Q_PRE_OVERFLOW 0x0258
+#define FW_HOST_EGRESS_Q_EXP_OVERFLOW 0x0260
+
#endif /* __NET_TI_ICSSG_SWITCH_MAP_H */
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index 63e686f0b119..55a1a96cd834 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -2833,7 +2833,7 @@ static int gbe_ioctl(void *intf_priv, struct ifreq *req, int cmd)
static void netcp_ethss_timer(struct timer_list *t)
{
- struct gbe_priv *gbe_dev = from_timer(gbe_dev, t, timer);
+ struct gbe_priv *gbe_dev = timer_container_of(gbe_dev, t, timer);
struct gbe_intf *gbe_intf;
struct gbe_slave *slave;
@@ -3796,7 +3796,7 @@ static int gbe_remove(struct netcp_device *netcp_device, void *inst_priv)
{
struct gbe_priv *gbe_dev = inst_priv;
- del_timer_sync(&gbe_dev->timer);
+ timer_delete_sync(&gbe_dev->timer);
cpts_release(gbe_dev->cpts);
cpsw_ale_stop(gbe_dev->ale);
netcp_txpipe_close(&gbe_dev->tx_pipe);
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index b3da76efa8f5..a55b0f951181 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -332,13 +332,13 @@ static void tlan_stop(struct net_device *dev)
{
struct tlan_priv *priv = netdev_priv(dev);
- del_timer_sync(&priv->media_timer);
+ timer_delete_sync(&priv->media_timer);
tlan_read_and_clear_stats(dev, TLAN_RECORD);
outl(TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD);
/* Reset and power down phy */
tlan_reset_adapter(dev);
if (priv->timer.function != NULL) {
- del_timer_sync(&priv->timer);
+ timer_delete_sync(&priv->timer);
priv->timer.function = NULL;
}
}
@@ -1815,7 +1815,7 @@ ThunderLAN driver timer function
static void tlan_timer(struct timer_list *t)
{
- struct tlan_priv *priv = from_timer(priv, t, timer);
+ struct tlan_priv *priv = timer_container_of(priv, t, timer);
struct net_device *dev = priv->dev;
u32 elapsed;
unsigned long flags = 0;
@@ -2746,7 +2746,7 @@ static void tlan_phy_finish_auto_neg(struct net_device *dev)
static void tlan_phy_monitor(struct timer_list *t)
{
- struct tlan_priv *priv = from_timer(priv, t, media_timer);
+ struct tlan_priv *priv = timer_container_of(priv, t, media_timer);
struct net_device *dev = priv->dev;
u16 phy;
u16 phy_status;
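Note that the tlan_stop() hunk above keeps the driver's existing guard around its second timer: it is only torn down if it was actually armed, and its function pointer is cleared afterwards so a later stop path skips it. A rough sketch of that guarded-teardown shape, with a hypothetical example_priv standing in for struct tlan_priv:

#include <linux/timer.h>

struct example_priv {
	struct timer_list timer;
	struct timer_list media_timer;
};

static void example_stop_timers(struct example_priv *priv)
{
	/* the media timer is always armed, so delete it unconditionally */
	timer_delete_sync(&priv->media_timer);

	/* the general-purpose timer may never have been started;
	 * this mirrors the tlan_stop() logic above, not a general rule
	 */
	if (priv->timer.function != NULL) {
		timer_delete_sync(&priv->timer);	/* wait for a running handler */
		priv->timer.function = NULL;		/* mark it idle for later calls */
	}
}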
diff --git a/drivers/net/ethernet/toshiba/Kconfig b/drivers/net/ethernet/toshiba/Kconfig
index 701e9b7c1c3b..b1e27e3b99eb 100644
--- a/drivers/net/ethernet/toshiba/Kconfig
+++ b/drivers/net/ethernet/toshiba/Kconfig
@@ -6,7 +6,7 @@
config NET_VENDOR_TOSHIBA
bool "Toshiba devices"
default y
- depends on PCI && (PPC_IBM_CELL_BLADE || MIPS) || PPC_PS3
+ depends on PCI && MIPS || PPC_PS3
help
If you have a network (Ethernet) card belonging to this class, say Y.
@@ -39,15 +39,6 @@ config GELIC_WIRELESS
the driver automatically distinguishes the models, you can
safely enable this option even if you have a wireless-less model.
-config SPIDER_NET
- tristate "Spider Gigabit Ethernet driver"
- depends on PCI && PPC_IBM_CELL_BLADE
- select FW_LOADER
- select SUNGEM_PHY
- help
- This driver supports the Gigabit Ethernet chips present on the
- Cell Processor-Based Blades from IBM.
-
config TC35815
tristate "TOSHIBA TC35815 Ethernet support"
depends on PCI && MIPS
diff --git a/drivers/net/ethernet/toshiba/Makefile b/drivers/net/ethernet/toshiba/Makefile
index f434fd0f429e..27e2164cf7e9 100644
--- a/drivers/net/ethernet/toshiba/Makefile
+++ b/drivers/net/ethernet/toshiba/Makefile
@@ -6,6 +6,4 @@
obj-$(CONFIG_GELIC_NET) += ps3_gelic.o
gelic_wireless-$(CONFIG_GELIC_WIRELESS) += ps3_gelic_wireless.o
ps3_gelic-objs += ps3_gelic_net.o $(gelic_wireless-y)
-spidernet-y += spider_net.o spider_net_ethtool.o
-obj-$(CONFIG_SPIDER_NET) += spidernet.o
obj-$(CONFIG_TC35815) += tc35815.o
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
deleted file mode 100644
index a4937c18d7cb..000000000000
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ /dev/null
@@ -1,2556 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Network device driver for Cell Processor-Based Blade and Celleb platform
- *
- * (C) Copyright IBM Corp. 2005
- * (C) Copyright 2006 TOSHIBA CORPORATION
- *
- * Authors : Utz Bacher <utz.bacher@de.ibm.com>
- * Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
- */
-
-#include <linux/compiler.h>
-#include <linux/crc32.h>
-#include <linux/delay.h>
-#include <linux/etherdevice.h>
-#include <linux/ethtool.h>
-#include <linux/firmware.h>
-#include <linux/if_vlan.h>
-#include <linux/in.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/gfp.h>
-#include <linux/ioport.h>
-#include <linux/ip.h>
-#include <linux/kernel.h>
-#include <linux/mii.h>
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/device.h>
-#include <linux/pci.h>
-#include <linux/skbuff.h>
-#include <linux/tcp.h>
-#include <linux/types.h>
-#include <linux/vmalloc.h>
-#include <linux/wait.h>
-#include <linux/workqueue.h>
-#include <linux/bitops.h>
-#include <linux/of.h>
-#include <net/checksum.h>
-
-#include "spider_net.h"
-
-MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com> and Jens Osterkamp " \
- "<Jens.Osterkamp@de.ibm.com>");
-MODULE_DESCRIPTION("Spider Southbridge Gigabit Ethernet driver");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(VERSION);
-MODULE_FIRMWARE(SPIDER_NET_FIRMWARE_NAME);
-
-static int rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_DEFAULT;
-static int tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_DEFAULT;
-
-module_param(rx_descriptors, int, 0444);
-module_param(tx_descriptors, int, 0444);
-
-MODULE_PARM_DESC(rx_descriptors, "number of descriptors used " \
- "in rx chains");
-MODULE_PARM_DESC(tx_descriptors, "number of descriptors used " \
- "in tx chain");
-
-char spider_net_driver_name[] = "spidernet";
-
-static const struct pci_device_id spider_net_pci_tbl[] = {
- { PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_SPIDER_NET,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
- { 0, }
-};
-
-MODULE_DEVICE_TABLE(pci, spider_net_pci_tbl);
-
-/**
- * spider_net_read_reg - reads an SMMIO register of a card
- * @card: device structure
- * @reg: register to read from
- *
- * returns the content of the specified SMMIO register.
- */
-static inline u32
-spider_net_read_reg(struct spider_net_card *card, u32 reg)
-{
- /* We use the powerpc specific variants instead of readl_be() because
- * we know spidernet is not a real PCI device and we can thus avoid the
- * performance hit caused by the PCI workarounds.
- */
- return in_be32(card->regs + reg);
-}
-
-/**
- * spider_net_write_reg - writes to an SMMIO register of a card
- * @card: device structure
- * @reg: register to write to
- * @value: value to write into the specified SMMIO register
- */
-static inline void
-spider_net_write_reg(struct spider_net_card *card, u32 reg, u32 value)
-{
- /* We use the powerpc specific variants instead of writel_be() because
- * we know spidernet is not a real PCI device and we can thus avoid the
- * performance hit caused by the PCI workarounds.
- */
- out_be32(card->regs + reg, value);
-}
-
-/**
- * spider_net_write_phy - write to phy register
- * @netdev: adapter to be written to
- * @mii_id: id of MII
- * @reg: PHY register
- * @val: value to be written to phy register
- *
- * spider_net_write_phy_register writes to an arbitrary PHY
- * register via the spider GPCWOPCMD register. We assume the queue does
- * not run full (not more than 15 commands outstanding).
- **/
-static void
-spider_net_write_phy(struct net_device *netdev, int mii_id,
- int reg, int val)
-{
- struct spider_net_card *card = netdev_priv(netdev);
- u32 writevalue;
-
- writevalue = ((u32)mii_id << 21) |
- ((u32)reg << 16) | ((u32)val);
-
- spider_net_write_reg(card, SPIDER_NET_GPCWOPCMD, writevalue);
-}
-
-/**
- * spider_net_read_phy - read from phy register
- * @netdev: network device to be read from
- * @mii_id: id of MII
- * @reg: PHY register
- *
- * Returns value read from PHY register
- *
- * spider_net_read_phy reads from an arbitrary PHY
- * register via the spider GPCROPCMD register
- **/
-static int
-spider_net_read_phy(struct net_device *netdev, int mii_id, int reg)
-{
- struct spider_net_card *card = netdev_priv(netdev);
- u32 readvalue;
-
- readvalue = ((u32)mii_id << 21) | ((u32)reg << 16);
- spider_net_write_reg(card, SPIDER_NET_GPCROPCMD, readvalue);
-
- /* we don't use semaphores to wait for an SPIDER_NET_GPROPCMPINT
- * interrupt, as we poll for the completion of the read operation
- * in spider_net_read_phy. Should take about 50 us
- */
- do {
- readvalue = spider_net_read_reg(card, SPIDER_NET_GPCROPCMD);
- } while (readvalue & SPIDER_NET_GPREXEC);
-
- readvalue &= SPIDER_NET_GPRDAT_MASK;
-
- return readvalue;
-}
-
-/**
- * spider_net_setup_aneg - initial auto-negotiation setup
- * @card: device structure
- **/
-static void
-spider_net_setup_aneg(struct spider_net_card *card)
-{
- struct mii_phy *phy = &card->phy;
- u32 advertise = 0;
- u16 bmsr, estat;
-
- bmsr = spider_net_read_phy(card->netdev, phy->mii_id, MII_BMSR);
- estat = spider_net_read_phy(card->netdev, phy->mii_id, MII_ESTATUS);
-
- if (bmsr & BMSR_10HALF)
- advertise |= ADVERTISED_10baseT_Half;
- if (bmsr & BMSR_10FULL)
- advertise |= ADVERTISED_10baseT_Full;
- if (bmsr & BMSR_100HALF)
- advertise |= ADVERTISED_100baseT_Half;
- if (bmsr & BMSR_100FULL)
- advertise |= ADVERTISED_100baseT_Full;
-
- if ((bmsr & BMSR_ESTATEN) && (estat & ESTATUS_1000_TFULL))
- advertise |= SUPPORTED_1000baseT_Full;
- if ((bmsr & BMSR_ESTATEN) && (estat & ESTATUS_1000_THALF))
- advertise |= SUPPORTED_1000baseT_Half;
-
- sungem_phy_probe(phy, phy->mii_id);
- phy->def->ops->setup_aneg(phy, advertise);
-
-}
-
-/**
- * spider_net_rx_irq_off - switch off rx irq on this spider card
- * @card: device structure
- *
- * switches off rx irq by masking them out in the GHIINTnMSK register
- */
-static void
-spider_net_rx_irq_off(struct spider_net_card *card)
-{
- u32 regvalue;
-
- regvalue = SPIDER_NET_INT0_MASK_VALUE & (~SPIDER_NET_RXINT);
- spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, regvalue);
-}
-
-/**
- * spider_net_rx_irq_on - switch on rx irq on this spider card
- * @card: device structure
- *
- * switches on rx irq by enabling them in the GHIINTnMSK register
- */
-static void
-spider_net_rx_irq_on(struct spider_net_card *card)
-{
- u32 regvalue;
-
- regvalue = SPIDER_NET_INT0_MASK_VALUE | SPIDER_NET_RXINT;
- spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, regvalue);
-}
-
-/**
- * spider_net_set_promisc - sets the unicast address or the promiscuous mode
- * @card: card structure
- *
- * spider_net_set_promisc sets the unicast destination address filter and
- * thus either allows for non-promisc mode or promisc mode
- */
-static void
-spider_net_set_promisc(struct spider_net_card *card)
-{
- u32 macu, macl;
- struct net_device *netdev = card->netdev;
-
- if (netdev->flags & IFF_PROMISC) {
- /* clear destination entry 0 */
- spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR, 0);
- spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR + 0x04, 0);
- spider_net_write_reg(card, SPIDER_NET_GMRUA0FIL15R,
- SPIDER_NET_PROMISC_VALUE);
- } else {
- macu = netdev->dev_addr[0];
- macu <<= 8;
- macu |= netdev->dev_addr[1];
- memcpy(&macl, &netdev->dev_addr[2], sizeof(macl));
-
- macu |= SPIDER_NET_UA_DESCR_VALUE;
- spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR, macu);
- spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR + 0x04, macl);
- spider_net_write_reg(card, SPIDER_NET_GMRUA0FIL15R,
- SPIDER_NET_NONPROMISC_VALUE);
- }
-}
-
-/**
- * spider_net_get_descr_status -- returns the status of a descriptor
- * @hwdescr: descriptor to look at
- *
- * returns the status as in the dmac_cmd_status field of the descriptor
- */
-static inline int
-spider_net_get_descr_status(struct spider_net_hw_descr *hwdescr)
-{
- return hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_IND_PROC_MASK;
-}
-
-/**
- * spider_net_free_chain - free descriptor chain
- * @card: card structure
- * @chain: address of chain
- *
- */
-static void
-spider_net_free_chain(struct spider_net_card *card,
- struct spider_net_descr_chain *chain)
-{
- struct spider_net_descr *descr;
-
- descr = chain->ring;
- do {
- descr->bus_addr = 0;
- descr->hwdescr->next_descr_addr = 0;
- descr = descr->next;
- } while (descr != chain->ring);
-
- dma_free_coherent(&card->pdev->dev, chain->num_desc * sizeof(struct spider_net_hw_descr),
- chain->hwring, chain->dma_addr);
-}
-
-/**
- * spider_net_init_chain - alloc and link descriptor chain
- * @card: card structure
- * @chain: address of chain
- *
- * We manage a circular list that mirrors the hardware structure,
- * except that the hardware uses bus addresses.
- *
- * Returns 0 on success, <0 on failure
- */
-static int
-spider_net_init_chain(struct spider_net_card *card,
- struct spider_net_descr_chain *chain)
-{
- int i;
- struct spider_net_descr *descr;
- struct spider_net_hw_descr *hwdescr;
- dma_addr_t buf;
- size_t alloc_size;
-
- alloc_size = chain->num_desc * sizeof(struct spider_net_hw_descr);
-
- chain->hwring = dma_alloc_coherent(&card->pdev->dev, alloc_size,
- &chain->dma_addr, GFP_KERNEL);
- if (!chain->hwring)
- return -ENOMEM;
-
- /* Set up the hardware pointers in each descriptor */
- descr = chain->ring;
- hwdescr = chain->hwring;
- buf = chain->dma_addr;
- for (i=0; i < chain->num_desc; i++, descr++, hwdescr++) {
- hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
- hwdescr->next_descr_addr = 0;
-
- descr->hwdescr = hwdescr;
- descr->bus_addr = buf;
- descr->next = descr + 1;
- descr->prev = descr - 1;
-
- buf += sizeof(struct spider_net_hw_descr);
- }
- /* do actual circular list */
- (descr-1)->next = chain->ring;
- chain->ring->prev = descr-1;
-
- spin_lock_init(&chain->lock);
- chain->head = chain->ring;
- chain->tail = chain->ring;
- return 0;
-}
-
-/**
- * spider_net_free_rx_chain_contents - frees descr contents in rx chain
- * @card: card structure
- *
- * Frees the skbs attached to the rx descriptors and unmaps their buffers.
- */
-static void
-spider_net_free_rx_chain_contents(struct spider_net_card *card)
-{
- struct spider_net_descr *descr;
-
- descr = card->rx_chain.head;
- do {
- if (descr->skb) {
- dma_unmap_single(&card->pdev->dev,
- descr->hwdescr->buf_addr,
- SPIDER_NET_MAX_FRAME,
- DMA_BIDIRECTIONAL);
- dev_kfree_skb(descr->skb);
- descr->skb = NULL;
- }
- descr = descr->next;
- } while (descr != card->rx_chain.head);
-}
-
-/**
- * spider_net_prepare_rx_descr - Reinitialize RX descriptor
- * @card: card structure
- * @descr: descriptor to re-init
- *
- * Return 0 on success, <0 on failure.
- *
- * Allocates a new rx skb, iommu-maps it and attaches it to the
- * descriptor. Marks the descriptor as activated and ready to use.
- */
-static int
-spider_net_prepare_rx_descr(struct spider_net_card *card,
- struct spider_net_descr *descr)
-{
- struct spider_net_hw_descr *hwdescr = descr->hwdescr;
- dma_addr_t buf;
- int offset;
- int bufsize;
-
- /* we need to round up the buffer size to a multiple of 128 */
- bufsize = (SPIDER_NET_MAX_FRAME + SPIDER_NET_RXBUF_ALIGN - 1) &
- (~(SPIDER_NET_RXBUF_ALIGN - 1));
-
- /* and we need to have it 128 byte aligned, therefore we allocate a
- * bit more
- */
- /* allocate an skb */
- descr->skb = netdev_alloc_skb(card->netdev,
- bufsize + SPIDER_NET_RXBUF_ALIGN - 1);
- if (!descr->skb) {
- if (netif_msg_rx_err(card) && net_ratelimit())
- dev_err(&card->netdev->dev,
- "Not enough memory to allocate rx buffer\n");
- card->spider_stats.alloc_rx_skb_error++;
- return -ENOMEM;
- }
- hwdescr->buf_size = bufsize;
- hwdescr->result_size = 0;
- hwdescr->valid_size = 0;
- hwdescr->data_status = 0;
- hwdescr->data_error = 0;
-
- offset = ((unsigned long)descr->skb->data) &
- (SPIDER_NET_RXBUF_ALIGN - 1);
- if (offset)
- skb_reserve(descr->skb, SPIDER_NET_RXBUF_ALIGN - offset);
- /* iommu-map the skb */
- buf = dma_map_single(&card->pdev->dev, descr->skb->data,
- SPIDER_NET_MAX_FRAME, DMA_FROM_DEVICE);
- if (dma_mapping_error(&card->pdev->dev, buf)) {
- dev_kfree_skb_any(descr->skb);
- descr->skb = NULL;
- if (netif_msg_rx_err(card) && net_ratelimit())
- dev_err(&card->netdev->dev, "Could not iommu-map rx buffer\n");
- card->spider_stats.rx_iommu_map_error++;
- hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
- } else {
- hwdescr->buf_addr = buf;
- wmb();
- hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_CARDOWNED |
- SPIDER_NET_DMAC_NOINTR_COMPLETE;
- }
-
- return 0;
-}
-
-/**
- * spider_net_enable_rxchtails - sets RX dmac chain tail addresses
- * @card: card structure
- *
- * spider_net_enable_rxchtails sets the RX DMAC chain tail addresses in the
- * chip by writing to the appropriate register. DMA is enabled in
- * spider_net_enable_rxdmac.
- */
-static inline void
-spider_net_enable_rxchtails(struct spider_net_card *card)
-{
- /* assume chain is aligned correctly */
- spider_net_write_reg(card, SPIDER_NET_GDADCHA ,
- card->rx_chain.tail->bus_addr);
-}
-
-/**
- * spider_net_enable_rxdmac - enables a receive DMA controller
- * @card: card structure
- *
- * spider_net_enable_rxdmac enables the DMA controller by setting RX_DMA_EN
- * in the GDADMACCNTR register
- */
-static inline void
-spider_net_enable_rxdmac(struct spider_net_card *card)
-{
- wmb();
- spider_net_write_reg(card, SPIDER_NET_GDADMACCNTR,
- SPIDER_NET_DMA_RX_VALUE);
-}
-
-/**
- * spider_net_disable_rxdmac - disables the receive DMA controller
- * @card: card structure
- *
- * spider_net_disable_rxdmac terminates processing on the DMA controller
- * by turning off the DMA controller, with the force-end flag set.
- */
-static inline void
-spider_net_disable_rxdmac(struct spider_net_card *card)
-{
- spider_net_write_reg(card, SPIDER_NET_GDADMACCNTR,
- SPIDER_NET_DMA_RX_FEND_VALUE);
-}
-
-/**
- * spider_net_refill_rx_chain - refills descriptors/skbs in the rx chains
- * @card: card structure
- *
- * refills descriptors in the rx chain: allocates skbs and iommu-maps them.
- */
-static void
-spider_net_refill_rx_chain(struct spider_net_card *card)
-{
- struct spider_net_descr_chain *chain = &card->rx_chain;
- unsigned long flags;
-
- /* one context doing the refill (and a second context seeing that
- * and omitting it) is ok. If called by NAPI, we'll be called again
- * as spider_net_decode_one_descr is called several times. If some
- * interrupt calls us, the NAPI is about to clean up anyway.
- */
- if (!spin_trylock_irqsave(&chain->lock, flags))
- return;
-
- while (spider_net_get_descr_status(chain->head->hwdescr) ==
- SPIDER_NET_DESCR_NOT_IN_USE) {
- if (spider_net_prepare_rx_descr(card, chain->head))
- break;
- chain->head = chain->head->next;
- }
-
- spin_unlock_irqrestore(&chain->lock, flags);
-}
-
-/**
- * spider_net_alloc_rx_skbs - Allocates rx skbs in rx descriptor chains
- * @card: card structure
- *
- * Returns 0 on success, <0 on failure.
- */
-static int
-spider_net_alloc_rx_skbs(struct spider_net_card *card)
-{
- struct spider_net_descr_chain *chain = &card->rx_chain;
- struct spider_net_descr *start = chain->tail;
- struct spider_net_descr *descr = start;
-
- /* Link up the hardware chain pointers */
- do {
- descr->prev->hwdescr->next_descr_addr = descr->bus_addr;
- descr = descr->next;
- } while (descr != start);
-
- /* Put at least one buffer into the chain. If this fails,
- * we've got a problem. If not, spider_net_refill_rx_chain
- * will do the rest at the end of this function.
- */
- if (spider_net_prepare_rx_descr(card, chain->head))
- goto error;
- else
- chain->head = chain->head->next;
-
- /* This will allocate the rest of the rx buffers; if some
- * allocations fail, they will be retried later on.
- */
- spider_net_refill_rx_chain(card);
- spider_net_enable_rxdmac(card);
- return 0;
-
-error:
- spider_net_free_rx_chain_contents(card);
- return -ENOMEM;
-}
-
-/**
- * spider_net_get_multicast_hash - generates hash for multicast filter table
- * @netdev: interface device structure
- * @addr: multicast address
- *
- * returns the hash value.
- *
- * spider_net_get_multicast_hash calculates a hash value for a given multicast
- * address, that is used to set the multicast filter tables
- */
-static u8
-spider_net_get_multicast_hash(struct net_device *netdev, __u8 *addr)
-{
- u32 crc;
- u8 hash;
- char addr_for_crc[ETH_ALEN] = { 0, };
- int i, bit;
-
- for (i = 0; i < ETH_ALEN * 8; i++) {
- bit = (addr[i / 8] >> (i % 8)) & 1;
- addr_for_crc[ETH_ALEN - 1 - i / 8] += bit << (7 - (i % 8));
- }
-
- crc = crc32_be(~0, addr_for_crc, netdev->addr_len);
-
- hash = (crc >> 27);
- hash <<= 3;
- hash |= crc & 7;
- hash &= 0xff;
-
- return hash;
-}
-
-/**
- * spider_net_set_multi - sets multicast addresses and promisc flags
- * @netdev: interface device structure
- *
- * spider_net_set_multi configures multicast addresses as needed for the
- * netdev interface. It also sets up multicast, allmulti and promisc
- * flags appropriately
- */
-static void
-spider_net_set_multi(struct net_device *netdev)
-{
- struct netdev_hw_addr *ha;
- u8 hash;
- int i;
- u32 reg;
- struct spider_net_card *card = netdev_priv(netdev);
- DECLARE_BITMAP(bitmask, SPIDER_NET_MULTICAST_HASHES);
-
- spider_net_set_promisc(card);
-
- if (netdev->flags & IFF_ALLMULTI) {
- bitmap_fill(bitmask, SPIDER_NET_MULTICAST_HASHES);
- goto write_hash;
- }
-
- bitmap_zero(bitmask, SPIDER_NET_MULTICAST_HASHES);
-
- /* well, we know what the broadcast hash value is: it's 0xfd
- hash = spider_net_get_multicast_hash(netdev, netdev->broadcast); */
- __set_bit(0xfd, bitmask);
-
- netdev_for_each_mc_addr(ha, netdev) {
- hash = spider_net_get_multicast_hash(netdev, ha->addr);
- __set_bit(hash, bitmask);
- }
-
-write_hash:
- for (i = 0; i < SPIDER_NET_MULTICAST_HASHES / 4; i++) {
- reg = 0;
- if (test_bit(i * 4, bitmask))
- reg += 0x08;
- reg <<= 8;
- if (test_bit(i * 4 + 1, bitmask))
- reg += 0x08;
- reg <<= 8;
- if (test_bit(i * 4 + 2, bitmask))
- reg += 0x08;
- reg <<= 8;
- if (test_bit(i * 4 + 3, bitmask))
- reg += 0x08;
-
- spider_net_write_reg(card, SPIDER_NET_GMRMHFILnR + i * 4, reg);
- }
-}
-
-/**
- * spider_net_prepare_tx_descr - fill tx descriptor with skb data
- * @card: card structure
- * @skb: packet to use
- *
- * returns 0 on success, <0 on failure.
- *
- * fills out the descriptor structure with skb data and len. Copies data,
- * if needed (32bit DMA!)
- */
-static int
-spider_net_prepare_tx_descr(struct spider_net_card *card,
- struct sk_buff *skb)
-{
- struct spider_net_descr_chain *chain = &card->tx_chain;
- struct spider_net_descr *descr;
- struct spider_net_hw_descr *hwdescr;
- dma_addr_t buf;
- unsigned long flags;
-
- buf = dma_map_single(&card->pdev->dev, skb->data, skb->len,
- DMA_TO_DEVICE);
- if (dma_mapping_error(&card->pdev->dev, buf)) {
- if (netif_msg_tx_err(card) && net_ratelimit())
- dev_err(&card->netdev->dev, "could not iommu-map packet (%p, %i). "
- "Dropping packet\n", skb->data, skb->len);
- card->spider_stats.tx_iommu_map_error++;
- return -ENOMEM;
- }
-
- spin_lock_irqsave(&chain->lock, flags);
- descr = card->tx_chain.head;
- if (descr->next == chain->tail->prev) {
- spin_unlock_irqrestore(&chain->lock, flags);
- dma_unmap_single(&card->pdev->dev, buf, skb->len,
- DMA_TO_DEVICE);
- return -ENOMEM;
- }
- hwdescr = descr->hwdescr;
- chain->head = descr->next;
-
- descr->skb = skb;
- hwdescr->buf_addr = buf;
- hwdescr->buf_size = skb->len;
- hwdescr->next_descr_addr = 0;
- hwdescr->data_status = 0;
-
- hwdescr->dmac_cmd_status =
- SPIDER_NET_DESCR_CARDOWNED | SPIDER_NET_DMAC_TXFRMTL;
- spin_unlock_irqrestore(&chain->lock, flags);
-
- if (skb->ip_summed == CHECKSUM_PARTIAL)
- switch (ip_hdr(skb)->protocol) {
- case IPPROTO_TCP:
- hwdescr->dmac_cmd_status |= SPIDER_NET_DMAC_TCP;
- break;
- case IPPROTO_UDP:
- hwdescr->dmac_cmd_status |= SPIDER_NET_DMAC_UDP;
- break;
- }
-
- /* Chain the bus address, so that the DMA engine finds this descr. */
- wmb();
- descr->prev->hwdescr->next_descr_addr = descr->bus_addr;
-
- netif_trans_update(card->netdev); /* set netdev watchdog timer */
- return 0;
-}
-
-static int
-spider_net_set_low_watermark(struct spider_net_card *card)
-{
- struct spider_net_descr *descr = card->tx_chain.tail;
- struct spider_net_hw_descr *hwdescr;
- unsigned long flags;
- int status;
- int cnt=0;
- int i;
-
- /* Measure the length of the queue. Measurement does not
- * need to be precise -- does not need a lock.
- */
- while (descr != card->tx_chain.head) {
- status = descr->hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_NOT_IN_USE;
- if (status == SPIDER_NET_DESCR_NOT_IN_USE)
- break;
- descr = descr->next;
- cnt++;
- }
-
- /* If TX queue is short, don't even bother with interrupts */
- if (cnt < card->tx_chain.num_desc/4)
- return cnt;
-
- /* Set low-watermark 3/4th's of the way into the queue. */
- descr = card->tx_chain.tail;
- cnt = (cnt*3)/4;
- for (i=0;i<cnt; i++)
- descr = descr->next;
-
- /* Set the new watermark, clear the old watermark */
- spin_lock_irqsave(&card->tx_chain.lock, flags);
- descr->hwdescr->dmac_cmd_status |= SPIDER_NET_DESCR_TXDESFLG;
- if (card->low_watermark && card->low_watermark != descr) {
- hwdescr = card->low_watermark->hwdescr;
- hwdescr->dmac_cmd_status =
- hwdescr->dmac_cmd_status & ~SPIDER_NET_DESCR_TXDESFLG;
- }
- card->low_watermark = descr;
- spin_unlock_irqrestore(&card->tx_chain.lock, flags);
- return cnt;
-}
-
-/**
- * spider_net_release_tx_chain - processes sent tx descriptors
- * @card: adapter structure
- * @brutal: if set, don't care about whether descriptor seems to be in use
- *
- * returns 0 if the tx ring is empty, otherwise 1.
- *
- * spider_net_release_tx_chain releases the tx descriptors that spider has
- * finished with (if non-brutal) or simply release tx descriptors (if brutal).
- * If some other context is calling this function, we return 1 so that we're
- * scheduled again (if we were scheduled) and will not lose initiative.
- */
-static int
-spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
-{
- struct net_device *dev = card->netdev;
- struct spider_net_descr_chain *chain = &card->tx_chain;
- struct spider_net_descr *descr;
- struct spider_net_hw_descr *hwdescr;
- struct sk_buff *skb;
- u32 buf_addr;
- unsigned long flags;
- int status;
-
- while (1) {
- spin_lock_irqsave(&chain->lock, flags);
- if (chain->tail == chain->head) {
- spin_unlock_irqrestore(&chain->lock, flags);
- return 0;
- }
- descr = chain->tail;
- hwdescr = descr->hwdescr;
-
- status = spider_net_get_descr_status(hwdescr);
- switch (status) {
- case SPIDER_NET_DESCR_COMPLETE:
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += descr->skb->len;
- break;
-
- case SPIDER_NET_DESCR_CARDOWNED:
- if (!brutal) {
- spin_unlock_irqrestore(&chain->lock, flags);
- return 1;
- }
-
- /* fallthrough, if we release the descriptors
- * brutally (then we don't care about
- * SPIDER_NET_DESCR_CARDOWNED)
- */
- fallthrough;
-
- case SPIDER_NET_DESCR_RESPONSE_ERROR:
- case SPIDER_NET_DESCR_PROTECTION_ERROR:
- case SPIDER_NET_DESCR_FORCE_END:
- if (netif_msg_tx_err(card))
- dev_err(&card->netdev->dev, "forcing end of tx descriptor "
- "with status x%02x\n", status);
- dev->stats.tx_errors++;
- break;
-
- default:
- dev->stats.tx_dropped++;
- if (!brutal) {
- spin_unlock_irqrestore(&chain->lock, flags);
- return 1;
- }
- }
-
- chain->tail = descr->next;
- hwdescr->dmac_cmd_status |= SPIDER_NET_DESCR_NOT_IN_USE;
- skb = descr->skb;
- descr->skb = NULL;
- buf_addr = hwdescr->buf_addr;
- spin_unlock_irqrestore(&chain->lock, flags);
-
- /* unmap the skb */
- if (skb) {
- dma_unmap_single(&card->pdev->dev, buf_addr, skb->len,
- DMA_TO_DEVICE);
- dev_consume_skb_any(skb);
- }
- }
- return 0;
-}
-
-/**
- * spider_net_kick_tx_dma - enables TX DMA processing
- * @card: card structure
- *
- * This routine will start the transmit DMA running if
- * it is not already running. This routine ned only be
- * called when queueing a new packet to an empty tx queue.
- * Writes the current tx chain head as start address
- * of the tx descriptor chain and enables the transmission
- * DMA engine.
- */
-static inline void
-spider_net_kick_tx_dma(struct spider_net_card *card)
-{
- struct spider_net_descr *descr;
-
- if (spider_net_read_reg(card, SPIDER_NET_GDTDMACCNTR) &
- SPIDER_NET_TX_DMA_EN)
- goto out;
-
- descr = card->tx_chain.tail;
- for (;;) {
- if (spider_net_get_descr_status(descr->hwdescr) ==
- SPIDER_NET_DESCR_CARDOWNED) {
- spider_net_write_reg(card, SPIDER_NET_GDTDCHA,
- descr->bus_addr);
- spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
- SPIDER_NET_DMA_TX_VALUE);
- break;
- }
- if (descr == card->tx_chain.head)
- break;
- descr = descr->next;
- }
-
-out:
- mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER);
-}
-
-/**
- * spider_net_xmit - transmits a frame over the device
- * @skb: packet to send out
- * @netdev: interface device structure
- *
- * returns NETDEV_TX_OK on success, NETDEV_TX_BUSY on failure
- */
-static netdev_tx_t
-spider_net_xmit(struct sk_buff *skb, struct net_device *netdev)
-{
- int cnt;
- struct spider_net_card *card = netdev_priv(netdev);
-
- spider_net_release_tx_chain(card, 0);
-
- if (spider_net_prepare_tx_descr(card, skb) != 0) {
- netdev->stats.tx_dropped++;
- netif_stop_queue(netdev);
- return NETDEV_TX_BUSY;
- }
-
- cnt = spider_net_set_low_watermark(card);
- if (cnt < 5)
- spider_net_kick_tx_dma(card);
- return NETDEV_TX_OK;
-}
-
-/**
- * spider_net_cleanup_tx_ring - cleans up the TX ring
- * @t: timer context used to obtain the pointer to net card data structure
- *
- * spider_net_cleanup_tx_ring is called either by the tx_timer
- * or from the NAPI polling routine.
- * This routine releases resources associated with transmitted
- * packets, including updating the queue tail pointer.
- */
-static void
-spider_net_cleanup_tx_ring(struct timer_list *t)
-{
- struct spider_net_card *card = from_timer(card, t, tx_timer);
- if ((spider_net_release_tx_chain(card, 0) != 0) &&
- (card->netdev->flags & IFF_UP)) {
- spider_net_kick_tx_dma(card);
- netif_wake_queue(card->netdev);
- }
-}
-
-/**
- * spider_net_do_ioctl - called for device ioctls
- * @netdev: interface device structure
- * @ifr: request parameter structure for ioctl
- * @cmd: command code for ioctl
- *
- * returns 0 on success, <0 on failure. Currently, we have no special ioctls.
- * -EOPNOTSUPP is returned if an unknown ioctl was requested
- */
-static int
-spider_net_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
-{
- switch (cmd) {
- default:
- return -EOPNOTSUPP;
- }
-}
-
-/**
- * spider_net_pass_skb_up - takes an skb from a descriptor and passes it on
- * @descr: descriptor to process
- * @card: card structure
- *
- * Fills out skb structure and passes the data to the stack.
- * The descriptor state is not changed.
- */
-static void
-spider_net_pass_skb_up(struct spider_net_descr *descr,
- struct spider_net_card *card)
-{
- struct spider_net_hw_descr *hwdescr = descr->hwdescr;
- struct sk_buff *skb = descr->skb;
- struct net_device *netdev = card->netdev;
- u32 data_status = hwdescr->data_status;
- u32 data_error = hwdescr->data_error;
-
- skb_put(skb, hwdescr->valid_size);
-
- /* the card seems to add 2 bytes of junk in front
- * of the ethernet frame
- */
-#define SPIDER_MISALIGN 2
- skb_pull(skb, SPIDER_MISALIGN);
- skb->protocol = eth_type_trans(skb, netdev);
-
- /* checksum offload */
- skb_checksum_none_assert(skb);
- if (netdev->features & NETIF_F_RXCSUM) {
- if ( ( (data_status & SPIDER_NET_DATA_STATUS_CKSUM_MASK) ==
- SPIDER_NET_DATA_STATUS_CKSUM_MASK) &&
- !(data_error & SPIDER_NET_DATA_ERR_CKSUM_MASK))
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- }
-
- if (data_status & SPIDER_NET_VLAN_PACKET) {
- /* further enhancements: HW-accel VLAN */
- }
-
- /* update netdevice statistics */
- netdev->stats.rx_packets++;
- netdev->stats.rx_bytes += skb->len;
-
- /* pass skb up to stack */
- netif_receive_skb(skb);
-}
-
-static void show_rx_chain(struct spider_net_card *card)
-{
- struct spider_net_descr_chain *chain = &card->rx_chain;
- struct spider_net_descr *start= chain->tail;
- struct spider_net_descr *descr= start;
- struct spider_net_hw_descr *hwd = start->hwdescr;
- struct device *dev = &card->netdev->dev;
- u32 curr_desc, next_desc;
- int status;
-
- int tot = 0;
- int cnt = 0;
- int off = start - chain->ring;
- int cstat = hwd->dmac_cmd_status;
-
- dev_info(dev, "Total number of descrs=%d\n",
- chain->num_desc);
- dev_info(dev, "Chain tail located at descr=%d, status=0x%x\n",
- off, cstat);
-
- curr_desc = spider_net_read_reg(card, SPIDER_NET_GDACTDPA);
- next_desc = spider_net_read_reg(card, SPIDER_NET_GDACNEXTDA);
-
- status = cstat;
- do
- {
- hwd = descr->hwdescr;
- off = descr - chain->ring;
- status = hwd->dmac_cmd_status;
-
- if (descr == chain->head)
- dev_info(dev, "Chain head is at %d, head status=0x%x\n",
- off, status);
-
- if (curr_desc == descr->bus_addr)
- dev_info(dev, "HW curr desc (GDACTDPA) is at %d, status=0x%x\n",
- off, status);
-
- if (next_desc == descr->bus_addr)
- dev_info(dev, "HW next desc (GDACNEXTDA) is at %d, status=0x%x\n",
- off, status);
-
- if (hwd->next_descr_addr == 0)
- dev_info(dev, "chain is cut at %d\n", off);
-
- if (cstat != status) {
- int from = (chain->num_desc + off - cnt) % chain->num_desc;
- int to = (chain->num_desc + off - 1) % chain->num_desc;
- dev_info(dev, "Have %d (from %d to %d) descrs "
- "with stat=0x%08x\n", cnt, from, to, cstat);
- cstat = status;
- cnt = 0;
- }
-
- cnt ++;
- tot ++;
- descr = descr->next;
- } while (descr != start);
-
- dev_info(dev, "Last %d descrs with stat=0x%08x "
- "for a total of %d descrs\n", cnt, cstat, tot);
-
-#ifdef DEBUG
- /* Now dump the whole ring */
- descr = start;
- do
- {
- struct spider_net_hw_descr *hwd = descr->hwdescr;
- status = spider_net_get_descr_status(hwd);
- cnt = descr - chain->ring;
- dev_info(dev, "Descr %d stat=0x%08x skb=%p\n",
- cnt, status, descr->skb);
- dev_info(dev, "bus addr=%08x buf addr=%08x sz=%d\n",
- descr->bus_addr, hwd->buf_addr, hwd->buf_size);
- dev_info(dev, "next=%08x result sz=%d valid sz=%d\n",
- hwd->next_descr_addr, hwd->result_size,
- hwd->valid_size);
- dev_info(dev, "dmac=%08x data stat=%08x data err=%08x\n",
- hwd->dmac_cmd_status, hwd->data_status,
- hwd->data_error);
- dev_info(dev, "\n");
-
- descr = descr->next;
- } while (descr != start);
-#endif
-
-}
-
-/**
- * spider_net_resync_head_ptr - Advance head ptr past empty descrs
- * @card: card structure
- *
- * If the driver fails to keep up and empty the queue, then the
- * hardware will run out of room to put incoming packets. This
- * will cause the hardware to skip descrs that are full (instead
- * of halting/retrying). Thus, once the driver runs again, it will need
- * to "catch up" to where the hardware chain pointer is at.
- */
-static void spider_net_resync_head_ptr(struct spider_net_card *card)
-{
- unsigned long flags;
- struct spider_net_descr_chain *chain = &card->rx_chain;
- struct spider_net_descr *descr;
- int i, status;
-
- /* Advance head pointer past any empty descrs */
- descr = chain->head;
- status = spider_net_get_descr_status(descr->hwdescr);
-
- if (status == SPIDER_NET_DESCR_NOT_IN_USE)
- return;
-
- spin_lock_irqsave(&chain->lock, flags);
-
- descr = chain->head;
- status = spider_net_get_descr_status(descr->hwdescr);
- for (i=0; i<chain->num_desc; i++) {
- if (status != SPIDER_NET_DESCR_CARDOWNED) break;
- descr = descr->next;
- status = spider_net_get_descr_status(descr->hwdescr);
- }
- chain->head = descr;
-
- spin_unlock_irqrestore(&chain->lock, flags);
-}
-
-static int spider_net_resync_tail_ptr(struct spider_net_card *card)
-{
- struct spider_net_descr_chain *chain = &card->rx_chain;
- struct spider_net_descr *descr;
- int i, status;
-
- /* Advance tail pointer past any empty and reaped descrs */
- descr = chain->tail;
- status = spider_net_get_descr_status(descr->hwdescr);
-
- for (i=0; i<chain->num_desc; i++) {
- if ((status != SPIDER_NET_DESCR_CARDOWNED) &&
- (status != SPIDER_NET_DESCR_NOT_IN_USE)) break;
- descr = descr->next;
- status = spider_net_get_descr_status(descr->hwdescr);
- }
- chain->tail = descr;
-
- if ((i == chain->num_desc) || (i == 0))
- return 1;
- return 0;
-}
-
-/**
- * spider_net_decode_one_descr - processes an RX descriptor
- * @card: card structure
- *
- * Returns 1 if a packet has been sent to the stack, otherwise 0.
- *
- * Processes an RX descriptor by iommu-unmapping the data buffer
- * and passing the packet up to the stack. This function is called
- * in softirq context, e.g. either bottom half from interrupt or
- * NAPI polling context.
- */
-static int
-spider_net_decode_one_descr(struct spider_net_card *card)
-{
- struct net_device *dev = card->netdev;
- struct spider_net_descr_chain *chain = &card->rx_chain;
- struct spider_net_descr *descr = chain->tail;
- struct spider_net_hw_descr *hwdescr = descr->hwdescr;
- u32 hw_buf_addr;
- int status;
-
- status = spider_net_get_descr_status(hwdescr);
-
- /* Nothing in the descriptor, or ring must be empty */
- if ((status == SPIDER_NET_DESCR_CARDOWNED) ||
- (status == SPIDER_NET_DESCR_NOT_IN_USE))
- return 0;
-
- /* descriptor definitively used -- move on tail */
- chain->tail = descr->next;
-
- /* unmap descriptor */
- hw_buf_addr = hwdescr->buf_addr;
- hwdescr->buf_addr = 0xffffffff;
- dma_unmap_single(&card->pdev->dev, hw_buf_addr, SPIDER_NET_MAX_FRAME,
- DMA_FROM_DEVICE);
-
- if ( (status == SPIDER_NET_DESCR_RESPONSE_ERROR) ||
- (status == SPIDER_NET_DESCR_PROTECTION_ERROR) ||
- (status == SPIDER_NET_DESCR_FORCE_END) ) {
- if (netif_msg_rx_err(card))
- dev_err(&dev->dev,
- "dropping RX descriptor with state %d\n", status);
- dev->stats.rx_dropped++;
- goto bad_desc;
- }
-
- if ( (status != SPIDER_NET_DESCR_COMPLETE) &&
- (status != SPIDER_NET_DESCR_FRAME_END) ) {
- if (netif_msg_rx_err(card))
- dev_err(&card->netdev->dev,
- "RX descriptor with unknown state %d\n", status);
- card->spider_stats.rx_desc_unk_state++;
- goto bad_desc;
- }
-
- /* The cases in which we'll throw away the packet immediately */
- if (hwdescr->data_error & SPIDER_NET_DESTROY_RX_FLAGS) {
- if (netif_msg_rx_err(card))
- dev_err(&card->netdev->dev,
- "error in received descriptor found, "
- "data_status=x%08x, data_error=x%08x\n",
- hwdescr->data_status, hwdescr->data_error);
- goto bad_desc;
- }
-
- if (hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_BAD_STATUS) {
- dev_err(&card->netdev->dev, "bad status, cmd_status=x%08x\n",
- hwdescr->dmac_cmd_status);
- pr_err("buf_addr=x%08x\n", hw_buf_addr);
- pr_err("buf_size=x%08x\n", hwdescr->buf_size);
- pr_err("next_descr_addr=x%08x\n", hwdescr->next_descr_addr);
- pr_err("result_size=x%08x\n", hwdescr->result_size);
- pr_err("valid_size=x%08x\n", hwdescr->valid_size);
- pr_err("data_status=x%08x\n", hwdescr->data_status);
- pr_err("data_error=x%08x\n", hwdescr->data_error);
- pr_err("which=%ld\n", descr - card->rx_chain.ring);
-
- card->spider_stats.rx_desc_error++;
- goto bad_desc;
- }
-
- /* Ok, we've got a packet in descr */
- spider_net_pass_skb_up(descr, card);
- descr->skb = NULL;
- hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
- return 1;
-
-bad_desc:
- if (netif_msg_rx_err(card))
- show_rx_chain(card);
- dev_kfree_skb_irq(descr->skb);
- descr->skb = NULL;
- hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
- return 0;
-}
-
-/**
- * spider_net_poll - NAPI poll function called by the stack to return packets
- * @napi: napi device structure
- * @budget: number of packets we can pass to the stack at most
- *
- * returns the number of packets processed, which is at most @budget.
- *
- * spider_net_poll passes the received packets from the rx descriptors to
- * the stack (using netif_receive_skb). If fewer than @budget packets were
- * processed, the driver re-enables rx interrupts and completes NAPI;
- * otherwise polling will continue.
- */
-static int spider_net_poll(struct napi_struct *napi, int budget)
-{
- struct spider_net_card *card = container_of(napi, struct spider_net_card, napi);
- int packets_done = 0;
-
- while (packets_done < budget) {
- if (!spider_net_decode_one_descr(card))
- break;
-
- packets_done++;
- }
-
- if ((packets_done == 0) && (card->num_rx_ints != 0)) {
- if (!spider_net_resync_tail_ptr(card))
- packets_done = budget;
- spider_net_resync_head_ptr(card);
- }
- card->num_rx_ints = 0;
-
- spider_net_refill_rx_chain(card);
- spider_net_enable_rxdmac(card);
-
- spider_net_cleanup_tx_ring(&card->tx_timer);
-
- /* if all packets are in the stack, enable interrupts and return 0 */
- /* if not, return 1 */
- if (packets_done < budget) {
- napi_complete_done(napi, packets_done);
- spider_net_rx_irq_on(card);
- card->ignore_rx_ramfull = 0;
- }
-
- return packets_done;
-}
-
-/**
- * spider_net_set_mac - sets the MAC of an interface
- * @netdev: interface device structure
- * @p: pointer to new MAC address
- *
- * Returns 0 on success, -EADDRNOTAVAIL if the given address is not a
- * valid Ethernet address.
- */
-static int
-spider_net_set_mac(struct net_device *netdev, void *p)
-{
- struct spider_net_card *card = netdev_priv(netdev);
- u32 macl, macu, regvalue;
- struct sockaddr *addr = p;
-
- if (!is_valid_ether_addr(addr->sa_data))
- return -EADDRNOTAVAIL;
-
- eth_hw_addr_set(netdev, addr->sa_data);
-
- /* switch off GMACTPE and GMACRPE */
- regvalue = spider_net_read_reg(card, SPIDER_NET_GMACOPEMD);
- regvalue &= ~((1 << 5) | (1 << 6));
- spider_net_write_reg(card, SPIDER_NET_GMACOPEMD, regvalue);
-
- /* write mac */
- macu = (netdev->dev_addr[0]<<24) + (netdev->dev_addr[1]<<16) +
- (netdev->dev_addr[2]<<8) + (netdev->dev_addr[3]);
- macl = (netdev->dev_addr[4]<<8) + (netdev->dev_addr[5]);
- spider_net_write_reg(card, SPIDER_NET_GMACUNIMACU, macu);
- spider_net_write_reg(card, SPIDER_NET_GMACUNIMACL, macl);
-
- /* switch GMACTPE and GMACRPE back on */
- regvalue = spider_net_read_reg(card, SPIDER_NET_GMACOPEMD);
- regvalue |= ((1 << 5) | (1 << 6));
- spider_net_write_reg(card, SPIDER_NET_GMACOPEMD, regvalue);
-
- spider_net_set_promisc(card);
-
- return 0;
-}
-
-/**
- * spider_net_link_reset
- * @netdev: net device structure
- *
- * This is called when the PHY_LINK signal is asserted. For the blade this is
- * not connected so we should never get here.
- *
- */
-static void
-spider_net_link_reset(struct net_device *netdev)
-{
-
- struct spider_net_card *card = netdev_priv(netdev);
-
- del_timer_sync(&card->aneg_timer);
-
- /* clear interrupt, block further interrupts */
- spider_net_write_reg(card, SPIDER_NET_GMACST,
- spider_net_read_reg(card, SPIDER_NET_GMACST));
- spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0);
-
- /* reset phy and setup aneg */
- card->aneg_count = 0;
- card->medium = BCM54XX_COPPER;
- spider_net_setup_aneg(card);
- mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
-
-}
-
-/**
- * spider_net_handle_error_irq - handles errors raised by an interrupt
- * @card: card structure
- * @status_reg: interrupt status register 0 (GHIINT0STS)
- * @error_reg1: interrupt status register 1 (GHIINT1STS)
- * @error_reg2: interrupt status register 2 (GHIINT2STS)
- *
- * spider_net_handle_error_irq treats or ignores all error conditions
- * found when an interrupt is presented
- */
-static void
-spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg,
- u32 error_reg1, u32 error_reg2)
-{
- u32 i;
- int show_error = 1;
-
- /* check GHIINT0STS ************************************/
- if (status_reg)
- for (i = 0; i < 32; i++)
- if (status_reg & (1<<i))
- switch (i)
- {
- /* let error_reg1 and error_reg2 evaluation decide, what to do
- case SPIDER_NET_PHYINT:
- case SPIDER_NET_GMAC2INT:
- case SPIDER_NET_GMAC1INT:
- case SPIDER_NET_GFIFOINT:
- case SPIDER_NET_DMACINT:
- case SPIDER_NET_GSYSINT:
- break; */
-
- case SPIDER_NET_GIPSINT:
- show_error = 0;
- break;
-
- case SPIDER_NET_GPWOPCMPINT:
- /* PHY write operation completed */
- show_error = 0;
- break;
- case SPIDER_NET_GPROPCMPINT:
- /* PHY read operation completed */
- /* we don't use semaphores, as we poll for the completion
- * of the read operation in spider_net_read_phy. Should take
- * about 50 us
- */
- show_error = 0;
- break;
- case SPIDER_NET_GPWFFINT:
- /* PHY command queue full */
- if (netif_msg_intr(card))
- dev_err(&card->netdev->dev, "PHY write queue full\n");
- show_error = 0;
- break;
-
- /* case SPIDER_NET_GRMDADRINT: not used. print a message */
- /* case SPIDER_NET_GRMARPINT: not used. print a message */
- /* case SPIDER_NET_GRMMPINT: not used. print a message */
-
- case SPIDER_NET_GDTDEN0INT:
- /* someone has set TX_DMA_EN to 0 */
- show_error = 0;
- break;
-
- case SPIDER_NET_GDDDEN0INT:
- case SPIDER_NET_GDCDEN0INT:
- case SPIDER_NET_GDBDEN0INT:
- case SPIDER_NET_GDADEN0INT:
- /* someone has set RX_DMA_EN to 0 */
- show_error = 0;
- break;
-
- /* RX interrupts */
- case SPIDER_NET_GDDFDCINT:
- case SPIDER_NET_GDCFDCINT:
- case SPIDER_NET_GDBFDCINT:
- case SPIDER_NET_GDAFDCINT:
- /* case SPIDER_NET_GDNMINT: not used. print a message */
- /* case SPIDER_NET_GCNMINT: not used. print a message */
- /* case SPIDER_NET_GBNMINT: not used. print a message */
- /* case SPIDER_NET_GANMINT: not used. print a message */
- /* case SPIDER_NET_GRFNMINT: not used. print a message */
- show_error = 0;
- break;
-
- /* TX interrupts */
- case SPIDER_NET_GDTFDCINT:
- show_error = 0;
- break;
- case SPIDER_NET_GTTEDINT:
- show_error = 0;
- break;
- case SPIDER_NET_GDTDCEINT:
- /* chain end. If a descriptor should be sent, kick off
- * tx dma
- if (card->tx_chain.tail != card->tx_chain.head)
- spider_net_kick_tx_dma(card);
- */
- show_error = 0;
- break;
-
- /* case SPIDER_NET_G1TMCNTINT: not used. print a message */
- /* case SPIDER_NET_GFREECNTINT: not used. print a message */
- }
-
- /* check GHIINT1STS ************************************/
- if (error_reg1)
- for (i = 0; i < 32; i++)
- if (error_reg1 & (1<<i))
- switch (i)
- {
- case SPIDER_NET_GTMFLLINT:
- /* TX RAM full may happen on a usual case.
- * Logging is not needed.
- */
- show_error = 0;
- break;
- case SPIDER_NET_GRFDFLLINT:
- case SPIDER_NET_GRFCFLLINT:
- case SPIDER_NET_GRFBFLLINT:
- case SPIDER_NET_GRFAFLLINT:
- case SPIDER_NET_GRMFLLINT:
- /* Could happen when rx chain is full */
- if (card->ignore_rx_ramfull == 0) {
- card->ignore_rx_ramfull = 1;
- spider_net_resync_head_ptr(card);
- spider_net_refill_rx_chain(card);
- spider_net_enable_rxdmac(card);
- card->num_rx_ints ++;
- napi_schedule(&card->napi);
- }
- show_error = 0;
- break;
-
- /* case SPIDER_NET_GTMSHTINT: problem, print a message */
- case SPIDER_NET_GDTINVDINT:
- /* all right, tx from previous descr ok */
- show_error = 0;
- break;
-
- /* chain end */
- case SPIDER_NET_GDDDCEINT:
- case SPIDER_NET_GDCDCEINT:
- case SPIDER_NET_GDBDCEINT:
- case SPIDER_NET_GDADCEINT:
- spider_net_resync_head_ptr(card);
- spider_net_refill_rx_chain(card);
- spider_net_enable_rxdmac(card);
- card->num_rx_ints ++;
- napi_schedule(&card->napi);
- show_error = 0;
- break;
-
- /* invalid descriptor */
- case SPIDER_NET_GDDINVDINT:
- case SPIDER_NET_GDCINVDINT:
- case SPIDER_NET_GDBINVDINT:
- case SPIDER_NET_GDAINVDINT:
- /* Could happen when rx chain is full */
- spider_net_resync_head_ptr(card);
- spider_net_refill_rx_chain(card);
- spider_net_enable_rxdmac(card);
- card->num_rx_ints ++;
- napi_schedule(&card->napi);
- show_error = 0;
- break;
-
- /* case SPIDER_NET_GDTRSERINT: problem, print a message */
- /* case SPIDER_NET_GDDRSERINT: problem, print a message */
- /* case SPIDER_NET_GDCRSERINT: problem, print a message */
- /* case SPIDER_NET_GDBRSERINT: problem, print a message */
- /* case SPIDER_NET_GDARSERINT: problem, print a message */
- /* case SPIDER_NET_GDSERINT: problem, print a message */
- /* case SPIDER_NET_GDTPTERINT: problem, print a message */
- /* case SPIDER_NET_GDDPTERINT: problem, print a message */
- /* case SPIDER_NET_GDCPTERINT: problem, print a message */
- /* case SPIDER_NET_GDBPTERINT: problem, print a message */
- /* case SPIDER_NET_GDAPTERINT: problem, print a message */
- default:
- show_error = 1;
- break;
- }
-
- /* check GHIINT2STS ************************************/
- if (error_reg2)
- for (i = 0; i < 32; i++)
- if (error_reg2 & (1<<i))
- switch (i)
- {
- /* there is nothing we can (want to) do at this time. Log a
- * message, we can switch on and off the specific values later on
- case SPIDER_NET_GPROPERINT:
- case SPIDER_NET_GMCTCRSNGINT:
- case SPIDER_NET_GMCTLCOLINT:
- case SPIDER_NET_GMCTTMOTINT:
- case SPIDER_NET_GMCRCAERINT:
- case SPIDER_NET_GMCRCALERINT:
- case SPIDER_NET_GMCRALNERINT:
- case SPIDER_NET_GMCROVRINT:
- case SPIDER_NET_GMCRRNTINT:
- case SPIDER_NET_GMCRRXERINT:
- case SPIDER_NET_GTITCSERINT:
- case SPIDER_NET_GTIFMTERINT:
- case SPIDER_NET_GTIPKTRVKINT:
- case SPIDER_NET_GTISPINGINT:
- case SPIDER_NET_GTISADNGINT:
- case SPIDER_NET_GTISPDNGINT:
- case SPIDER_NET_GRIFMTERINT:
- case SPIDER_NET_GRIPKTRVKINT:
- case SPIDER_NET_GRISPINGINT:
- case SPIDER_NET_GRISADNGINT:
- case SPIDER_NET_GRISPDNGINT:
- break;
- */
- default:
- break;
- }
-
- if ((show_error) && (netif_msg_intr(card)) && net_ratelimit())
- dev_err(&card->netdev->dev, "Error interrupt, GHIINT0STS = 0x%08x, "
- "GHIINT1STS = 0x%08x, GHIINT2STS = 0x%08x\n",
- status_reg, error_reg1, error_reg2);
-
- /* clear interrupt sources */
- spider_net_write_reg(card, SPIDER_NET_GHIINT1STS, error_reg1);
- spider_net_write_reg(card, SPIDER_NET_GHIINT2STS, error_reg2);
-}
-
-/**
- * spider_net_interrupt - interrupt handler for spider_net
- * @irq: interrupt number
- * @ptr: pointer to net_device
- *
- * returns IRQ_HANDLED if the interrupt was meant for this driver, or
- * IRQ_NONE if no interrupt raised by the card was found.
- *
- * This is the interrupt handler, that turns off
- * interrupts for this device and makes the stack poll the driver
- */
-static irqreturn_t
-spider_net_interrupt(int irq, void *ptr)
-{
- struct net_device *netdev = ptr;
- struct spider_net_card *card = netdev_priv(netdev);
- u32 status_reg, error_reg1, error_reg2;
-
- status_reg = spider_net_read_reg(card, SPIDER_NET_GHIINT0STS);
- error_reg1 = spider_net_read_reg(card, SPIDER_NET_GHIINT1STS);
- error_reg2 = spider_net_read_reg(card, SPIDER_NET_GHIINT2STS);
-
- if (!(status_reg & SPIDER_NET_INT0_MASK_VALUE) &&
- !(error_reg1 & SPIDER_NET_INT1_MASK_VALUE) &&
- !(error_reg2 & SPIDER_NET_INT2_MASK_VALUE))
- return IRQ_NONE;
-
- if (status_reg & SPIDER_NET_RXINT ) {
- spider_net_rx_irq_off(card);
- napi_schedule(&card->napi);
- card->num_rx_ints ++;
- }
- if (status_reg & SPIDER_NET_TXINT)
- napi_schedule(&card->napi);
-
- if (status_reg & SPIDER_NET_LINKINT)
- spider_net_link_reset(netdev);
-
- if (status_reg & SPIDER_NET_ERRINT )
- spider_net_handle_error_irq(card, status_reg,
- error_reg1, error_reg2);
-
- /* clear interrupt sources */
- spider_net_write_reg(card, SPIDER_NET_GHIINT0STS, status_reg);
-
- return IRQ_HANDLED;
-}
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/**
- * spider_net_poll_controller - artificial interrupt for netconsole etc.
- * @netdev: interface device structure
- *
- * see Documentation/networking/netconsole.rst
- */
-static void
-spider_net_poll_controller(struct net_device *netdev)
-{
- disable_irq(netdev->irq);
- spider_net_interrupt(netdev->irq, netdev);
- enable_irq(netdev->irq);
-}
-#endif /* CONFIG_NET_POLL_CONTROLLER */
-
-/**
- * spider_net_enable_interrupts - enable interrupts
- * @card: card structure
- *
- * spider_net_enable_interrupts enables several interrupts
- */
-static void
-spider_net_enable_interrupts(struct spider_net_card *card)
-{
- spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK,
- SPIDER_NET_INT0_MASK_VALUE);
- spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK,
- SPIDER_NET_INT1_MASK_VALUE);
- spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK,
- SPIDER_NET_INT2_MASK_VALUE);
-}
-
-/**
- * spider_net_disable_interrupts - disable interrupts
- * @card: card structure
- *
- * spider_net_disable_interrupts disables all the interrupts
- */
-static void
-spider_net_disable_interrupts(struct spider_net_card *card)
-{
- spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 0);
- spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK, 0);
- spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK, 0);
- spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0);
-}
-
-/**
- * spider_net_init_card - initializes the card
- * @card: card structure
- *
- * spider_net_init_card initializes the card so that other registers can
- * be used
- */
-static void
-spider_net_init_card(struct spider_net_card *card)
-{
- spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
- SPIDER_NET_CKRCTRL_STOP_VALUE);
-
- spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
- SPIDER_NET_CKRCTRL_RUN_VALUE);
-
- /* trigger ETOMOD signal */
- spider_net_write_reg(card, SPIDER_NET_GMACOPEMD,
- spider_net_read_reg(card, SPIDER_NET_GMACOPEMD) | 0x4);
-
- spider_net_disable_interrupts(card);
-}
-
-/**
- * spider_net_enable_card - enables the card by setting all kinds of regs
- * @card: card structure
- *
- * spider_net_enable_card sets a lot of SMMIO registers to enable the device
- */
-static void
-spider_net_enable_card(struct spider_net_card *card)
-{
- int i;
- /* the following array consists of (register),(value) pairs
- * that are set in this function. A register of 0 ends the list
- */
- u32 regs[][2] = {
- { SPIDER_NET_GRESUMINTNUM, 0 },
- { SPIDER_NET_GREINTNUM, 0 },
-
- /* set interrupt frame number registers */
- /* clear the single DMA engine registers first */
- { SPIDER_NET_GFAFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
- { SPIDER_NET_GFBFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
- { SPIDER_NET_GFCFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
- { SPIDER_NET_GFDFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
- /* then set, what we really need */
- { SPIDER_NET_GFFRMNUM, SPIDER_NET_FRAMENUM_VALUE },
-
- /* timer counter registers and stuff */
- { SPIDER_NET_GFREECNNUM, 0 },
- { SPIDER_NET_GONETIMENUM, 0 },
- { SPIDER_NET_GTOUTFRMNUM, 0 },
-
- /* RX mode setting */
- { SPIDER_NET_GRXMDSET, SPIDER_NET_RXMODE_VALUE },
- /* TX mode setting */
- { SPIDER_NET_GTXMDSET, SPIDER_NET_TXMODE_VALUE },
- /* IPSEC mode setting */
- { SPIDER_NET_GIPSECINIT, SPIDER_NET_IPSECINIT_VALUE },
-
- { SPIDER_NET_GFTRESTRT, SPIDER_NET_RESTART_VALUE },
-
- { SPIDER_NET_GMRWOLCTRL, 0 },
- { SPIDER_NET_GTESTMD, 0x10000000 },
- { SPIDER_NET_GTTQMSK, 0x00400040 },
-
- { SPIDER_NET_GMACINTEN, 0 },
-
- /* flow control stuff */
- { SPIDER_NET_GMACAPAUSE, SPIDER_NET_MACAPAUSE_VALUE },
- { SPIDER_NET_GMACTXPAUSE, SPIDER_NET_TXPAUSE_VALUE },
-
- { SPIDER_NET_GMACBSTLMT, SPIDER_NET_BURSTLMT_VALUE },
- { 0, 0}
- };
-
- i = 0;
- while (regs[i][0]) {
- spider_net_write_reg(card, regs[i][0], regs[i][1]);
- i++;
- }
-
- /* clear unicast filter table entries 1 to 14 */
- for (i = 1; i <= 14; i++) {
- spider_net_write_reg(card,
- SPIDER_NET_GMRUAFILnR + i * 8,
- 0x00080000);
- spider_net_write_reg(card,
- SPIDER_NET_GMRUAFILnR + i * 8 + 4,
- 0x00000000);
- }
-
- spider_net_write_reg(card, SPIDER_NET_GMRUA0FIL15R, 0x08080000);
-
- spider_net_write_reg(card, SPIDER_NET_ECMODE, SPIDER_NET_ECMODE_VALUE);
-
- /* set chain tail address for RX chains and
- * enable DMA
- */
- spider_net_enable_rxchtails(card);
- spider_net_enable_rxdmac(card);
-
- spider_net_write_reg(card, SPIDER_NET_GRXDMAEN, SPIDER_NET_WOL_VALUE);
-
- spider_net_write_reg(card, SPIDER_NET_GMACLENLMT,
- SPIDER_NET_LENLMT_VALUE);
- spider_net_write_reg(card, SPIDER_NET_GMACOPEMD,
- SPIDER_NET_OPMODE_VALUE);
-
- spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
- SPIDER_NET_GDTBSTA);
-}
-
-/**
- * spider_net_download_firmware - loads firmware into the adapter
- * @card: card structure
- * @firmware_ptr: pointer to firmware data
- *
- * spider_net_download_firmware loads the firmware data into the
- * adapter. It assumes the length etc. to be correct.
- */
-static int
-spider_net_download_firmware(struct spider_net_card *card,
- const void *firmware_ptr)
-{
- int sequencer, i;
- const u32 *fw_ptr = firmware_ptr;
-
- /* stop sequencers */
- spider_net_write_reg(card, SPIDER_NET_GSINIT,
- SPIDER_NET_STOP_SEQ_VALUE);
-
- for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS;
- sequencer++) {
- spider_net_write_reg(card,
- SPIDER_NET_GSnPRGADR + sequencer * 8, 0);
- for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) {
- spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
- sequencer * 8, *fw_ptr);
- fw_ptr++;
- }
- }
-
- if (spider_net_read_reg(card, SPIDER_NET_GSINIT))
- return -EIO;
-
- spider_net_write_reg(card, SPIDER_NET_GSINIT,
- SPIDER_NET_RUN_SEQ_VALUE);
-
- return 0;
-}
-
-/**
- * spider_net_init_firmware - reads in firmware parts
- * @card: card structure
- *
- * Returns 0 on success, <0 on failure
- *
- * spider_net_init_firmware opens the sequencer firmware and does some basic
- * checks. This function opens and releases the firmware structure. A call
- * to download the firmware is performed before the release.
- *
- * Firmware format
- * ===============
- * spider_fw.bin is expected to be a file containing 6*1024*4 bytes, 4k being
- * the program for each sequencer. Use the command
- * tail -q -n +2 Seq_code1_0x088.txt Seq_code2_0x090.txt \
- * Seq_code3_0x098.txt Seq_code4_0x0A0.txt Seq_code5_0x0A8.txt \
- * Seq_code6_0x0B0.txt | xxd -r -p -c4 > spider_fw.bin
- *
- * to generate spider_fw.bin, if you have sequencer programs with something
- * like the following contents for each sequencer:
- * <ONE LINE COMMENT>
- * <FIRST 4-BYTES-WORD FOR SEQUENCER>
- * <SECOND 4-BYTES-WORD FOR SEQUENCER>
- * ...
- * <1024th 4-BYTES-WORD FOR SEQUENCER>
- */
-static int
-spider_net_init_firmware(struct spider_net_card *card)
-{
- struct firmware *firmware = NULL;
- struct device_node *dn;
- const u8 *fw_prop = NULL;
- int err = -ENOENT;
- int fw_size;
-
- if (request_firmware((const struct firmware **)&firmware,
- SPIDER_NET_FIRMWARE_NAME, &card->pdev->dev) == 0) {
- if ( (firmware->size != SPIDER_NET_FIRMWARE_LEN) &&
- netif_msg_probe(card) ) {
- dev_err(&card->netdev->dev,
- "Incorrect size of spidernet firmware in " \
- "filesystem. Looking in host firmware...\n");
- goto try_host_fw;
- }
- err = spider_net_download_firmware(card, firmware->data);
-
- release_firmware(firmware);
- if (err)
- goto try_host_fw;
-
- goto done;
- }
-
-try_host_fw:
- dn = pci_device_to_OF_node(card->pdev);
- if (!dn)
- goto out_err;
-
- fw_prop = of_get_property(dn, "firmware", &fw_size);
- if (!fw_prop)
- goto out_err;
-
- if ( (fw_size != SPIDER_NET_FIRMWARE_LEN) &&
- netif_msg_probe(card) ) {
- dev_err(&card->netdev->dev,
- "Incorrect size of spidernet firmware in host firmware\n");
- goto done;
- }
-
- err = spider_net_download_firmware(card, fw_prop);
-
-done:
- return err;
-out_err:
- if (netif_msg_probe(card))
- dev_err(&card->netdev->dev,
- "Couldn't find spidernet firmware in filesystem " \
- "or host firmware\n");
- return err;
-}
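For reference, the expected blob size above works out to 6 sequencers * 1024 32-bit words * 4 bytes = 24576 bytes. A minimal sketch of the size check that spider_net_init_firmware() applies to a candidate spider_fw.bin (constants mirror spider_net.h; the helper name is illustrative, not part of the driver):

#include <linux/types.h>

#define FW_SEQS		6
#define FW_SEQWORDS	1024
#define FW_LEN		(FW_SEQS * FW_SEQWORDS * sizeof(u32))	/* 24576 bytes */

/* Sketch only: reject a firmware blob whose size does not match. */
static bool spider_fw_size_ok(size_t len)
{
	return len == FW_LEN;
}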
-
-/**
- * spider_net_open - called upon ifconfig up
- * @netdev: interface device structure
- *
- * returns 0 on success, <0 on failure
- *
- * spider_net_open allocates all the descriptors and memory needed for
- * operation, sets up multicast list and enables interrupts
- */
-int
-spider_net_open(struct net_device *netdev)
-{
- struct spider_net_card *card = netdev_priv(netdev);
- int result;
-
- result = spider_net_init_firmware(card);
- if (result)
- goto init_firmware_failed;
-
- /* start probing with copper */
- card->aneg_count = 0;
- card->medium = BCM54XX_COPPER;
- spider_net_setup_aneg(card);
- if (card->phy.def->phy_id)
- mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
-
- result = spider_net_init_chain(card, &card->tx_chain);
- if (result)
- goto alloc_tx_failed;
- card->low_watermark = NULL;
-
- result = spider_net_init_chain(card, &card->rx_chain);
- if (result)
- goto alloc_rx_failed;
-
- /* Allocate rx skbs */
- result = spider_net_alloc_rx_skbs(card);
- if (result)
- goto alloc_skbs_failed;
-
- spider_net_set_multi(netdev);
-
- /* further enhancement: setup hw vlan, if needed */
-
- result = -EBUSY;
- if (request_irq(netdev->irq, spider_net_interrupt,
- IRQF_SHARED, netdev->name, netdev))
- goto register_int_failed;
-
- spider_net_enable_card(card);
-
- netif_start_queue(netdev);
- netif_carrier_on(netdev);
- napi_enable(&card->napi);
-
- spider_net_enable_interrupts(card);
-
- return 0;
-
-register_int_failed:
- spider_net_free_rx_chain_contents(card);
-alloc_skbs_failed:
- spider_net_free_chain(card, &card->rx_chain);
-alloc_rx_failed:
- spider_net_free_chain(card, &card->tx_chain);
-alloc_tx_failed:
- del_timer_sync(&card->aneg_timer);
-init_firmware_failed:
- return result;
-}
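The error path above follows the usual goto-based unwind: resources are acquired in order and released in reverse order, with each failure jumping to a progressively earlier label. A generic sketch of the idiom, with purely illustrative function names:

int acquire_a(void);
int acquire_b(void);
int acquire_c(void);
void release_a(void);
void release_b(void);

/* Sketch: unwind in reverse order of acquisition on failure. */
static int example_open(void)
{
	int err;

	err = acquire_a();
	if (err)
		goto out;
	err = acquire_b();
	if (err)
		goto undo_a;
	err = acquire_c();
	if (err)
		goto undo_b;
	return 0;

undo_b:
	release_b();
undo_a:
	release_a();
out:
	return err;
}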
-
-/**
- * spider_net_link_phy
- * @t: timer context used to obtain the pointer to net card data structure
- */
-static void spider_net_link_phy(struct timer_list *t)
-{
- struct spider_net_card *card = from_timer(card, t, aneg_timer);
- struct mii_phy *phy = &card->phy;
-
-	/* if link didn't come up after SPIDER_NET_ANEG_TIMEOUT tries, set up the PHY again */
- if (card->aneg_count > SPIDER_NET_ANEG_TIMEOUT) {
-
- pr_debug("%s: link is down trying to bring it up\n",
- card->netdev->name);
-
- switch (card->medium) {
- case BCM54XX_COPPER:
- /* enable fiber with autonegotiation first */
- if (phy->def->ops->enable_fiber)
- phy->def->ops->enable_fiber(phy, 1);
- card->medium = BCM54XX_FIBER;
- break;
-
- case BCM54XX_FIBER:
- /* fiber didn't come up, try to disable fiber autoneg */
- if (phy->def->ops->enable_fiber)
- phy->def->ops->enable_fiber(phy, 0);
- card->medium = BCM54XX_UNKNOWN;
- break;
-
- case BCM54XX_UNKNOWN:
- /* copper, fiber with and without failed,
- * retry from beginning
- */
- spider_net_setup_aneg(card);
- card->medium = BCM54XX_COPPER;
- break;
- }
-
- card->aneg_count = 0;
- mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
- return;
- }
-
- /* link still not up, try again later */
- if (!(phy->def->ops->poll_link(phy))) {
- card->aneg_count++;
- mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
- return;
- }
-
- /* link came up, get abilities */
- phy->def->ops->read_link(phy);
-
- spider_net_write_reg(card, SPIDER_NET_GMACST,
- spider_net_read_reg(card, SPIDER_NET_GMACST));
- spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0x4);
-
- if (phy->speed == 1000)
- spider_net_write_reg(card, SPIDER_NET_GMACMODE, 0x00000001);
- else
- spider_net_write_reg(card, SPIDER_NET_GMACMODE, 0);
-
- card->aneg_count = 0;
-
- pr_info("%s: link up, %i Mbps, %s-duplex %sautoneg.\n",
- card->netdev->name, phy->speed,
- phy->duplex == 1 ? "Full" : "Half",
- phy->autoneg == 1 ? "" : "no ");
-}
-
-/**
- * spider_net_setup_phy - setup PHY
- * @card: card structure
- *
- * returns 0 on success, <0 on failure
- *
- * spider_net_setup_phy is used as part of spider_net_probe.
- **/
-static int
-spider_net_setup_phy(struct spider_net_card *card)
-{
- struct mii_phy *phy = &card->phy;
-
- spider_net_write_reg(card, SPIDER_NET_GDTDMASEL,
- SPIDER_NET_DMASEL_VALUE);
- spider_net_write_reg(card, SPIDER_NET_GPCCTRL,
- SPIDER_NET_PHY_CTRL_VALUE);
-
- phy->dev = card->netdev;
- phy->mdio_read = spider_net_read_phy;
- phy->mdio_write = spider_net_write_phy;
-
- for (phy->mii_id = 1; phy->mii_id <= 31; phy->mii_id++) {
- unsigned short id;
- id = spider_net_read_phy(card->netdev, phy->mii_id, MII_BMSR);
- if (id != 0x0000 && id != 0xffff) {
- if (!sungem_phy_probe(phy, phy->mii_id)) {
- pr_info("Found %s.\n", phy->def->name);
- break;
- }
- }
- }
-
- return 0;
-}
-
-/**
- * spider_net_workaround_rxramfull - work around firmware bug
- * @card: card structure
- *
- * no return value
- **/
-static void
-spider_net_workaround_rxramfull(struct spider_net_card *card)
-{
- int i, sequencer = 0;
-
- /* cancel reset */
- spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
- SPIDER_NET_CKRCTRL_RUN_VALUE);
-
- /* empty sequencer data */
- for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS;
- sequencer++) {
- spider_net_write_reg(card, SPIDER_NET_GSnPRGADR +
- sequencer * 8, 0x0);
- for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) {
- spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
- sequencer * 8, 0x0);
- }
- }
-
- /* set sequencer operation */
- spider_net_write_reg(card, SPIDER_NET_GSINIT, 0x000000fe);
-
- /* reset */
- spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
- SPIDER_NET_CKRCTRL_STOP_VALUE);
-}
-
-/**
- * spider_net_stop - called upon ifconfig down
- * @netdev: interface device structure
- *
- * always returns 0
- */
-int
-spider_net_stop(struct net_device *netdev)
-{
- struct spider_net_card *card = netdev_priv(netdev);
-
- napi_disable(&card->napi);
- netif_carrier_off(netdev);
- netif_stop_queue(netdev);
- del_timer_sync(&card->tx_timer);
- del_timer_sync(&card->aneg_timer);
-
- spider_net_disable_interrupts(card);
-
- free_irq(netdev->irq, netdev);
-
- spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
- SPIDER_NET_DMA_TX_FEND_VALUE);
-
- /* turn off DMA, force end */
- spider_net_disable_rxdmac(card);
-
- /* release chains */
- spider_net_release_tx_chain(card, 1);
- spider_net_free_rx_chain_contents(card);
-
- spider_net_free_chain(card, &card->tx_chain);
- spider_net_free_chain(card, &card->rx_chain);
-
- return 0;
-}
-
-/**
- * spider_net_tx_timeout_task - task scheduled by the watchdog timeout
- * function (not to be called in interrupt context)
- * @work: work context used to obtain the pointer to net card data structure
- *
- * called as task when tx hangs, resets interface (if interface is up)
- */
-static void
-spider_net_tx_timeout_task(struct work_struct *work)
-{
- struct spider_net_card *card =
- container_of(work, struct spider_net_card, tx_timeout_task);
- struct net_device *netdev = card->netdev;
-
- if (!(netdev->flags & IFF_UP))
- goto out;
-
- netif_device_detach(netdev);
- spider_net_stop(netdev);
-
- spider_net_workaround_rxramfull(card);
- spider_net_init_card(card);
-
- if (spider_net_setup_phy(card))
- goto out;
-
- spider_net_open(netdev);
- spider_net_kick_tx_dma(card);
- netif_device_attach(netdev);
-
-out:
- atomic_dec(&card->tx_timeout_task_counter);
-}
-
-/**
- * spider_net_tx_timeout - called when the tx timeout watchdog kicks in.
- * @netdev: interface device structure
- * @txqueue: unused
- *
- * called, if tx hangs. Schedules a task that resets the interface
- */
-static void
-spider_net_tx_timeout(struct net_device *netdev, unsigned int txqueue)
-{
- struct spider_net_card *card;
-
- card = netdev_priv(netdev);
- atomic_inc(&card->tx_timeout_task_counter);
- if (netdev->flags & IFF_UP)
- schedule_work(&card->tx_timeout_task);
- else
- atomic_dec(&card->tx_timeout_task_counter);
- card->spider_stats.tx_timeouts++;
-}
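The timeout path above pairs an atomic counter with a wait queue so that remove() can wait for any scheduled reset work to drain before tearing the card down. A generic sketch of that reference-count-and-wait pattern (illustrative names, not driver code; the generic form also issues a wake_up when the count hits zero):

#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

static atomic_t pending = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(waitq);

static void on_tx_timeout(struct work_struct *reset_work)
{
	atomic_inc(&pending);		/* reference taken for the queued work */
	schedule_work(reset_work);
}

static void reset_work_fn(struct work_struct *work)
{
	/* ... bring the interface down and back up ... */
	if (atomic_dec_and_test(&pending))
		wake_up(&waitq);	/* unblock a waiting teardown */
}

static void wait_for_reset_tasks(void)
{
	wait_event(waitq, atomic_read(&pending) == 0);
}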
-
-static const struct net_device_ops spider_net_ops = {
- .ndo_open = spider_net_open,
- .ndo_stop = spider_net_stop,
- .ndo_start_xmit = spider_net_xmit,
- .ndo_set_rx_mode = spider_net_set_multi,
- .ndo_set_mac_address = spider_net_set_mac,
- .ndo_eth_ioctl = spider_net_do_ioctl,
- .ndo_tx_timeout = spider_net_tx_timeout,
- .ndo_validate_addr = eth_validate_addr,
- /* HW VLAN */
-#ifdef CONFIG_NET_POLL_CONTROLLER
- /* poll controller */
- .ndo_poll_controller = spider_net_poll_controller,
-#endif /* CONFIG_NET_POLL_CONTROLLER */
-};
-
-/**
- * spider_net_setup_netdev_ops - initialization of net_device operations
- * @netdev: net_device structure
- *
- * fills out function pointers in the net_device structure
- */
-static void
-spider_net_setup_netdev_ops(struct net_device *netdev)
-{
- netdev->netdev_ops = &spider_net_ops;
- netdev->watchdog_timeo = SPIDER_NET_WATCHDOG_TIMEOUT;
- /* ethtool ops */
- netdev->ethtool_ops = &spider_net_ethtool_ops;
-}
-
-/**
- * spider_net_setup_netdev - initialization of net_device
- * @card: card structure
- *
- * Returns 0 on success or <0 on failure
- *
- * spider_net_setup_netdev initializes the net_device structure
- **/
-static int
-spider_net_setup_netdev(struct spider_net_card *card)
-{
- int result;
- struct net_device *netdev = card->netdev;
- struct device_node *dn;
- struct sockaddr addr;
- const u8 *mac;
-
- SET_NETDEV_DEV(netdev, &card->pdev->dev);
-
- pci_set_drvdata(card->pdev, netdev);
-
- timer_setup(&card->tx_timer, spider_net_cleanup_tx_ring, 0);
- netdev->irq = card->pdev->irq;
-
- card->aneg_count = 0;
- timer_setup(&card->aneg_timer, spider_net_link_phy, 0);
-
- netif_napi_add(netdev, &card->napi, spider_net_poll);
-
- spider_net_setup_netdev_ops(netdev);
-
- netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM;
- if (SPIDER_NET_RX_CSUM_DEFAULT)
- netdev->features |= NETIF_F_RXCSUM;
- netdev->features |= NETIF_F_IP_CSUM;
- /* some time: NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
- * NETIF_F_HW_VLAN_CTAG_FILTER
- */
- netdev->lltx = true;
-
- /* MTU range: 64 - 2294 */
- netdev->min_mtu = SPIDER_NET_MIN_MTU;
- netdev->max_mtu = SPIDER_NET_MAX_MTU;
-
- netdev->irq = card->pdev->irq;
- card->num_rx_ints = 0;
- card->ignore_rx_ramfull = 0;
-
- dn = pci_device_to_OF_node(card->pdev);
- if (!dn)
- return -EIO;
-
- mac = of_get_property(dn, "local-mac-address", NULL);
- if (!mac)
- return -EIO;
- memcpy(addr.sa_data, mac, ETH_ALEN);
-
- result = spider_net_set_mac(netdev, &addr);
- if ((result) && (netif_msg_probe(card)))
- dev_err(&card->netdev->dev,
- "Failed to set MAC address: %i\n", result);
-
- result = register_netdev(netdev);
- if (result) {
- if (netif_msg_probe(card))
- dev_err(&card->netdev->dev,
- "Couldn't register net_device: %i\n", result);
- return result;
- }
-
- if (netif_msg_probe(card))
- pr_info("Initialized device %s.\n", netdev->name);
-
- return 0;
-}
-
-/**
- * spider_net_alloc_card - allocates net_device and card structure
- *
- * returns the card structure or NULL in case of errors
- *
- * the card and net_device structures are linked to each other
- */
-static struct spider_net_card *
-spider_net_alloc_card(void)
-{
- struct net_device *netdev;
- struct spider_net_card *card;
-
- netdev = alloc_etherdev(struct_size(card, darray,
- size_add(tx_descriptors, rx_descriptors)));
- if (!netdev)
- return NULL;
-
- card = netdev_priv(netdev);
- card->netdev = netdev;
- card->msg_enable = SPIDER_NET_DEFAULT_MSG;
- INIT_WORK(&card->tx_timeout_task, spider_net_tx_timeout_task);
- init_waitqueue_head(&card->waitq);
- atomic_set(&card->tx_timeout_task_counter, 0);
-
- card->rx_chain.num_desc = rx_descriptors;
- card->rx_chain.ring = card->darray;
- card->tx_chain.num_desc = tx_descriptors;
- card->tx_chain.ring = card->darray + rx_descriptors;
-
- return card;
-}
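spider_net_alloc_card() puts the private data and both descriptor rings into a single allocation: the card structure ends in a flexible array member and struct_size() computes the total size with overflow checking. A minimal sketch of that layout, with illustrative names and kzalloc() standing in for alloc_etherdev():

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct ring_elem { u32 val; };

struct priv {
	int num;
	struct ring_elem darray[];	/* must be the last member */
};

/* Sketch: one allocation covering the struct plus rx + tx ring entries. */
static struct priv *priv_alloc(size_t rx, size_t tx)
{
	struct priv *p;

	p = kzalloc(struct_size(p, darray, size_add(rx, tx)), GFP_KERNEL);
	if (!p)
		return NULL;
	p->num = rx + tx;
	return p;
}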
-
-/**
- * spider_net_undo_pci_setup - releases PCI resources
- * @card: card structure
- *
- * spider_net_undo_pci_setup releases the mapped regions
- */
-static void
-spider_net_undo_pci_setup(struct spider_net_card *card)
-{
- iounmap(card->regs);
- pci_release_regions(card->pdev);
-}
-
-/**
- * spider_net_setup_pci_dev - sets up the device in terms of PCI operations
- * @pdev: PCI device
- *
- * Returns the card structure or NULL if any errors occur
- *
- * spider_net_setup_pci_dev initializes pdev and, together with the
- * functions called in spider_net_open, configures the device so that
- * data can be transferred over it.
- * The net_device structure is attached to the card structure if the
- * function returns without error.
- **/
-static struct spider_net_card *
-spider_net_setup_pci_dev(struct pci_dev *pdev)
-{
- struct spider_net_card *card;
- unsigned long mmio_start, mmio_len;
-
- if (pci_enable_device(pdev)) {
- dev_err(&pdev->dev, "Couldn't enable PCI device\n");
- return NULL;
- }
-
- if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
- dev_err(&pdev->dev,
- "Couldn't find proper PCI device base address.\n");
- goto out_disable_dev;
- }
-
- if (pci_request_regions(pdev, spider_net_driver_name)) {
- dev_err(&pdev->dev,
- "Couldn't obtain PCI resources, aborting.\n");
- goto out_disable_dev;
- }
-
- pci_set_master(pdev);
-
- card = spider_net_alloc_card();
- if (!card) {
- dev_err(&pdev->dev,
- "Couldn't allocate net_device structure, aborting.\n");
- goto out_release_regions;
- }
- card->pdev = pdev;
-
- /* fetch base address and length of first resource */
- mmio_start = pci_resource_start(pdev, 0);
- mmio_len = pci_resource_len(pdev, 0);
-
- card->netdev->mem_start = mmio_start;
- card->netdev->mem_end = mmio_start + mmio_len;
- card->regs = ioremap(mmio_start, mmio_len);
-
- if (!card->regs) {
- dev_err(&pdev->dev,
- "Couldn't obtain PCI resources, aborting.\n");
- goto out_release_regions;
- }
-
- return card;
-
-out_release_regions:
- pci_release_regions(pdev);
-out_disable_dev:
- pci_disable_device(pdev);
- return NULL;
-}
-
-/**
- * spider_net_probe - initialization of a device
- * @pdev: PCI device
- * @ent: entry in the device id list
- *
- * Returns 0 on success, <0 on failure
- *
- * spider_net_probe initializes pdev and registers a net_device
- * structure for it. After that, the device can be ifconfig'ed up
- **/
-static int
-spider_net_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- int err = -EIO;
- struct spider_net_card *card;
-
- card = spider_net_setup_pci_dev(pdev);
- if (!card)
- goto out;
-
- spider_net_workaround_rxramfull(card);
- spider_net_init_card(card);
-
- err = spider_net_setup_phy(card);
- if (err)
- goto out_undo_pci;
-
- err = spider_net_setup_netdev(card);
- if (err)
- goto out_undo_pci;
-
- return 0;
-
-out_undo_pci:
- spider_net_undo_pci_setup(card);
- free_netdev(card->netdev);
-out:
- return err;
-}
-
-/**
- * spider_net_remove - removal of a device
- * @pdev: PCI device
- *
- *
- * net_device
- **/
-static void
-spider_net_remove(struct pci_dev *pdev)
-{
- struct net_device *netdev;
- struct spider_net_card *card;
-
- netdev = pci_get_drvdata(pdev);
- card = netdev_priv(netdev);
-
- wait_event(card->waitq,
- atomic_read(&card->tx_timeout_task_counter) == 0);
-
- unregister_netdev(netdev);
-
- /* switch off card */
- spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
- SPIDER_NET_CKRCTRL_STOP_VALUE);
- spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
- SPIDER_NET_CKRCTRL_RUN_VALUE);
-
- spider_net_undo_pci_setup(card);
- free_netdev(netdev);
-}
-
-static struct pci_driver spider_net_driver = {
- .name = spider_net_driver_name,
- .id_table = spider_net_pci_tbl,
- .probe = spider_net_probe,
- .remove = spider_net_remove
-};
-
-/**
- * spider_net_init - init function when the driver is loaded
- *
- * spider_net_init registers the device driver
- */
-static int __init spider_net_init(void)
-{
- printk(KERN_INFO "Spidernet version %s.\n", VERSION);
-
- if (rx_descriptors < SPIDER_NET_RX_DESCRIPTORS_MIN) {
- rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_MIN;
- pr_info("adjusting rx descriptors to %i.\n", rx_descriptors);
- }
- if (rx_descriptors > SPIDER_NET_RX_DESCRIPTORS_MAX) {
- rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_MAX;
- pr_info("adjusting rx descriptors to %i.\n", rx_descriptors);
- }
- if (tx_descriptors < SPIDER_NET_TX_DESCRIPTORS_MIN) {
- tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_MIN;
- pr_info("adjusting tx descriptors to %i.\n", tx_descriptors);
- }
- if (tx_descriptors > SPIDER_NET_TX_DESCRIPTORS_MAX) {
- tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_MAX;
- pr_info("adjusting tx descriptors to %i.\n", tx_descriptors);
- }
-
- return pci_register_driver(&spider_net_driver);
-}
-
-/**
- * spider_net_cleanup - exit function when driver is unloaded
- *
- * spider_net_cleanup unregisters the device driver
- */
-static void __exit spider_net_cleanup(void)
-{
- pci_unregister_driver(&spider_net_driver);
-}
-
-module_init(spider_net_init);
-module_exit(spider_net_cleanup);
diff --git a/drivers/net/ethernet/toshiba/spider_net.h b/drivers/net/ethernet/toshiba/spider_net.h
deleted file mode 100644
index 51948e2b3a34..000000000000
--- a/drivers/net/ethernet/toshiba/spider_net.h
+++ /dev/null
@@ -1,475 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Network device driver for Cell Processor-Based Blade and Celleb platform
- *
- * (C) Copyright IBM Corp. 2005
- * (C) Copyright 2006 TOSHIBA CORPORATION
- *
- * Authors : Utz Bacher <utz.bacher@de.ibm.com>
- * Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
- */
-
-#ifndef _SPIDER_NET_H
-#define _SPIDER_NET_H
-
-#define VERSION "2.0 B"
-
-#include <linux/sungem_phy.h>
-
-int spider_net_stop(struct net_device *netdev);
-int spider_net_open(struct net_device *netdev);
-
-extern const struct ethtool_ops spider_net_ethtool_ops;
-
-extern char spider_net_driver_name[];
-
-#define SPIDER_NET_MAX_FRAME 2312
-#define SPIDER_NET_MAX_MTU 2294
-#define SPIDER_NET_MIN_MTU 64
-
-#define SPIDER_NET_RXBUF_ALIGN 128
-
-#define SPIDER_NET_RX_DESCRIPTORS_DEFAULT 256
-#define SPIDER_NET_RX_DESCRIPTORS_MIN 16
-#define SPIDER_NET_RX_DESCRIPTORS_MAX 512
-
-#define SPIDER_NET_TX_DESCRIPTORS_DEFAULT 256
-#define SPIDER_NET_TX_DESCRIPTORS_MIN 16
-#define SPIDER_NET_TX_DESCRIPTORS_MAX 512
-
-#define SPIDER_NET_TX_TIMER (HZ/5)
-#define SPIDER_NET_ANEG_TIMER (HZ)
-#define SPIDER_NET_ANEG_TIMEOUT 5
-
-#define SPIDER_NET_RX_CSUM_DEFAULT 1
-
-#define SPIDER_NET_WATCHDOG_TIMEOUT 50*HZ
-
-#define SPIDER_NET_FIRMWARE_SEQS 6
-#define SPIDER_NET_FIRMWARE_SEQWORDS 1024
-#define SPIDER_NET_FIRMWARE_LEN (SPIDER_NET_FIRMWARE_SEQS * \
- SPIDER_NET_FIRMWARE_SEQWORDS * \
- sizeof(u32))
-#define SPIDER_NET_FIRMWARE_NAME "spider_fw.bin"
-
-/** spider_net SMMIO registers */
-#define SPIDER_NET_GHIINT0STS 0x00000000
-#define SPIDER_NET_GHIINT1STS 0x00000004
-#define SPIDER_NET_GHIINT2STS 0x00000008
-#define SPIDER_NET_GHIINT0MSK 0x00000010
-#define SPIDER_NET_GHIINT1MSK 0x00000014
-#define SPIDER_NET_GHIINT2MSK 0x00000018
-
-#define SPIDER_NET_GRESUMINTNUM 0x00000020
-#define SPIDER_NET_GREINTNUM 0x00000024
-
-#define SPIDER_NET_GFFRMNUM 0x00000028
-#define SPIDER_NET_GFAFRMNUM 0x0000002c
-#define SPIDER_NET_GFBFRMNUM 0x00000030
-#define SPIDER_NET_GFCFRMNUM 0x00000034
-#define SPIDER_NET_GFDFRMNUM 0x00000038
-
-/* clear these counters (not used) */
-#define SPIDER_NET_GFREECNNUM 0x0000003c
-#define SPIDER_NET_GONETIMENUM 0x00000040
-
-#define SPIDER_NET_GTOUTFRMNUM 0x00000044
-
-#define SPIDER_NET_GTXMDSET 0x00000050
-#define SPIDER_NET_GPCCTRL 0x00000054
-#define SPIDER_NET_GRXMDSET 0x00000058
-#define SPIDER_NET_GIPSECINIT 0x0000005c
-#define SPIDER_NET_GFTRESTRT 0x00000060
-#define SPIDER_NET_GRXDMAEN 0x00000064
-#define SPIDER_NET_GMRWOLCTRL 0x00000068
-#define SPIDER_NET_GPCWOPCMD 0x0000006c
-#define SPIDER_NET_GPCROPCMD 0x00000070
-#define SPIDER_NET_GTTFRMCNT 0x00000078
-#define SPIDER_NET_GTESTMD 0x0000007c
-
-#define SPIDER_NET_GSINIT 0x00000080
-#define SPIDER_NET_GSnPRGADR 0x00000084
-#define SPIDER_NET_GSnPRGDAT 0x00000088
-
-#define SPIDER_NET_GMACOPEMD 0x00000100
-#define SPIDER_NET_GMACLENLMT 0x00000108
-#define SPIDER_NET_GMACST 0x00000110
-#define SPIDER_NET_GMACINTEN 0x00000118
-#define SPIDER_NET_GMACPHYCTRL 0x00000120
-
-#define SPIDER_NET_GMACAPAUSE 0x00000154
-#define SPIDER_NET_GMACTXPAUSE 0x00000164
-
-#define SPIDER_NET_GMACMODE 0x000001b0
-#define SPIDER_NET_GMACBSTLMT 0x000001b4
-
-#define SPIDER_NET_GMACUNIMACU 0x000001c0
-#define SPIDER_NET_GMACUNIMACL 0x000001c8
-
-#define SPIDER_NET_GMRMHFILnR 0x00000400
-#define SPIDER_NET_MULTICAST_HASHES 256
-
-#define SPIDER_NET_GMRUAFILnR 0x00000500
-#define SPIDER_NET_GMRUA0FIL15R 0x00000578
-
-#define SPIDER_NET_GTTQMSK 0x00000934
-
-/* RX DMA controller registers, all 0x00000a.. are for DMA controller A,
- * 0x00000b.. for DMA controller B, etc. */
-#define SPIDER_NET_GDADCHA 0x00000a00
-#define SPIDER_NET_GDADMACCNTR 0x00000a04
-#define SPIDER_NET_GDACTDPA 0x00000a08
-#define SPIDER_NET_GDACTDCNT 0x00000a0c
-#define SPIDER_NET_GDACDBADDR 0x00000a20
-#define SPIDER_NET_GDACDBSIZE 0x00000a24
-#define SPIDER_NET_GDACNEXTDA 0x00000a28
-#define SPIDER_NET_GDACCOMST 0x00000a2c
-#define SPIDER_NET_GDAWBCOMST 0x00000a30
-#define SPIDER_NET_GDAWBRSIZE 0x00000a34
-#define SPIDER_NET_GDAWBVSIZE 0x00000a38
-#define SPIDER_NET_GDAWBTRST 0x00000a3c
-#define SPIDER_NET_GDAWBTRERR 0x00000a40
-
-/* TX DMA controller registers */
-#define SPIDER_NET_GDTDCHA 0x00000e00
-#define SPIDER_NET_GDTDMACCNTR 0x00000e04
-#define SPIDER_NET_GDTCDPA 0x00000e08
-#define SPIDER_NET_GDTDMASEL 0x00000e14
-
-#define SPIDER_NET_ECMODE 0x00000f00
-/* clock and reset control register */
-#define SPIDER_NET_CKRCTRL 0x00000ff0
-
-/** SCONFIG registers */
-#define SPIDER_NET_SCONFIG_IOACTE 0x00002810
-
-/** interrupt mask registers */
-#define SPIDER_NET_INT0_MASK_VALUE 0x3f7fe2c7
-#define SPIDER_NET_INT1_MASK_VALUE 0x0000fff2
-#define SPIDER_NET_INT2_MASK_VALUE 0x000003f1
-
-/* we rely on flagged descriptor interrupts */
-#define SPIDER_NET_FRAMENUM_VALUE 0x00000000
-/* set this first, then the FRAMENUM_VALUE */
-#define SPIDER_NET_GFXFRAMES_VALUE 0x00000000
-
-#define SPIDER_NET_STOP_SEQ_VALUE 0x00000000
-#define SPIDER_NET_RUN_SEQ_VALUE 0x0000007e
-
-#define SPIDER_NET_PHY_CTRL_VALUE 0x00040040
-/* #define SPIDER_NET_PHY_CTRL_VALUE 0x01070080*/
-#define SPIDER_NET_RXMODE_VALUE 0x00000011
-/* auto retransmission in case of MAC aborts */
-#define SPIDER_NET_TXMODE_VALUE 0x00010000
-#define SPIDER_NET_RESTART_VALUE 0x00000000
-#define SPIDER_NET_WOL_VALUE 0x00001111
-#if 0
-#define SPIDER_NET_WOL_VALUE 0x00000000
-#endif
-#define SPIDER_NET_IPSECINIT_VALUE 0x6f716f71
-
-/* pause frames: automatic, no upper retransmission count */
-/* outside loopback mode: ETOMOD signal doesn't matter, not connected */
-/* ETOMOD signal is brought to PHY reset. bit 2 must be 1 in Celleb */
-#define SPIDER_NET_OPMODE_VALUE 0x00000067
-/*#define SPIDER_NET_OPMODE_VALUE 0x001b0062*/
-#define SPIDER_NET_LENLMT_VALUE 0x00000908
-
-#define SPIDER_NET_MACAPAUSE_VALUE 0x00000800 /* about 1 ms */
-#define SPIDER_NET_TXPAUSE_VALUE 0x00000000
-
-#define SPIDER_NET_MACMODE_VALUE 0x00000001
-#define SPIDER_NET_BURSTLMT_VALUE 0x00000200 /* about 16 us */
-
-/* DMAC control register GDMACCNTR
- *
- * 1(0) enable r/tx dma
- * 0000000 fixed to 0
- *
- * 000000 fixed to 0
- * 0(1) en/disable descr writeback on force end
- * 0(1) force end
- *
- * 000000 fixed to 0
- * 00 burst alignment: 128 bytes
- * 11 burst alignment: 1024 bytes
- *
- * 00000 fixed to 0
- * 0 descr writeback size 32 bytes
- * 0(1) descr chain end interrupt enable
- * 0(1) descr status writeback enable */
-
-/* to set RX_DMA_EN */
-#define SPIDER_NET_DMA_RX_VALUE 0x80000000
-#define SPIDER_NET_DMA_RX_FEND_VALUE 0x00030003
-/* to set TX_DMA_EN */
-#define SPIDER_NET_TX_DMA_EN 0x80000000
-#define SPIDER_NET_GDTBSTA 0x00000300
-#define SPIDER_NET_GDTDCEIDIS 0x00000002
-#define SPIDER_NET_DMA_TX_VALUE SPIDER_NET_TX_DMA_EN | \
- SPIDER_NET_GDTDCEIDIS | \
- SPIDER_NET_GDTBSTA
-
-#define SPIDER_NET_DMA_TX_FEND_VALUE 0x00030003
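A general C note on the SPIDER_NET_DMA_TX_VALUE macro above, shown as a hedged sketch of the conventional form (the _PAREN name is illustrative, not part of the driver; no misuse is observed in this diff):

/* Value macros built from ORed flags are normally wrapped in outer
 * parentheses, since "x & SPIDER_NET_DMA_TX_VALUE" would otherwise
 * expand to "x & EN | CEIDIS | BSTA" and group as "((x & EN) | CEIDIS | BSTA)".
 */
#define SPIDER_NET_DMA_TX_VALUE_PAREN	(SPIDER_NET_TX_DMA_EN | \
					 SPIDER_NET_GDTDCEIDIS | \
					 SPIDER_NET_GDTBSTA)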
-
-/* SPIDER_NET_UA_DESCR_VALUE is OR'ed with the unicast address */
-#define SPIDER_NET_UA_DESCR_VALUE 0x00080000
-#define SPIDER_NET_PROMISC_VALUE 0x00080000
-#define SPIDER_NET_NONPROMISC_VALUE 0x00000000
-
-#define SPIDER_NET_DMASEL_VALUE 0x00000001
-
-#define SPIDER_NET_ECMODE_VALUE 0x00000000
-
-#define SPIDER_NET_CKRCTRL_RUN_VALUE 0x1fff010f
-#define SPIDER_NET_CKRCTRL_STOP_VALUE 0x0000010f
-
-#define SPIDER_NET_SBIMSTATE_VALUE 0x00000000
-#define SPIDER_NET_SBTMSTATE_VALUE 0x00000000
-
-/* SPIDER_NET_GHIINT0STS bits, in reverse order so that they can be used
- * with 1 << SPIDER_NET_... */
-enum spider_net_int0_status {
- SPIDER_NET_GPHYINT = 0,
- SPIDER_NET_GMAC2INT,
- SPIDER_NET_GMAC1INT,
- SPIDER_NET_GIPSINT,
- SPIDER_NET_GFIFOINT,
- SPIDER_NET_GDMACINT,
- SPIDER_NET_GSYSINT,
- SPIDER_NET_GPWOPCMPINT,
- SPIDER_NET_GPROPCMPINT,
- SPIDER_NET_GPWFFINT,
- SPIDER_NET_GRMDADRINT,
- SPIDER_NET_GRMARPINT,
- SPIDER_NET_GRMMPINT,
- SPIDER_NET_GDTDEN0INT,
- SPIDER_NET_GDDDEN0INT,
- SPIDER_NET_GDCDEN0INT,
- SPIDER_NET_GDBDEN0INT,
- SPIDER_NET_GDADEN0INT,
- SPIDER_NET_GDTFDCINT,
- SPIDER_NET_GDDFDCINT,
- SPIDER_NET_GDCFDCINT,
- SPIDER_NET_GDBFDCINT,
- SPIDER_NET_GDAFDCINT,
- SPIDER_NET_GTTEDINT,
- SPIDER_NET_GDTDCEINT,
- SPIDER_NET_GRFDNMINT,
- SPIDER_NET_GRFCNMINT,
- SPIDER_NET_GRFBNMINT,
- SPIDER_NET_GRFANMINT,
- SPIDER_NET_GRFNMINT,
- SPIDER_NET_G1TMCNTINT,
- SPIDER_NET_GFREECNTINT
-};
-/* GHIINT1STS bits */
-enum spider_net_int1_status {
- SPIDER_NET_GTMFLLINT = 0,
- SPIDER_NET_GRMFLLINT,
- SPIDER_NET_GTMSHTINT,
- SPIDER_NET_GDTINVDINT,
- SPIDER_NET_GRFDFLLINT,
- SPIDER_NET_GDDDCEINT,
- SPIDER_NET_GDDINVDINT,
- SPIDER_NET_GRFCFLLINT,
- SPIDER_NET_GDCDCEINT,
- SPIDER_NET_GDCINVDINT,
- SPIDER_NET_GRFBFLLINT,
- SPIDER_NET_GDBDCEINT,
- SPIDER_NET_GDBINVDINT,
- SPIDER_NET_GRFAFLLINT,
- SPIDER_NET_GDADCEINT,
- SPIDER_NET_GDAINVDINT,
- SPIDER_NET_GDTRSERINT,
- SPIDER_NET_GDDRSERINT,
- SPIDER_NET_GDCRSERINT,
- SPIDER_NET_GDBRSERINT,
- SPIDER_NET_GDARSERINT,
- SPIDER_NET_GDSERINT,
- SPIDER_NET_GDTPTERINT,
- SPIDER_NET_GDDPTERINT,
- SPIDER_NET_GDCPTERINT,
- SPIDER_NET_GDBPTERINT,
- SPIDER_NET_GDAPTERINT
-};
-/* GHIINT2STS bits */
-enum spider_net_int2_status {
- SPIDER_NET_GPROPERINT = 0,
- SPIDER_NET_GMCTCRSNGINT,
- SPIDER_NET_GMCTLCOLINT,
- SPIDER_NET_GMCTTMOTINT,
- SPIDER_NET_GMCRCAERINT,
- SPIDER_NET_GMCRCALERINT,
- SPIDER_NET_GMCRALNERINT,
- SPIDER_NET_GMCROVRINT,
- SPIDER_NET_GMCRRNTINT,
- SPIDER_NET_GMCRRXERINT,
- SPIDER_NET_GTITCSERINT,
- SPIDER_NET_GTIFMTERINT,
- SPIDER_NET_GTIPKTRVKINT,
- SPIDER_NET_GTISPINGINT,
- SPIDER_NET_GTISADNGINT,
- SPIDER_NET_GTISPDNGINT,
- SPIDER_NET_GRIFMTERINT,
- SPIDER_NET_GRIPKTRVKINT,
- SPIDER_NET_GRISPINGINT,
- SPIDER_NET_GRISADNGINT,
- SPIDER_NET_GRISPDNGINT
-};
-
-#define SPIDER_NET_TXINT (1 << SPIDER_NET_GDTFDCINT)
-
-/* We rely on flagged descriptor interrupts */
-#define SPIDER_NET_RXINT ( (1 << SPIDER_NET_GDAFDCINT) )
-
-#define SPIDER_NET_LINKINT ( 1 << SPIDER_NET_GMAC2INT )
-
-#define SPIDER_NET_ERRINT ( 0xffffffff & \
- (~SPIDER_NET_TXINT) & \
- (~SPIDER_NET_RXINT) & \
- (~SPIDER_NET_LINKINT) )
-
-#define SPIDER_NET_GPREXEC 0x80000000
-#define SPIDER_NET_GPRDAT_MASK 0x0000ffff
-
-#define SPIDER_NET_DMAC_NOINTR_COMPLETE 0x00800000
-#define SPIDER_NET_DMAC_TXFRMTL 0x00040000
-#define SPIDER_NET_DMAC_TCP 0x00020000
-#define SPIDER_NET_DMAC_UDP 0x00030000
-#define SPIDER_NET_TXDCEST 0x08000000
-
-#define SPIDER_NET_DESCR_RXFDIS 0x00000001
-#define SPIDER_NET_DESCR_RXDCEIS 0x00000002
-#define SPIDER_NET_DESCR_RXDEN0IS 0x00000004
-#define SPIDER_NET_DESCR_RXINVDIS 0x00000008
-#define SPIDER_NET_DESCR_RXRERRIS 0x00000010
-#define SPIDER_NET_DESCR_RXFDCIMS 0x00000100
-#define SPIDER_NET_DESCR_RXDCEIMS 0x00000200
-#define SPIDER_NET_DESCR_RXDEN0IMS 0x00000400
-#define SPIDER_NET_DESCR_RXINVDIMS 0x00000800
-#define SPIDER_NET_DESCR_RXRERRMIS 0x00001000
-#define SPIDER_NET_DESCR_UNUSED 0x077fe0e0
-
-#define SPIDER_NET_DESCR_IND_PROC_MASK 0xF0000000
-#define SPIDER_NET_DESCR_COMPLETE 0x00000000 /* used in rx and tx */
-#define SPIDER_NET_DESCR_RESPONSE_ERROR 0x10000000 /* used in rx and tx */
-#define SPIDER_NET_DESCR_PROTECTION_ERROR 0x20000000 /* used in rx and tx */
-#define SPIDER_NET_DESCR_FRAME_END 0x40000000 /* used in rx */
-#define SPIDER_NET_DESCR_FORCE_END 0x50000000 /* used in rx and tx */
-#define SPIDER_NET_DESCR_CARDOWNED 0xA0000000 /* used in rx and tx */
-#define SPIDER_NET_DESCR_NOT_IN_USE 0xF0000000
-#define SPIDER_NET_DESCR_TXDESFLG 0x00800000
-
-#define SPIDER_NET_DESCR_BAD_STATUS (SPIDER_NET_DESCR_RXDEN0IS | \
- SPIDER_NET_DESCR_RXRERRIS | \
- SPIDER_NET_DESCR_RXDEN0IMS | \
- SPIDER_NET_DESCR_RXINVDIMS | \
- SPIDER_NET_DESCR_RXRERRMIS | \
- SPIDER_NET_DESCR_UNUSED)
-
-/* Descriptor, as defined by the hardware */
-struct spider_net_hw_descr {
- u32 buf_addr;
- u32 buf_size;
- u32 next_descr_addr;
- u32 dmac_cmd_status;
- u32 result_size;
- u32 valid_size; /* all zeroes for tx */
- u32 data_status;
- u32 data_error; /* all zeroes for tx */
-} __attribute__((aligned(32)));
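The hardware descriptor above is eight u32 fields, i.e. exactly 32 bytes, and is aligned to 32 bytes so ring entries stay naturally aligned for DMA. A sketch of the build-time checks one could attach to such a layout (illustrative only):

#include <linux/build_bug.h>

/* Sketch: assert the 32-byte size and alignment of the DMA descriptor. */
static_assert(sizeof(struct spider_net_hw_descr) == 32);
static_assert(__alignof__(struct spider_net_hw_descr) == 32);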
-
-struct spider_net_descr {
- struct spider_net_hw_descr *hwdescr;
- struct sk_buff *skb;
- u32 bus_addr;
- struct spider_net_descr *next;
- struct spider_net_descr *prev;
-};
-
-struct spider_net_descr_chain {
- spinlock_t lock;
- struct spider_net_descr *head;
- struct spider_net_descr *tail;
- struct spider_net_descr *ring;
- int num_desc;
- struct spider_net_hw_descr *hwring;
- dma_addr_t dma_addr;
-};
-
-/* descriptor data_status bits */
-#define SPIDER_NET_RX_IPCHK 29
-#define SPIDER_NET_RX_TCPCHK 28
-#define SPIDER_NET_VLAN_PACKET 21
-#define SPIDER_NET_DATA_STATUS_CKSUM_MASK ( (1 << SPIDER_NET_RX_IPCHK) | \
- (1 << SPIDER_NET_RX_TCPCHK) )
-
-/* descriptor data_error bits */
-#define SPIDER_NET_RX_IPCHKERR 27
-#define SPIDER_NET_RX_RXTCPCHKERR 28
-
-#define SPIDER_NET_DATA_ERR_CKSUM_MASK (1 << SPIDER_NET_RX_IPCHKERR)
-
-/* the cases in which we don't pass the packet to the stack.
- * 701b8000 would be correct, but every packet gets that flag */
-#define SPIDER_NET_DESTROY_RX_FLAGS 0x700b8000
-
-#define SPIDER_NET_DEFAULT_MSG ( NETIF_MSG_DRV | \
- NETIF_MSG_PROBE | \
- NETIF_MSG_LINK | \
- NETIF_MSG_TIMER | \
- NETIF_MSG_IFDOWN | \
- NETIF_MSG_IFUP | \
- NETIF_MSG_RX_ERR | \
- NETIF_MSG_TX_ERR | \
- NETIF_MSG_TX_QUEUED | \
- NETIF_MSG_INTR | \
- NETIF_MSG_TX_DONE | \
- NETIF_MSG_RX_STATUS | \
- NETIF_MSG_PKTDATA | \
- NETIF_MSG_HW | \
- NETIF_MSG_WOL )
-
-struct spider_net_extra_stats {
- unsigned long rx_desc_error;
- unsigned long tx_timeouts;
- unsigned long alloc_rx_skb_error;
- unsigned long rx_iommu_map_error;
- unsigned long tx_iommu_map_error;
- unsigned long rx_desc_unk_state;
-};
-
-struct spider_net_card {
- struct net_device *netdev;
- struct pci_dev *pdev;
- struct mii_phy phy;
-
- struct napi_struct napi;
-
- int medium;
-
- void __iomem *regs;
-
- struct spider_net_descr_chain tx_chain;
- struct spider_net_descr_chain rx_chain;
- struct spider_net_descr *low_watermark;
-
- int aneg_count;
- struct timer_list aneg_timer;
- struct timer_list tx_timer;
- struct work_struct tx_timeout_task;
- atomic_t tx_timeout_task_counter;
- wait_queue_head_t waitq;
- int num_rx_ints;
- int ignore_rx_ramfull;
-
- /* for ethtool */
- int msg_enable;
- struct spider_net_extra_stats spider_stats;
-
- /* Must be last item in struct */
- struct spider_net_descr darray[];
-};
-
-#endif
diff --git a/drivers/net/ethernet/toshiba/spider_net_ethtool.c b/drivers/net/ethernet/toshiba/spider_net_ethtool.c
deleted file mode 100644
index fef9fd127b5e..000000000000
--- a/drivers/net/ethernet/toshiba/spider_net_ethtool.c
+++ /dev/null
@@ -1,174 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Network device driver for Cell Processor-Based Blade
- *
- * (C) Copyright IBM Corp. 2005
- *
- * Authors : Utz Bacher <utz.bacher@de.ibm.com>
- * Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
- */
-
-#include <linux/netdevice.h>
-#include <linux/ethtool.h>
-#include <linux/pci.h>
-
-#include "spider_net.h"
-
-
-static struct {
- const char str[ETH_GSTRING_LEN];
-} ethtool_stats_keys[] = {
- { "tx_packets" },
- { "tx_bytes" },
- { "rx_packets" },
- { "rx_bytes" },
- { "tx_errors" },
- { "tx_dropped" },
- { "rx_dropped" },
- { "rx_descriptor_error" },
- { "tx_timeouts" },
- { "alloc_rx_skb_error" },
- { "rx_iommu_map_error" },
- { "tx_iommu_map_error" },
- { "rx_desc_unk_state" },
-};
-
-static int
-spider_net_ethtool_get_link_ksettings(struct net_device *netdev,
- struct ethtool_link_ksettings *cmd)
-{
- struct spider_net_card *card;
- card = netdev_priv(netdev);
-
- ethtool_link_ksettings_zero_link_mode(cmd, supported);
- ethtool_link_ksettings_add_link_mode(cmd, supported, 1000baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
-
- ethtool_link_ksettings_zero_link_mode(cmd, advertising);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
-
- cmd->base.port = PORT_FIBRE;
- cmd->base.speed = card->phy.speed;
- cmd->base.duplex = DUPLEX_FULL;
-
- return 0;
-}
-
-static void
-spider_net_ethtool_get_drvinfo(struct net_device *netdev,
- struct ethtool_drvinfo *drvinfo)
-{
- struct spider_net_card *card;
- card = netdev_priv(netdev);
-
- /* clear and fill out info */
- strscpy(drvinfo->driver, spider_net_driver_name,
- sizeof(drvinfo->driver));
- strscpy(drvinfo->version, VERSION, sizeof(drvinfo->version));
- strscpy(drvinfo->fw_version, "no information",
- sizeof(drvinfo->fw_version));
- strscpy(drvinfo->bus_info, pci_name(card->pdev),
- sizeof(drvinfo->bus_info));
-}
-
-static void
-spider_net_ethtool_get_wol(struct net_device *netdev,
- struct ethtool_wolinfo *wolinfo)
-{
- /* no support for wol */
- wolinfo->supported = 0;
- wolinfo->wolopts = 0;
-}
-
-static u32
-spider_net_ethtool_get_msglevel(struct net_device *netdev)
-{
- struct spider_net_card *card;
- card = netdev_priv(netdev);
- return card->msg_enable;
-}
-
-static void
-spider_net_ethtool_set_msglevel(struct net_device *netdev,
- u32 level)
-{
- struct spider_net_card *card;
- card = netdev_priv(netdev);
- card->msg_enable = level;
-}
-
-static int
-spider_net_ethtool_nway_reset(struct net_device *netdev)
-{
- if (netif_running(netdev)) {
- spider_net_stop(netdev);
- spider_net_open(netdev);
- }
- return 0;
-}
-
-static void
-spider_net_ethtool_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ering,
- struct kernel_ethtool_ringparam *kernel_ering,
- struct netlink_ext_ack *extack)
-{
- struct spider_net_card *card = netdev_priv(netdev);
-
- ering->tx_max_pending = SPIDER_NET_TX_DESCRIPTORS_MAX;
- ering->tx_pending = card->tx_chain.num_desc;
- ering->rx_max_pending = SPIDER_NET_RX_DESCRIPTORS_MAX;
- ering->rx_pending = card->rx_chain.num_desc;
-}
-
-static int spider_net_get_sset_count(struct net_device *netdev, int sset)
-{
- switch (sset) {
- case ETH_SS_STATS:
- return ARRAY_SIZE(ethtool_stats_keys);
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static void spider_net_get_ethtool_stats(struct net_device *netdev,
- struct ethtool_stats *stats, u64 *data)
-{
- struct spider_net_card *card = netdev_priv(netdev);
-
- data[0] = netdev->stats.tx_packets;
- data[1] = netdev->stats.tx_bytes;
- data[2] = netdev->stats.rx_packets;
- data[3] = netdev->stats.rx_bytes;
- data[4] = netdev->stats.tx_errors;
- data[5] = netdev->stats.tx_dropped;
- data[6] = netdev->stats.rx_dropped;
- data[7] = card->spider_stats.rx_desc_error;
- data[8] = card->spider_stats.tx_timeouts;
- data[9] = card->spider_stats.alloc_rx_skb_error;
- data[10] = card->spider_stats.rx_iommu_map_error;
- data[11] = card->spider_stats.tx_iommu_map_error;
- data[12] = card->spider_stats.rx_desc_unk_state;
-}
-
-static void spider_net_get_strings(struct net_device *netdev, u32 stringset,
- u8 *data)
-{
- memcpy(data, ethtool_stats_keys, sizeof(ethtool_stats_keys));
-}
-
-const struct ethtool_ops spider_net_ethtool_ops = {
- .get_drvinfo = spider_net_ethtool_get_drvinfo,
- .get_wol = spider_net_ethtool_get_wol,
- .get_msglevel = spider_net_ethtool_get_msglevel,
- .set_msglevel = spider_net_ethtool_set_msglevel,
- .get_link = ethtool_op_get_link,
- .nway_reset = spider_net_ethtool_nway_reset,
- .get_ringparam = spider_net_ethtool_get_ringparam,
- .get_strings = spider_net_get_strings,
- .get_sset_count = spider_net_get_sset_count,
- .get_ethtool_stats = spider_net_get_ethtool_stats,
- .get_link_ksettings = spider_net_ethtool_get_link_ksettings,
-};
-
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index c6957e3b7f0f..7e0b3d694ac0 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -1379,7 +1379,7 @@ static int tsi108_close(struct net_device *dev)
netif_stop_queue(dev);
napi_disable(&data->napi);
- del_timer_sync(&data->timer);
+ timer_delete_sync(&data->timer);
tsi108_stop_ethernet(dev);
tsi108_kill_phy(dev);
@@ -1652,7 +1652,7 @@ regs_fail:
static void tsi108_timed_checker(struct timer_list *t)
{
- struct tsi108_prv_data *data = from_timer(data, t, timer);
+ struct tsi108_prv_data *data = timer_container_of(data, t, timer);
struct net_device *dev = data->dev;
tsi108_check_phy(dev);
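The two hunks in this file are part of a tree-wide switch to the newer timer helper names; a minimal sketch of the mapping, assuming the current kernel API names (behaviour is unchanged):

/* Old helper                          New helper
 *   del_timer_sync(&data->timer);   ->  timer_delete_sync(&data->timer);
 *   from_timer(data, t, timer);     ->  timer_container_of(data, t, timer);
 *
 * timer_delete_sync() deactivates the timer and waits for a running
 * callback to finish; timer_container_of() recovers the structure that
 * embeds the timer_list from the callback argument.
 */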
diff --git a/drivers/net/ethernet/vertexcom/mse102x.c b/drivers/net/ethernet/vertexcom/mse102x.c
index 89dc4c401a8d..a75bca1243e3 100644
--- a/drivers/net/ethernet/vertexcom/mse102x.c
+++ b/drivers/net/ethernet/vertexcom/mse102x.c
@@ -6,7 +6,9 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/if_vlan.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
@@ -15,6 +17,7 @@
#include <linux/cache.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
+#include <linux/string_choices.h>
#include <linux/spi/spi.h>
#include <linux/of_net.h>
@@ -33,7 +36,7 @@
#define CMD_CTR (0x2 << CMD_SHIFT)
#define CMD_MASK GENMASK(15, CMD_SHIFT)
-#define LEN_MASK GENMASK(CMD_SHIFT - 1, 0)
+#define LEN_MASK GENMASK(CMD_SHIFT - 2, 0)
#define DET_CMD_LEN 4
#define DET_SOF_LEN 2
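For the LEN_MASK change above, a small sketch of what GENMASK() produces, assuming CMD_SHIFT is 12 as in the upstream driver (the asserts are illustrative):

#include <linux/bits.h>
#include <linux/build_bug.h>

/* GENMASK(h, l) sets bits h..l inclusive, so the hunk narrows the
 * length field by one bit: bit 11 of the command word is no longer
 * interpreted as part of the frame length.
 */
static_assert(GENMASK(11, 0) == 0x0fff);	/* old LEN_MASK, CMD_SHIFT - 1 */
static_assert(GENMASK(10, 0) == 0x07ff);	/* new LEN_MASK, CMD_SHIFT - 2 */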
@@ -44,7 +47,6 @@
struct mse102x_stats {
u64 xfer_err;
- u64 invalid_cmd;
u64 invalid_ctr;
u64 invalid_dft;
u64 invalid_len;
@@ -55,7 +57,6 @@ struct mse102x_stats {
static const char mse102x_gstrings_stats[][ETH_GSTRING_LEN] = {
"SPI transfer errors",
- "Invalid command",
"Invalid CTR",
"Invalid DFT",
"Invalid frame length",
@@ -84,6 +85,8 @@ struct mse102x_net_spi {
struct spi_message spi_msg;
struct spi_transfer spi_xfer;
+ bool valid_cmd_received;
+
#ifdef CONFIG_DEBUG_FS
struct dentry *device_root;
#endif
@@ -97,16 +100,18 @@ static int mse102x_info_show(struct seq_file *s, void *what)
{
struct mse102x_net_spi *mses = s->private;
- seq_printf(s, "TX ring size : %u\n",
+ seq_printf(s, "TX ring size : %u\n",
skb_queue_len(&mses->mse102x.txq));
- seq_printf(s, "IRQ : %d\n",
+ seq_printf(s, "IRQ : %d\n",
mses->spidev->irq);
- seq_printf(s, "SPI effective speed : %lu\n",
+ seq_printf(s, "SPI effective speed : %lu\n",
(unsigned long)mses->spi_xfer.effective_speed_hz);
- seq_printf(s, "SPI mode : %x\n",
+ seq_printf(s, "SPI mode : %x\n",
mses->spidev->mode);
+ seq_printf(s, "Received valid CMD once : %s\n",
+ str_yes_no(mses->valid_cmd_received));
return 0;
}
@@ -193,10 +198,10 @@ static int mse102x_rx_cmd_spi(struct mse102x_net *mse, u8 *rxb)
} else if (*cmd != cpu_to_be16(DET_CMD)) {
net_dbg_ratelimited("%s: Unexpected response (0x%04x)\n",
__func__, *cmd);
- mse->stats.invalid_cmd++;
ret = -EIO;
} else {
memcpy(rxb, trx + 2, 2);
+ mses->valid_cmd_received = true;
}
return ret;
@@ -262,7 +267,7 @@ static int mse102x_tx_frame_spi(struct mse102x_net *mse, struct sk_buff *txp,
}
static int mse102x_rx_frame_spi(struct mse102x_net *mse, u8 *buff,
- unsigned int frame_len)
+ unsigned int frame_len, bool drop)
{
struct mse102x_net_spi *mses = to_mse102x_spi(mse);
struct spi_transfer *xfer = &mses->spi_xfer;
@@ -280,6 +285,9 @@ static int mse102x_rx_frame_spi(struct mse102x_net *mse, u8 *buff,
netdev_err(mse->ndev, "%s: spi_sync() failed: %d\n",
__func__, ret);
mse->stats.xfer_err++;
+ } else if (drop) {
+ netdev_dbg(mse->ndev, "%s: Drop frame\n", __func__);
+ ret = -EINVAL;
} else if (*sof != cpu_to_be16(DET_SOF)) {
netdev_dbg(mse->ndev, "%s: SPI start of frame is invalid (0x%04x)\n",
__func__, *sof);
@@ -302,61 +310,60 @@ static void mse102x_dump_packet(const char *msg, int len, const char *data)
data, len, true);
}
-static void mse102x_rx_pkt_spi(struct mse102x_net *mse)
+static irqreturn_t mse102x_rx_pkt_spi(struct mse102x_net *mse)
{
struct sk_buff *skb;
unsigned int rxalign;
unsigned int rxlen;
+ bool drop = false;
__be16 rx = 0;
u16 cmd_resp;
u8 *rxpkt;
- int ret;
mse102x_tx_cmd_spi(mse, CMD_CTR);
- ret = mse102x_rx_cmd_spi(mse, (u8 *)&rx);
- cmd_resp = be16_to_cpu(rx);
-
- if (ret || ((cmd_resp & CMD_MASK) != CMD_RTS)) {
+ if (mse102x_rx_cmd_spi(mse, (u8 *)&rx)) {
usleep_range(50, 100);
+ return IRQ_NONE;
+ }
- mse102x_tx_cmd_spi(mse, CMD_CTR);
- ret = mse102x_rx_cmd_spi(mse, (u8 *)&rx);
- if (ret)
- return;
-
- cmd_resp = be16_to_cpu(rx);
- if ((cmd_resp & CMD_MASK) != CMD_RTS) {
- net_dbg_ratelimited("%s: Unexpected response (0x%04x)\n",
- __func__, cmd_resp);
- mse->stats.invalid_rts++;
- return;
- }
-
- net_dbg_ratelimited("%s: Unexpected response to first CMD\n",
- __func__);
+ cmd_resp = be16_to_cpu(rx);
+ if ((cmd_resp & CMD_MASK) != CMD_RTS) {
+ net_dbg_ratelimited("%s: Unexpected response (0x%04x)\n",
+ __func__, cmd_resp);
+ mse->stats.invalid_rts++;
+ drop = true;
+ goto drop;
}
rxlen = cmd_resp & LEN_MASK;
- if (!rxlen) {
- net_dbg_ratelimited("%s: No frame length defined\n", __func__);
+ if (rxlen < ETH_ZLEN || rxlen > VLAN_ETH_FRAME_LEN) {
+ net_dbg_ratelimited("%s: Invalid frame length: %d\n", __func__,
+ rxlen);
mse->stats.invalid_len++;
- return;
+ drop = true;
}
+	/* In case of an invalid CMD_RTS, the frame must be consumed anyway.
+ * So assume the maximum possible frame length.
+ */
+drop:
+ if (drop)
+ rxlen = VLAN_ETH_FRAME_LEN;
+
rxalign = ALIGN(rxlen + DET_SOF_LEN + DET_DFT_LEN, 4);
skb = netdev_alloc_skb_ip_align(mse->ndev, rxalign);
if (!skb)
- return;
+ return IRQ_NONE;
/* 2 bytes Start of frame (before ethernet header)
* 2 bytes Data frame tail (after ethernet frame)
* They are copied, but ignored.
*/
rxpkt = skb_put(skb, rxlen) - DET_SOF_LEN;
- if (mse102x_rx_frame_spi(mse, rxpkt, rxlen)) {
+ if (mse102x_rx_frame_spi(mse, rxpkt, rxlen, drop)) {
mse->ndev->stats.rx_errors++;
dev_kfree_skb(skb);
- return;
+ return IRQ_HANDLED;
}
if (netif_msg_pktdata(mse))
@@ -367,6 +374,8 @@ static void mse102x_rx_pkt_spi(struct mse102x_net *mse)
mse->ndev->stats.rx_packets++;
mse->ndev->stats.rx_bytes += rxlen;
+
+ return IRQ_HANDLED;
}
static int mse102x_tx_pkt_spi(struct mse102x_net *mse, struct sk_buff *txb,
@@ -498,19 +507,36 @@ static irqreturn_t mse102x_irq(int irq, void *_mse)
{
struct mse102x_net *mse = _mse;
struct mse102x_net_spi *mses = to_mse102x_spi(mse);
+ irqreturn_t ret;
mutex_lock(&mses->lock);
- mse102x_rx_pkt_spi(mse);
+ ret = mse102x_rx_pkt_spi(mse);
mutex_unlock(&mses->lock);
- return IRQ_HANDLED;
+ return ret;
}
static int mse102x_net_open(struct net_device *ndev)
{
+ struct irq_data *irq_data = irq_get_irq_data(ndev->irq);
struct mse102x_net *mse = netdev_priv(ndev);
+ struct mse102x_net_spi *mses = to_mse102x_spi(mse);
int ret;
+ if (!irq_data) {
+ netdev_err(ndev, "Invalid IRQ: %d\n", ndev->irq);
+ return -EINVAL;
+ }
+
+ switch (irqd_get_trigger_type(irq_data)) {
+ case IRQ_TYPE_LEVEL_HIGH:
+ case IRQ_TYPE_LEVEL_LOW:
+ break;
+ default:
+ netdev_warn_once(ndev, "Only IRQ type level recommended, please update your device tree firmware.\n");
+ break;
+ }
+
ret = request_threaded_irq(ndev->irq, NULL, mse102x_irq, IRQF_ONESHOT,
ndev->name, mse);
if (ret < 0) {
@@ -524,6 +550,14 @@ static int mse102x_net_open(struct net_device *ndev)
netif_carrier_on(ndev);
+	/* The SPI interrupt can get stuck in case of pending packet(s).
+ * So poll for possible packet(s) to re-arm the interrupt.
+ */
+ mutex_lock(&mses->lock);
+ if (mse102x_rx_pkt_spi(mse) == IRQ_NONE)
+ mse102x_rx_pkt_spi(mse);
+ mutex_unlock(&mses->lock);
+
netif_dbg(mse, ifup, ndev, "network device up\n");
return 0;
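The conversion above makes the RX routine report whether it actually consumed an event, and open() now polls once under the lock so a frame that arrived before the IRQ was requested cannot leave the line stuck. A generic sketch of that return-value contract for a threaded handler (names are illustrative):

#include <linux/interrupt.h>

struct example_priv { int dummy; };

static bool example_rx_one(struct example_priv *priv);

/* Sketch: return IRQ_HANDLED only when a frame was really consumed,
 * IRQ_NONE otherwise, so the core can detect spurious or stuck lines.
 */
static irqreturn_t example_thread_fn(int irq, void *dev_id)
{
	struct example_priv *priv = dev_id;

	return example_rx_one(priv) ? IRQ_HANDLED : IRQ_NONE;
}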
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index 894911f3d560..e56ebbdd428d 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -1568,7 +1568,7 @@ static void init_registers(struct net_device *dev)
if (rp->quirks & rqMgmt)
rhine_init_cam_filter(dev);
- napi_enable(&rp->napi);
+ napi_enable_locked(&rp->napi);
iowrite16(RHINE_EVENT & 0xffff, ioaddr + IntrEnable);
@@ -1696,7 +1696,10 @@ static int rhine_open(struct net_device *dev)
rhine_power_init(dev);
rhine_chip_reset(dev);
rhine_task_enable(rp);
+
+ netdev_lock(dev);
init_registers(dev);
+ netdev_unlock(dev);
netif_dbg(rp, ifup, dev, "%s() Done - status %04x MII status: %04x\n",
__func__, ioread16(ioaddr + ChipCmd),
@@ -1727,6 +1730,8 @@ static void rhine_reset_task(struct work_struct *work)
napi_disable(&rp->napi);
netif_tx_disable(dev);
+
+ netdev_lock(dev);
spin_lock_bh(&rp->lock);
/* clear all descriptors */
@@ -1740,6 +1745,7 @@ static void rhine_reset_task(struct work_struct *work)
init_registers(dev);
spin_unlock_bh(&rp->lock);
+ netdev_unlock(dev);
netif_trans_update(dev); /* prevent tx timeout */
dev->stats.tx_errors++;
@@ -2541,9 +2547,12 @@ static int rhine_resume(struct device *device)
alloc_tbufs(dev);
rhine_reset_rbufs(rp);
rhine_task_enable(rp);
+
+ netdev_lock(dev);
spin_lock_bh(&rp->lock);
init_registers(dev);
spin_unlock_bh(&rp->lock);
+ netdev_unlock(dev);
netif_device_attach(dev);
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index dd4a07c97eee..5aa93144a4f5 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -2320,7 +2320,8 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu)
if (ret < 0)
goto out_free_tmp_vptr_1;
- napi_disable(&vptr->napi);
+ netdev_lock(dev);
+ napi_disable_locked(&vptr->napi);
spin_lock_irqsave(&vptr->lock, flags);
@@ -2342,12 +2343,13 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu)
velocity_give_many_rx_descs(vptr);
- napi_enable(&vptr->napi);
+ napi_enable_locked(&vptr->napi);
mac_enable_int(vptr->mac_regs);
netif_start_queue(dev);
spin_unlock_irqrestore(&vptr->lock, flags);
+ netdev_unlock(dev);
velocity_free_rings(tmp_vptr);
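The netdev_lock()/napi_enable_locked() pairs added in the via-rhine and via-velocity hunks follow the netdev instance-lock convention; a minimal sketch of the calling pattern assumed from those hunks, with illustrative names:

#include <linux/netdevice.h>

/* Sketch: the *_locked NAPI helpers expect the caller to already hold
 * the per-device instance lock taken by netdev_lock().
 */
static void example_up(struct net_device *dev, struct napi_struct *napi)
{
	netdev_lock(dev);
	napi_enable_locked(napi);
	/* ... program registers while the instance lock is held ... */
	netdev_unlock(dev);
}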
diff --git a/drivers/net/ethernet/wangxun/Kconfig b/drivers/net/ethernet/wangxun/Kconfig
index e46ccebcfd22..e5fc942c28cc 100644
--- a/drivers/net/ethernet/wangxun/Kconfig
+++ b/drivers/net/ethernet/wangxun/Kconfig
@@ -18,6 +18,7 @@ if NET_VENDOR_WANGXUN
config LIBWX
tristate
+ depends on PTP_1588_CLOCK_OPTIONAL
select PAGE_POOL
help
Common library for Wangxun(R) Ethernet drivers.
@@ -25,6 +26,7 @@ config LIBWX
config NGBE
tristate "Wangxun(R) GbE PCI Express adapters support"
depends on PCI
+ depends on PTP_1588_CLOCK_OPTIONAL
select LIBWX
select PHYLINK
help
@@ -38,10 +40,11 @@ config NGBE
will be called ngbe.
config TXGBE
- tristate "Wangxun(R) 10GbE PCI Express adapters support"
+ tristate "Wangxun(R) 10/25/40GbE PCI Express adapters support"
depends on PCI
depends on COMMON_CLK
depends on I2C_DESIGNWARE_PLATFORM
+ depends on PTP_1588_CLOCK_OPTIONAL
select MARVELL_10G_PHY
select REGMAP
select PHYLINK
@@ -52,7 +55,7 @@ config TXGBE
select PCS_XPCS
select LIBWX
help
- This driver supports Wangxun(R) 10GbE PCI Express family of
+ This driver supports Wangxun(R) 10/25/40GbE PCI Express family of
adapters.
More specific information on configuring the driver is in
diff --git a/drivers/net/ethernet/wangxun/libwx/Makefile b/drivers/net/ethernet/wangxun/libwx/Makefile
index 42ccd6e4052e..9b78b604a94e 100644
--- a/drivers/net/ethernet/wangxun/libwx/Makefile
+++ b/drivers/net/ethernet/wangxun/libwx/Makefile
@@ -4,4 +4,4 @@
obj-$(CONFIG_LIBWX) += libwx.o
-libwx-objs := wx_hw.o wx_lib.o wx_ethtool.o
+libwx-objs := wx_hw.o wx_lib.o wx_ethtool.o wx_ptp.o wx_mbx.o wx_sriov.o
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c
index abe5921dde02..c12a4cb951f6 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c
@@ -41,6 +41,9 @@ static const struct wx_stats wx_gstrings_stats[] = {
WX_STAT("rx_csum_offload_good_count", hw_csum_rx_good),
WX_STAT("rx_csum_offload_errors", hw_csum_rx_error),
WX_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed),
+ WX_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
+ WX_STAT("tx_hwtstamp_skipped", tx_hwtstamp_skipped),
+ WX_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
};
static const struct wx_stats wx_gstrings_fdir_stats[] = {
@@ -69,7 +72,7 @@ int wx_get_sset_count(struct net_device *netdev, int sset)
switch (sset) {
case ETH_SS_STATS:
- return (wx->mac.type == wx_mac_sp) ?
+ return (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)) ?
WX_STATS_LEN + WX_FDIR_STATS_LEN : WX_STATS_LEN;
default:
return -EOPNOTSUPP;
@@ -87,7 +90,7 @@ void wx_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
case ETH_SS_STATS:
for (i = 0; i < WX_GLOBAL_STATS_LEN; i++)
ethtool_puts(&p, wx_gstrings_stats[i].stat_string);
- if (wx->mac.type == wx_mac_sp) {
+ if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)) {
for (i = 0; i < WX_FDIR_STATS_LEN; i++)
ethtool_puts(&p, wx_gstrings_fdir_stats[i].stat_string);
}
@@ -121,7 +124,7 @@ void wx_get_ethtool_stats(struct net_device *netdev,
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
- if (wx->mac.type == wx_mac_sp) {
+ if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)) {
for (k = 0; k < WX_FDIR_STATS_LEN; k++) {
p = (char *)wx + wx_gstrings_fdir_stats[k].stat_offset;
data[i++] = *(u64 *)p;
@@ -196,7 +199,7 @@ void wx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info)
unsigned int stats_len = WX_STATS_LEN;
struct wx *wx = netdev_priv(netdev);
- if (wx->mac.type == wx_mac_sp)
+ if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags))
stats_len += WX_FDIR_STATS_LEN;
strscpy(info->driver, wx->driver_name, sizeof(info->driver));
@@ -216,6 +219,9 @@ int wx_nway_reset(struct net_device *netdev)
{
struct wx *wx = netdev_priv(netdev);
+ if (wx->mac.type == wx_mac_aml40)
+ return -EOPNOTSUPP;
+
return phylink_ethtool_nway_reset(wx->phylink);
}
EXPORT_SYMBOL(wx_nway_reset);
@@ -234,6 +240,9 @@ int wx_set_link_ksettings(struct net_device *netdev,
{
struct wx *wx = netdev_priv(netdev);
+ if (wx->mac.type == wx_mac_aml40)
+ return -EOPNOTSUPP;
+
return phylink_ethtool_ksettings_set(wx->phylink, cmd);
}
EXPORT_SYMBOL(wx_set_link_ksettings);
@@ -243,6 +252,9 @@ void wx_get_pauseparam(struct net_device *netdev,
{
struct wx *wx = netdev_priv(netdev);
+ if (wx->mac.type == wx_mac_aml40)
+ return;
+
phylink_ethtool_get_pauseparam(wx->phylink, pause);
}
EXPORT_SYMBOL(wx_get_pauseparam);
@@ -252,6 +264,9 @@ int wx_set_pauseparam(struct net_device *netdev,
{
struct wx *wx = netdev_priv(netdev);
+ if (wx->mac.type == wx_mac_aml40)
+ return -EOPNOTSUPP;
+
return phylink_ethtool_set_pauseparam(wx->phylink, pause);
}
EXPORT_SYMBOL(wx_set_pauseparam);
@@ -322,10 +337,18 @@ int wx_set_coalesce(struct net_device *netdev,
if (ec->tx_max_coalesced_frames_irq)
wx->tx_work_limit = ec->tx_max_coalesced_frames_irq;
- if (wx->mac.type == wx_mac_sp)
+ switch (wx->mac.type) {
+ case wx_mac_sp:
max_eitr = WX_SP_MAX_EITR;
- else
+ break;
+ case wx_mac_aml:
+ case wx_mac_aml40:
+ max_eitr = WX_AML_MAX_EITR;
+ break;
+ default:
max_eitr = WX_EM_MAX_EITR;
+ break;
+ }
if ((ec->rx_coalesce_usecs > (max_eitr >> 2)) ||
(ec->tx_coalesce_usecs > (max_eitr >> 2)))
@@ -347,10 +370,16 @@ int wx_set_coalesce(struct net_device *netdev,
wx->tx_itr_setting = ec->tx_coalesce_usecs;
if (wx->tx_itr_setting == 1) {
- if (wx->mac.type == wx_mac_sp)
+ switch (wx->mac.type) {
+ case wx_mac_sp:
+ case wx_mac_aml:
+ case wx_mac_aml40:
tx_itr_param = WX_12K_ITR;
- else
+ break;
+ default:
tx_itr_param = WX_20K_ITR;
+ break;
+ }
} else {
tx_itr_param = wx->tx_itr_setting;
}
@@ -383,7 +412,7 @@ static unsigned int wx_max_channels(struct wx *wx)
max_combined = 1;
} else {
/* support up to max allowed queues with RSS */
- if (wx->mac.type == wx_mac_sp)
+ if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags))
max_combined = 63;
else
max_combined = 8;
@@ -452,3 +481,53 @@ void wx_set_msglevel(struct net_device *netdev, u32 data)
wx->msg_enable = data;
}
EXPORT_SYMBOL(wx_set_msglevel);
+
+int wx_get_ts_info(struct net_device *dev,
+ struct kernel_ethtool_ts_info *info)
+{
+ struct wx *wx = netdev_priv(dev);
+
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ if (wx->ptp_clock)
+ info->phc_index = ptp_clock_index(wx->ptp_clock);
+ else
+ info->phc_index = -1;
+
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) |
+ BIT(HWTSTAMP_TX_ON);
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_get_ts_info);
+
+void wx_get_ptp_stats(struct net_device *dev,
+ struct ethtool_ts_stats *ts_stats)
+{
+ struct wx *wx = netdev_priv(dev);
+
+ if (wx->ptp_clock) {
+ ts_stats->pkts = wx->tx_hwtstamp_pkts;
+ ts_stats->lost = wx->tx_hwtstamp_timeouts +
+ wx->tx_hwtstamp_skipped +
+ wx->rx_hwtstamp_cleared;
+ ts_stats->err = wx->tx_hwtstamp_errors;
+ }
+}
+EXPORT_SYMBOL(wx_get_ptp_stats);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h
index 600c3b597d1a..9e002e699eca 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h
@@ -40,4 +40,8 @@ int wx_set_channels(struct net_device *dev,
struct ethtool_channels *ch);
u32 wx_get_msglevel(struct net_device *netdev);
void wx_set_msglevel(struct net_device *netdev, u32 data);
+int wx_get_ts_info(struct net_device *dev,
+ struct kernel_ethtool_ts_info *info);
+void wx_get_ptp_stats(struct net_device *dev,
+ struct ethtool_ts_stats *ts_stats);
#endif /* _WX_ETHTOOL_H_ */
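A hedged sketch of how a libwx consumer might wire up the two new exports; the .get_ts_stats member name is assumed from the wx_get_ptp_stats() signature above, and example_ethtool_ops is an illustrative name:

#include <linux/ethtool.h>
#include "wx_ethtool.h"

static const struct ethtool_ops example_ethtool_ops = {
	/* ... existing callbacks ... */
	.get_ts_info	= wx_get_ts_info,
	.get_ts_stats	= wx_get_ptp_stats,
};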
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
index 1bf9c38e4125..0f4be72116b8 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
@@ -10,6 +10,7 @@
#include "wx_type.h"
#include "wx_lib.h"
+#include "wx_sriov.h"
#include "wx_hw.h"
static int wx_phy_read_reg_mdi(struct mii_bus *bus, int phy_addr, int devnum, int regnum)
@@ -112,7 +113,7 @@ static void wx_intr_disable(struct wx *wx, u64 qmask)
if (mask)
wr32(wx, WX_PX_IMS(0), mask);
- if (wx->mac.type == wx_mac_sp) {
+ if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
mask = (qmask >> 32);
if (mask)
wr32(wx, WX_PX_IMS(1), mask);
@@ -126,7 +127,8 @@ void wx_intr_enable(struct wx *wx, u64 qmask)
mask = (qmask & U32_MAX);
if (mask)
wr32(wx, WX_PX_IMC(0), mask);
- if (wx->mac.type == wx_mac_sp) {
+
+ if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
mask = (qmask >> 32);
if (mask)
wr32(wx, WX_PX_IMC(1), mask);
@@ -278,22 +280,8 @@ static int wx_acquire_sw_sync(struct wx *wx, u32 mask)
return ret;
}
-/**
- * wx_host_interface_command - Issue command to manageability block
- * @wx: pointer to the HW structure
- * @buffer: contains the command to write and where the return status will
- * be placed
- * @length: length of buffer, must be multiple of 4 bytes
- * @timeout: time in ms to wait for command completion
- * @return_data: read and return data from the buffer (true) or not (false)
- * Needed because FW structures are big endian and decoding of
- * these fields can be 8 bit or 16 bit based on command. Decoding
- * is not easily understood without making a table of commands.
- * So we will leave this up to the caller to read back the data
- * in these cases.
- **/
-int wx_host_interface_command(struct wx *wx, u32 *buffer,
- u32 length, u32 timeout, bool return_data)
+static int wx_host_interface_command_s(struct wx *wx, u32 *buffer,
+ u32 length, u32 timeout, bool return_data)
{
u32 hdr_size = sizeof(struct wx_hic_hdr);
u32 hicr, i, bi, buf[64] = {};
@@ -301,22 +289,10 @@ int wx_host_interface_command(struct wx *wx, u32 *buffer,
u32 dword_len;
u16 buf_len;
- if (length == 0 || length > WX_HI_MAX_BLOCK_BYTE_LENGTH) {
- wx_err(wx, "Buffer length failure buffersize=%d.\n", length);
- return -EINVAL;
- }
-
status = wx_acquire_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_MB);
if (status != 0)
return status;
- /* Calculate length in DWORDs. We must be DWORD aligned */
- if ((length % (sizeof(u32))) != 0) {
- wx_err(wx, "Buffer length failure, not aligned to dword");
- status = -EINVAL;
- goto rel_out;
- }
-
dword_len = length >> 2;
/* The device driver writes the relevant command block
@@ -334,27 +310,25 @@ int wx_host_interface_command(struct wx *wx, u32 *buffer,
status = read_poll_timeout(rd32, hicr, hicr & WX_MNG_MBOX_CTL_FWRDY, 1000,
timeout * 1000, false, wx, WX_MNG_MBOX_CTL);
+ buf[0] = rd32(wx, WX_MNG_MBOX);
+ if ((buf[0] & 0xff0000) >> 16 == 0x80) {
+ wx_err(wx, "Unknown FW command: 0x%x\n", buffer[0] & 0xff);
+ status = -EINVAL;
+ goto rel_out;
+ }
+
/* Check command completion */
if (status) {
- wx_dbg(wx, "Command has failed with no status valid.\n");
-
- buf[0] = rd32(wx, WX_MNG_MBOX);
- if ((buffer[0] & 0xff) != (~buf[0] >> 24)) {
- status = -EINVAL;
- goto rel_out;
- }
- if ((buf[0] & 0xff0000) >> 16 == 0x80) {
- wx_dbg(wx, "It's unknown cmd.\n");
- status = -EINVAL;
- goto rel_out;
- }
-
+ wx_err(wx, "Command has failed with no status valid.\n");
wx_dbg(wx, "write value:\n");
for (i = 0; i < dword_len; i++)
wx_dbg(wx, "%x ", buffer[i]);
wx_dbg(wx, "read value:\n");
for (i = 0; i < dword_len; i++)
wx_dbg(wx, "%x ", buf[i]);
+ wx_dbg(wx, "\ncheck: %x %x\n", buffer[0] & 0xff, ~buf[0] >> 24);
+
+ goto rel_out;
}
if (!return_data)
@@ -393,8 +367,166 @@ rel_out:
wx_release_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_MB);
return status;
}
+
+static bool wx_poll_fw_reply(struct wx *wx, u32 *buffer, u8 send_cmd)
+{
+ u32 dword_len = sizeof(struct wx_hic_hdr) >> 2;
+ struct wx_hic_hdr *recv_hdr;
+ u32 i;
+
+ /* read hdr */
+ for (i = 0; i < dword_len; i++) {
+ buffer[i] = rd32a(wx, WX_FW2SW_MBOX, i);
+ le32_to_cpus(&buffer[i]);
+ }
+
+ /* check hdr */
+ recv_hdr = (struct wx_hic_hdr *)buffer;
+ if (recv_hdr->cmd == send_cmd &&
+ recv_hdr->index == wx->swfw_index)
+ return true;
+
+ return false;
+}
+
+static int wx_host_interface_command_r(struct wx *wx, u32 *buffer,
+ u32 length, u32 timeout, bool return_data)
+{
+ struct wx_hic_hdr *hdr = (struct wx_hic_hdr *)buffer;
+ u32 hdr_size = sizeof(struct wx_hic_hdr);
+ bool busy, reply;
+ u32 dword_len;
+ u16 buf_len;
+ int err = 0;
+ u8 send_cmd;
+ u32 i;
+
+ /* wait to get lock */
+ might_sleep();
+ err = read_poll_timeout(test_and_set_bit, busy, !busy, 1000, timeout * 1000,
+ false, WX_STATE_SWFW_BUSY, wx->state);
+ if (err)
+ return err;
+
+ /* index to unique seq id for each mbox message */
+ hdr->index = wx->swfw_index;
+ send_cmd = hdr->cmd;
+
+ dword_len = length >> 2;
+ /* write data to SW-FW mbox array */
+ for (i = 0; i < dword_len; i++) {
+ wr32a(wx, WX_SW2FW_MBOX, i, (__force u32)cpu_to_le32(buffer[i]));
+ /* write flush */
+ rd32a(wx, WX_SW2FW_MBOX, i);
+ }
+
+ /* generate interrupt to notify FW */
+ wr32m(wx, WX_SW2FW_MBOX_CMD, WX_SW2FW_MBOX_CMD_VLD, 0);
+ wr32m(wx, WX_SW2FW_MBOX_CMD, WX_SW2FW_MBOX_CMD_VLD, WX_SW2FW_MBOX_CMD_VLD);
+
+ /* polling reply from FW */
+ err = read_poll_timeout(wx_poll_fw_reply, reply, reply, 2000,
+ timeout * 1000, true, wx, buffer, send_cmd);
+ if (err) {
+ wx_err(wx, "Polling from FW messages timeout, cmd: 0x%x, index: %d\n",
+ send_cmd, wx->swfw_index);
+ goto rel_out;
+ }
+
+ if (hdr->cmd_or_resp.ret_status == 0x80) {
+ wx_err(wx, "Unknown FW command: 0x%x\n", send_cmd);
+ err = -EINVAL;
+ goto rel_out;
+ }
+
+ /* expect no reply from FW then return */
+ if (!return_data)
+ goto rel_out;
+
+ /* If there is anything in the data area, pull it in */
+ buf_len = hdr->buf_len;
+ if (buf_len == 0)
+ goto rel_out;
+
+ if (length < buf_len + hdr_size) {
+ wx_err(wx, "Buffer not large enough for reply message.\n");
+ err = -EFAULT;
+ goto rel_out;
+ }
+
+ /* Calculate length in DWORDs, add 3 for odd lengths */
+ dword_len = (buf_len + 3) >> 2;
+ for (i = hdr_size >> 2; i <= dword_len; i++) {
+ buffer[i] = rd32a(wx, WX_FW2SW_MBOX, i);
+ le32_to_cpus(&buffer[i]);
+ }
+
+rel_out:
+ /* advance index; it takes the place of wx_hic_hdr.checksum */
+ if (wx->swfw_index == WX_HIC_HDR_INDEX_MAX)
+ wx->swfw_index = 0;
+ else
+ wx->swfw_index++;
+
+ clear_bit(WX_STATE_SWFW_BUSY, wx->state);
+ return err;
+}
+
+/**
+ * wx_host_interface_command - Issue command to manageability block
+ * @wx: pointer to the HW structure
+ * @buffer: contains the command to write and where the return status will
+ * be placed
+ * @length: length of buffer, must be multiple of 4 bytes
+ * @timeout: time in ms to wait for command completion
+ * @return_data: read and return data from the buffer (true) or not (false)
+ * Needed because FW structures are big endian and decoding of
+ * these fields can be 8 bit or 16 bit based on command. Decoding
+ * is not easily understood without making a table of commands.
+ * So we will leave this up to the caller to read back the data
+ * in these cases.
+ **/
+int wx_host_interface_command(struct wx *wx, u32 *buffer,
+ u32 length, u32 timeout, bool return_data)
+{
+ if (length == 0 || length > WX_HI_MAX_BLOCK_BYTE_LENGTH) {
+ wx_err(wx, "Buffer length failure buffersize=%d.\n", length);
+ return -EINVAL;
+ }
+
+ /* Calculate length in DWORDs. We must be DWORD aligned */
+ if ((length % (sizeof(u32))) != 0) {
+ wx_err(wx, "Buffer length failure, not aligned to dword");
+ return -EINVAL;
+ }
+
+ if (test_bit(WX_FLAG_SWFW_RING, wx->flags))
+ return wx_host_interface_command_r(wx, buffer, length,
+ timeout, return_data);
+
+ return wx_host_interface_command_s(wx, buffer, length, timeout, return_data);
+}
EXPORT_SYMBOL(wx_host_interface_command);
+int wx_set_pps(struct wx *wx, bool enable, u64 nsec, u64 cycles)
+{
+ struct wx_hic_set_pps pps_cmd;
+
+ pps_cmd.hdr.cmd = FW_PPS_SET_CMD;
+ pps_cmd.hdr.buf_len = FW_PPS_SET_LEN;
+ pps_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
+ pps_cmd.lan_id = wx->bus.func;
+ pps_cmd.enable = (u8)enable;
+ pps_cmd.nsec = nsec;
+ pps_cmd.cycles = cycles;
+ pps_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
+
+ return wx_host_interface_command(wx, (u32 *)&pps_cmd,
+ sizeof(pps_cmd),
+ WX_HI_COMMAND_TIMEOUT,
+ false);
+}
+
/**
* wx_read_ee_hostif_data - Read EEPROM word using a host interface cmd
* assuming that the semaphore is already obtained.
@@ -425,7 +557,10 @@ static int wx_read_ee_hostif_data(struct wx *wx, u16 offset, u16 *data)
if (status != 0)
return status;
- *data = (u16)rd32a(wx, WX_MNG_MBOX, FW_NVM_DATA_OFFSET);
+ if (!test_bit(WX_FLAG_SWFW_RING, wx->flags))
+ *data = (u16)rd32a(wx, WX_MNG_MBOX, FW_NVM_DATA_OFFSET);
+ else
+ *data = (u16)rd32a(wx, WX_FW2SW_MBOX, FW_NVM_DATA_OFFSET);
return status;
}
@@ -469,6 +604,7 @@ int wx_read_ee_hostif_buffer(struct wx *wx,
u16 words_to_read;
u32 value = 0;
int status;
+ u32 mbox;
u32 i;
/* Take semaphore for the entire operation. */
@@ -501,8 +637,12 @@ int wx_read_ee_hostif_buffer(struct wx *wx,
goto out;
}
+ if (!test_bit(WX_FLAG_SWFW_RING, wx->flags))
+ mbox = WX_MNG_MBOX;
+ else
+ mbox = WX_FW2SW_MBOX;
for (i = 0; i < words_to_read; i++) {
- u32 reg = WX_MNG_MBOX + (FW_NVM_DATA_OFFSET << 2) + 2 * i;
+ u32 reg = mbox + (FW_NVM_DATA_OFFSET << 2) + 2 * i;
value = rd32(wx, reg);
data[current_word] = (u16)(value & 0xffff);
@@ -552,12 +692,18 @@ void wx_init_eeprom_params(struct wx *wx)
}
}
- if (wx->mac.type == wx_mac_sp) {
+ switch (wx->mac.type) {
+ case wx_mac_sp:
+ case wx_mac_aml:
+ case wx_mac_aml40:
if (wx_read_ee_hostif(wx, WX_SW_REGION_PTR, &data)) {
wx_err(wx, "NVM Read Error\n");
return;
}
data = data >> 1;
+ break;
+ default:
+ break;
}
eeprom->sw_region_offset = data;
@@ -618,7 +764,8 @@ static int wx_set_rar(struct wx *wx, u32 index, u8 *addr, u64 pools,
/* setup VMDq pool mapping */
wr32(wx, WX_PSR_MAC_SWC_VM_L, pools & 0xFFFFFFFF);
- if (wx->mac.type == wx_mac_sp)
+
+ if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags))
wr32(wx, WX_PSR_MAC_SWC_VM_H, pools >> 32);
/* HW expects these in little endian so we reverse the byte
@@ -757,7 +904,7 @@ void wx_init_rx_addrs(struct wx *wx)
wx_set_rar(wx, 0, wx->mac.addr, 0, WX_PSR_MAC_SWC_AD_H_AV);
- if (wx->mac.type == wx_mac_sp) {
+ if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
/* clear VMDq pool/queue selection for RAR 0 */
wx_clear_vmdq(wx, 0, WX_CLEAR_VMDQ_ALL);
}
@@ -804,11 +951,28 @@ static void wx_sync_mac_table(struct wx *wx)
}
}
+static void wx_full_sync_mac_table(struct wx *wx)
+{
+ int i;
+
+ for (i = 0; i < wx->mac.num_rar_entries; i++) {
+ if (wx->mac_table[i].state & WX_MAC_STATE_IN_USE) {
+ wx_set_rar(wx, i,
+ wx->mac_table[i].addr,
+ wx->mac_table[i].pools,
+ WX_PSR_MAC_SWC_AD_H_AV);
+ } else {
+ wx_clear_rar(wx, i);
+ }
+ wx->mac_table[i].state &= ~(WX_MAC_STATE_MODIFIED);
+ }
+}
+
/* this function destroys the first RAR entry */
void wx_mac_set_default_filter(struct wx *wx, u8 *addr)
{
memcpy(&wx->mac_table[0].addr, addr, ETH_ALEN);
- wx->mac_table[0].pools = 1ULL;
+ wx->mac_table[0].pools = BIT(VMDQ_P(0));
wx->mac_table[0].state = (WX_MAC_STATE_DEFAULT | WX_MAC_STATE_IN_USE);
wx_set_rar(wx, 0, wx->mac_table[0].addr,
wx->mac_table[0].pools,
@@ -833,7 +997,7 @@ void wx_flush_sw_mac_table(struct wx *wx)
}
EXPORT_SYMBOL(wx_flush_sw_mac_table);
-static int wx_add_mac_filter(struct wx *wx, u8 *addr, u16 pool)
+int wx_add_mac_filter(struct wx *wx, u8 *addr, u16 pool)
{
u32 i;
@@ -864,7 +1028,7 @@ static int wx_add_mac_filter(struct wx *wx, u8 *addr, u16 pool)
return -ENOMEM;
}
-static int wx_del_mac_filter(struct wx *wx, u8 *addr, u16 pool)
+int wx_del_mac_filter(struct wx *wx, u8 *addr, u16 pool)
{
u32 i;
@@ -1046,6 +1210,35 @@ static void wx_update_mc_addr_list(struct wx *wx, struct net_device *netdev)
wx_dbg(wx, "Update mc addr list Complete\n");
}
+static void wx_restore_vf_multicasts(struct wx *wx)
+{
+ u32 i, j, vector_bit, vector_reg;
+ struct vf_data_storage *vfinfo;
+
+ for (i = 0; i < wx->num_vfs; i++) {
+ u32 vmolr = rd32(wx, WX_PSR_VM_L2CTL(i));
+
+ vfinfo = &wx->vfinfo[i];
+ for (j = 0; j < vfinfo->num_vf_mc_hashes; j++) {
+ wx->addr_ctrl.mta_in_use++;
+ vector_reg = WX_PSR_MC_TBL_REG(vfinfo->vf_mc_hashes[j]);
+ vector_bit = WX_PSR_MC_TBL_BIT(vfinfo->vf_mc_hashes[j]);
+ wr32m(wx, WX_PSR_MC_TBL(vector_reg),
+ BIT(vector_bit), BIT(vector_bit));
+ /* errata 5: maintain a copy of the reg table conf */
+ wx->mac.mta_shadow[vector_reg] |= BIT(vector_bit);
+ }
+ if (vfinfo->num_vf_mc_hashes)
+ vmolr |= WX_PSR_VM_L2CTL_ROMPE;
+ else
+ vmolr &= ~WX_PSR_VM_L2CTL_ROMPE;
+ wr32(wx, WX_PSR_VM_L2CTL(i), vmolr);
+ }
+
+ /* Restore any VF macvlans */
+ wx_full_sync_mac_table(wx);
+}
+
/**
* wx_write_mc_addr_list - write multicast addresses to MTA
* @netdev: network interface device structure
@@ -1063,6 +1256,9 @@ static int wx_write_mc_addr_list(struct net_device *netdev)
wx_update_mc_addr_list(wx, netdev);
+ if (test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags))
+ wx_restore_vf_multicasts(wx);
+
return netdev_mc_count(netdev);
}
@@ -1083,7 +1279,7 @@ int wx_set_mac(struct net_device *netdev, void *p)
if (retval)
return retval;
- wx_del_mac_filter(wx, wx->mac.addr, 0);
+ wx_del_mac_filter(wx, wx->mac.addr, VMDQ_P(0));
eth_hw_addr_set(netdev, addr->sa_data);
memcpy(wx->mac.addr, addr->sa_data, netdev->addr_len);
@@ -1185,6 +1381,10 @@ static int wx_hpbthresh(struct wx *wx)
/* Calculate delay value for device */
dv_id = WX_DV(link, tc);
+ /* Loopback switch introduces additional latency */
+ if (test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags))
+ dv_id += WX_B2BT(tc);
+
/* Delay value is calculated in bit times convert to KB */
kb = WX_BT2KB(dv_id);
rx_pba = rd32(wx, WX_RDB_PB_SZ(0)) >> WX_RDB_PB_SZ_SHIFT;
@@ -1240,12 +1440,107 @@ static void wx_pbthresh_setup(struct wx *wx)
wx->fc.low_water = 0;
}
+static void wx_set_ethertype_anti_spoofing(struct wx *wx, bool enable, int vf)
+{
+ u32 pfvfspoof, reg_offset, vf_shift;
+
+ vf_shift = WX_VF_IND_SHIFT(vf);
+ reg_offset = WX_VF_REG_OFFSET(vf);
+
+ pfvfspoof = rd32(wx, WX_TDM_ETYPE_AS(reg_offset));
+ if (enable)
+ pfvfspoof |= BIT(vf_shift);
+ else
+ pfvfspoof &= ~BIT(vf_shift);
+ wr32(wx, WX_TDM_ETYPE_AS(reg_offset), pfvfspoof);
+}
+
+int wx_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
+{
+ u32 index = WX_VF_REG_OFFSET(vf), vf_bit = WX_VF_IND_SHIFT(vf);
+ struct wx *wx = netdev_priv(netdev);
+ u32 regval;
+
+ if (vf >= wx->num_vfs)
+ return -EINVAL;
+
+ wx->vfinfo[vf].spoofchk_enabled = setting;
+
+ regval = (setting << vf_bit);
+ wr32m(wx, WX_TDM_MAC_AS(index), regval | BIT(vf_bit), regval);
+
+ if (wx->vfinfo[vf].vlan_count)
+ wr32m(wx, WX_TDM_VLAN_AS(index), regval | BIT(vf_bit), regval);
+
+ return 0;
+}
+
+static void wx_configure_virtualization(struct wx *wx)
+{
+ u16 pool = wx->num_rx_pools;
+ u32 reg_offset, vf_shift;
+ u32 i;
+
+ if (!test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags))
+ return;
+
+ wr32m(wx, WX_PSR_VM_CTL,
+ WX_PSR_VM_CTL_POOL_MASK | WX_PSR_VM_CTL_REPLEN,
+ FIELD_PREP(WX_PSR_VM_CTL_POOL_MASK, VMDQ_P(0)) |
+ WX_PSR_VM_CTL_REPLEN);
+ while (pool--)
+ wr32m(wx, WX_PSR_VM_L2CTL(pool),
+ WX_PSR_VM_L2CTL_AUPE, WX_PSR_VM_L2CTL_AUPE);
+
+ if (!test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
+ vf_shift = BIT(VMDQ_P(0));
+ /* Enable only the PF pools for Tx/Rx */
+ wr32(wx, WX_RDM_VF_RE(0), vf_shift);
+ wr32(wx, WX_TDM_VF_TE(0), vf_shift);
+ } else {
+ vf_shift = WX_VF_IND_SHIFT(VMDQ_P(0));
+ reg_offset = WX_VF_REG_OFFSET(VMDQ_P(0));
+
+ /* Enable only the PF pools for Tx/Rx */
+ wr32(wx, WX_RDM_VF_RE(reg_offset), GENMASK(31, vf_shift));
+ wr32(wx, WX_RDM_VF_RE(reg_offset ^ 1), reg_offset - 1);
+ wr32(wx, WX_TDM_VF_TE(reg_offset), GENMASK(31, vf_shift));
+ wr32(wx, WX_TDM_VF_TE(reg_offset ^ 1), reg_offset - 1);
+ }
+
+ /* clear VLAN promisc flag so VFTA will be updated if necessary */
+ clear_bit(WX_FLAG_VLAN_PROMISC, wx->flags);
+
+ for (i = 0; i < wx->num_vfs; i++) {
+ if (!wx->vfinfo[i].spoofchk_enabled)
+ wx_set_vf_spoofchk(wx->netdev, i, false);
+ /* enable ethertype anti spoofing if hw supports it */
+ wx_set_ethertype_anti_spoofing(wx, true, i);
+ }
+}
+
static void wx_configure_port(struct wx *wx)
{
u32 value, i;
- value = WX_CFG_PORT_CTL_D_VLAN | WX_CFG_PORT_CTL_QINQ;
+ if (!test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
+ value = (wx->num_vfs == 0) ?
+ WX_CFG_PORT_CTL_NUM_VT_NONE :
+ WX_CFG_PORT_CTL_NUM_VT_8;
+ } else {
+ if (test_bit(WX_FLAG_VMDQ_ENABLED, wx->flags)) {
+ if (wx->ring_feature[RING_F_RSS].indices == 4)
+ value = WX_CFG_PORT_CTL_NUM_VT_32;
+ else
+ value = WX_CFG_PORT_CTL_NUM_VT_64;
+ } else {
+ value = 0;
+ }
+ }
+
+ value |= WX_CFG_PORT_CTL_D_VLAN | WX_CFG_PORT_CTL_QINQ;
wr32m(wx, WX_CFG_PORT_CTL,
+ WX_CFG_PORT_CTL_NUM_VT_MASK |
WX_CFG_PORT_CTL_D_VLAN |
WX_CFG_PORT_CTL_QINQ,
value);
@@ -1306,6 +1601,83 @@ static void wx_vlan_strip_control(struct wx *wx, bool enable)
}
}
+static void wx_vlan_promisc_enable(struct wx *wx)
+{
+ u32 vlnctrl, i, vind, bits, reg_idx;
+
+ vlnctrl = rd32(wx, WX_PSR_VLAN_CTL);
+ if (test_bit(WX_FLAG_VMDQ_ENABLED, wx->flags)) {
+ /* we need to keep the VLAN filter on in SRIOV */
+ vlnctrl |= WX_PSR_VLAN_CTL_VFE;
+ wr32(wx, WX_PSR_VLAN_CTL, vlnctrl);
+ } else {
+ vlnctrl &= ~WX_PSR_VLAN_CTL_VFE;
+ wr32(wx, WX_PSR_VLAN_CTL, vlnctrl);
+ return;
+ }
+ /* We are already in VLAN promisc, nothing to do */
+ if (test_bit(WX_FLAG_VLAN_PROMISC, wx->flags))
+ return;
+ /* Set flag so we don't redo unnecessary work */
+ set_bit(WX_FLAG_VLAN_PROMISC, wx->flags);
+ /* Add PF to all active pools */
+ for (i = WX_PSR_VLAN_SWC_ENTRIES; --i;) {
+ wr32(wx, WX_PSR_VLAN_SWC_IDX, i);
+ vind = WX_VF_IND_SHIFT(VMDQ_P(0));
+ reg_idx = WX_VF_REG_OFFSET(VMDQ_P(0));
+ bits = rd32(wx, WX_PSR_VLAN_SWC_VM(reg_idx));
+ bits |= BIT(vind);
+ wr32(wx, WX_PSR_VLAN_SWC_VM(reg_idx), bits);
+ }
+ /* Set all bits in the VLAN filter table array */
+ for (i = 0; i < wx->mac.vft_size; i++)
+ wr32(wx, WX_PSR_VLAN_TBL(i), U32_MAX);
+}
+
+static void wx_scrub_vfta(struct wx *wx)
+{
+ u32 i, vid, bits, vfta, vind, vlvf, reg_idx;
+
+ for (i = WX_PSR_VLAN_SWC_ENTRIES; --i;) {
+ wr32(wx, WX_PSR_VLAN_SWC_IDX, i);
+ vlvf = rd32(wx, WX_PSR_VLAN_SWC_IDX);
+ /* pull VLAN ID from VLVF */
+ vid = vlvf & ~WX_PSR_VLAN_SWC_VIEN;
+ if (vlvf & WX_PSR_VLAN_SWC_VIEN) {
+ /* if PF is part of this then continue */
+ if (test_bit(vid, wx->active_vlans))
+ continue;
+ }
+ /* remove PF from the pool */
+ vind = WX_VF_IND_SHIFT(VMDQ_P(0));
+ reg_idx = WX_VF_REG_OFFSET(VMDQ_P(0));
+ bits = rd32(wx, WX_PSR_VLAN_SWC_VM(reg_idx));
+ bits &= ~BIT(vind);
+ wr32(wx, WX_PSR_VLAN_SWC_VM(reg_idx), bits);
+ }
+ /* extract values from vft_shadow and write back to VFTA */
+ for (i = 0; i < wx->mac.vft_size; i++) {
+ vfta = wx->mac.vft_shadow[i];
+ wr32(wx, WX_PSR_VLAN_TBL(i), vfta);
+ }
+}
+
+static void wx_vlan_promisc_disable(struct wx *wx)
+{
+ u32 vlnctrl;
+
+ /* configure vlan filtering */
+ vlnctrl = rd32(wx, WX_PSR_VLAN_CTL);
+ vlnctrl |= WX_PSR_VLAN_CTL_VFE;
+ wr32(wx, WX_PSR_VLAN_CTL, vlnctrl);
+ /* We are not in VLAN promisc, nothing to do */
+ if (!test_bit(WX_FLAG_VLAN_PROMISC, wx->flags))
+ return;
+ /* Set flag so we don't redo unnecessary work */
+ clear_bit(WX_FLAG_VLAN_PROMISC, wx->flags);
+ wx_scrub_vfta(wx);
+}
+
void wx_set_rx_mode(struct net_device *netdev)
{
struct wx *wx = netdev_priv(netdev);
@@ -1318,7 +1690,7 @@ void wx_set_rx_mode(struct net_device *netdev)
/* Check for Promiscuous and All Multicast modes */
fctrl = rd32(wx, WX_PSR_CTL);
fctrl &= ~(WX_PSR_CTL_UPE | WX_PSR_CTL_MPE);
- vmolr = rd32(wx, WX_PSR_VM_L2CTL(0));
+ vmolr = rd32(wx, WX_PSR_VM_L2CTL(VMDQ_P(0)));
vmolr &= ~(WX_PSR_VM_L2CTL_UPE |
WX_PSR_VM_L2CTL_MPE |
WX_PSR_VM_L2CTL_ROPE |
@@ -1339,7 +1711,10 @@ void wx_set_rx_mode(struct net_device *netdev)
fctrl |= WX_PSR_CTL_UPE | WX_PSR_CTL_MPE;
/* pf don't want packets routing to vf, so clear UPE */
vmolr |= WX_PSR_VM_L2CTL_MPE;
- vlnctrl &= ~WX_PSR_VLAN_CTL_VFE;
+ if (test_bit(WX_FLAG_VMDQ_ENABLED, wx->flags) &&
+ test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags))
+ vlnctrl |= WX_PSR_VLAN_CTL_VFE;
+ features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
}
if (netdev->flags & IFF_ALLMULTI) {
@@ -1362,7 +1737,7 @@ void wx_set_rx_mode(struct net_device *netdev)
* sufficient space to store all the addresses then enable
* unicast promiscuous mode
*/
- count = wx_write_uc_addr_list(netdev, 0);
+ count = wx_write_uc_addr_list(netdev, VMDQ_P(0));
if (count < 0) {
vmolr &= ~WX_PSR_VM_L2CTL_ROPE;
vmolr |= WX_PSR_VM_L2CTL_UPE;
@@ -1380,7 +1755,7 @@ void wx_set_rx_mode(struct net_device *netdev)
wr32(wx, WX_PSR_VLAN_CTL, vlnctrl);
wr32(wx, WX_PSR_CTL, fctrl);
- wr32(wx, WX_PSR_VM_L2CTL(0), vmolr);
+ wr32(wx, WX_PSR_VM_L2CTL(VMDQ_P(0)), vmolr);
if ((features & NETIF_F_HW_VLAN_CTAG_RX) &&
(features & NETIF_F_HW_VLAN_STAG_RX))
@@ -1388,6 +1763,10 @@ void wx_set_rx_mode(struct net_device *netdev)
else
wx_vlan_strip_control(wx, false);
+ if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
+ wx_vlan_promisc_disable(wx);
+ else
+ wx_vlan_promisc_enable(wx);
}
EXPORT_SYMBOL(wx_set_rx_mode);
@@ -1637,6 +2016,13 @@ static void wx_setup_reta(struct wx *wx)
u32 random_key_size = WX_RSS_KEY_SIZE / 4;
u32 i, j;
+ if (test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags)) {
+ if (wx->mac.type == wx_mac_em)
+ rss_i = 1;
+ else
+ rss_i = rss_i < 4 ? 4 : rss_i;
+ }
+
/* Fill out hash function seeds */
for (i = 0; i < random_key_size; i++)
wr32(wx, WX_RDB_RSSRK(i), wx->rss_key[i]);
@@ -1654,10 +2040,42 @@ static void wx_setup_reta(struct wx *wx)
wx_store_reta(wx);
}
+#define WX_RDB_RSS_PL_2 FIELD_PREP(GENMASK(31, 29), 1)
+#define WX_RDB_RSS_PL_4 FIELD_PREP(GENMASK(31, 29), 2)
+static void wx_setup_psrtype(struct wx *wx)
+{
+ int rss_i = wx->ring_feature[RING_F_RSS].indices;
+ u32 psrtype;
+ int pool;
+
+ psrtype = WX_RDB_PL_CFG_L4HDR |
+ WX_RDB_PL_CFG_L3HDR |
+ WX_RDB_PL_CFG_L2HDR |
+ WX_RDB_PL_CFG_TUN_OUTL2HDR |
+ WX_RDB_PL_CFG_TUN_TUNHDR;
+
+ if (!test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
+ for_each_set_bit(pool, &wx->fwd_bitmask, 8)
+ wr32(wx, WX_RDB_PL_CFG(VMDQ_P(pool)), psrtype);
+ } else {
+ if (rss_i > 3)
+ psrtype |= WX_RDB_RSS_PL_4;
+ else if (rss_i > 1)
+ psrtype |= WX_RDB_RSS_PL_2;
+
+ for_each_set_bit(pool, &wx->fwd_bitmask, 32)
+ wr32(wx, WX_RDB_PL_CFG(VMDQ_P(pool)), psrtype);
+ }
+}
+
static void wx_setup_mrqc(struct wx *wx)
{
u32 rss_field = 0;
+ /* VT, and RSS do not coexist at the same time */
+ if (test_bit(WX_FLAG_VMDQ_ENABLED, wx->flags))
+ return;
+
/* Disable indicating checksum in descriptor, enables RSS hash */
wr32m(wx, WX_PSR_CTL, WX_PSR_CTL_PCSD, WX_PSR_CTL_PCSD);
@@ -1687,21 +2105,16 @@ static void wx_setup_mrqc(struct wx *wx)
**/
void wx_configure_rx(struct wx *wx)
{
- u32 psrtype, i;
int ret;
+ u32 i;
wx_disable_rx(wx);
-
- psrtype = WX_RDB_PL_CFG_L4HDR |
- WX_RDB_PL_CFG_L3HDR |
- WX_RDB_PL_CFG_L2HDR |
- WX_RDB_PL_CFG_TUN_TUNHDR;
- wr32(wx, WX_RDB_PL_CFG(0), psrtype);
+ wx_setup_psrtype(wx);
/* enable hw crc stripping */
wr32m(wx, WX_RSC_CTL, WX_RSC_CTL_CRC_STRIP, WX_RSC_CTL_CRC_STRIP);
- if (wx->mac.type == wx_mac_sp) {
+ if (test_bit(WX_FLAG_RSC_CAPABLE, wx->flags)) {
u32 psrctl;
/* RSC Setup */
@@ -1744,6 +2157,7 @@ void wx_configure(struct wx *wx)
{
wx_set_rxpba(wx);
wx_pbthresh_setup(wx);
+ wx_configure_virtualization(wx);
wx_configure_port(wx);
wx_set_rx_mode(wx->netdev);
@@ -1838,10 +2252,8 @@ int wx_stop_adapter(struct wx *wx)
}
EXPORT_SYMBOL(wx_stop_adapter);
-void wx_reset_misc(struct wx *wx)
+void wx_reset_mac(struct wx *wx)
{
- int i;
-
/* receive packets that size > 2048 */
wr32m(wx, WX_MAC_RX_CFG, WX_MAC_RX_CFG_JE, WX_MAC_RX_CFG_JE);
@@ -1853,6 +2265,14 @@ void wx_reset_misc(struct wx *wx)
WX_MAC_RX_FLOW_CTRL_RFE, WX_MAC_RX_FLOW_CTRL_RFE);
wr32(wx, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR);
+}
+EXPORT_SYMBOL(wx_reset_mac);
+
+void wx_reset_misc(struct wx *wx)
+{
+ int i;
+
+ wx_reset_mac(wx);
wr32m(wx, WX_MIS_RST_ST,
WX_MIS_RST_ST_RST_INIT, 0x1E00);
@@ -2102,7 +2522,7 @@ static int wx_set_vlvf(struct wx *wx, u32 vlan, u32 vind, bool vlan_on,
*
* Turn on/off specified VLAN in the VLAN filter table.
**/
-static int wx_set_vfta(struct wx *wx, u32 vlan, u32 vind, bool vlan_on)
+int wx_set_vfta(struct wx *wx, u32 vlan, u32 vind, bool vlan_on)
{
u32 bitindex, vfta, targetbit;
bool vfta_changed = false;
@@ -2353,12 +2773,13 @@ void wx_update_stats(struct wx *wx)
hwstats->b2ogprc += rd32(wx, WX_RDM_BMC2OS_CNT);
hwstats->rdmdrop += rd32(wx, WX_RDM_DRP_PKT);
- if (wx->mac.type == wx_mac_sp) {
+ if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)) {
hwstats->fdirmatch += rd32(wx, WX_RDB_FDIR_MATCH);
hwstats->fdirmiss += rd32(wx, WX_RDB_FDIR_MISS);
}
- for (i = 0; i < wx->mac.max_rx_queues; i++)
+ for (i = wx->num_vfs * wx->num_rx_queues_per_pool;
+ i < wx->mac.max_rx_queues; i++)
hwstats->qmprc += rd32(wx, WX_PX_MPRC(i));
}
EXPORT_SYMBOL(wx_update_stats);
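The helpers exported above (wx_add_mac_filter, wx_del_mac_filter, wx_set_vfta, wx_set_vf_spoofchk) are consumed by the SR-IOV code added elsewhere in this series. As a rough illustration only, with a hypothetical function name and no handling beyond the return codes, a PF-side path could pin a MAC address and a VLAN to pool vf like this:

static int example_assign_vf_addr(struct wx *wx, u16 vf, u8 *mac, u16 vlan)
{
	int ret;

	/* wx_add_mac_filter() returns the RAR index on success */
	ret = wx_add_mac_filter(wx, mac, vf);
	if (ret < 0)
		return ret;

	return wx_set_vfta(wx, vlan, vf, true);
}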
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.h b/drivers/net/ethernet/wangxun/libwx/wx_hw.h
index 11fb33349482..26a56cba60b9 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.h
@@ -18,6 +18,7 @@ void wx_control_hw(struct wx *wx, bool drv);
int wx_mng_present(struct wx *wx);
int wx_host_interface_command(struct wx *wx, u32 *buffer,
u32 length, u32 timeout, bool return_data);
+int wx_set_pps(struct wx *wx, bool enable, u64 nsec, u64 cycles);
int wx_read_ee_hostif(struct wx *wx, u16 offset, u16 *data);
int wx_read_ee_hostif_buffer(struct wx *wx,
u16 offset, u16 words, u16 *data);
@@ -25,9 +26,12 @@ void wx_init_eeprom_params(struct wx *wx);
void wx_get_mac_addr(struct wx *wx, u8 *mac_addr);
void wx_init_rx_addrs(struct wx *wx);
void wx_mac_set_default_filter(struct wx *wx, u8 *addr);
+int wx_add_mac_filter(struct wx *wx, u8 *addr, u16 pool);
+int wx_del_mac_filter(struct wx *wx, u8 *addr, u16 pool);
void wx_flush_sw_mac_table(struct wx *wx);
int wx_set_mac(struct net_device *netdev, void *p);
void wx_disable_rx(struct wx *wx);
+int wx_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting);
int wx_disable_sec_rx_path(struct wx *wx);
void wx_enable_sec_rx_path(struct wx *wx);
void wx_set_rx_mode(struct net_device *netdev);
@@ -38,9 +42,11 @@ void wx_configure(struct wx *wx);
void wx_start_hw(struct wx *wx);
int wx_disable_pcie_master(struct wx *wx);
int wx_stop_adapter(struct wx *wx);
+void wx_reset_mac(struct wx *wx);
void wx_reset_misc(struct wx *wx);
int wx_get_pcie_msix_counts(struct wx *wx, u16 *msix_count, u16 max_msix_count);
int wx_sw_init(struct wx *wx);
+int wx_set_vfta(struct wx *wx, u32 vlan, u32 vind, bool vlan_on);
int wx_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid);
int wx_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid);
int wx_fc_enable(struct wx *wx, bool tx_pause, bool rx_pause);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
index 2b3d6586f44a..55e252789db3 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
@@ -5,6 +5,7 @@
#include <net/ip6_checksum.h>
#include <net/page_pool/helpers.h>
#include <net/inet_ecn.h>
+#include <linux/workqueue.h>
#include <linux/iopoll.h>
#include <linux/sctp.h>
#include <linux/pci.h>
@@ -13,6 +14,7 @@
#include "wx_type.h"
#include "wx_lib.h"
+#include "wx_ptp.h"
#include "wx_hw.h"
/* Lookup table mapping the HW PTYPE to the bit field for decoding */
@@ -309,7 +311,8 @@ static bool wx_alloc_mapped_page(struct wx_ring *rx_ring,
return true;
page = page_pool_dev_alloc_pages(rx_ring->page_pool);
- WARN_ON(!page);
+ if (unlikely(!page))
+ return false;
dma = page_pool_get_dma_addr(page);
bi->page_dma = dma;
@@ -545,7 +548,8 @@ static void wx_rx_checksum(struct wx_ring *ring,
return;
/* Hardware can't guarantee csum if IPv6 Dest Header found */
- if (dptype.prot != WX_DEC_PTYPE_PROT_SCTP && WX_RXD_IPV6EX(rx_desc))
+ if (dptype.prot != WX_DEC_PTYPE_PROT_SCTP &&
+ wx_test_staterr(rx_desc, WX_RXD_STAT_IPV6EX))
return;
/* if L4 checksum error */
@@ -597,8 +601,17 @@ static void wx_process_skb_fields(struct wx_ring *rx_ring,
union wx_rx_desc *rx_desc,
struct sk_buff *skb)
{
+ struct wx *wx = netdev_priv(rx_ring->netdev);
+
wx_rx_hash(rx_ring, rx_desc, skb);
wx_rx_checksum(rx_ring, rx_desc, skb);
+
+ if (unlikely(test_bit(WX_FLAG_RX_HWTSTAMP_ENABLED, wx->flags)) &&
+ unlikely(wx_test_staterr(rx_desc, WX_RXD_STAT_TS))) {
+ wx_ptp_rx_hwtstamp(rx_ring->q_vector->wx, skb);
+ rx_ring->last_rx_timestamp = jiffies;
+ }
+
wx_rx_vlan(rx_ring, rx_desc, skb);
skb_record_rx_queue(skb, rx_ring->queue_index);
skb->protocol = eth_type_trans(skb, rx_ring->netdev);
@@ -705,6 +718,7 @@ static bool wx_clean_tx_irq(struct wx_q_vector *q_vector,
{
unsigned int budget = q_vector->wx->tx_work_limit;
unsigned int total_bytes = 0, total_packets = 0;
+ struct wx *wx = netdev_priv(tx_ring->netdev);
unsigned int i = tx_ring->next_to_clean;
struct wx_tx_buffer *tx_buffer;
union wx_tx_desc *tx_desc;
@@ -737,6 +751,11 @@ static bool wx_clean_tx_irq(struct wx_q_vector *q_vector,
total_bytes += tx_buffer->bytecount;
total_packets += tx_buffer->gso_segs;
+ /* schedule check for Tx timestamp */
+ if (unlikely(test_bit(WX_STATE_PTP_TX_IN_PROGRESS, wx->state)) &&
+ skb_shinfo(tx_buffer->skb)->tx_flags & SKBTX_IN_PROGRESS)
+ ptp_schedule_worker(wx->ptp_clock, 0);
+
/* free the skb */
napi_consume_skb(tx_buffer->skb, napi_budget);
@@ -932,9 +951,9 @@ static void wx_tx_olinfo_status(union wx_tx_desc *tx_desc,
tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
}
-static void wx_tx_map(struct wx_ring *tx_ring,
- struct wx_tx_buffer *first,
- const u8 hdr_len)
+static int wx_tx_map(struct wx_ring *tx_ring,
+ struct wx_tx_buffer *first,
+ const u8 hdr_len)
{
struct sk_buff *skb = first->skb;
struct wx_tx_buffer *tx_buffer;
@@ -1013,6 +1032,8 @@ static void wx_tx_map(struct wx_ring *tx_ring,
netdev_tx_sent_queue(wx_txring_txq(tx_ring), first->bytecount);
+ /* set the timestamp */
+ first->time_stamp = jiffies;
skb_tx_timestamp(skb);
/* Force memory writes to complete before letting h/w know there
@@ -1038,7 +1059,7 @@ static void wx_tx_map(struct wx_ring *tx_ring,
if (netif_xmit_stopped(wx_txring_txq(tx_ring)) || !netdev_xmit_more())
writel(i, tx_ring->tail);
- return;
+ return 0;
dma_error:
dev_err(tx_ring->dev, "TX DMA map failed\n");
@@ -1062,6 +1083,8 @@ dma_error:
first->skb = NULL;
tx_ring->next_to_use = i;
+
+ return -ENOMEM;
}
static void wx_tx_ctxtdesc(struct wx_ring *tx_ring, u32 vlan_macip_lens,
@@ -1082,26 +1105,6 @@ static void wx_tx_ctxtdesc(struct wx_ring *tx_ring, u32 vlan_macip_lens,
context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
}
-static void wx_get_ipv6_proto(struct sk_buff *skb, int offset, u8 *nexthdr)
-{
- struct ipv6hdr *hdr = (struct ipv6hdr *)(skb->data + offset);
-
- *nexthdr = hdr->nexthdr;
- offset += sizeof(struct ipv6hdr);
- while (ipv6_ext_hdr(*nexthdr)) {
- struct ipv6_opt_hdr _hdr, *hp;
-
- if (*nexthdr == NEXTHDR_NONE)
- return;
- hp = skb_header_pointer(skb, offset, sizeof(_hdr), &_hdr);
- if (!hp)
- return;
- if (*nexthdr == NEXTHDR_FRAGMENT)
- break;
- *nexthdr = hp->nexthdr;
- }
-}
-
union network_header {
struct iphdr *ipv4;
struct ipv6hdr *ipv6;
@@ -1112,6 +1115,8 @@ static u8 wx_encode_tx_desc_ptype(const struct wx_tx_buffer *first)
{
u8 tun_prot = 0, l4_prot = 0, ptype = 0;
struct sk_buff *skb = first->skb;
+ unsigned char *exthdr, *l4_hdr;
+ __be16 frag_off;
if (skb->encapsulation) {
union network_header hdr;
@@ -1122,14 +1127,18 @@ static u8 wx_encode_tx_desc_ptype(const struct wx_tx_buffer *first)
ptype = WX_PTYPE_TUN_IPV4;
break;
case htons(ETH_P_IPV6):
- wx_get_ipv6_proto(skb, skb_network_offset(skb), &tun_prot);
+ l4_hdr = skb_transport_header(skb);
+ exthdr = skb_network_header(skb) + sizeof(struct ipv6hdr);
+ tun_prot = ipv6_hdr(skb)->nexthdr;
+ if (l4_hdr != exthdr)
+ ipv6_skip_exthdr(skb, exthdr - skb->data, &tun_prot, &frag_off);
ptype = WX_PTYPE_TUN_IPV6;
break;
default:
return ptype;
}
- if (tun_prot == IPPROTO_IPIP) {
+ if (tun_prot == IPPROTO_IPIP || tun_prot == IPPROTO_IPV6) {
hdr.raw = (void *)inner_ip_hdr(skb);
ptype |= WX_PTYPE_PKT_IPIP;
} else if (tun_prot == IPPROTO_UDP) {
@@ -1166,7 +1175,11 @@ static u8 wx_encode_tx_desc_ptype(const struct wx_tx_buffer *first)
l4_prot = hdr.ipv4->protocol;
break;
case 6:
- wx_get_ipv6_proto(skb, skb_inner_network_offset(skb), &l4_prot);
+ l4_hdr = skb_inner_transport_header(skb);
+ exthdr = skb_inner_network_header(skb) + sizeof(struct ipv6hdr);
+ l4_prot = inner_ipv6_hdr(skb)->nexthdr;
+ if (l4_hdr != exthdr)
+ ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_prot, &frag_off);
ptype |= WX_PTYPE_PKT_IPV6;
break;
default:
@@ -1179,7 +1192,11 @@ static u8 wx_encode_tx_desc_ptype(const struct wx_tx_buffer *first)
ptype = WX_PTYPE_PKT_IP;
break;
case htons(ETH_P_IPV6):
- wx_get_ipv6_proto(skb, skb_network_offset(skb), &l4_prot);
+ l4_hdr = skb_transport_header(skb);
+ exthdr = skb_network_header(skb) + sizeof(struct ipv6hdr);
+ l4_prot = ipv6_hdr(skb)->nexthdr;
+ if (l4_hdr != exthdr)
+ ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_prot, &frag_off);
ptype = WX_PTYPE_PKT_IP | WX_PTYPE_PKT_IPV6;
break;
default:
@@ -1269,13 +1286,20 @@ static int wx_tso(struct wx_ring *tx_ring, struct wx_tx_buffer *first,
/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
if (enc) {
+ unsigned char *exthdr, *l4_hdr;
+ __be16 frag_off;
+
switch (first->protocol) {
case htons(ETH_P_IP):
tun_prot = ip_hdr(skb)->protocol;
first->tx_flags |= WX_TX_FLAGS_OUTER_IPV4;
break;
case htons(ETH_P_IPV6):
+ l4_hdr = skb_transport_header(skb);
+ exthdr = skb_network_header(skb) + sizeof(struct ipv6hdr);
tun_prot = ipv6_hdr(skb)->nexthdr;
+ if (l4_hdr != exthdr)
+ ipv6_skip_exthdr(skb, exthdr - skb->data, &tun_prot, &frag_off);
break;
default:
break;
@@ -1298,6 +1322,7 @@ static int wx_tso(struct wx_ring *tx_ring, struct wx_tx_buffer *first,
WX_TXD_TUNNEL_LEN_SHIFT);
break;
case IPPROTO_IPIP:
+ case IPPROTO_IPV6:
tunhdr_eiplen_tunlen = (((char *)inner_ip_hdr(skb) -
(char *)ip_hdr(skb)) >> 2) <<
WX_TXD_OUTER_IPLEN_SHIFT;
@@ -1335,12 +1360,15 @@ static void wx_tx_csum(struct wx_ring *tx_ring, struct wx_tx_buffer *first,
u8 tun_prot = 0;
if (skb->ip_summed != CHECKSUM_PARTIAL) {
+csum_failed:
if (!(first->tx_flags & WX_TX_FLAGS_HW_VLAN) &&
!(first->tx_flags & WX_TX_FLAGS_CC))
return;
vlan_macip_lens = skb_network_offset(skb) <<
WX_TXD_MACLEN_SHIFT;
} else {
+ unsigned char *exthdr, *l4_hdr;
+ __be16 frag_off;
u8 l4_prot = 0;
union {
struct iphdr *ipv4;
@@ -1362,7 +1390,12 @@ static void wx_tx_csum(struct wx_ring *tx_ring, struct wx_tx_buffer *first,
tun_prot = ip_hdr(skb)->protocol;
break;
case htons(ETH_P_IPV6):
+ l4_hdr = skb_transport_header(skb);
+ exthdr = skb_network_header(skb) + sizeof(struct ipv6hdr);
tun_prot = ipv6_hdr(skb)->nexthdr;
+ if (l4_hdr != exthdr)
+ ipv6_skip_exthdr(skb, exthdr - skb->data,
+ &tun_prot, &frag_off);
break;
default:
return;
@@ -1386,6 +1419,7 @@ static void wx_tx_csum(struct wx_ring *tx_ring, struct wx_tx_buffer *first,
WX_TXD_TUNNEL_LEN_SHIFT);
break;
case IPPROTO_IPIP:
+ case IPPROTO_IPV6:
tunhdr_eiplen_tunlen = (((char *)inner_ip_hdr(skb) -
(char *)ip_hdr(skb)) >> 2) <<
WX_TXD_OUTER_IPLEN_SHIFT;
@@ -1408,7 +1442,10 @@ static void wx_tx_csum(struct wx_ring *tx_ring, struct wx_tx_buffer *first,
break;
case 6:
vlan_macip_lens |= (transport_hdr.raw - network_hdr.raw) >> 1;
+ exthdr = network_hdr.raw + sizeof(struct ipv6hdr);
l4_prot = network_hdr.ipv6->nexthdr;
+ if (transport_hdr.raw != exthdr)
+ ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_prot, &frag_off);
break;
default:
break;
@@ -1428,7 +1465,8 @@ static void wx_tx_csum(struct wx_ring *tx_ring, struct wx_tx_buffer *first,
WX_TXD_L4LEN_SHIFT;
break;
default:
- break;
+ skb_checksum_help(skb);
+ goto csum_failed;
}
/* update TX checksum flag */
@@ -1486,6 +1524,20 @@ static netdev_tx_t wx_xmit_frame_ring(struct sk_buff *skb,
tx_flags |= WX_TX_FLAGS_HW_VLAN;
}
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ wx->ptp_clock) {
+ if (wx->tstamp_config.tx_type == HWTSTAMP_TX_ON &&
+ !test_and_set_bit_lock(WX_STATE_PTP_TX_IN_PROGRESS,
+ wx->state)) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ tx_flags |= WX_TX_FLAGS_TSTAMP;
+ wx->ptp_tx_skb = skb_get(skb);
+ wx->ptp_tx_start = jiffies;
+ } else {
+ wx->tx_hwtstamp_skipped++;
+ }
+ }
+
/* record initial flags and protocol */
first->tx_flags = tx_flags;
first->protocol = vlan_get_protocol(skb);
@@ -1501,12 +1553,20 @@ static netdev_tx_t wx_xmit_frame_ring(struct sk_buff *skb,
if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags) && tx_ring->atr_sample_rate)
wx->atr(tx_ring, first, ptype);
- wx_tx_map(tx_ring, first, hdr_len);
+ if (wx_tx_map(tx_ring, first, hdr_len))
+ goto cleanup_tx_tstamp;
return NETDEV_TX_OK;
out_drop:
dev_kfree_skb_any(first->skb);
first->skb = NULL;
+cleanup_tx_tstamp:
+ if (unlikely(tx_flags & WX_TX_FLAGS_TSTAMP)) {
+ dev_kfree_skb_any(wx->ptp_tx_skb);
+ wx->ptp_tx_skb = NULL;
+ wx->tx_hwtstamp_errors++;
+ clear_bit_unlock(WX_STATE_PTP_TX_IN_PROGRESS, wx->state);
+ }
return NETDEV_TX_OK;
}
@@ -1561,6 +1621,65 @@ void wx_napi_disable_all(struct wx *wx)
}
EXPORT_SYMBOL(wx_napi_disable_all);
+static bool wx_set_vmdq_queues(struct wx *wx)
+{
+ u16 vmdq_i = wx->ring_feature[RING_F_VMDQ].limit;
+ u16 rss_i = wx->ring_feature[RING_F_RSS].limit;
+ u16 rss_m = WX_RSS_DISABLED_MASK;
+ u16 vmdq_m = 0;
+
+ /* only proceed if VMDq is enabled */
+ if (!test_bit(WX_FLAG_VMDQ_ENABLED, wx->flags))
+ return false;
+ /* Add starting offset to total pool count */
+ vmdq_i += wx->ring_feature[RING_F_VMDQ].offset;
+
+ if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
+ /* double check we are limited to maximum pools */
+ vmdq_i = min_t(u16, 64, vmdq_i);
+
+ /* 64 pool mode with 2 queues per pool, or
+ * 16/32/64 pool mode with 1 queue per pool
+ */
+ if (vmdq_i > 32 || rss_i < 4) {
+ vmdq_m = WX_VMDQ_2Q_MASK;
+ rss_m = WX_RSS_2Q_MASK;
+ rss_i = min_t(u16, rss_i, 2);
+ /* 32 pool mode with 4 queues per pool */
+ } else {
+ vmdq_m = WX_VMDQ_4Q_MASK;
+ rss_m = WX_RSS_4Q_MASK;
+ rss_i = 4;
+ }
+ } else {
+ /* double check we are limited to maximum pools */
+ vmdq_i = min_t(u16, 8, vmdq_i);
+
+ /* when VMDQ on, disable RSS */
+ rss_i = 1;
+ }
+
+ /* remove the starting offset from the pool count */
+ vmdq_i -= wx->ring_feature[RING_F_VMDQ].offset;
+
+ /* save features for later use */
+ wx->ring_feature[RING_F_VMDQ].indices = vmdq_i;
+ wx->ring_feature[RING_F_VMDQ].mask = vmdq_m;
+
+ /* limit RSS based on user input and save for later use */
+ wx->ring_feature[RING_F_RSS].indices = rss_i;
+ wx->ring_feature[RING_F_RSS].mask = rss_m;
+
+ wx->queues_per_pool = rss_i; /* may be the same as num_rx_queues_per_pool */
+ wx->num_rx_pools = vmdq_i;
+ wx->num_rx_queues_per_pool = rss_i;
+
+ wx->num_rx_queues = vmdq_i * rss_i;
+ wx->num_tx_queues = vmdq_i * rss_i;
+
+ return true;
+}
+
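To make the pool/queue arithmetic above concrete, a worked example assuming a VMDq offset of 0 (the values follow the branch logic above, they are not read from hardware):

/*
 * 64-function MAC, 64 pools requested, RSS limit >= 4:
 *   vmdq_i > 32              -> WX_VMDQ_2Q_MASK, rss_i = 2
 *   num_rx/tx_queues         = 64 * 2 = 128
 *
 * 64-function MAC, 32 pools requested, RSS limit >= 4:
 *   vmdq_i <= 32, rss_i >= 4 -> WX_VMDQ_4Q_MASK, rss_i = 4
 *   num_rx/tx_queues         = 32 * 4 = 128
 *
 * 8-function MAC (else branch): at most 8 pools, one queue per pool.
 */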
/**
* wx_set_rss_queues: Allocate queues for RSS
* @wx: board private structure to initialize
@@ -1575,6 +1694,10 @@ static void wx_set_rss_queues(struct wx *wx)
/* set mask for 16 queue limit of RSS */
f = &wx->ring_feature[RING_F_RSS];
+ if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags))
+ f->mask = WX_RSS_64Q_MASK;
+ else
+ f->mask = WX_RSS_8Q_MASK;
f->indices = f->limit;
if (!(test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)))
@@ -1582,6 +1705,7 @@ static void wx_set_rss_queues(struct wx *wx)
clear_bit(WX_FLAG_FDIR_HASH, wx->flags);
+ wx->ring_feature[RING_F_FDIR].indices = 1;
/* Use Flow Director in addition to RSS to ensure the best
* distribution of flows across cores, even when an FDIR flow
* isn't matched.
@@ -1607,6 +1731,9 @@ static void wx_set_num_queues(struct wx *wx)
wx->num_tx_queues = 1;
wx->queues_per_pool = 1;
+ if (wx_set_vmdq_queues(wx))
+ return;
+
wx_set_rss_queues(wx);
}
@@ -1620,7 +1747,7 @@ static void wx_set_num_queues(struct wx *wx)
*/
static int wx_acquire_msix_vectors(struct wx *wx)
{
- struct irq_affinity affd = { .pre_vectors = 1 };
+ struct irq_affinity affd = { .post_vectors = 1 };
int nvecs, i;
/* We start by asking for one vector per queue pair */
@@ -1657,16 +1784,24 @@ static int wx_acquire_msix_vectors(struct wx *wx)
return nvecs;
}
- wx->msix_entry->entry = 0;
- wx->msix_entry->vector = pci_irq_vector(wx->pdev, 0);
nvecs -= 1;
for (i = 0; i < nvecs; i++) {
wx->msix_q_entries[i].entry = i;
- wx->msix_q_entries[i].vector = pci_irq_vector(wx->pdev, i + 1);
+ wx->msix_q_entries[i].vector = pci_irq_vector(wx->pdev, i);
}
wx->num_q_vectors = nvecs;
+ wx->msix_entry->entry = nvecs;
+ wx->msix_entry->vector = pci_irq_vector(wx->pdev, nvecs);
+
+ if (test_bit(WX_FLAG_IRQ_VECTOR_SHARED, wx->flags)) {
+ wx->msix_entry->entry = 0;
+ wx->msix_entry->vector = pci_irq_vector(wx->pdev, 0);
+ wx->msix_q_entries[0].entry = 0;
+ wx->msix_q_entries[0].vector = pci_irq_vector(wx->pdev, 1);
+ }
+
return 0;
}
@@ -1687,6 +1822,10 @@ static int wx_set_interrupt_capability(struct wx *wx)
if (ret == 0 || (ret == -ENOMEM))
return ret;
+ /* Disable VMDq support */
+ dev_warn(&wx->pdev->dev, "Disabling VMDq support\n");
+ clear_bit(WX_FLAG_VMDQ_ENABLED, wx->flags);
+
/* Disable RSS */
dev_warn(&wx->pdev->dev, "Disabling RSS support\n");
wx->ring_feature[RING_F_RSS].limit = 1;
@@ -1713,6 +1852,49 @@ static int wx_set_interrupt_capability(struct wx *wx)
return 0;
}
+static bool wx_cache_ring_vmdq(struct wx *wx)
+{
+ struct wx_ring_feature *vmdq = &wx->ring_feature[RING_F_VMDQ];
+ struct wx_ring_feature *rss = &wx->ring_feature[RING_F_RSS];
+ u16 reg_idx;
+ int i;
+
+ /* only proceed if VMDq is enabled */
+ if (!test_bit(WX_FLAG_VMDQ_ENABLED, wx->flags))
+ return false;
+
+ if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
+ /* start at VMDq register offset for SR-IOV enabled setups */
+ reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
+ for (i = 0; i < wx->num_rx_queues; i++, reg_idx++) {
+ /* If we are greater than indices move to next pool */
+ if ((reg_idx & ~vmdq->mask) >= rss->indices)
+ reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
+ wx->rx_ring[i]->reg_idx = reg_idx;
+ }
+ reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
+ for (i = 0; i < wx->num_tx_queues; i++, reg_idx++) {
+ /* If we are greater than indices move to next pool */
+ if ((reg_idx & rss->mask) >= rss->indices)
+ reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
+ wx->tx_ring[i]->reg_idx = reg_idx;
+ }
+ } else {
+ /* start at VMDq register offset for SR-IOV enabled setups */
+ reg_idx = vmdq->offset;
+ for (i = 0; i < wx->num_rx_queues; i++)
+ /* one queue per pool: Rx ring i maps to pool offset + i */
+ wx->rx_ring[i]->reg_idx = reg_idx + i;
+
+ reg_idx = vmdq->offset;
+ for (i = 0; i < wx->num_tx_queues; i++)
+ /* one queue per pool: Tx ring i maps to pool offset + i */
+ wx->tx_ring[i]->reg_idx = reg_idx + i;
+ }
+
+ return true;
+}
+
/**
* wx_cache_ring_rss - Descriptor ring to register mapping for RSS
* @wx: board private structure to initialize
@@ -1724,6 +1906,9 @@ static void wx_cache_ring_rss(struct wx *wx)
{
u16 i;
+ if (wx_cache_ring_vmdq(wx))
+ return;
+
for (i = 0; i < wx->num_rx_queues; i++)
wx->rx_ring[i]->reg_idx = i;
@@ -1781,10 +1966,17 @@ static int wx_alloc_q_vector(struct wx *wx,
/* initialize pointer to rings */
ring = q_vector->ring;
- if (wx->mac.type == wx_mac_sp)
+ switch (wx->mac.type) {
+ case wx_mac_sp:
+ case wx_mac_aml:
+ case wx_mac_aml40:
default_itr = WX_12K_ITR;
- else
+ break;
+ default:
default_itr = WX_7K_ITR;
+ break;
+ }
+
/* initialize ITR */
if (txr_count && !rxr_count)
/* tx only vector */
@@ -2108,6 +2300,8 @@ static void wx_set_ivar(struct wx *wx, s8 direction,
if (direction == -1) {
/* other causes */
+ if (test_bit(WX_FLAG_IRQ_VECTOR_SHARED, wx->flags))
+ msix_vector = 0;
msix_vector |= WX_PX_IVAR_ALLOC_VAL;
index = 0;
ivar = rd32(wx, WX_PX_MISC_IVAR);
@@ -2116,7 +2310,6 @@ static void wx_set_ivar(struct wx *wx, s8 direction,
wr32(wx, WX_PX_MISC_IVAR, ivar);
} else {
/* tx or rx causes */
- msix_vector += 1; /* offset for queue vectors */
msix_vector |= WX_PX_IVAR_ALLOC_VAL;
index = ((16 * (queue & 1)) + (8 * direction));
ivar = rd32(wx, WX_PX_IVAR(queue >> 1));
@@ -2140,14 +2333,22 @@ void wx_write_eitr(struct wx_q_vector *q_vector)
int v_idx = q_vector->v_idx;
u32 itr_reg;
- if (wx->mac.type == wx_mac_sp)
+ switch (wx->mac.type) {
+ case wx_mac_sp:
itr_reg = q_vector->itr & WX_SP_MAX_EITR;
- else
+ break;
+ case wx_mac_aml:
+ case wx_mac_aml40:
+ itr_reg = (q_vector->itr >> 3) & WX_AML_MAX_EITR;
+ break;
+ default:
itr_reg = q_vector->itr & WX_EM_MAX_EITR;
+ break;
+ }
itr_reg |= WX_PX_ITR_CNT_WDIS;
- wr32(wx, WX_PX_ITR(v_idx + 1), itr_reg);
+ wr32(wx, WX_PX_ITR(v_idx), itr_reg);
}
/**
@@ -2161,10 +2362,17 @@ void wx_configure_vectors(struct wx *wx)
{
struct pci_dev *pdev = wx->pdev;
u32 eitrsel = 0;
- u16 v_idx;
+ u16 v_idx, i;
if (pdev->msix_enabled) {
/* Populate MSIX to EITR Select */
+ if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
+ if (wx->num_vfs >= 32)
+ eitrsel = BIT(wx->num_vfs % 32) - 1;
+ } else {
+ for (i = 0; i < wx->num_vfs; i++)
+ eitrsel |= BIT(i);
+ }
wr32(wx, WX_PX_ITRSEL, eitrsel);
/* use EIAM to auto-mask when MSI-X interrupt is asserted
* this saves a register write for every interrupt
@@ -2193,9 +2401,9 @@ void wx_configure_vectors(struct wx *wx)
wx_write_eitr(q_vector);
}
- wx_set_ivar(wx, -1, 0, 0);
+ wx_set_ivar(wx, -1, 0, v_idx);
if (pdev->msix_enabled)
- wr32(wx, WX_PX_ITR(0), 1950);
+ wr32(wx, WX_PX_ITR(v_idx), 1950);
}
EXPORT_SYMBOL(wx_configure_vectors);
@@ -2424,7 +2632,7 @@ static int wx_alloc_page_pool(struct wx_ring *rx_ring)
struct page_pool_params pp_params = {
.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
.order = 0,
- .pool_size = rx_ring->size,
+ .pool_size = rx_ring->count,
.nid = dev_to_node(rx_ring->dev),
.dev = rx_ring->dev,
.dma_dir = DMA_FROM_DEVICE,
@@ -2719,7 +2927,7 @@ int wx_set_features(struct net_device *netdev, netdev_features_t features)
netdev->features = features;
- if (wx->mac.type == wx_mac_sp && changed & NETIF_F_HW_VLAN_CTAG_RX)
+ if (changed & NETIF_F_HW_VLAN_CTAG_RX && wx->do_reset)
wx->do_reset(netdev);
else if (changed & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER))
wx_set_rx_mode(netdev);
@@ -2751,7 +2959,7 @@ int wx_set_features(struct net_device *netdev, netdev_features_t features)
break;
}
- if (need_reset)
+ if (need_reset && wx->do_reset)
wx->do_reset(netdev);
return 0;
@@ -2804,6 +3012,33 @@ netdev_features_t wx_fix_features(struct net_device *netdev,
}
EXPORT_SYMBOL(wx_fix_features);
+#define WX_MAX_TUNNEL_HDR_LEN 80
+netdev_features_t wx_features_check(struct sk_buff *skb,
+ struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct wx *wx = netdev_priv(netdev);
+
+ if (!skb->encapsulation)
+ return features;
+
+ if (wx->mac.type == wx_mac_em)
+ return features & ~NETIF_F_CSUM_MASK;
+
+ if (unlikely(skb_inner_mac_header(skb) - skb_transport_header(skb) >
+ WX_MAX_TUNNEL_HDR_LEN))
+ return features & ~NETIF_F_CSUM_MASK;
+
+ if (skb->inner_protocol_type == ENCAP_TYPE_ETHER &&
+ skb->inner_protocol != htons(ETH_P_IP) &&
+ skb->inner_protocol != htons(ETH_P_IPV6) &&
+ skb->inner_protocol != htons(ETH_P_TEB))
+ return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+
+ return features;
+}
+EXPORT_SYMBOL(wx_features_check);
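Like the other libwx hooks, wx_features_check() is meant to be plugged into the driver's netdev ops. A minimal illustration, with a hypothetical ops struct name:

static const struct net_device_ops example_netdev_ops = {
	.ndo_fix_features	= wx_fix_features,
	.ndo_set_features	= wx_set_features,
	.ndo_features_check	= wx_features_check,
};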
+
void wx_set_ring(struct wx *wx, u32 new_tx_count,
u32 new_rx_count, struct wx_ring *temp_ring)
{
@@ -2870,5 +3105,35 @@ void wx_set_ring(struct wx *wx, u32 new_tx_count,
}
EXPORT_SYMBOL(wx_set_ring);
+void wx_service_event_schedule(struct wx *wx)
+{
+ if (!test_and_set_bit(WX_STATE_SERVICE_SCHED, wx->state))
+ queue_work(system_power_efficient_wq, &wx->service_task);
+}
+EXPORT_SYMBOL(wx_service_event_schedule);
+
+void wx_service_event_complete(struct wx *wx)
+{
+ if (WARN_ON(!test_bit(WX_STATE_SERVICE_SCHED, wx->state)))
+ return;
+
+ /* flush memory to make sure state is correct before next watchdog */
+ smp_mb__before_atomic();
+ clear_bit(WX_STATE_SERVICE_SCHED, wx->state);
+}
+EXPORT_SYMBOL(wx_service_event_complete);
+
+void wx_service_timer(struct timer_list *t)
+{
+ struct wx *wx = timer_container_of(wx, t, service_timer);
+ unsigned long next_event_offset = HZ * 2;
+
+ /* Reset the timer */
+ mod_timer(&wx->service_timer, next_event_offset + jiffies);
+
+ wx_service_event_schedule(wx);
+}
+EXPORT_SYMBOL(wx_service_timer);
+
MODULE_DESCRIPTION("Common library for Wangxun(R) Ethernet drivers.");
MODULE_LICENSE("GPL");
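A driver that uses the service plumbing above is expected to arm the timer and supply the work handler itself. A rough sketch under that assumption; the handler and init function names are placeholders, only wx_service_timer(), wx_service_event_complete() and the service_timer/service_task members come from libwx:

static void example_service_task(struct work_struct *work)
{
	struct wx *wx = container_of(work, struct wx, service_task);

	/* periodic housekeeping: watchdog, stats, PTP overflow checks, ... */

	wx_service_event_complete(wx);
}

static void example_service_init(struct wx *wx)
{
	timer_setup(&wx->service_timer, wx_service_timer, 0);
	INIT_WORK(&wx->service_task, example_service_task);
	mod_timer(&wx->service_timer, jiffies + HZ);
}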
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.h b/drivers/net/ethernet/wangxun/libwx/wx_lib.h
index fdeb0c315b75..aed6ea8cf0d6 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_lib.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.h
@@ -33,7 +33,13 @@ void wx_get_stats64(struct net_device *netdev,
int wx_set_features(struct net_device *netdev, netdev_features_t features);
netdev_features_t wx_fix_features(struct net_device *netdev,
netdev_features_t features);
+netdev_features_t wx_features_check(struct sk_buff *skb,
+ struct net_device *netdev,
+ netdev_features_t features);
void wx_set_ring(struct wx *wx, u32 new_tx_count,
u32 new_rx_count, struct wx_ring *temp_ring);
+void wx_service_event_schedule(struct wx *wx);
+void wx_service_event_complete(struct wx *wx);
+void wx_service_timer(struct timer_list *t);
-#endif /* _NGBE_LIB_H_ */
+#endif /* _WX_LIB_H_ */
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_mbx.c b/drivers/net/ethernet/wangxun/libwx/wx_mbx.c
new file mode 100644
index 000000000000..73af5f11c3bd
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/libwx/wx_mbx.c
@@ -0,0 +1,176 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */
+
+#include <linux/pci.h>
+#include "wx_type.h"
+#include "wx_mbx.h"
+
+/**
+ * wx_obtain_mbx_lock_pf - obtain mailbox lock
+ * @wx: pointer to the HW structure
+ * @vf: the VF index
+ *
+ * Return: return 0 on success and -EBUSY on failure
+ **/
+static int wx_obtain_mbx_lock_pf(struct wx *wx, u16 vf)
+{
+ int count = 5;
+ u32 mailbox;
+
+ while (count--) {
+ /* Take ownership of the buffer */
+ wr32(wx, WX_PXMAILBOX(vf), WX_PXMAILBOX_PFU);
+
+ /* reserve mailbox for vf use */
+ mailbox = rd32(wx, WX_PXMAILBOX(vf));
+ if (mailbox & WX_PXMAILBOX_PFU)
+ return 0;
+ else if (count)
+ udelay(10);
+ }
+ wx_err(wx, "Failed to obtain mailbox lock for PF%d", vf);
+
+ return -EBUSY;
+}
+
+static int wx_check_for_bit_pf(struct wx *wx, u32 mask, int index)
+{
+ u32 mbvficr = rd32(wx, WX_MBVFICR(index));
+
+ if (!(mbvficr & mask))
+ return -EBUSY;
+ wr32(wx, WX_MBVFICR(index), mask);
+
+ return 0;
+}
+
+/**
+ * wx_check_for_ack_pf - checks to see if the VF has acked
+ * @wx: pointer to the HW structure
+ * @vf: the VF index
+ *
+ * Return: return 0 if the VF has set the status bit or else -EBUSY
+ **/
+int wx_check_for_ack_pf(struct wx *wx, u16 vf)
+{
+ u32 index = vf / 16, vf_bit = vf % 16;
+
+ return wx_check_for_bit_pf(wx,
+ FIELD_PREP(WX_MBVFICR_VFACK_MASK,
+ BIT(vf_bit)),
+ index);
+}
+
+/**
+ * wx_check_for_msg_pf - checks to see if the VF has sent mail
+ * @wx: pointer to the HW structure
+ * @vf: the VF index
+ *
+ * Return: return 0 if the VF has got req bit or else -EBUSY
+ **/
+int wx_check_for_msg_pf(struct wx *wx, u16 vf)
+{
+ u32 index = vf / 16, vf_bit = vf % 16;
+
+ return wx_check_for_bit_pf(wx,
+ FIELD_PREP(WX_MBVFICR_VFREQ_MASK,
+ BIT(vf_bit)),
+ index);
+}
+
+/**
+ * wx_write_mbx_pf - Places a message in the mailbox
+ * @wx: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @vf: the VF index
+ *
+ * Return: return 0 on success and -EINVAL/-EBUSY on failure
+ **/
+int wx_write_mbx_pf(struct wx *wx, u32 *msg, u16 size, u16 vf)
+{
+ struct wx_mbx_info *mbx = &wx->mbx;
+ int ret, i;
+
+ /* mbx->size is up to 15 */
+ if (size > mbx->size) {
+ wx_err(wx, "Invalid mailbox message size %d", size);
+ return -EINVAL;
+ }
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret = wx_obtain_mbx_lock_pf(wx, vf);
+ if (ret)
+ return ret;
+
+ /* flush msg and acks as we are overwriting the message buffer */
+ wx_check_for_msg_pf(wx, vf);
+ wx_check_for_ack_pf(wx, vf);
+
+ /* copy the caller specified message to the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ wr32a(wx, WX_PXMBMEM(vf), i, msg[i]);
+
+ /* Interrupt VF to tell it a message has been sent and release buffer */
+ /* set mirrored mailbox flags */
+ wr32a(wx, WX_PXMBMEM(vf), WX_VXMAILBOX_SIZE, WX_PXMAILBOX_STS);
+ wr32(wx, WX_PXMAILBOX(vf), WX_PXMAILBOX_STS);
+
+ return 0;
+}
+
+/**
+ * wx_read_mbx_pf - Read a message from the mailbox
+ * @wx: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @vf: the VF index
+ *
+ * Return: return 0 on success and -EBUSY on failure
+ **/
+int wx_read_mbx_pf(struct wx *wx, u32 *msg, u16 size, u16 vf)
+{
+ struct wx_mbx_info *mbx = &wx->mbx;
+ int ret;
+ u16 i;
+
+ /* limit read to size of mailbox and mbx->size is up to 15 */
+ if (size > mbx->size)
+ size = mbx->size;
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret = wx_obtain_mbx_lock_pf(wx, vf);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < size; i++)
+ msg[i] = rd32a(wx, WX_PXMBMEM(vf), i);
+
+ /* Acknowledge the message and release buffer */
+ /* set mirrored mailbox flags */
+ wr32a(wx, WX_PXMBMEM(vf), WX_VXMAILBOX_SIZE, WX_PXMAILBOX_ACK);
+ wr32(wx, WX_PXMAILBOX(vf), WX_PXMAILBOX_ACK);
+
+ return 0;
+}
+
+/**
+ * wx_check_for_rst_pf - checks to see if the VF has reset
+ * @wx: pointer to the HW structure
+ * @vf: the VF index
+ *
+ * Return: return 0 on success and -EBUSY on failure
+ **/
+int wx_check_for_rst_pf(struct wx *wx, u16 vf)
+{
+ u32 reg_offset = WX_VF_REG_OFFSET(vf);
+ u32 vf_shift = WX_VF_IND_SHIFT(vf);
+ u32 vflre = 0;
+
+ vflre = rd32(wx, WX_VFLRE(reg_offset));
+ if (!(vflre & BIT(vf_shift)))
+ return -EBUSY;
+ wr32(wx, WX_VFLREC(reg_offset), BIT(vf_shift));
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_mbx.h b/drivers/net/ethernet/wangxun/libwx/wx_mbx.h
new file mode 100644
index 000000000000..05aae138dbc3
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/libwx/wx_mbx.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */
+#ifndef _WX_MBX_H_
+#define _WX_MBX_H_
+
+#define WX_VXMAILBOX_SIZE 15
+
+/* PF Registers */
+#define WX_PXMAILBOX(i) (0x600 + (4 * (i))) /* i=[0,63] */
+#define WX_PXMAILBOX_STS BIT(0) /* Initiate message send to VF */
+#define WX_PXMAILBOX_ACK BIT(1) /* Ack message recv'd from VF */
+#define WX_PXMAILBOX_PFU BIT(3) /* PF owns the mailbox buffer */
+
+#define WX_PXMBMEM(i) (0x5000 + (64 * (i))) /* i=[0,63] */
+
+#define WX_VFLRE(i) (0x4A0 + (4 * (i))) /* i=[0,1] */
+#define WX_VFLREC(i) (0x4A8 + (4 * (i))) /* i=[0,1] */
+
+/* SR-IOV specific macros */
+#define WX_MBVFICR(i) (0x480 + (4 * (i))) /* i=[0,3] */
+#define WX_MBVFICR_VFREQ_MASK GENMASK(15, 0)
+#define WX_MBVFICR_VFACK_MASK GENMASK(31, 16)
+
+#define WX_VT_MSGTYPE_ACK BIT(31)
+#define WX_VT_MSGTYPE_NACK BIT(30)
+#define WX_VT_MSGTYPE_CTS BIT(29)
+#define WX_VT_MSGINFO_SHIFT 16
+#define WX_VT_MSGINFO_MASK GENMASK(23, 16)
+
+enum wx_pfvf_api_rev {
+ wx_mbox_api_null,
+ wx_mbox_api_13 = 4, /* API version 1.3 */
+ wx_mbox_api_unknown, /* indicates that API version is not known */
+};
+
+/* mailbox API */
+#define WX_VF_RESET 0x01 /* VF requests reset */
+#define WX_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
+#define WX_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
+#define WX_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
+#define WX_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
+#define WX_VF_SET_MACVLAN 0x06 /* VF requests PF unicast filter */
+#define WX_VF_API_NEGOTIATE 0x08 /* negotiate API version */
+#define WX_VF_GET_QUEUES 0x09 /* get queue configuration */
+#define WX_VF_GET_RETA 0x0a /* VF request for RETA */
+#define WX_VF_GET_RSS_KEY 0x0b /* get RSS key */
+#define WX_VF_UPDATE_XCAST_MODE 0x0c
+#define WX_VF_GET_LINK_STATE 0x10 /* get vf link state */
+#define WX_VF_GET_FW_VERSION 0x11 /* get fw version */
+
+#define WX_VF_BACKUP 0x8001 /* VF requests backup */
+
+#define WX_PF_CONTROL_MSG BIT(8) /* PF control message */
+#define WX_PF_NOFITY_VF_LINK_STATUS 0x1
+#define WX_PF_NOFITY_VF_NET_NOT_RUNNING BIT(31)
+
+#define WX_VF_TX_QUEUES 1 /* number of Tx queues supported */
+#define WX_VF_RX_QUEUES 2 /* number of Rx queues supported */
+#define WX_VF_TRANS_VLAN 3 /* Indication of port vlan */
+#define WX_VF_DEF_QUEUE 4 /* Default queue offset */
+
+#define WX_VF_PERMADDR_MSG_LEN 4
+
+enum wxvf_xcast_modes {
+ WXVF_XCAST_MODE_NONE = 0,
+ WXVF_XCAST_MODE_MULTI,
+ WXVF_XCAST_MODE_ALLMULTI,
+ WXVF_XCAST_MODE_PROMISC,
+};
+
+int wx_write_mbx_pf(struct wx *wx, u32 *msg, u16 size, u16 vf);
+int wx_read_mbx_pf(struct wx *wx, u32 *msg, u16 size, u16 vf);
+int wx_check_for_rst_pf(struct wx *wx, u16 mbx_id);
+int wx_check_for_msg_pf(struct wx *wx, u16 mbx_id);
+int wx_check_for_ack_pf(struct wx *wx, u16 mbx_id);
+
+#endif /* _WX_MBX_H_ */
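For orientation, a hypothetical PF-side poll of this mailbox API: check for a pending VF request, read it, and nack anything unrecognized. The opcode mask and the handling are placeholders; the real message dispatch lives in the SR-IOV code that consumes this header.

static void example_poll_vf_mbx(struct wx *wx, u16 vf)
{
	u32 msgbuf[WX_VXMAILBOX_SIZE];

	if (wx_check_for_msg_pf(wx, vf))
		return;	/* nothing pending from this VF */

	if (wx_read_mbx_pf(wx, msgbuf, WX_VXMAILBOX_SIZE, vf))
		return;

	switch (msgbuf[0] & GENMASK(15, 0)) {
	case WX_VF_RESET:
		/* reset the VF queues, then reply with WX_VT_MSGTYPE_ACK */
		break;
	default:
		msgbuf[0] |= WX_VT_MSGTYPE_NACK;
		wx_write_mbx_pf(wx, msgbuf, 1, vf);
		break;
	}
}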
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ptp.c b/drivers/net/ethernet/wangxun/libwx/wx_ptp.c
new file mode 100644
index 000000000000..2c39b879f977
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/libwx/wx_ptp.c
@@ -0,0 +1,905 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */
+/* Copyright (c) 1999 - 2025 Intel Corporation. */
+
+#include <linux/ptp_classify.h>
+#include <linux/clocksource.h>
+#include <linux/pci.h>
+
+#include "wx_type.h"
+#include "wx_ptp.h"
+#include "wx_hw.h"
+
+#define WX_INCVAL_10GB 0xCCCCCC
+#define WX_INCVAL_1GB 0x800000
+#define WX_INCVAL_100 0xA00000
+#define WX_INCVAL_10 0xC7F380
+#define WX_INCVAL_EM 0x2000000
+#define WX_INCVAL_AML 0xA00000
+
+#define WX_INCVAL_SHIFT_10GB 20
+#define WX_INCVAL_SHIFT_1GB 18
+#define WX_INCVAL_SHIFT_100 15
+#define WX_INCVAL_SHIFT_10 12
+#define WX_INCVAL_SHIFT_EM 22
+#define WX_INCVAL_SHIFT_AML 21
+
+#define WX_OVERFLOW_PERIOD (HZ * 30)
+#define WX_PTP_TX_TIMEOUT (HZ)
+
+#define WX_1588_PPS_WIDTH_EM 120
+
+#define WX_NS_PER_SEC 1000000000ULL
+
+static u64 wx_ptp_timecounter_cyc2time(struct wx *wx, u64 timestamp)
+{
+ unsigned int seq;
+ u64 ns;
+
+ do {
+ seq = read_seqbegin(&wx->hw_tc_lock);
+ ns = timecounter_cyc2time(&wx->hw_tc, timestamp);
+ } while (read_seqretry(&wx->hw_tc_lock, seq));
+
+ return ns;
+}
+
+static u64 wx_ptp_readtime(struct wx *wx, struct ptp_system_timestamp *sts)
+{
+ u32 timeh1, timeh2, timel;
+
+ timeh1 = rd32ptp(wx, WX_TSC_1588_SYSTIMH);
+ ptp_read_system_prets(sts);
+ timel = rd32ptp(wx, WX_TSC_1588_SYSTIML);
+ ptp_read_system_postts(sts);
+ timeh2 = rd32ptp(wx, WX_TSC_1588_SYSTIMH);
+
+ if (timeh1 != timeh2) {
+ ptp_read_system_prets(sts);
+ timel = rd32ptp(wx, WX_TSC_1588_SYSTIML);
+ ptp_read_system_postts(sts);
+ }
+ return (u64)timel | (u64)timeh2 << 32;
+}
+
+static int wx_ptp_adjfine(struct ptp_clock_info *ptp, long ppb)
+{
+ struct wx *wx = container_of(ptp, struct wx, ptp_caps);
+ u64 incval, mask;
+
+ smp_mb(); /* Force any pending update before accessing. */
+ incval = READ_ONCE(wx->base_incval);
+ incval = adjust_by_scaled_ppm(incval, ppb);
+
+ mask = (wx->mac.type == wx_mac_em) ? 0x7FFFFFF : 0xFFFFFF;
+ incval &= mask;
+ if (wx->mac.type != wx_mac_em)
+ incval |= 2 << 24;
+
+ wr32ptp(wx, WX_TSC_1588_INC, incval);
+
+ return 0;
+}
+
+static int wx_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct wx *wx = container_of(ptp, struct wx, ptp_caps);
+ unsigned long flags;
+
+ write_seqlock_irqsave(&wx->hw_tc_lock, flags);
+ timecounter_adjtime(&wx->hw_tc, delta);
+ write_sequnlock_irqrestore(&wx->hw_tc_lock, flags);
+
+ if (wx->ptp_setup_sdp)
+ wx->ptp_setup_sdp(wx);
+
+ return 0;
+}
+
+static int wx_ptp_gettimex64(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct wx *wx = container_of(ptp, struct wx, ptp_caps);
+ u64 ns, stamp;
+
+ stamp = wx_ptp_readtime(wx, sts);
+ ns = wx_ptp_timecounter_cyc2time(wx, stamp);
+ *ts = ns_to_timespec64(ns);
+
+ return 0;
+}
+
+static int wx_ptp_settime64(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct wx *wx = container_of(ptp, struct wx, ptp_caps);
+ unsigned long flags;
+ u64 ns;
+
+ ns = timespec64_to_ns(ts);
+ /* reset the timecounter */
+ write_seqlock_irqsave(&wx->hw_tc_lock, flags);
+ timecounter_init(&wx->hw_tc, &wx->hw_cc, ns);
+ write_sequnlock_irqrestore(&wx->hw_tc_lock, flags);
+
+ if (wx->ptp_setup_sdp)
+ wx->ptp_setup_sdp(wx);
+
+ return 0;
+}
+
+/**
+ * wx_ptp_clear_tx_timestamp - utility function to clear Tx timestamp state
+ * @wx: the private board structure
+ *
+ * This function should be called whenever the state related to a Tx timestamp
+ * needs to be cleared. This helps ensure that all related bits are reset for
+ * the next Tx timestamp event.
+ */
+static void wx_ptp_clear_tx_timestamp(struct wx *wx)
+{
+ rd32ptp(wx, WX_TSC_1588_STMPH);
+ if (wx->ptp_tx_skb) {
+ dev_kfree_skb_any(wx->ptp_tx_skb);
+ wx->ptp_tx_skb = NULL;
+ }
+ clear_bit_unlock(WX_STATE_PTP_TX_IN_PROGRESS, wx->state);
+}
+
+/**
+ * wx_ptp_convert_to_hwtstamp - convert register value to hw timestamp
+ * @wx: private board structure
+ * @hwtstamp: stack timestamp structure
+ * @timestamp: unsigned 64bit system time value
+ *
+ * We need to convert the adapter's RX/TXSTMP registers into a hwtstamp value
+ * which can be used by the stack's ptp functions.
+ *
+ * The lock is used to protect consistency of the cyclecounter and the SYSTIME
+ * registers. However, it does not need to protect against the Rx or Tx
+ * timestamp registers, as there can't be a new timestamp until the old one is
+ * unlatched by reading.
+ *
+ * In addition to the timestamp in hardware, some controllers need a software
+ * overflow cyclecounter, and this function takes this into account as well.
+ **/
+static void wx_ptp_convert_to_hwtstamp(struct wx *wx,
+ struct skb_shared_hwtstamps *hwtstamp,
+ u64 timestamp)
+{
+ u64 ns;
+
+ ns = wx_ptp_timecounter_cyc2time(wx, timestamp);
+ hwtstamp->hwtstamp = ns_to_ktime(ns);
+}
+
+/**
+ * wx_ptp_tx_hwtstamp - utility function which checks for TX time stamp
+ * @wx: the private board struct
+ *
+ * if the timestamp is valid, we convert it into the timecounter ns
+ * value, then store that result into the shhwtstamps structure which
+ * is passed up the network stack
+ */
+static void wx_ptp_tx_hwtstamp(struct wx *wx)
+{
+ struct skb_shared_hwtstamps shhwtstamps;
+ struct sk_buff *skb = wx->ptp_tx_skb;
+ u64 regval = 0;
+
+ regval |= (u64)rd32ptp(wx, WX_TSC_1588_STMPL);
+ regval |= (u64)rd32ptp(wx, WX_TSC_1588_STMPH) << 32;
+
+ wx_ptp_convert_to_hwtstamp(wx, &shhwtstamps, regval);
+
+ wx->ptp_tx_skb = NULL;
+ clear_bit_unlock(WX_STATE_PTP_TX_IN_PROGRESS, wx->state);
+ skb_tstamp_tx(skb, &shhwtstamps);
+ dev_kfree_skb_any(skb);
+ wx->tx_hwtstamp_pkts++;
+}
+
+static int wx_ptp_tx_hwtstamp_work(struct wx *wx)
+{
+ u32 tsynctxctl;
+
+ /* we have to have a valid skb to poll for a timestamp */
+ if (!wx->ptp_tx_skb) {
+ wx_ptp_clear_tx_timestamp(wx);
+ return 0;
+ }
+
+ /* stop polling once we have a valid timestamp */
+ tsynctxctl = rd32ptp(wx, WX_TSC_1588_CTL);
+ if (tsynctxctl & WX_TSC_1588_CTL_VALID) {
+ wx_ptp_tx_hwtstamp(wx);
+ return 0;
+ }
+
+ return -1;
+}
+
+/**
+ * wx_ptp_overflow_check - watchdog task to detect SYSTIME overflow
+ * @wx: pointer to wx struct
+ *
+ * This watchdog task periodically reads the timecounter so that a wrap
+ * of the system time registers is never missed. It needs to run
+ * approximately twice a minute for the fastest-overflowing hardware; we
+ * run it for all hardware since it shouldn't have a large impact.
+ */
+static void wx_ptp_overflow_check(struct wx *wx)
+{
+ bool timeout = time_is_before_jiffies(wx->last_overflow_check +
+ WX_OVERFLOW_PERIOD);
+ unsigned long flags;
+
+ if (timeout) {
+ /* Update the timecounter */
+ write_seqlock_irqsave(&wx->hw_tc_lock, flags);
+ timecounter_read(&wx->hw_tc);
+ write_sequnlock_irqrestore(&wx->hw_tc_lock, flags);
+
+ wx->last_overflow_check = jiffies;
+ }
+}
+
+/**
+ * wx_ptp_rx_hang - detect error case when Rx timestamp registers latched
+ * @wx: pointer to wx struct
+ *
+ * This watchdog task is scheduled to detect the error case where hardware
+ * has dropped a timestamped Rx packet because the ring was full. The
+ * error is rare but leaves the device unable to timestamp any future
+ * packets.
+ */
+static void wx_ptp_rx_hang(struct wx *wx)
+{
+ struct wx_ring *rx_ring;
+ unsigned long rx_event;
+ u32 tsyncrxctl;
+ int n;
+
+ tsyncrxctl = rd32(wx, WX_PSR_1588_CTL);
+
+ /* if we don't have a valid timestamp in the registers, just update the
+ * timeout counter and exit
+ */
+ if (!(tsyncrxctl & WX_PSR_1588_CTL_VALID)) {
+ wx->last_rx_ptp_check = jiffies;
+ return;
+ }
+
+ /* determine the most recent watchdog or rx_timestamp event */
+ rx_event = wx->last_rx_ptp_check;
+ for (n = 0; n < wx->num_rx_queues; n++) {
+ rx_ring = wx->rx_ring[n];
+ if (time_after(rx_ring->last_rx_timestamp, rx_event))
+ rx_event = rx_ring->last_rx_timestamp;
+ }
+
+ /* only need to read the high RXSTMP register to clear the lock */
+ if (time_is_before_jiffies(rx_event + 5 * HZ)) {
+ rd32(wx, WX_PSR_1588_STMPH);
+ wx->last_rx_ptp_check = jiffies;
+
+ wx->rx_hwtstamp_cleared++;
+		dev_warn(&wx->pdev->dev, "clearing RX Timestamp hang\n");
+ }
+}
+
+/**
+ * wx_ptp_tx_hang - detect error case where Tx timestamp never finishes
+ * @wx: private network wx structure
+ */
+static void wx_ptp_tx_hang(struct wx *wx)
+{
+ bool timeout = time_is_before_jiffies(wx->ptp_tx_start +
+ WX_PTP_TX_TIMEOUT);
+
+ if (!wx->ptp_tx_skb)
+ return;
+
+ if (!test_bit(WX_STATE_PTP_TX_IN_PROGRESS, wx->state))
+ return;
+
+ /* If we haven't received a timestamp within the timeout, it is
+ * reasonable to assume that it will never occur, so we can unlock the
+ * timestamp bit when this occurs.
+ */
+ if (timeout) {
+ wx_ptp_clear_tx_timestamp(wx);
+ wx->tx_hwtstamp_timeouts++;
+ dev_warn(&wx->pdev->dev, "clearing Tx timestamp hang\n");
+ }
+}
+
+static long wx_ptp_do_aux_work(struct ptp_clock_info *ptp)
+{
+ struct wx *wx = container_of(ptp, struct wx, ptp_caps);
+ int ts_done;
+
+ ts_done = wx_ptp_tx_hwtstamp_work(wx);
+
+ wx_ptp_overflow_check(wx);
+ if (unlikely(test_bit(WX_FLAG_RX_HWTSTAMP_IN_REGISTER,
+ wx->flags)))
+ wx_ptp_rx_hang(wx);
+ wx_ptp_tx_hang(wx);
+
+ return ts_done ? 1 : HZ;
+}
+
+static u64 wx_ptp_trigger_calc(struct wx *wx)
+{
+ struct cyclecounter *cc = &wx->hw_cc;
+ unsigned long flags;
+ u64 ns = 0;
+ u32 rem;
+
+ /* Read the current clock time, and save the cycle counter value */
+ write_seqlock_irqsave(&wx->hw_tc_lock, flags);
+ ns = timecounter_read(&wx->hw_tc);
+ wx->pps_edge_start = wx->hw_tc.cycle_last;
+ write_sequnlock_irqrestore(&wx->hw_tc_lock, flags);
+ wx->pps_edge_end = wx->pps_edge_start;
+
+ /* Figure out how far past the next second we are */
+ div_u64_rem(ns, WX_NS_PER_SEC, &rem);
+
+ /* Figure out how many nanoseconds to add to round the clock edge up
+ * to the next full second
+ */
+	rem = WX_NS_PER_SEC - rem;
+
+ /* Adjust the clock edge to align with the next full second. */
+ wx->pps_edge_start += div_u64(((u64)rem << cc->shift), cc->mult);
+ wx->pps_edge_end += div_u64(((u64)(rem + wx->pps_width) <<
+ cc->shift), cc->mult);
+
+ return (ns + rem);
+}
+
+static int wx_ptp_setup_sdp(struct wx *wx)
+{
+ struct cyclecounter *cc = &wx->hw_cc;
+ u32 tsauxc;
+ u64 nsec;
+
+ if (wx->pps_width >= WX_NS_PER_SEC) {
+ wx_err(wx, "PTP pps width cannot be longer than 1s!\n");
+ return -EINVAL;
+ }
+
+ /* disable the pin first */
+ wr32ptp(wx, WX_TSC_1588_AUX_CTL, 0);
+ WX_WRITE_FLUSH(wx);
+
+ if (!test_bit(WX_FLAG_PTP_PPS_ENABLED, wx->flags)) {
+ if (wx->pps_enabled) {
+ wx->pps_enabled = false;
+ wx_set_pps(wx, false, 0, 0);
+ }
+ return 0;
+ }
+
+ wx->pps_enabled = true;
+ nsec = wx_ptp_trigger_calc(wx);
+ wx_set_pps(wx, wx->pps_enabled, nsec, wx->pps_edge_start);
+
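+	/* Target times 0/1 hold the start and end of the next pulse: SDP0 is
+	 * selected as the TT0 pulse output, SDP1 as timestamp input TS0.
+	 */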
+ tsauxc = WX_TSC_1588_AUX_CTL_PLSG | WX_TSC_1588_AUX_CTL_EN_TT0 |
+ WX_TSC_1588_AUX_CTL_EN_TT1 | WX_TSC_1588_AUX_CTL_EN_TS0;
+ wr32ptp(wx, WX_TSC_1588_TRGT_L(0), (u32)wx->pps_edge_start);
+ wr32ptp(wx, WX_TSC_1588_TRGT_H(0), (u32)(wx->pps_edge_start >> 32));
+ wr32ptp(wx, WX_TSC_1588_TRGT_L(1), (u32)wx->pps_edge_end);
+ wr32ptp(wx, WX_TSC_1588_TRGT_H(1), (u32)(wx->pps_edge_end >> 32));
+ wr32ptp(wx, WX_TSC_1588_SDP(0),
+ WX_TSC_1588_SDP_FUN_SEL_TT0 | WX_TSC_1588_SDP_OUT_LEVEL_H);
+ wr32ptp(wx, WX_TSC_1588_SDP(1), WX_TSC_1588_SDP_FUN_SEL_TS0);
+ wr32ptp(wx, WX_TSC_1588_AUX_CTL, tsauxc);
+ wr32ptp(wx, WX_TSC_1588_INT_EN, WX_TSC_1588_INT_EN_TT1);
+ WX_WRITE_FLUSH(wx);
+
+ /* Adjust the clock edge to align with the next full second. */
+ wx->sec_to_cc = div_u64(((u64)WX_NS_PER_SEC << cc->shift), cc->mult);
+
+ return 0;
+}
+
+static int wx_ptp_feature_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct wx *wx = container_of(ptp, struct wx, ptp_caps);
+
+	/* When PPS is enabled, unmask the interrupt for the ClockOut
+	 * feature so that the interrupt handler can send the PPS event
+	 * when the clock SDP triggers; mask it again when PPS is
+	 * disabled.
+	 */
+ if (rq->type != PTP_CLK_REQ_PEROUT || !wx->ptp_setup_sdp)
+ return -EOPNOTSUPP;
+
+ /* Reject requests with unsupported flags */
+ if (rq->perout.flags & ~(PTP_PEROUT_DUTY_CYCLE |
+ PTP_PEROUT_PHASE))
+ return -EOPNOTSUPP;
+
+ if (rq->perout.phase.sec || rq->perout.phase.nsec) {
+ wx_err(wx, "Absolute start time not supported.\n");
+ return -EINVAL;
+ }
+
+ if (rq->perout.period.sec != 1 || rq->perout.period.nsec) {
+ wx_err(wx, "Only 1pps is supported.\n");
+ return -EINVAL;
+ }
+
+ if (rq->perout.flags & PTP_PEROUT_DUTY_CYCLE) {
+ struct timespec64 ts_on;
+
+ ts_on.tv_sec = rq->perout.on.sec;
+ ts_on.tv_nsec = rq->perout.on.nsec;
+ wx->pps_width = timespec64_to_ns(&ts_on);
+ } else {
+ wx->pps_width = 120000000;
+ }
+
+ if (on)
+ set_bit(WX_FLAG_PTP_PPS_ENABLED, wx->flags);
+ else
+ clear_bit(WX_FLAG_PTP_PPS_ENABLED, wx->flags);
+
+ return wx->ptp_setup_sdp(wx);
+}
+
+void wx_ptp_check_pps_event(struct wx *wx)
+{
+ u32 tsauxc, int_status;
+
+	/* This check is necessary in case the interrupt was enabled via some
+	 * alternative means (e.g. debugfs). Better to check here than in
+	 * every caller of this function.
+ */
+ if (!wx->ptp_clock)
+ return;
+
+ int_status = rd32ptp(wx, WX_TSC_1588_INT_ST);
+ if (int_status & WX_TSC_1588_INT_ST_TT1) {
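+		/* Target time 1 fired: the current pulse window has ended, so
+		 * recompute the next second-aligned edges and re-arm both
+		 * target-time comparators.
+		 */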
+ /* disable the pin first */
+ wr32ptp(wx, WX_TSC_1588_AUX_CTL, 0);
+ WX_WRITE_FLUSH(wx);
+
+ wx_ptp_trigger_calc(wx);
+
+ tsauxc = WX_TSC_1588_AUX_CTL_PLSG | WX_TSC_1588_AUX_CTL_EN_TT0 |
+ WX_TSC_1588_AUX_CTL_EN_TT1 | WX_TSC_1588_AUX_CTL_EN_TS0;
+ wr32ptp(wx, WX_TSC_1588_TRGT_L(0), (u32)wx->pps_edge_start);
+ wr32ptp(wx, WX_TSC_1588_TRGT_H(0), (u32)(wx->pps_edge_start >> 32));
+ wr32ptp(wx, WX_TSC_1588_TRGT_L(1), (u32)wx->pps_edge_end);
+ wr32ptp(wx, WX_TSC_1588_TRGT_H(1), (u32)(wx->pps_edge_end >> 32));
+ wr32ptp(wx, WX_TSC_1588_AUX_CTL, tsauxc);
+ WX_WRITE_FLUSH(wx);
+ }
+}
+EXPORT_SYMBOL(wx_ptp_check_pps_event);
+
+static long wx_ptp_create_clock(struct wx *wx)
+{
+ struct net_device *netdev = wx->netdev;
+ long err;
+
+ /* do nothing if we already have a clock device */
+ if (!IS_ERR_OR_NULL(wx->ptp_clock))
+ return 0;
+
+ snprintf(wx->ptp_caps.name, sizeof(wx->ptp_caps.name),
+ "%s", netdev->name);
+ wx->ptp_caps.owner = THIS_MODULE;
+ wx->ptp_caps.n_alarm = 0;
+ wx->ptp_caps.n_ext_ts = 0;
+ wx->ptp_caps.pps = 0;
+ wx->ptp_caps.adjfine = wx_ptp_adjfine;
+ wx->ptp_caps.adjtime = wx_ptp_adjtime;
+ wx->ptp_caps.gettimex64 = wx_ptp_gettimex64;
+ wx->ptp_caps.settime64 = wx_ptp_settime64;
+ wx->ptp_caps.do_aux_work = wx_ptp_do_aux_work;
+ switch (wx->mac.type) {
+ case wx_mac_aml:
+ case wx_mac_aml40:
+ wx->ptp_caps.max_adj = 250000000;
+ wx->ptp_caps.n_per_out = 1;
+ wx->ptp_setup_sdp = wx_ptp_setup_sdp;
+ wx->ptp_caps.enable = wx_ptp_feature_enable;
+ break;
+ case wx_mac_sp:
+ wx->ptp_caps.max_adj = 250000000;
+ wx->ptp_caps.n_per_out = 0;
+ wx->ptp_setup_sdp = NULL;
+ break;
+ case wx_mac_em:
+ wx->ptp_caps.max_adj = 500000000;
+ wx->ptp_caps.n_per_out = 1;
+ wx->ptp_setup_sdp = wx_ptp_setup_sdp;
+ wx->ptp_caps.enable = wx_ptp_feature_enable;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ wx->ptp_clock = ptp_clock_register(&wx->ptp_caps, &wx->pdev->dev);
+ if (IS_ERR(wx->ptp_clock)) {
+ err = PTR_ERR(wx->ptp_clock);
+ wx->ptp_clock = NULL;
+ wx_err(wx, "ptp clock register failed\n");
+ return err;
+ } else if (wx->ptp_clock) {
+ dev_info(&wx->pdev->dev, "registered PHC device on %s\n",
+ netdev->name);
+ }
+
+ /* Set the default timestamp mode to disabled here. We do this in
+ * create_clock instead of initialization, because we don't want to
+ * override the previous settings during a suspend/resume cycle.
+ */
+ wx->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+ wx->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
+
+ return 0;
+}
+
+static int wx_ptp_set_timestamp_mode(struct wx *wx,
+ struct kernel_hwtstamp_config *config)
+{
+ u32 tsync_tx_ctl = WX_TSC_1588_CTL_ENABLED;
+ u32 tsync_rx_ctl = WX_PSR_1588_CTL_ENABLED;
+ DECLARE_BITMAP(flags, WX_PF_FLAGS_NBITS);
+ u32 tsync_rx_mtrl = PTP_EV_PORT << 16;
+ bool is_l2 = false;
+ u32 regval;
+
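+	/* Stage flag changes in a local bitmap so that a rejected request
+	 * (-ERANGE below) leaves the adapter flags untouched; they are
+	 * committed only after the registers have been programmed.
+	 */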
+ memcpy(flags, wx->flags, sizeof(wx->flags));
+
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_OFF:
+ tsync_tx_ctl = 0;
+ break;
+ case HWTSTAMP_TX_ON:
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ tsync_rx_ctl = 0;
+ tsync_rx_mtrl = 0;
+ clear_bit(WX_FLAG_RX_HWTSTAMP_ENABLED, flags);
+ clear_bit(WX_FLAG_RX_HWTSTAMP_IN_REGISTER, flags);
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ tsync_rx_ctl |= WX_PSR_1588_CTL_TYPE_L4_V1;
+ tsync_rx_mtrl |= WX_PSR_1588_MSG_V1_SYNC;
+ set_bit(WX_FLAG_RX_HWTSTAMP_ENABLED, flags);
+ set_bit(WX_FLAG_RX_HWTSTAMP_IN_REGISTER, flags);
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ tsync_rx_ctl |= WX_PSR_1588_CTL_TYPE_L4_V1;
+ tsync_rx_mtrl |= WX_PSR_1588_MSG_V1_DELAY_REQ;
+ set_bit(WX_FLAG_RX_HWTSTAMP_ENABLED, flags);
+ set_bit(WX_FLAG_RX_HWTSTAMP_IN_REGISTER, flags);
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ tsync_rx_ctl |= WX_PSR_1588_CTL_TYPE_EVENT_V2;
+ is_l2 = true;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ set_bit(WX_FLAG_RX_HWTSTAMP_ENABLED, flags);
+ set_bit(WX_FLAG_RX_HWTSTAMP_IN_REGISTER, flags);
+ break;
+ default:
+ /* register PSR_1588_MSG must be set in order to do V1 packets,
+ * therefore it is not possible to time stamp both V1 Sync and
+ * Delay_Req messages unless hardware supports timestamping all
+ * packets => return error
+ */
+ config->rx_filter = HWTSTAMP_FILTER_NONE;
+ return -ERANGE;
+ }
+
+ /* define ethertype filter for timestamping L2 packets */
+ if (is_l2)
+ wr32(wx, WX_PSR_ETYPE_SWC(WX_PSR_ETYPE_SWC_FILTER_1588),
+ (WX_PSR_ETYPE_SWC_FILTER_EN | /* enable filter */
+ WX_PSR_ETYPE_SWC_1588 | /* enable timestamping */
+ ETH_P_1588)); /* 1588 eth protocol type */
+ else
+ wr32(wx, WX_PSR_ETYPE_SWC(WX_PSR_ETYPE_SWC_FILTER_1588), 0);
+
+ /* enable/disable TX */
+ regval = rd32ptp(wx, WX_TSC_1588_CTL);
+ regval &= ~WX_TSC_1588_CTL_ENABLED;
+ regval |= tsync_tx_ctl;
+ wr32ptp(wx, WX_TSC_1588_CTL, regval);
+
+ /* enable/disable RX */
+ regval = rd32(wx, WX_PSR_1588_CTL);
+ regval &= ~(WX_PSR_1588_CTL_ENABLED | WX_PSR_1588_CTL_TYPE_MASK);
+ regval |= tsync_rx_ctl;
+ wr32(wx, WX_PSR_1588_CTL, regval);
+
+ /* define which PTP packets are time stamped */
+ wr32(wx, WX_PSR_1588_MSG, tsync_rx_mtrl);
+
+ WX_WRITE_FLUSH(wx);
+
+ /* configure adapter flags only when HW is actually configured */
+ memcpy(wx->flags, flags, sizeof(wx->flags));
+
+ /* clear TX/RX timestamp state, just to be sure */
+ wx_ptp_clear_tx_timestamp(wx);
+ rd32(wx, WX_PSR_1588_STMPH);
+
+ return 0;
+}
+
+static u64 wx_ptp_read(const struct cyclecounter *hw_cc)
+{
+ struct wx *wx = container_of(hw_cc, struct wx, hw_cc);
+
+ return wx_ptp_readtime(wx, NULL);
+}
+
+static void wx_ptp_link_speed_adjust(struct wx *wx, u32 *shift, u32 *incval)
+{
+ switch (wx->mac.type) {
+ case wx_mac_aml:
+ case wx_mac_aml40:
+ *shift = WX_INCVAL_SHIFT_AML;
+ *incval = WX_INCVAL_AML;
+ return;
+ case wx_mac_em:
+ *shift = WX_INCVAL_SHIFT_EM;
+ *incval = WX_INCVAL_EM;
+ return;
+ default:
+ break;
+ }
+
+ switch (wx->speed) {
+ case SPEED_10:
+ *shift = WX_INCVAL_SHIFT_10;
+ *incval = WX_INCVAL_10;
+ break;
+ case SPEED_100:
+ *shift = WX_INCVAL_SHIFT_100;
+ *incval = WX_INCVAL_100;
+ break;
+ case SPEED_1000:
+ *shift = WX_INCVAL_SHIFT_1GB;
+ *incval = WX_INCVAL_1GB;
+ break;
+ case SPEED_10000:
+ default:
+ *shift = WX_INCVAL_SHIFT_10GB;
+ *incval = WX_INCVAL_10GB;
+ break;
+ }
+}
+
+/**
+ * wx_ptp_reset_cyclecounter - create the cycle counter from hw
+ * @wx: pointer to the wx structure
+ *
+ * This function should be called to set the proper values for the TSC_1588_INC
+ * register and tell the cyclecounter structure what the tick rate of SYSTIME
+ * is. It does not directly modify SYSTIME registers or the timecounter
+ * structure. It should be called whenever a new TSC_1588_INC value is
+ * necessary, such as during initialization or when the link speed changes.
+ */
+void wx_ptp_reset_cyclecounter(struct wx *wx)
+{
+ u32 incval = 0, mask = 0;
+ struct cyclecounter cc;
+ unsigned long flags;
+
+ /* For some of the boards below this mask is technically incorrect.
+ * The timestamp mask overflows at approximately 61bits. However the
+ * particular hardware does not overflow on an even bitmask value.
+ * Instead, it overflows when the upper 32 bits are converted into
+ * billions of cycles. Timecounters are not really intended for this
+ * purpose, so they do not function properly if the overflow point
+ * isn't 2^N-1.
+ * However, the actual SYSTIME values in question take ~138 years to
+ * overflow. In practice this means they won't actually overflow. A
+ * proper fix to this problem would require modification of the
+ * timecounter delta calculations.
+ */
+ cc.mask = CLOCKSOURCE_MASK(64);
+ cc.mult = 1;
+ cc.shift = 0;
+
+ cc.read = wx_ptp_read;
+ wx_ptp_link_speed_adjust(wx, &cc.shift, &incval);
+
+ /* update the base incval used to calculate frequency adjustment */
+ WRITE_ONCE(wx->base_incval, incval);
+
+ mask = (wx->mac.type == wx_mac_em) ? 0x7FFFFFF : 0xFFFFFF;
+ incval &= mask;
+ if (wx->mac.type != wx_mac_em)
+ incval |= 2 << 24;
+ wr32ptp(wx, WX_TSC_1588_INC, incval);
+
+ smp_mb(); /* Force the above update. */
+
+ /* need lock to prevent incorrect read while modifying cyclecounter */
+ write_seqlock_irqsave(&wx->hw_tc_lock, flags);
+ memcpy(&wx->hw_cc, &cc, sizeof(wx->hw_cc));
+ write_sequnlock_irqrestore(&wx->hw_tc_lock, flags);
+}
+EXPORT_SYMBOL(wx_ptp_reset_cyclecounter);
+
+void wx_ptp_reset(struct wx *wx)
+{
+ unsigned long flags;
+
+ /* reset the hardware timestamping mode */
+ wx_ptp_set_timestamp_mode(wx, &wx->tstamp_config);
+ wx_ptp_reset_cyclecounter(wx);
+
+ wr32ptp(wx, WX_TSC_1588_SYSTIML, 0);
+ wr32ptp(wx, WX_TSC_1588_SYSTIMH, 0);
+ WX_WRITE_FLUSH(wx);
+
+ write_seqlock_irqsave(&wx->hw_tc_lock, flags);
+ timecounter_init(&wx->hw_tc, &wx->hw_cc,
+ ktime_to_ns(ktime_get_real()));
+ write_sequnlock_irqrestore(&wx->hw_tc_lock, flags);
+
+ wx->last_overflow_check = jiffies;
+ ptp_schedule_worker(wx->ptp_clock, HZ);
+
+ /* Now that the shift has been calculated and the systime
+ * registers reset, (re-)enable the Clock out feature
+ */
+ if (wx->ptp_setup_sdp)
+ wx->ptp_setup_sdp(wx);
+}
+EXPORT_SYMBOL(wx_ptp_reset);
+
+void wx_ptp_init(struct wx *wx)
+{
+ /* Initialize the seqlock_t first, since the user might call the clock
+ * functions any time after we've initialized the ptp clock device.
+ */
+ seqlock_init(&wx->hw_tc_lock);
+
+ /* obtain a ptp clock device, or re-use an existing device */
+ if (wx_ptp_create_clock(wx))
+ return;
+
+ wx->tx_hwtstamp_pkts = 0;
+ wx->tx_hwtstamp_timeouts = 0;
+ wx->tx_hwtstamp_skipped = 0;
+ wx->tx_hwtstamp_errors = 0;
+ wx->rx_hwtstamp_cleared = 0;
+ /* reset the ptp related hardware bits */
+ wx_ptp_reset(wx);
+
+ /* enter the WX_STATE_PTP_RUNNING state */
+ set_bit(WX_STATE_PTP_RUNNING, wx->state);
+}
+EXPORT_SYMBOL(wx_ptp_init);
+
+/**
+ * wx_ptp_suspend - stop ptp work items
+ * @wx: pointer to wx struct
+ *
+ * This function suspends ptp activity, and prevents more work from being
+ * generated, but does not destroy the clock device.
+ */
+void wx_ptp_suspend(struct wx *wx)
+{
+ /* leave the WX_STATE_PTP_RUNNING STATE */
+ if (!test_and_clear_bit(WX_STATE_PTP_RUNNING, wx->state))
+ return;
+
+ clear_bit(WX_FLAG_PTP_PPS_ENABLED, wx->flags);
+ if (wx->ptp_setup_sdp)
+ wx->ptp_setup_sdp(wx);
+
+ wx_ptp_clear_tx_timestamp(wx);
+}
+EXPORT_SYMBOL(wx_ptp_suspend);
+
+/**
+ * wx_ptp_stop - destroy the ptp_clock device
+ * @wx: pointer to wx struct
+ *
+ * Completely destroy the ptp_clock device, and disable all PTP related
+ * features. Intended to be run when the device is being closed.
+ */
+void wx_ptp_stop(struct wx *wx)
+{
+ /* first, suspend ptp activity */
+ wx_ptp_suspend(wx);
+
+ /* now destroy the ptp clock device */
+ if (wx->ptp_clock) {
+ ptp_clock_unregister(wx->ptp_clock);
+ wx->ptp_clock = NULL;
+ dev_info(&wx->pdev->dev, "removed PHC on %s\n", wx->netdev->name);
+ }
+}
+EXPORT_SYMBOL(wx_ptp_stop);
+
+/**
+ * wx_ptp_rx_hwtstamp - utility function which checks for RX time stamp
+ * @wx: pointer to wx struct
+ * @skb: particular skb to send timestamp with
+ *
+ * if the timestamp is valid, we convert it into the timecounter ns
+ * value, then store that result into the shhwtstamps structure which
+ * is passed up the network stack
+ */
+void wx_ptp_rx_hwtstamp(struct wx *wx, struct sk_buff *skb)
+{
+ u64 regval = 0;
+ u32 tsyncrxctl;
+
+	/* Check the valid bit in the tsyncrxctl register first, so that the
+	 * timestamp registers are only read when a timestamp has actually
+	 * been latched; this avoids taking an I/O hit on every packet.
+	 */
+ tsyncrxctl = rd32(wx, WX_PSR_1588_CTL);
+ if (!(tsyncrxctl & WX_PSR_1588_CTL_VALID))
+ return;
+
+ regval |= (u64)rd32(wx, WX_PSR_1588_STMPL);
+ regval |= (u64)rd32(wx, WX_PSR_1588_STMPH) << 32;
+
+ wx_ptp_convert_to_hwtstamp(wx, skb_hwtstamps(skb), regval);
+}
+
+int wx_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg)
+{
+ struct wx *wx = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ *cfg = wx->tstamp_config;
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_hwtstamp_get);
+
+int wx_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
+{
+ struct wx *wx = netdev_priv(dev);
+ int err;
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ err = wx_ptp_set_timestamp_mode(wx, cfg);
+ if (err)
+ return err;
+
+ /* save these settings for future reference */
+ memcpy(&wx->tstamp_config, cfg, sizeof(wx->tstamp_config));
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_hwtstamp_set);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ptp.h b/drivers/net/ethernet/wangxun/libwx/wx_ptp.h
new file mode 100644
index 000000000000..50db90a6e3ee
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/libwx/wx_ptp.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019 - 2025 Beijing WangXun Technology Co., Ltd. */
+
+#ifndef _WX_PTP_H_
+#define _WX_PTP_H_
+
+void wx_ptp_check_pps_event(struct wx *wx);
+void wx_ptp_reset_cyclecounter(struct wx *wx);
+void wx_ptp_reset(struct wx *wx);
+void wx_ptp_init(struct wx *wx);
+void wx_ptp_suspend(struct wx *wx);
+void wx_ptp_stop(struct wx *wx);
+void wx_ptp_rx_hwtstamp(struct wx *wx, struct sk_buff *skb);
+int wx_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg);
+int wx_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack);
+
+#endif /* _WX_PTP_H_ */
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_sriov.c b/drivers/net/ethernet/wangxun/libwx/wx_sriov.c
new file mode 100644
index 000000000000..c82ae137756c
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/libwx/wx_sriov.c
@@ -0,0 +1,913 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */
+
+#include <linux/etherdevice.h>
+#include <linux/pci.h>
+
+#include "wx_type.h"
+#include "wx_hw.h"
+#include "wx_mbx.h"
+#include "wx_sriov.h"
+
+static void wx_vf_configuration(struct pci_dev *pdev, int event_mask)
+{
+ bool enable = !!WX_VF_ENABLE_CHECK(event_mask);
+ struct wx *wx = pci_get_drvdata(pdev);
+ u32 vfn = WX_VF_NUM_GET(event_mask);
+
+ if (enable)
+ eth_zero_addr(wx->vfinfo[vfn].vf_mac_addr);
+}
+
+static int wx_alloc_vf_macvlans(struct wx *wx, u8 num_vfs)
+{
+ struct vf_macvlans *mv_list;
+ int num_vf_macvlans, i;
+
+ /* Initialize list of VF macvlans */
+ INIT_LIST_HEAD(&wx->vf_mvs.mvlist);
+
+ num_vf_macvlans = wx->mac.num_rar_entries -
+ (WX_MAX_PF_MACVLANS + 1 + num_vfs);
+ if (!num_vf_macvlans)
+ return -EINVAL;
+
+ mv_list = kcalloc(num_vf_macvlans, sizeof(struct vf_macvlans),
+ GFP_KERNEL);
+ if (!mv_list)
+ return -ENOMEM;
+
+ for (i = 0; i < num_vf_macvlans; i++) {
+ mv_list[i].vf = -1;
+ mv_list[i].free = true;
+ list_add(&mv_list[i].mvlist, &wx->vf_mvs.mvlist);
+ }
+ wx->mv_list = mv_list;
+
+ return 0;
+}
+
+static void wx_sriov_clear_data(struct wx *wx)
+{
+ /* set num VFs to 0 to prevent access to vfinfo */
+ wx->num_vfs = 0;
+
+ /* free VF control structures */
+ kfree(wx->vfinfo);
+ wx->vfinfo = NULL;
+
+ /* free macvlan list */
+ kfree(wx->mv_list);
+ wx->mv_list = NULL;
+
+ /* set default pool back to 0 */
+ wr32m(wx, WX_PSR_VM_CTL, WX_PSR_VM_CTL_POOL_MASK, 0);
+ wx->ring_feature[RING_F_VMDQ].offset = 0;
+
+ clear_bit(WX_FLAG_IRQ_VECTOR_SHARED, wx->flags);
+ clear_bit(WX_FLAG_SRIOV_ENABLED, wx->flags);
+ /* Disable VMDq flag so device will be set in NM mode */
+ if (wx->ring_feature[RING_F_VMDQ].limit == 1)
+ clear_bit(WX_FLAG_VMDQ_ENABLED, wx->flags);
+}
+
+static int __wx_enable_sriov(struct wx *wx, u8 num_vfs)
+{
+ int i, ret = 0;
+ u32 value = 0;
+
+ set_bit(WX_FLAG_SRIOV_ENABLED, wx->flags);
+ dev_info(&wx->pdev->dev, "SR-IOV enabled with %d VFs\n", num_vfs);
+
+ if (num_vfs == 7 && wx->mac.type == wx_mac_em)
+ set_bit(WX_FLAG_IRQ_VECTOR_SHARED, wx->flags);
+
+ /* Enable VMDq flag so device will be set in VM mode */
+ set_bit(WX_FLAG_VMDQ_ENABLED, wx->flags);
+ if (!wx->ring_feature[RING_F_VMDQ].limit)
+ wx->ring_feature[RING_F_VMDQ].limit = 1;
+ wx->ring_feature[RING_F_VMDQ].offset = num_vfs;
+
+ wx->vfinfo = kcalloc(num_vfs, sizeof(struct vf_data_storage),
+ GFP_KERNEL);
+ if (!wx->vfinfo)
+ return -ENOMEM;
+
+ ret = wx_alloc_vf_macvlans(wx, num_vfs);
+ if (ret)
+ return ret;
+
+ /* Initialize default switching mode VEB */
+ wr32m(wx, WX_PSR_CTL, WX_PSR_CTL_SW_EN, WX_PSR_CTL_SW_EN);
+
+ for (i = 0; i < num_vfs; i++) {
+ /* enable spoof checking for all VFs */
+ wx->vfinfo[i].spoofchk_enabled = true;
+ wx->vfinfo[i].link_enable = true;
+ /* untrust all VFs */
+ wx->vfinfo[i].trusted = false;
+ /* set the default xcast mode */
+ wx->vfinfo[i].xcast_mode = WXVF_XCAST_MODE_NONE;
+ }
+
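+	/* Program CFG_PORT_CTL.NUM_VT: parts without 64-function support use
+	 * 8 virtualization pools, otherwise 32 or 64 pools are selected based
+	 * on the requested VF count.
+	 */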
+ if (!test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
+ value = WX_CFG_PORT_CTL_NUM_VT_8;
+ } else {
+ if (num_vfs < 32)
+ value = WX_CFG_PORT_CTL_NUM_VT_32;
+ else
+ value = WX_CFG_PORT_CTL_NUM_VT_64;
+ }
+ wr32m(wx, WX_CFG_PORT_CTL,
+ WX_CFG_PORT_CTL_NUM_VT_MASK,
+ value);
+
+ return ret;
+}
+
+static void wx_sriov_reinit(struct wx *wx)
+{
+ rtnl_lock();
+ wx->setup_tc(wx->netdev, netdev_get_num_tc(wx->netdev));
+ rtnl_unlock();
+}
+
+void wx_disable_sriov(struct wx *wx)
+{
+ if (!pci_vfs_assigned(wx->pdev))
+ pci_disable_sriov(wx->pdev);
+ else
+ wx_err(wx, "Unloading driver while VFs are assigned.\n");
+
+	/* clear flags and free allocated data */
+ wx_sriov_clear_data(wx);
+}
+EXPORT_SYMBOL(wx_disable_sriov);
+
+static int wx_pci_sriov_enable(struct pci_dev *dev,
+ int num_vfs)
+{
+ struct wx *wx = pci_get_drvdata(dev);
+ int err = 0, i;
+
+ err = __wx_enable_sriov(wx, num_vfs);
+ if (err)
+ return err;
+
+ wx->num_vfs = num_vfs;
+ for (i = 0; i < wx->num_vfs; i++)
+ wx_vf_configuration(dev, (i | WX_VF_ENABLE));
+
+ /* reset before enabling SRIOV to avoid mailbox issues */
+ wx_sriov_reinit(wx);
+
+ err = pci_enable_sriov(dev, num_vfs);
+ if (err) {
+ wx_err(wx, "Failed to enable PCI sriov: %d\n", err);
+ goto err_out;
+ }
+
+ return num_vfs;
+err_out:
+ wx_sriov_clear_data(wx);
+ return err;
+}
+
+static void wx_pci_sriov_disable(struct pci_dev *dev)
+{
+ struct wx *wx = pci_get_drvdata(dev);
+
+ wx_disable_sriov(wx);
+ wx_sriov_reinit(wx);
+}
+
+int wx_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+ struct wx *wx = pci_get_drvdata(pdev);
+ int err;
+
+ if (!num_vfs) {
+ if (!pci_vfs_assigned(pdev)) {
+ wx_pci_sriov_disable(pdev);
+ return 0;
+ }
+
+ wx_err(wx, "can't free VFs because some are assigned to VMs.\n");
+ return -EBUSY;
+ }
+
+ err = wx_pci_sriov_enable(pdev, num_vfs);
+ if (err)
+ return err;
+
+ return num_vfs;
+}
+EXPORT_SYMBOL(wx_pci_sriov_configure);
+
+static int wx_set_vf_mac(struct wx *wx, u16 vf, const u8 *mac_addr)
+{
+ u8 hw_addr[ETH_ALEN];
+ int ret = 0;
+
+ ether_addr_copy(hw_addr, mac_addr);
+ wx_del_mac_filter(wx, wx->vfinfo[vf].vf_mac_addr, vf);
+ ret = wx_add_mac_filter(wx, hw_addr, vf);
+ if (ret >= 0)
+ ether_addr_copy(wx->vfinfo[vf].vf_mac_addr, mac_addr);
+ else
+ eth_zero_addr(wx->vfinfo[vf].vf_mac_addr);
+
+ return ret;
+}
+
+static void wx_set_vmolr(struct wx *wx, u16 vf, bool aupe)
+{
+ u32 vmolr = rd32(wx, WX_PSR_VM_L2CTL(vf));
+
+ vmolr |= WX_PSR_VM_L2CTL_BAM;
+ if (aupe)
+ vmolr |= WX_PSR_VM_L2CTL_AUPE;
+ else
+ vmolr &= ~WX_PSR_VM_L2CTL_AUPE;
+ wr32(wx, WX_PSR_VM_L2CTL(vf), vmolr);
+}
+
+static void wx_set_vmvir(struct wx *wx, u16 vid, u16 qos, u16 vf)
+{
+ u32 vmvir = vid | (qos << VLAN_PRIO_SHIFT) |
+ WX_TDM_VLAN_INS_VLANA_DEFAULT;
+
+ wr32(wx, WX_TDM_VLAN_INS(vf), vmvir);
+}
+
+static int wx_set_vf_vlan(struct wx *wx, int add, int vid, u16 vf)
+{
+ if (!vid && !add)
+ return 0;
+
+ return wx_set_vfta(wx, vid, vf, (bool)add);
+}
+
+static void wx_set_vlan_anti_spoofing(struct wx *wx, bool enable, int vf)
+{
+ u32 index = WX_VF_REG_OFFSET(vf), vf_bit = WX_VF_IND_SHIFT(vf);
+ u32 pfvfspoof;
+
+ pfvfspoof = rd32(wx, WX_TDM_VLAN_AS(index));
+ if (enable)
+ pfvfspoof |= BIT(vf_bit);
+ else
+ pfvfspoof &= ~BIT(vf_bit);
+ wr32(wx, WX_TDM_VLAN_AS(index), pfvfspoof);
+}
+
+static void wx_write_qde(struct wx *wx, u32 vf, u32 qde)
+{
+ struct wx_ring_feature *vmdq = &wx->ring_feature[RING_F_VMDQ];
+ u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
+ u32 reg = 0, n = vf * q_per_pool / 32;
+ u32 i = vf * q_per_pool;
+
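+	/* n indexes the 32-bit WX_RDM_PF_QDE register covering this VF's
+	 * queues; the loop walks the VF's queue bits within that register,
+	 * setting or clearing drop enable per queue.
+	 */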
+ reg = rd32(wx, WX_RDM_PF_QDE(n));
+ for (i = (vf * q_per_pool - n * 32);
+ i < ((vf + 1) * q_per_pool - n * 32);
+ i++) {
+ if (qde == 1)
+ reg |= qde << i;
+ else
+ reg &= qde << i;
+ }
+
+ wr32(wx, WX_RDM_PF_QDE(n), reg);
+}
+
+static void wx_clear_vmvir(struct wx *wx, u32 vf)
+{
+ wr32(wx, WX_TDM_VLAN_INS(vf), 0);
+}
+
+static void wx_ping_vf(struct wx *wx, int vf)
+{
+ u32 ping = WX_PF_CONTROL_MSG;
+
+ if (wx->vfinfo[vf].clear_to_send)
+ ping |= WX_VT_MSGTYPE_CTS;
+ wx_write_mbx_pf(wx, &ping, 1, vf);
+}
+
+static void wx_set_vf_rx_tx(struct wx *wx, int vf)
+{
+ u32 index = WX_VF_REG_OFFSET(vf), vf_bit = WX_VF_IND_SHIFT(vf);
+ u32 reg_cur_tx, reg_cur_rx, reg_req_tx, reg_req_rx;
+
+ reg_cur_tx = rd32(wx, WX_TDM_VF_TE(index));
+ reg_cur_rx = rd32(wx, WX_RDM_VF_RE(index));
+
+ if (wx->vfinfo[vf].link_enable) {
+ reg_req_tx = reg_cur_tx | BIT(vf_bit);
+ reg_req_rx = reg_cur_rx | BIT(vf_bit);
+ /* Enable particular VF */
+ if (reg_cur_tx != reg_req_tx)
+ wr32(wx, WX_TDM_VF_TE(index), reg_req_tx);
+ if (reg_cur_rx != reg_req_rx)
+ wr32(wx, WX_RDM_VF_RE(index), reg_req_rx);
+ } else {
+ reg_req_tx = BIT(vf_bit);
+ reg_req_rx = BIT(vf_bit);
+ /* Disable particular VF */
+ if (reg_cur_tx & reg_req_tx)
+ wr32(wx, WX_TDM_VFTE_CLR(index), reg_req_tx);
+ if (reg_cur_rx & reg_req_rx)
+ wr32(wx, WX_RDM_VFRE_CLR(index), reg_req_rx);
+ }
+}
+
+static int wx_get_vf_queues(struct wx *wx, u32 *msgbuf, u32 vf)
+{
+ struct wx_ring_feature *vmdq = &wx->ring_feature[RING_F_VMDQ];
+ unsigned int default_tc = 0;
+
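+	/* __ALIGN_MASK(1, ~vmdq->mask) evaluates to the number of queues in
+	 * one VMDq pool.
+	 */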
+ msgbuf[WX_VF_TX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask);
+ msgbuf[WX_VF_RX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask);
+
+ if (wx->vfinfo[vf].pf_vlan || wx->vfinfo[vf].pf_qos)
+ msgbuf[WX_VF_TRANS_VLAN] = 1;
+ else
+ msgbuf[WX_VF_TRANS_VLAN] = 0;
+
+ /* notify VF of default queue */
+ msgbuf[WX_VF_DEF_QUEUE] = default_tc;
+
+ return 0;
+}
+
+static void wx_vf_reset_event(struct wx *wx, u16 vf)
+{
+ struct vf_data_storage *vfinfo = &wx->vfinfo[vf];
+ u8 num_tcs = netdev_get_num_tc(wx->netdev);
+
+ /* add PF assigned VLAN */
+ wx_set_vf_vlan(wx, true, vfinfo->pf_vlan, vf);
+
+ /* reset offloads to defaults */
+ wx_set_vmolr(wx, vf, !vfinfo->pf_vlan);
+
+ /* set outgoing tags for VFs */
+ if (!vfinfo->pf_vlan && !vfinfo->pf_qos && !num_tcs) {
+ wx_clear_vmvir(wx, vf);
+ } else {
+ if (vfinfo->pf_qos || !num_tcs)
+ wx_set_vmvir(wx, vfinfo->pf_vlan,
+ vfinfo->pf_qos, vf);
+ else
+ wx_set_vmvir(wx, vfinfo->pf_vlan,
+ wx->default_up, vf);
+ }
+
+ /* reset multicast table array for vf */
+ wx->vfinfo[vf].num_vf_mc_hashes = 0;
+
+ /* Flush and reset the mta with the new values */
+ wx_set_rx_mode(wx->netdev);
+
+ wx_del_mac_filter(wx, wx->vfinfo[vf].vf_mac_addr, vf);
+ /* reset VF api back to unknown */
+ wx->vfinfo[vf].vf_api = wx_mbox_api_null;
+}
+
+static void wx_vf_reset_msg(struct wx *wx, u16 vf)
+{
+ const u8 *vf_mac = wx->vfinfo[vf].vf_mac_addr;
+ struct net_device *dev = wx->netdev;
+ u32 msgbuf[5] = {0, 0, 0, 0, 0};
+ u8 *addr = (u8 *)(&msgbuf[1]);
+ u32 reg = 0, index, vf_bit;
+ int pf_max_frame;
+
+ /* reset the filters for the device */
+ wx_vf_reset_event(wx, vf);
+
+ /* set vf mac address */
+ if (!is_zero_ether_addr(vf_mac))
+ wx_set_vf_mac(wx, vf, vf_mac);
+
+ index = WX_VF_REG_OFFSET(vf);
+ vf_bit = WX_VF_IND_SHIFT(vf);
+
+ /* force drop enable for all VF Rx queues */
+ wx_write_qde(wx, vf, 1);
+
+ /* set transmit and receive for vf */
+ wx_set_vf_rx_tx(wx, vf);
+
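+	/* If the PF is using frames larger than a standard Ethernet frame,
+	 * leave this VF's Rx disabled here; wx_set_vf_lpe() re-enables it
+	 * once the VF reports its max frame size via WX_VF_SET_LPE.
+	 */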
+ pf_max_frame = dev->mtu + ETH_HLEN;
+
+ if (pf_max_frame > ETH_FRAME_LEN)
+ reg = BIT(vf_bit);
+ wr32(wx, WX_RDM_VFRE_CLR(index), reg);
+
+ /* enable VF mailbox for further messages */
+ wx->vfinfo[vf].clear_to_send = true;
+
+ /* reply to reset with ack and vf mac address */
+ msgbuf[0] = WX_VF_RESET;
+ if (!is_zero_ether_addr(vf_mac)) {
+ msgbuf[0] |= WX_VT_MSGTYPE_ACK;
+ memcpy(addr, vf_mac, ETH_ALEN);
+ } else {
+ msgbuf[0] |= WX_VT_MSGTYPE_NACK;
+ wx_err(wx, "VF %d has no MAC address assigned", vf);
+ }
+
+ msgbuf[3] = wx->mac.mc_filter_type;
+ wx_write_mbx_pf(wx, msgbuf, WX_VF_PERMADDR_MSG_LEN, vf);
+}
+
+static int wx_set_vf_mac_addr(struct wx *wx, u32 *msgbuf, u16 vf)
+{
+ const u8 *new_mac = ((u8 *)(&msgbuf[1]));
+ int ret;
+
+ if (!is_valid_ether_addr(new_mac)) {
+ wx_err(wx, "VF %d attempted to set invalid mac\n", vf);
+ return -EINVAL;
+ }
+
+ if (wx->vfinfo[vf].pf_set_mac &&
+ memcmp(wx->vfinfo[vf].vf_mac_addr, new_mac, ETH_ALEN)) {
+ wx_err(wx,
+ "VF %d attempt to set a MAC but it already had a MAC.",
+ vf);
+ return -EBUSY;
+ }
+
+ ret = wx_set_vf_mac(wx, vf, new_mac);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void wx_set_vf_multicasts(struct wx *wx, u32 *msgbuf, u32 vf)
+{
+ struct vf_data_storage *vfinfo = &wx->vfinfo[vf];
+ u16 entries = (msgbuf[0] & WX_VT_MSGINFO_MASK)
+ >> WX_VT_MSGINFO_SHIFT;
+ u32 vmolr = rd32(wx, WX_PSR_VM_L2CTL(vf));
+ u32 vector_bit, vector_reg, mta_reg, i;
+ u16 *hash_list = (u16 *)&msgbuf[1];
+
+ /* only so many hash values supported */
+ entries = min_t(u16, entries, WX_MAX_VF_MC_ENTRIES);
+ vfinfo->num_vf_mc_hashes = entries;
+
+ for (i = 0; i < entries; i++)
+ vfinfo->vf_mc_hashes[i] = hash_list[i];
+
+ for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) {
+ vector_reg = WX_PSR_MC_TBL_REG(vfinfo->vf_mc_hashes[i]);
+ vector_bit = WX_PSR_MC_TBL_BIT(vfinfo->vf_mc_hashes[i]);
+ mta_reg = wx->mac.mta_shadow[vector_reg];
+ mta_reg |= BIT(vector_bit);
+ wx->mac.mta_shadow[vector_reg] = mta_reg;
+ wr32(wx, WX_PSR_MC_TBL(vector_reg), mta_reg);
+ }
+ vmolr |= WX_PSR_VM_L2CTL_ROMPE;
+ wr32(wx, WX_PSR_VM_L2CTL(vf), vmolr);
+}
+
+static void wx_set_vf_lpe(struct wx *wx, u32 max_frame, u32 vf)
+{
+ u32 index, vf_bit, vfre;
+ u32 max_frs, reg_val;
+
+ /* determine VF receive enable location */
+ index = WX_VF_REG_OFFSET(vf);
+ vf_bit = WX_VF_IND_SHIFT(vf);
+
+ vfre = rd32(wx, WX_RDM_VF_RE(index));
+ vfre |= BIT(vf_bit);
+ wr32(wx, WX_RDM_VF_RE(index), vfre);
+
+ /* pull current max frame size from hardware */
+ max_frs = DIV_ROUND_UP(max_frame, 1024);
+ reg_val = rd32(wx, WX_MAC_WDG_TIMEOUT) & WX_MAC_WDG_TIMEOUT_WTO_MASK;
+ if (max_frs > (reg_val + WX_MAC_WDG_TIMEOUT_WTO_DELTA))
+ wr32(wx, WX_MAC_WDG_TIMEOUT,
+ max_frs - WX_MAC_WDG_TIMEOUT_WTO_DELTA);
+}
+
+static int wx_find_vlvf_entry(struct wx *wx, u32 vlan)
+{
+ int regindex;
+ u32 vlvf;
+
+ /* short cut the special case */
+ if (vlan == 0)
+ return 0;
+
+ /* Search for the vlan id in the VLVF entries */
+ for (regindex = 1; regindex < WX_PSR_VLAN_SWC_ENTRIES; regindex++) {
+ wr32(wx, WX_PSR_VLAN_SWC_IDX, regindex);
+ vlvf = rd32(wx, WX_PSR_VLAN_SWC);
+ if ((vlvf & VLAN_VID_MASK) == vlan)
+ break;
+ }
+
+ /* Return a negative value if not found */
+ if (regindex >= WX_PSR_VLAN_SWC_ENTRIES)
+ regindex = -EINVAL;
+
+ return regindex;
+}
+
+static int wx_set_vf_macvlan(struct wx *wx,
+ u16 vf, int index, unsigned char *mac_addr)
+{
+ struct vf_macvlans *entry;
+ struct list_head *pos;
+ int retval = 0;
+
+ if (index <= 1) {
+ list_for_each(pos, &wx->vf_mvs.mvlist) {
+ entry = list_entry(pos, struct vf_macvlans, mvlist);
+ if (entry->vf == vf) {
+ entry->vf = -1;
+ entry->free = true;
+ entry->is_macvlan = false;
+ wx_del_mac_filter(wx, entry->vf_macvlan, vf);
+ }
+ }
+ }
+
+ if (!index)
+ return 0;
+
+ entry = NULL;
+ list_for_each(pos, &wx->vf_mvs.mvlist) {
+ entry = list_entry(pos, struct vf_macvlans, mvlist);
+ if (entry->free)
+ break;
+ }
+
+ if (!entry || !entry->free)
+ return -ENOSPC;
+
+ retval = wx_add_mac_filter(wx, mac_addr, vf);
+ if (retval >= 0) {
+ entry->free = false;
+ entry->is_macvlan = true;
+ entry->vf = vf;
+ memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN);
+ }
+
+ return retval;
+}
+
+static int wx_set_vf_vlan_msg(struct wx *wx, u32 *msgbuf, u16 vf)
+{
+ int add = (msgbuf[0] & WX_VT_MSGINFO_MASK) >> WX_VT_MSGINFO_SHIFT;
+ int vid = (msgbuf[1] & WX_PSR_VLAN_SWC_VLANID_MASK);
+ int ret;
+
+ if (add)
+ wx->vfinfo[vf].vlan_count++;
+ else if (wx->vfinfo[vf].vlan_count)
+ wx->vfinfo[vf].vlan_count--;
+
+ /* in case of promiscuous mode any VLAN filter set for a VF must
+ * also have the PF pool added to it.
+ */
+ if (add && wx->netdev->flags & IFF_PROMISC)
+ wx_set_vf_vlan(wx, add, vid, VMDQ_P(0));
+
+ ret = wx_set_vf_vlan(wx, add, vid, vf);
+ if (!ret && wx->vfinfo[vf].spoofchk_enabled)
+ wx_set_vlan_anti_spoofing(wx, true, vf);
+
+ /* Go through all the checks to see if the VLAN filter should
+ * be wiped completely.
+ */
+ if (!add && wx->netdev->flags & IFF_PROMISC) {
+ u32 bits = 0, vlvf;
+ int reg_ndx;
+
+ reg_ndx = wx_find_vlvf_entry(wx, vid);
+ if (reg_ndx < 0)
+ return -ENOSPC;
+ wr32(wx, WX_PSR_VLAN_SWC_IDX, reg_ndx);
+ vlvf = rd32(wx, WX_PSR_VLAN_SWC);
+ /* See if any other pools are set for this VLAN filter
+ * entry other than the PF.
+ */
+ if (VMDQ_P(0) < 32) {
+ bits = rd32(wx, WX_PSR_VLAN_SWC_VM_L);
+ bits &= ~BIT(VMDQ_P(0));
+ if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags))
+ bits |= rd32(wx, WX_PSR_VLAN_SWC_VM_H);
+ } else {
+ if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags))
+ bits = rd32(wx, WX_PSR_VLAN_SWC_VM_H);
+ bits &= ~BIT(VMDQ_P(0) % 32);
+ bits |= rd32(wx, WX_PSR_VLAN_SWC_VM_L);
+ }
+ /* If the filter was removed then ensure PF pool bit
+ * is cleared if the PF only added itself to the pool
+ * because the PF is in promiscuous mode.
+ */
+ if ((vlvf & VLAN_VID_MASK) == vid && !bits)
+ wx_set_vf_vlan(wx, add, vid, VMDQ_P(0));
+ }
+
+ return 0;
+}
+
+static int wx_set_vf_macvlan_msg(struct wx *wx, u32 *msgbuf, u16 vf)
+{
+ int index = (msgbuf[0] & WX_VT_MSGINFO_MASK) >>
+ WX_VT_MSGINFO_SHIFT;
+ u8 *new_mac = ((u8 *)(&msgbuf[1]));
+ int err;
+
+ if (wx->vfinfo[vf].pf_set_mac && index > 0) {
+ wx_err(wx, "VF %d request MACVLAN filter but is denied\n", vf);
+ return -EINVAL;
+ }
+
+	/* A non-zero index indicates the VF is setting a filter */
+ if (index) {
+ if (!is_valid_ether_addr(new_mac)) {
+ wx_err(wx, "VF %d attempted to set invalid mac\n", vf);
+ return -EINVAL;
+ }
+ /* If the VF is allowed to set MAC filters then turn off
+ * anti-spoofing to avoid false positives.
+ */
+ if (wx->vfinfo[vf].spoofchk_enabled)
+ wx_set_vf_spoofchk(wx->netdev, vf, false);
+ }
+
+ err = wx_set_vf_macvlan(wx, vf, index, new_mac);
+ if (err == -ENOSPC)
+ wx_err(wx,
+ "VF %d request MACVLAN filter but there is no space\n",
+ vf);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int wx_negotiate_vf_api(struct wx *wx, u32 *msgbuf, u32 vf)
+{
+ int api = msgbuf[1];
+
+ switch (api) {
+ case wx_mbox_api_13:
+ wx->vfinfo[vf].vf_api = api;
+ return 0;
+ default:
+ wx_err(wx, "VF %d requested invalid api version %u\n", vf, api);
+ return -EINVAL;
+ }
+}
+
+static int wx_get_vf_link_state(struct wx *wx, u32 *msgbuf, u32 vf)
+{
+ msgbuf[1] = wx->vfinfo[vf].link_enable;
+
+ return 0;
+}
+
+static int wx_get_fw_version(struct wx *wx, u32 *msgbuf, u32 vf)
+{
+ unsigned long fw_version = 0ULL;
+ int ret = 0;
+
+ ret = kstrtoul(wx->eeprom_id, 16, &fw_version);
+ if (ret)
+ return -EOPNOTSUPP;
+ msgbuf[1] = fw_version;
+
+ return 0;
+}
+
+static int wx_update_vf_xcast_mode(struct wx *wx, u32 *msgbuf, u32 vf)
+{
+ int xcast_mode = msgbuf[1];
+ u32 vmolr, disable, enable;
+
+ if (wx->vfinfo[vf].xcast_mode == xcast_mode)
+ return 0;
+
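+	/* Each mode enables a superset of the previous one: NONE clears all
+	 * receive-override bits, MULTI accepts broadcast and MTA-matched
+	 * multicast, ALLMULTI additionally sets MPE, and PROMISC also enables
+	 * unicast and VLAN promiscuous modes.
+	 */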
+ switch (xcast_mode) {
+ case WXVF_XCAST_MODE_NONE:
+ disable = WX_PSR_VM_L2CTL_BAM | WX_PSR_VM_L2CTL_ROMPE |
+ WX_PSR_VM_L2CTL_MPE | WX_PSR_VM_L2CTL_UPE |
+ WX_PSR_VM_L2CTL_VPE;
+ enable = 0;
+ break;
+ case WXVF_XCAST_MODE_MULTI:
+ disable = WX_PSR_VM_L2CTL_MPE | WX_PSR_VM_L2CTL_UPE |
+ WX_PSR_VM_L2CTL_VPE;
+ enable = WX_PSR_VM_L2CTL_BAM | WX_PSR_VM_L2CTL_ROMPE;
+ break;
+ case WXVF_XCAST_MODE_ALLMULTI:
+ disable = WX_PSR_VM_L2CTL_UPE | WX_PSR_VM_L2CTL_VPE;
+ enable = WX_PSR_VM_L2CTL_BAM | WX_PSR_VM_L2CTL_ROMPE |
+ WX_PSR_VM_L2CTL_MPE;
+ break;
+ case WXVF_XCAST_MODE_PROMISC:
+ disable = 0;
+ enable = WX_PSR_VM_L2CTL_BAM | WX_PSR_VM_L2CTL_ROMPE |
+ WX_PSR_VM_L2CTL_MPE | WX_PSR_VM_L2CTL_UPE |
+ WX_PSR_VM_L2CTL_VPE;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ vmolr = rd32(wx, WX_PSR_VM_L2CTL(vf));
+ vmolr &= ~disable;
+ vmolr |= enable;
+ wr32(wx, WX_PSR_VM_L2CTL(vf), vmolr);
+
+ wx->vfinfo[vf].xcast_mode = xcast_mode;
+ msgbuf[1] = xcast_mode;
+
+ return 0;
+}
+
+static void wx_rcv_msg_from_vf(struct wx *wx, u16 vf)
+{
+ u16 mbx_size = WX_VXMAILBOX_SIZE;
+ u32 msgbuf[WX_VXMAILBOX_SIZE];
+ int retval;
+
+ retval = wx_read_mbx_pf(wx, msgbuf, mbx_size, vf);
+ if (retval) {
+ wx_err(wx, "Error receiving message from VF\n");
+ return;
+ }
+
+ /* this is a message we already processed, do nothing */
+ if (msgbuf[0] & (WX_VT_MSGTYPE_ACK | WX_VT_MSGTYPE_NACK))
+ return;
+
+ if (msgbuf[0] == WX_VF_RESET) {
+ wx_vf_reset_msg(wx, vf);
+ return;
+ }
+
+ /* until the vf completes a virtual function reset it should not be
+ * allowed to start any configuration.
+ */
+ if (!wx->vfinfo[vf].clear_to_send) {
+ msgbuf[0] |= WX_VT_MSGTYPE_NACK;
+ wx_write_mbx_pf(wx, msgbuf, 1, vf);
+ return;
+ }
+
+	switch (msgbuf[0] & U16_MAX) {
+ case WX_VF_SET_MAC_ADDR:
+ retval = wx_set_vf_mac_addr(wx, msgbuf, vf);
+ break;
+ case WX_VF_SET_MULTICAST:
+ wx_set_vf_multicasts(wx, msgbuf, vf);
+ retval = 0;
+ break;
+ case WX_VF_SET_VLAN:
+ retval = wx_set_vf_vlan_msg(wx, msgbuf, vf);
+ break;
+ case WX_VF_SET_LPE:
+ wx_set_vf_lpe(wx, msgbuf[1], vf);
+ retval = 0;
+ break;
+ case WX_VF_SET_MACVLAN:
+ retval = wx_set_vf_macvlan_msg(wx, msgbuf, vf);
+ break;
+ case WX_VF_API_NEGOTIATE:
+ retval = wx_negotiate_vf_api(wx, msgbuf, vf);
+ break;
+ case WX_VF_GET_QUEUES:
+ retval = wx_get_vf_queues(wx, msgbuf, vf);
+ break;
+ case WX_VF_GET_LINK_STATE:
+ retval = wx_get_vf_link_state(wx, msgbuf, vf);
+ break;
+ case WX_VF_GET_FW_VERSION:
+ retval = wx_get_fw_version(wx, msgbuf, vf);
+ break;
+ case WX_VF_UPDATE_XCAST_MODE:
+ retval = wx_update_vf_xcast_mode(wx, msgbuf, vf);
+ break;
+ case WX_VF_BACKUP:
+ break;
+ default:
+ wx_err(wx, "Unhandled Msg %8.8x\n", msgbuf[0]);
+ break;
+ }
+
+ /* notify the VF of the results of what it sent us */
+ if (retval)
+ msgbuf[0] |= WX_VT_MSGTYPE_NACK;
+ else
+ msgbuf[0] |= WX_VT_MSGTYPE_ACK;
+
+ msgbuf[0] |= WX_VT_MSGTYPE_CTS;
+
+ wx_write_mbx_pf(wx, msgbuf, mbx_size, vf);
+}
+
+static void wx_rcv_ack_from_vf(struct wx *wx, u16 vf)
+{
+ u32 msg = WX_VT_MSGTYPE_NACK;
+
+ /* if device isn't clear to send it shouldn't be reading either */
+ if (!wx->vfinfo[vf].clear_to_send)
+ wx_write_mbx_pf(wx, &msg, 1, vf);
+}
+
+void wx_msg_task(struct wx *wx)
+{
+ u16 vf;
+
+ for (vf = 0; vf < wx->num_vfs; vf++) {
+ /* process any reset requests */
+ if (!wx_check_for_rst_pf(wx, vf))
+ wx_vf_reset_event(wx, vf);
+
+ /* process any messages pending */
+ if (!wx_check_for_msg_pf(wx, vf))
+ wx_rcv_msg_from_vf(wx, vf);
+
+ /* process any acks */
+ if (!wx_check_for_ack_pf(wx, vf))
+ wx_rcv_ack_from_vf(wx, vf);
+ }
+}
+EXPORT_SYMBOL(wx_msg_task);
+
+void wx_disable_vf_rx_tx(struct wx *wx)
+{
+ wr32(wx, WX_TDM_VFTE_CLR(0), U32_MAX);
+ wr32(wx, WX_RDM_VFRE_CLR(0), U32_MAX);
+ if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
+ wr32(wx, WX_TDM_VFTE_CLR(1), U32_MAX);
+ wr32(wx, WX_RDM_VFRE_CLR(1), U32_MAX);
+ }
+}
+EXPORT_SYMBOL(wx_disable_vf_rx_tx);
+
+void wx_ping_all_vfs_with_link_status(struct wx *wx, bool link_up)
+{
+ u32 msgbuf[2] = {0, 0};
+ u16 i;
+
+ if (!wx->num_vfs)
+ return;
+ msgbuf[0] = WX_PF_NOFITY_VF_LINK_STATUS | WX_PF_CONTROL_MSG;
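+	/* msgbuf[1] carries the link state in bit 0 and the link speed in
+	 * bits 31:1; bit 31 flags that the PF's network interface is not
+	 * running.
+	 */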
+ if (link_up)
+ msgbuf[1] = FIELD_PREP(GENMASK(31, 1), wx->speed) | link_up;
+ if (wx->notify_down)
+ msgbuf[1] |= WX_PF_NOFITY_VF_NET_NOT_RUNNING;
+ for (i = 0; i < wx->num_vfs; i++) {
+ if (wx->vfinfo[i].clear_to_send)
+ msgbuf[0] |= WX_VT_MSGTYPE_CTS;
+ wx_write_mbx_pf(wx, msgbuf, 2, i);
+ }
+}
+EXPORT_SYMBOL(wx_ping_all_vfs_with_link_status);
+
+static void wx_set_vf_link_state(struct wx *wx, int vf, int state)
+{
+ wx->vfinfo[vf].link_state = state;
+ switch (state) {
+ case IFLA_VF_LINK_STATE_AUTO:
+ if (netif_running(wx->netdev))
+ wx->vfinfo[vf].link_enable = true;
+ else
+ wx->vfinfo[vf].link_enable = false;
+ break;
+ case IFLA_VF_LINK_STATE_ENABLE:
+ wx->vfinfo[vf].link_enable = true;
+ break;
+ case IFLA_VF_LINK_STATE_DISABLE:
+ wx->vfinfo[vf].link_enable = false;
+ break;
+ }
+ /* restart the VF */
+ wx->vfinfo[vf].clear_to_send = false;
+ wx_ping_vf(wx, vf);
+
+ wx_set_vf_rx_tx(wx, vf);
+}
+
+void wx_set_all_vfs(struct wx *wx)
+{
+ int i;
+
+ for (i = 0; i < wx->num_vfs; i++)
+ wx_set_vf_link_state(wx, i, wx->vfinfo[i].link_state);
+}
+EXPORT_SYMBOL(wx_set_all_vfs);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_sriov.h b/drivers/net/ethernet/wangxun/libwx/wx_sriov.h
new file mode 100644
index 000000000000..8a3a47bb5815
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/libwx/wx_sriov.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */
+
+#ifndef _WX_SRIOV_H_
+#define _WX_SRIOV_H_
+
+#define WX_VF_ENABLE_CHECK(_m) FIELD_GET(BIT(31), (_m))
+#define WX_VF_NUM_GET(_m) FIELD_GET(GENMASK(5, 0), (_m))
+#define WX_VF_ENABLE BIT(31)
+
+void wx_disable_sriov(struct wx *wx);
+int wx_pci_sriov_configure(struct pci_dev *pdev, int num_vfs);
+void wx_msg_task(struct wx *wx);
+void wx_disable_vf_rx_tx(struct wx *wx);
+void wx_ping_all_vfs_with_link_status(struct wx *wx, bool link_up);
+void wx_set_all_vfs(struct wx *wx);
+
+#endif /* _WX_SRIOV_H_ */
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h
index b54bffda027b..c363379126c0 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_type.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h
@@ -4,6 +4,8 @@
#ifndef _WX_TYPE_H_
#define _WX_TYPE_H_
+#include <linux/ptp_clock_kernel.h>
+#include <linux/timecounter.h>
#include <linux/bitfield.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
@@ -18,8 +20,13 @@
/* MSI-X capability fields masks */
#define WX_PCIE_MSIX_TBL_SZ_MASK 0x7FF
#define WX_PCI_LINK_STATUS 0xB2
+#define WX_MAX_PF_MACVLANS 15
+#define WX_MAX_VF_MC_ENTRIES 30
/**************** Global Registers ****************************/
+#define WX_VF_REG_OFFSET(_v) FIELD_GET(GENMASK(15, 5), (_v))
+#define WX_VF_IND_SHIFT(_v) FIELD_GET(GENMASK(4, 0), (_v))
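+/* Register index (vf / 32) and bit offset (vf % 32) used for the
+ * per-VF enable/clear bitmap registers.
+ */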
+
/* chip control Registers */
#define WX_MIS_PWR 0x10000
#define WX_MIS_RST 0x1000C
@@ -74,6 +81,9 @@
#define WX_MAC_LXONOFFRXC 0x11E0C
/*********************** Receive DMA registers **************************/
+#define WX_RDM_VF_RE(_i) (0x12004 + ((_i) * 4))
+#define WX_RDM_PF_QDE(_i) (0x12080 + ((_i) * 4))
+#define WX_RDM_VFRE_CLR(_i) (0x120A0 + ((_i) * 4))
#define WX_RDM_DRP_PKT 0x12500
#define WX_RDM_PKT_CNT 0x12504
#define WX_RDM_BYTE_CNT_LSB 0x12508
@@ -82,12 +92,17 @@
/************************* Port Registers ************************************/
/* port cfg Registers */
#define WX_CFG_PORT_CTL 0x14400
+#define WX_CFG_PORT_CTL_PFRSTD BIT(14)
#define WX_CFG_PORT_CTL_DRV_LOAD BIT(3)
#define WX_CFG_PORT_CTL_QINQ BIT(2)
#define WX_CFG_PORT_CTL_D_VLAN BIT(0) /* double vlan*/
#define WX_CFG_TAG_TPID(_i) (0x14430 + ((_i) * 4))
#define WX_CFG_PORT_CTL_NUM_VT_MASK GENMASK(13, 12) /* number of TVs */
+#define WX_CFG_PORT_CTL_NUM_VT_NONE 0
+#define WX_CFG_PORT_CTL_NUM_VT_8 FIELD_PREP(GENMASK(13, 12), 1)
+#define WX_CFG_PORT_CTL_NUM_VT_32 FIELD_PREP(GENMASK(13, 12), 2)
+#define WX_CFG_PORT_CTL_NUM_VT_64 FIELD_PREP(GENMASK(13, 12), 3)
/* GPIO Registers */
#define WX_GPIO_DR 0x14800
@@ -110,6 +125,11 @@
/*********************** Transmit DMA registers **************************/
/* transmit global control */
#define WX_TDM_CTL 0x18000
+#define WX_TDM_VF_TE(_i) (0x18004 + ((_i) * 4))
+#define WX_TDM_MAC_AS(_i) (0x18060 + ((_i) * 4))
+#define WX_TDM_VLAN_AS(_i) (0x18070 + ((_i) * 4))
+#define WX_TDM_VFTE_CLR(_i) (0x180A0 + ((_i) * 4))
+
/* TDM CTL BIT */
#define WX_TDM_CTL_TE BIT(0) /* Transmit Enable */
#define WX_TDM_PB_THRE(_i) (0x18020 + ((_i) * 4))
@@ -163,6 +183,7 @@
/******************************* PSR Registers *******************************/
/* psr control */
#define WX_PSR_CTL 0x15000
+#define WX_PSR_VM_CTL 0x151B0
/* Header split receive */
#define WX_PSR_CTL_SW_EN BIT(18)
#define WX_PSR_CTL_RSC_ACK BIT(17)
@@ -180,14 +201,36 @@
#define WX_PSR_VLAN_CTL 0x15088
#define WX_PSR_VLAN_CTL_CFIEN BIT(29) /* bit 29 */
#define WX_PSR_VLAN_CTL_VFE BIT(30) /* bit 30 */
+/* EType Queue Filter */
+#define WX_PSR_ETYPE_SWC(_i) (0x15128 + ((_i) * 4))
+#define WX_PSR_ETYPE_SWC_FILTER_1588 3
+#define WX_PSR_ETYPE_SWC_FILTER_EN BIT(31)
+#define WX_PSR_ETYPE_SWC_1588 BIT(30)
+/* 1588 */
+#define WX_PSR_1588_MSG 0x15120
+#define WX_PSR_1588_MSG_V1_SYNC FIELD_PREP(GENMASK(7, 0), 0)
+#define WX_PSR_1588_MSG_V1_DELAY_REQ FIELD_PREP(GENMASK(7, 0), 1)
+#define WX_PSR_1588_STMPL 0x151E8
+#define WX_PSR_1588_STMPH 0x151A4
+#define WX_PSR_1588_CTL 0x15188
+#define WX_PSR_1588_CTL_ENABLED BIT(4)
+#define WX_PSR_1588_CTL_TYPE_MASK GENMASK(3, 1)
+#define WX_PSR_1588_CTL_TYPE_L4_V1 FIELD_PREP(GENMASK(3, 1), 1)
+#define WX_PSR_1588_CTL_TYPE_EVENT_V2 FIELD_PREP(GENMASK(3, 1), 5)
+#define WX_PSR_1588_CTL_VALID BIT(0)
/* mcasst/ucast overflow tbl */
#define WX_PSR_MC_TBL(_i) (0x15200 + ((_i) * 4))
+#define WX_PSR_MC_TBL_REG(_i) FIELD_GET(GENMASK(11, 5), (_i))
+#define WX_PSR_MC_TBL_BIT(_i) FIELD_GET(GENMASK(4, 0), (_i))
#define WX_PSR_UC_TBL(_i) (0x15400 + ((_i) * 4))
+#define WX_PSR_VM_CTL_REPLEN BIT(30) /* replication enabled */
+#define WX_PSR_VM_CTL_POOL_MASK GENMASK(12, 7)
/* VM L2 contorl */
#define WX_PSR_VM_L2CTL(_i) (0x15600 + ((_i) * 4))
#define WX_PSR_VM_L2CTL_UPE BIT(4) /* unicast promiscuous */
#define WX_PSR_VM_L2CTL_VACC BIT(6) /* accept nomatched vlan */
+#define WX_PSR_VM_L2CTL_VPE BIT(7) /* vlan promiscuous mode */
#define WX_PSR_VM_L2CTL_AUPE BIT(8) /* accept untagged packets */
#define WX_PSR_VM_L2CTL_ROMPE BIT(9) /* accept packets in MTA tbl */
#define WX_PSR_VM_L2CTL_ROPE BIT(10) /* accept packets in UC tbl */
@@ -226,10 +269,12 @@
#define WX_PSR_VLAN_SWC 0x16220
#define WX_PSR_VLAN_SWC_VM_L 0x16224
#define WX_PSR_VLAN_SWC_VM_H 0x16228
+#define WX_PSR_VLAN_SWC_VM(_i) (0x16224 + ((_i) * 4))
#define WX_PSR_VLAN_SWC_IDX 0x16230 /* 64 vlan entries */
/* VLAN pool filtering masks */
#define WX_PSR_VLAN_SWC_VIEN BIT(31) /* filter is valid */
#define WX_PSR_VLAN_SWC_ENTRIES 64
+#define WX_PSR_VLAN_SWC_VLANID_MASK GENMASK(11, 0)
/********************************* RSEC **************************************/
/* general rsec */
@@ -240,6 +285,13 @@
#define WX_RSC_ST 0x17004
#define WX_RSC_ST_RSEC_RDY BIT(0)
+/*********************** Transmit DMA registers **************************/
+/* transmit global control */
+#define WX_TDM_ETYPE_AS(_i) (0x18058 + ((_i) * 4))
+#define WX_TDM_VLAN_INS(_i) (0x18100 + ((_i) * 4))
+/* Per VF Port VLAN insertion rules */
+#define WX_TDM_VLAN_INS_VLANA_DEFAULT BIT(30) /* Always use default VLAN*/
+
/****************************** TDB ******************************************/
#define WX_TDB_PB_SZ(_i) (0x1CC00 + ((_i) * 4))
#define WX_TXPKT_SIZE_MAX 0xA /* Max Tx Packet size */
@@ -253,6 +305,32 @@
#define WX_TSC_ST_SECTX_RDY BIT(0)
#define WX_TSC_BUF_AE 0x1D00C
#define WX_TSC_BUF_AE_THR GENMASK(9, 0)
+/* 1588 */
+#define WX_TSC_1588_CTL 0x11F00
+#define WX_TSC_1588_CTL_ENABLED BIT(4)
+#define WX_TSC_1588_CTL_VALID BIT(0)
+#define WX_TSC_1588_STMPL 0x11F04
+#define WX_TSC_1588_STMPH 0x11F08
+#define WX_TSC_1588_SYSTIML 0x11F0C
+#define WX_TSC_1588_SYSTIMH 0x11F10
+#define WX_TSC_1588_INC 0x11F14
+#define WX_TSC_1588_INT_ST 0x11F20
+#define WX_TSC_1588_INT_ST_TT1 BIT(5)
+#define WX_TSC_1588_INT_EN 0x11F24
+#define WX_TSC_1588_INT_EN_TT1 BIT(5)
+#define WX_TSC_1588_AUX_CTL 0x11F28
+#define WX_TSC_1588_AUX_CTL_EN_TS0 BIT(8)
+#define WX_TSC_1588_AUX_CTL_EN_TT1 BIT(2)
+#define WX_TSC_1588_AUX_CTL_PLSG BIT(1)
+#define WX_TSC_1588_AUX_CTL_EN_TT0 BIT(0)
+#define WX_TSC_1588_TRGT_L(i) (0x11F2C + ((i) * 8)) /* [0,1] */
+#define WX_TSC_1588_TRGT_H(i) (0x11F30 + ((i) * 8)) /* [0,1] */
+#define WX_TSC_1588_SDP(i) (0x11F5C + ((i) * 4)) /* [0,3] */
+#define WX_TSC_1588_SDP_OUT_LEVEL_H FIELD_PREP(BIT(4), 0)
+#define WX_TSC_1588_SDP_OUT_LEVEL_L FIELD_PREP(BIT(4), 1)
+#define WX_TSC_1588_SDP_FUN_SEL_MASK GENMASK(2, 0)
+#define WX_TSC_1588_SDP_FUN_SEL_TT0 FIELD_PREP(WX_TSC_1588_SDP_FUN_SEL_MASK, 1)
+#define WX_TSC_1588_SDP_FUN_SEL_TS0 FIELD_PREP(WX_TSC_1588_SDP_FUN_SEL_MASK, 5)
/************************************** MNG ********************************/
#define WX_MNG_SWFW_SYNC 0x1E008
@@ -264,6 +342,10 @@
#define WX_MNG_MBOX_CTL_FWRDY BIT(2)
#define WX_MNG_BMC2OS_CNT 0x1E090
#define WX_MNG_OS2BMC_CNT 0x1E094
+#define WX_SW2FW_MBOX_CMD 0x1E0A0
+#define WX_SW2FW_MBOX_CMD_VLD BIT(31)
+#define WX_SW2FW_MBOX 0x1E200
+#define WX_FW2SW_MBOX 0x1E300
/************************************* ETH MAC *****************************/
#define WX_MAC_TX_CFG 0x11000
@@ -279,6 +361,9 @@
#define WX_MAC_WDG_TIMEOUT 0x1100C
#define WX_MAC_RX_FLOW_CTRL 0x11090
#define WX_MAC_RX_FLOW_CTRL_RFE BIT(0) /* receive fc enable */
+
+#define WX_MAC_WDG_TIMEOUT_WTO_MASK GENMASK(3, 0)
+#define WX_MAC_WDG_TIMEOUT_WTO_DELTA 2
/* MDIO Registers */
#define WX_MSCA 0x11200
#define WX_MSCA_RA(v) FIELD_PREP(U16_MAX, v)
@@ -327,6 +412,7 @@ enum WX_MSCA_CMD_value {
#define WX_12K_ITR 336
#define WX_20K_ITR 200
#define WX_SP_MAX_EITR 0x00000FF8U
+#define WX_AML_MAX_EITR 0x00000FFFU
#define WX_EM_MAX_EITR 0x00007FFCU
/* transmit DMA Registers */
@@ -367,9 +453,19 @@ enum WX_MSCA_CMD_value {
/* Number of 80 microseconds we wait for PCI Express master disable */
#define WX_PCI_MASTER_DISABLE_TIMEOUT 80000
+#define WX_RSS_64Q_MASK 0x3F
+#define WX_RSS_8Q_MASK 0x7
+#define WX_RSS_4Q_MASK 0x3
+#define WX_RSS_2Q_MASK 0x1
+#define WX_RSS_DISABLED_MASK 0x0
+
+#define WX_VMDQ_4Q_MASK 0x7C
+#define WX_VMDQ_2Q_MASK 0x7E
+
/****************** Manageablility Host Interface defines ********************/
#define WX_HI_MAX_BLOCK_BYTE_LENGTH 256 /* Num of bytes in range */
#define WX_HI_COMMAND_TIMEOUT 1000 /* Process HI command limit */
+#define WX_HIC_HDR_INDEX_MAX 255
#define FW_READ_SHADOW_RAM_CMD 0x31
#define FW_READ_SHADOW_RAM_LEN 0x6
@@ -382,6 +478,8 @@ enum WX_MSCA_CMD_value {
#define FW_CEM_CMD_RESERVED 0X0
#define FW_CEM_MAX_RETRIES 3
#define FW_CEM_RESP_STATUS_SUCCESS 0x1
+#define FW_PPS_SET_CMD 0xF6
+#define FW_PPS_SET_LEN 0x14
#define WX_SW_REGION_PTR 0x1C
@@ -431,7 +529,7 @@ enum WX_MSCA_CMD_value {
#define WX_REQ_TX_DESCRIPTOR_MULTIPLE 128
#define WX_MAX_JUMBO_FRAME_SIZE 9432 /* max payload 9414 */
-#define VMDQ_P(p) p
+#define VMDQ_P(p) ((p) + wx->ring_feature[RING_F_VMDQ].offset)
/* Supported Rx Buffer Sizes */
#define WX_RXBUFFER_256 256 /* Used for skb receive header */
@@ -460,6 +558,8 @@ enum WX_MSCA_CMD_value {
#define WX_RXD_STAT_L4CS BIT(7) /* L4 xsum calculated */
#define WX_RXD_STAT_IPCS BIT(8) /* IP xsum calculated */
#define WX_RXD_STAT_OUTERIPCS BIT(10) /* Cloud IP xsum calculated*/
+#define WX_RXD_STAT_IPV6EX BIT(12) /* IPv6 Dest Header */
+#define WX_RXD_STAT_TS BIT(14) /* IEEE1588 Time Stamp */
#define WX_RXD_ERR_OUTERIPER BIT(26) /* CRC IP Header error */
#define WX_RXD_ERR_RXE BIT(29) /* Any MAC Error */
@@ -535,8 +635,6 @@ enum wx_l2_ptypes {
#define WX_RXD_PKTTYPE(_rxd) \
((le32_to_cpu((_rxd)->wb.lower.lo_dword.data) >> 9) & 0xFF)
-#define WX_RXD_IPV6EX(_rxd) \
- ((le32_to_cpu((_rxd)->wb.lower.lo_dword.data) >> 6) & 0x1)
/*********************** Transmit Descriptor Config Masks ****************/
#define WX_TXD_STAT_DD BIT(0) /* Descriptor Done */
#define WX_TXD_DTYP_DATA 0 /* Adv Data Descriptor */
@@ -663,21 +761,30 @@ struct wx_hic_hdr {
u8 cmd_resv;
u8 ret_status;
} cmd_or_resp;
- u8 checksum;
+ union {
+ u8 checksum;
+ u8 index;
+ };
};
struct wx_hic_hdr2_req {
u8 cmd;
u8 buf_lenh;
u8 buf_lenl;
- u8 checksum;
+ union {
+ u8 checksum;
+ u8 index;
+ };
};
struct wx_hic_hdr2_rsp {
u8 cmd;
u8 buf_lenl;
u8 buf_lenh_status; /* 7-5: high bits of buf_len, 4-0: status */
- u8 checksum;
+ union {
+ u8 checksum;
+ u8 index;
+ };
};
union wx_hic_hdr2 {
@@ -701,12 +808,25 @@ struct wx_hic_reset {
u16 reset_type;
};
+struct wx_hic_set_pps {
+ struct wx_hic_hdr hdr;
+ u8 lan_id;
+ u8 enable;
+ u16 pad2;
+ u64 nsec;
+ u64 cycles;
+};
+
/* Bus parameters */
struct wx_bus_info {
u8 func;
u16 device;
};
+struct wx_mbx_info {
+ u16 size;
+};
+
struct wx_thermal_sensor_data {
s16 temp;
s16 alarm_thresh;
@@ -716,14 +836,16 @@ struct wx_thermal_sensor_data {
enum wx_mac_type {
wx_mac_unknown = 0,
wx_mac_sp,
- wx_mac_em
+ wx_mac_em,
+ wx_mac_aml,
+ wx_mac_aml40,
};
-enum sp_media_type {
- sp_media_unknown = 0,
- sp_media_fiber,
- sp_media_copper,
- sp_media_backplane
+enum wx_media_type {
+ wx_media_unknown = 0,
+ wx_media_fiber,
+ wx_media_copper,
+ wx_media_backplane
};
enum em_mac_type {
@@ -863,6 +985,7 @@ struct wx_tx_context_desc {
*/
struct wx_tx_buffer {
union wx_tx_desc *next_to_watch;
+ unsigned long time_stamp;
struct sk_buff *skb;
unsigned int bytecount;
unsigned short gso_segs;
@@ -924,6 +1047,7 @@ struct wx_ring {
unsigned int size; /* length in bytes */
u16 count; /* amount of descriptors */
+ unsigned long last_rx_timestamp;
u8 queue_index; /* needed for multiqueue queue management */
u8 reg_idx; /* holds the special value that gets
@@ -977,6 +1101,7 @@ struct wx_ring_feature {
enum wx_ring_f_enum {
RING_F_NONE = 0,
+ RING_F_VMDQ,
RING_F_RSS,
RING_F_FDIR,
RING_F_ARRAY_SIZE /* must be last in enum set */
@@ -1026,13 +1151,56 @@ struct wx_hw_stats {
enum wx_state {
WX_STATE_RESETTING,
- WX_STATE_NBITS, /* must be last */
+ WX_STATE_SWFW_BUSY,
+ WX_STATE_PTP_RUNNING,
+ WX_STATE_PTP_TX_IN_PROGRESS,
+ WX_STATE_SERVICE_SCHED,
+ WX_STATE_NBITS /* must be last */
+};
+
+struct vf_data_storage {
+ struct pci_dev *vfdev;
+ unsigned char vf_mac_addr[ETH_ALEN];
+ bool spoofchk_enabled;
+ bool link_enable;
+ bool trusted;
+ int xcast_mode;
+ unsigned int vf_api;
+ bool clear_to_send;
+ u16 pf_vlan; /* When set, guest VLAN config not allowed. */
+ u16 pf_qos;
+ bool pf_set_mac;
+
+ u16 vf_mc_hashes[WX_MAX_VF_MC_ENTRIES];
+ u16 num_vf_mc_hashes;
+ u16 vlan_count;
+ int link_state;
+};
+
+struct vf_macvlans {
+ struct list_head mvlist;
+ int vf;
+ bool free;
+ bool is_macvlan;
+ u8 vf_macvlan[ETH_ALEN];
};
enum wx_pf_flags {
+ WX_FLAG_MULTI_64_FUNC,
+ WX_FLAG_SWFW_RING,
+ WX_FLAG_VMDQ_ENABLED,
+ WX_FLAG_VLAN_PROMISC,
+ WX_FLAG_SRIOV_ENABLED,
+ WX_FLAG_IRQ_VECTOR_SHARED,
WX_FLAG_FDIR_CAPABLE,
WX_FLAG_FDIR_HASH,
WX_FLAG_FDIR_PERFECT,
+ WX_FLAG_RSC_CAPABLE,
+ WX_FLAG_RX_HWTSTAMP_ENABLED,
+ WX_FLAG_RX_HWTSTAMP_IN_REGISTER,
+ WX_FLAG_PTP_PPS_ENABLED,
+ WX_FLAG_NEED_LINK_CONFIG,
+ WX_FLAG_NEED_SFP_RESET,
WX_PF_FLAGS_NBITS /* must be last */
};
@@ -1046,9 +1214,10 @@ struct wx {
struct pci_dev *pdev;
struct net_device *netdev;
struct wx_bus_info bus;
+ struct wx_mbx_info mbx;
struct wx_mac_info mac;
enum em_mac_type mac_type;
- enum sp_media_type media_type;
+ enum wx_media_type media_type;
struct wx_eeprom_info eeprom;
struct wx_addr_filter_info addr_ctrl;
struct wx_fc_info fc;
@@ -1066,8 +1235,12 @@ struct wx {
char eeprom_id[32];
char *driver_name;
enum wx_reset_type reset_type;
+ u8 swfw_index;
/* PHY stuff */
+ bool notify_down;
+ int adv_speed;
+ int adv_duplex;
unsigned int link;
int speed;
int duplex;
@@ -1099,6 +1272,8 @@ struct wx {
struct wx_ring *tx_ring[64] ____cacheline_aligned_in_smp;
struct wx_ring *rx_ring[64];
struct wx_q_vector *q_vector[64];
+ int num_rx_pools;
+ int num_rx_queues_per_pool;
unsigned int queues_per_pool;
struct msix_entry *msix_q_entries;
@@ -1120,6 +1295,7 @@ struct wx {
u32 wol;
u16 bd_number;
+ bool default_up;
struct wx_hw_stats stats;
u64 tx_busy;
@@ -1128,15 +1304,47 @@ struct wx {
u64 hw_csum_rx_good;
u64 hw_csum_rx_error;
u64 alloc_rx_buff_failed;
+ unsigned int num_vfs;
+ struct vf_data_storage *vfinfo;
+ struct vf_macvlans vf_mvs;
+ struct vf_macvlans *mv_list;
+ unsigned long fwd_bitmask;
u32 atr_sample_rate;
void (*atr)(struct wx_ring *ring, struct wx_tx_buffer *first, u8 ptype);
void (*configure_fdir)(struct wx *wx);
+ int (*setup_tc)(struct net_device *netdev, u8 tc);
void (*do_reset)(struct net_device *netdev);
+ int (*ptp_setup_sdp)(struct wx *wx);
+
+ bool pps_enabled;
+ u64 pps_width;
+ u64 pps_edge_start;
+ u64 pps_edge_end;
+ u64 sec_to_cc;
+ u32 base_incval;
+ u32 tx_hwtstamp_pkts;
+ u32 tx_hwtstamp_timeouts;
+ u32 tx_hwtstamp_skipped;
+ u32 tx_hwtstamp_errors;
+ u32 rx_hwtstamp_cleared;
+ unsigned long last_overflow_check;
+ unsigned long last_rx_ptp_check;
+ unsigned long ptp_tx_start;
+ seqlock_t hw_tc_lock; /* seqlock for ptp */
+ struct cyclecounter hw_cc;
+ struct timecounter hw_tc;
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_caps;
+ struct kernel_hwtstamp_config tstamp_config;
+ struct sk_buff *ptp_tx_skb;
+
+ struct timer_list service_timer;
+ struct work_struct service_task;
};
#define WX_INTR_ALL (~0ULL)
-#define WX_INTR_Q(i) BIT((i) + 1)
+#define WX_INTR_Q(i) BIT((i))
/* register operations */
#define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
@@ -1177,6 +1385,24 @@ rd64(struct wx *wx, u32 reg)
return (lsb | msb << 32);
}
+static inline u32
+rd32ptp(struct wx *wx, u32 reg)
+{
+ if (wx->mac.type == wx_mac_em)
+ return rd32(wx, reg);
+
+ return rd32(wx, reg + 0xB500);
+}
+
+static inline void
+wr32ptp(struct wx *wx, u32 reg, u32 value)
+{
+ if (wx->mac.type == wx_mac_em)
+ return wr32(wx, reg, value);
+
+ return wr32(wx, reg + 0xB500, value);
+}
+
/* On some domestic CPU platforms, sometimes IO is not synchronized with
* flushing memory, here use readl() to flush PCI read and write.
*/
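
A minimal usage sketch (the helper name is hypothetical and not part of this series): the offset-aware accessor above pairs with the 1588 defines to read the 64-bit SYSTIM value, addressing the registers directly on em parts and at the +0xB500 alias on the newer MACs.

static u64 wx_ptp_read_systim_sketch(struct wx *wx)
{
	u32 lo, hi;

	/* illustrative read of the SYSTIM register pair via rd32ptp() */
	lo = rd32ptp(wx, WX_TSC_1588_SYSTIML);
	hi = rd32ptp(wx, WX_TSC_1588_SYSTIMH);

	return ((u64)hi << 32) | lo;
}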
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c
index e868f7ef4920..7e2d9ec38a30 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c
@@ -138,6 +138,8 @@ static const struct ethtool_ops ngbe_ethtool_ops = {
.set_channels = ngbe_set_channels,
.get_msglevel = wx_get_msglevel,
.set_msglevel = wx_set_msglevel,
+ .get_ts_info = wx_get_ts_info,
+ .get_ts_stats = wx_get_ptp_stats,
};
void ngbe_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
index 53aeae2f884b..e0fc897b0a58 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
@@ -14,6 +14,9 @@
#include "../libwx/wx_type.h"
#include "../libwx/wx_hw.h"
#include "../libwx/wx_lib.h"
+#include "../libwx/wx_ptp.h"
+#include "../libwx/wx_mbx.h"
+#include "../libwx/wx_sriov.h"
#include "ngbe_type.h"
#include "ngbe_mdio.h"
#include "ngbe_hw.h"
@@ -128,6 +131,10 @@ static int ngbe_sw_init(struct wx *wx)
wx->tx_work_limit = NGBE_DEFAULT_TX_WORK;
wx->rx_work_limit = NGBE_DEFAULT_RX_WORK;
+ wx->mbx.size = WX_VXMAILBOX_SIZE;
+ wx->setup_tc = ngbe_setup_tc;
+ set_bit(0, &wx->fwd_bitmask);
+
return 0;
}
@@ -154,7 +161,7 @@ static void ngbe_irq_enable(struct wx *wx, bool queues)
if (queues)
wx_intr_enable(wx, NGBE_INTR_ALL);
else
- wx_intr_enable(wx, NGBE_INTR_MISC);
+ wx_intr_enable(wx, NGBE_INTR_MISC(wx));
}
/**
@@ -167,7 +174,7 @@ static irqreturn_t ngbe_intr(int __always_unused irq, void *data)
struct wx_q_vector *q_vector;
struct wx *wx = data;
struct pci_dev *pdev;
- u32 eicr;
+ u32 eicr, eicr_misc;
q_vector = wx->q_vector[0];
pdev = wx->pdev;
@@ -185,6 +192,10 @@ static irqreturn_t ngbe_intr(int __always_unused irq, void *data)
if (!(pdev->msi_enabled))
wr32(wx, WX_PX_INTA, 1);
+ eicr_misc = wx_misc_isb(wx, WX_ISB_MISC);
+ if (unlikely(eicr_misc & NGBE_PX_MISC_IC_TIMESYNC))
+ wx_ptp_check_pps_event(wx);
+
wx->isb_mem[WX_ISB_MISC] = 0;
/* would disable interrupts here but it is auto disabled */
napi_schedule_irqoff(&q_vector->napi);
@@ -195,9 +206,13 @@ static irqreturn_t ngbe_intr(int __always_unused irq, void *data)
return IRQ_HANDLED;
}
-static irqreturn_t ngbe_msix_other(int __always_unused irq, void *data)
+static irqreturn_t __ngbe_msix_misc(struct wx *wx, u32 eicr)
{
- struct wx *wx = data;
+ if (eicr & NGBE_PX_MISC_IC_VF_MBOX)
+ wx_msg_task(wx);
+
+ if (unlikely(eicr & NGBE_PX_MISC_IC_TIMESYNC))
+ wx_ptp_check_pps_event(wx);
/* re-enable the original interrupt state, no lsc, no queues */
if (netif_running(wx->netdev))
@@ -206,6 +221,35 @@ static irqreturn_t ngbe_msix_other(int __always_unused irq, void *data)
return IRQ_HANDLED;
}
+static irqreturn_t ngbe_msix_misc(int __always_unused irq, void *data)
+{
+ struct wx *wx = data;
+ u32 eicr;
+
+ eicr = wx_misc_isb(wx, WX_ISB_MISC);
+
+ return __ngbe_msix_misc(wx, eicr);
+}
+
+static irqreturn_t ngbe_misc_and_queue(int __always_unused irq, void *data)
+{
+ struct wx_q_vector *q_vector;
+ struct wx *wx = data;
+ u32 eicr;
+
+ eicr = wx_misc_isb(wx, WX_ISB_MISC);
+ if (!eicr) {
+ /* queue */
+ q_vector = wx->q_vector[0];
+ napi_schedule_irqoff(&q_vector->napi);
+ if (netif_running(wx->netdev))
+ ngbe_irq_enable(wx, true);
+ return IRQ_HANDLED;
+ }
+
+ return __ngbe_msix_misc(wx, eicr);
+}
+
/**
* ngbe_request_msix_irqs - Initialize MSI-X interrupts
* @wx: board private structure
@@ -238,8 +282,16 @@ static int ngbe_request_msix_irqs(struct wx *wx)
}
}
- err = request_irq(wx->msix_entry->vector,
- ngbe_msix_other, 0, netdev->name, wx);
+	/* Due to hardware design, when num_vfs < 7 the PF can use vector 0 for
+	 * misc and vector 1 for the queue. But when num_vfs == 7, vector[1] is
+	 * assigned to VF6, so misc and queue must share interrupt vector[0].
+	 */
+ if (test_bit(WX_FLAG_IRQ_VECTOR_SHARED, wx->flags))
+ err = request_irq(wx->msix_entry->vector,
+ ngbe_misc_and_queue, 0, netdev->name, wx);
+ else
+ err = request_irq(wx->msix_entry->vector,
+ ngbe_msix_misc, 0, netdev->name, wx);
if (err) {
wx_err(wx, "request_irq for msix_other failed: %d\n", err);
@@ -291,6 +343,22 @@ static void ngbe_disable_device(struct wx *wx)
struct net_device *netdev = wx->netdev;
u32 i;
+ if (wx->num_vfs) {
+ /* Clear EITR Select mapping */
+ wr32(wx, WX_PX_ITRSEL, 0);
+
+ /* Mark all the VFs as inactive */
+ for (i = 0; i < wx->num_vfs; i++)
+ wx->vfinfo[i].clear_to_send = 0;
+ wx->notify_down = true;
+ /* ping all the active vfs to let them know we are going down */
+ wx_ping_all_vfs_with_link_status(wx, false);
+ wx->notify_down = false;
+
+ /* Disable all VFTE/VFRE TX/RX */
+ wx_disable_vf_rx_tx(wx);
+ }
+
/* disable all enabled rx queues */
for (i = 0; i < wx->num_rx_queues; i++)
/* this call also flushes the previous write */
@@ -313,10 +381,19 @@ static void ngbe_disable_device(struct wx *wx)
wx_update_stats(wx);
}
+static void ngbe_reset(struct wx *wx)
+{
+ wx_flush_sw_mac_table(wx);
+ wx_mac_set_default_filter(wx, wx->mac.addr);
+ if (test_bit(WX_STATE_PTP_RUNNING, wx->state))
+ wx_ptp_reset(wx);
+}
+
void ngbe_down(struct wx *wx)
{
phylink_stop(wx->phylink);
ngbe_disable_device(wx);
+ ngbe_reset(wx);
wx_clean_all_tx_rings(wx);
wx_clean_all_rx_rings(wx);
}
@@ -339,6 +416,11 @@ void ngbe_up(struct wx *wx)
ngbe_sfp_modules_txrx_powerctl(wx, true);
phylink_start(wx->phylink);
+ /* Set PF Reset Done bit so PF/VF Mail Ops can work */
+ wr32m(wx, WX_CFG_PORT_CTL,
+ WX_CFG_PORT_CTL_PFRSTD, WX_CFG_PORT_CTL_PFRSTD);
+ if (wx->num_vfs)
+ wx_ping_all_vfs_with_link_status(wx, false);
}
/**
@@ -379,6 +461,8 @@ static int ngbe_open(struct net_device *netdev)
if (err)
goto err_dis_phy;
+ wx_ptp_init(wx);
+
ngbe_up(wx);
return 0;
@@ -407,6 +491,7 @@ static int ngbe_close(struct net_device *netdev)
{
struct wx *wx = netdev_priv(netdev);
+ wx_ptp_stop(wx);
ngbe_down(wx);
wx_free_irq(wx);
wx_free_isb_resources(wx);
@@ -502,11 +587,14 @@ static const struct net_device_ops ngbe_netdev_ops = {
.ndo_set_rx_mode = wx_set_rx_mode,
.ndo_set_features = wx_set_features,
.ndo_fix_features = wx_fix_features,
+ .ndo_features_check = wx_features_check,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = wx_set_mac,
.ndo_get_stats64 = wx_get_stats64,
.ndo_vlan_rx_add_vid = wx_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = wx_vlan_rx_kill_vid,
+ .ndo_hwtstamp_set = wx_hwtstamp_set,
+ .ndo_hwtstamp_get = wx_hwtstamp_get,
};
/**
@@ -578,6 +666,10 @@ static int ngbe_probe(struct pci_dev *pdev,
goto err_pci_release_regions;
}
+	/* The Emerald supports up to 8 VFs per PF, but the physical
+	 * function also needs one pool for basic networking.
+	 */
+ pci_sriov_set_totalvfs(pdev, NGBE_MAX_VFS_DRV_LIMIT);
wx->driver_name = ngbe_driver_name;
ngbe_set_ethtool_ops(netdev);
netdev->netdev_ops = &ngbe_netdev_ops;
@@ -607,7 +699,7 @@ static int ngbe_probe(struct pci_dev *pdev,
/* setup the private structure */
err = ngbe_sw_init(wx);
if (err)
- goto err_free_mac_table;
+ goto err_pci_release_regions;
/* check if flash load is done after hw power up */
err = wx_check_flash_load(wx, NGBE_SPI_ILDR_STATUS_PERST);
@@ -701,6 +793,7 @@ err_register:
err_clear_interrupt_scheme:
wx_clear_interrupt_scheme(wx);
err_free_mac_table:
+ kfree(wx->rss_key);
kfree(wx->mac_table);
err_pci_release_regions:
pci_release_selected_regions(pdev,
@@ -725,6 +818,7 @@ static void ngbe_remove(struct pci_dev *pdev)
struct net_device *netdev;
netdev = wx->netdev;
+ wx_disable_sriov(wx);
unregister_netdev(netdev);
phylink_destroy(wx->phylink);
pci_release_selected_regions(pdev,
@@ -784,6 +878,7 @@ static struct pci_driver ngbe_driver = {
.suspend = ngbe_suspend,
.resume = ngbe_resume,
.shutdown = ngbe_shutdown,
+ .sriov_configure = wx_pci_sriov_configure,
};
module_pci_driver(ngbe_driver);
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c
index a5e9b779c44d..c63bb6e6f405 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c
@@ -7,7 +7,9 @@
#include <linux/phy.h>
#include "../libwx/wx_type.h"
+#include "../libwx/wx_ptp.h"
#include "../libwx/wx_hw.h"
+#include "../libwx/wx_sriov.h"
#include "ngbe_type.h"
#include "ngbe_mdio.h"
@@ -64,6 +66,13 @@ static void ngbe_mac_config(struct phylink_config *config, unsigned int mode,
static void ngbe_mac_link_down(struct phylink_config *config,
unsigned int mode, phy_interface_t interface)
{
+ struct wx *wx = phylink_to_wx(config);
+
+ wx->speed = SPEED_UNKNOWN;
+ if (test_bit(WX_STATE_PTP_RUNNING, wx->state))
+ wx_ptp_reset_cyclecounter(wx);
+ /* ping all the active vfs to let them know we are going down */
+ wx_ping_all_vfs_with_link_status(wx, false);
}
static void ngbe_mac_link_up(struct phylink_config *config,
@@ -103,6 +112,13 @@ static void ngbe_mac_link_up(struct phylink_config *config,
wr32(wx, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR);
reg = rd32(wx, WX_MAC_WDG_TIMEOUT);
wr32(wx, WX_MAC_WDG_TIMEOUT, reg);
+
+ wx->speed = speed;
+ wx->last_rx_ptp_check = jiffies;
+ if (test_bit(WX_STATE_PTP_RUNNING, wx->state))
+ wx_ptp_reset_cyclecounter(wx);
+ /* ping all the active vfs to let them know we are going up */
+ wx_ping_all_vfs_with_link_status(wx, true);
}
static const struct phylink_mac_ops ngbe_mac_ops = {
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h
index f48ed7fc1805..3b2ca7f47e33 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h
@@ -70,17 +70,24 @@
/* Extended Interrupt Enable Set */
#define NGBE_PX_MISC_IEN_DEV_RST BIT(10)
+#define NGBE_PX_MISC_IEN_TIMESYNC BIT(11)
#define NGBE_PX_MISC_IEN_ETH_LK BIT(18)
#define NGBE_PX_MISC_IEN_INT_ERR BIT(20)
+#define NGBE_PX_MISC_IC_VF_MBOX BIT(23)
#define NGBE_PX_MISC_IEN_GPIO BIT(26)
#define NGBE_PX_MISC_IEN_MASK ( \
NGBE_PX_MISC_IEN_DEV_RST | \
+ NGBE_PX_MISC_IEN_TIMESYNC | \
NGBE_PX_MISC_IEN_ETH_LK | \
NGBE_PX_MISC_IEN_INT_ERR | \
+ NGBE_PX_MISC_IC_VF_MBOX | \
NGBE_PX_MISC_IEN_GPIO)
+/* Extended Interrupt Cause Read */
+#define NGBE_PX_MISC_IC_TIMESYNC BIT(11) /* time sync */
+
#define NGBE_INTR_ALL 0x1FF
-#define NGBE_INTR_MISC BIT(0)
+#define NGBE_INTR_MISC(A) BIT((A)->msix_entry->entry)
#define NGBE_PHY_CONFIG(reg_offset) (0x14000 + ((reg_offset) * 4))
#define NGBE_CFG_LAN_SPEED 0x14440
@@ -129,6 +136,7 @@
#define NGBE_MAX_RXD 8192
#define NGBE_MIN_RXD 128
+#define NGBE_MAX_VFS_DRV_LIMIT 7
extern char ngbe_driver_name[];
void ngbe_down(struct wx *wx);
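
For clarity (illustrative only, the wrapper below is not in the patch): NGBE_INTR_MISC() is now parameterized so the misc enable bit follows whichever MSI-X entry the misc vector actually landed on, which is what lets misc and queue 0 share vector 0 in the seven-VF case handled in ngbe_request_msix_irqs().

static void ngbe_enable_misc_vector_sketch(struct wx *wx)
{
	/* NGBE_INTR_MISC(wx) expands to BIT(wx->msix_entry->entry) */
	wx_intr_enable(wx, NGBE_INTR_MISC(wx));
}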
diff --git a/drivers/net/ethernet/wangxun/txgbe/Makefile b/drivers/net/ethernet/wangxun/txgbe/Makefile
index f74576fe7062..c757fa95e58e 100644
--- a/drivers/net/ethernet/wangxun/txgbe/Makefile
+++ b/drivers/net/ethernet/wangxun/txgbe/Makefile
@@ -11,4 +11,5 @@ txgbe-objs := txgbe_main.o \
txgbe_phy.o \
txgbe_irq.o \
txgbe_fdir.o \
- txgbe_ethtool.o
+ txgbe_ethtool.o \
+ txgbe_aml.o
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c
new file mode 100644
index 000000000000..dc87ccad9652
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c
@@ -0,0 +1,386 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */
+
+#include <linux/phylink.h>
+#include <linux/iopoll.h>
+#include <linux/pci.h>
+#include <linux/phy.h>
+
+#include "../libwx/wx_type.h"
+#include "../libwx/wx_lib.h"
+#include "../libwx/wx_ptp.h"
+#include "../libwx/wx_hw.h"
+#include "../libwx/wx_sriov.h"
+#include "txgbe_type.h"
+#include "txgbe_aml.h"
+#include "txgbe_hw.h"
+
+void txgbe_gpio_init_aml(struct wx *wx)
+{
+ u32 status;
+
+ wr32(wx, WX_GPIO_INTTYPE_LEVEL, TXGBE_GPIOBIT_2 | TXGBE_GPIOBIT_3);
+ wr32(wx, WX_GPIO_INTEN, TXGBE_GPIOBIT_2 | TXGBE_GPIOBIT_3);
+
+ status = rd32(wx, WX_GPIO_INTSTATUS);
+ for (int i = 0; i < 6; i++) {
+ if (status & BIT(i))
+ wr32(wx, WX_GPIO_EOI, BIT(i));
+ }
+}
+
+irqreturn_t txgbe_gpio_irq_handler_aml(int irq, void *data)
+{
+ struct txgbe *txgbe = data;
+ struct wx *wx = txgbe->wx;
+ u32 status;
+
+ wr32(wx, WX_GPIO_INTMASK, 0xFF);
+ status = rd32(wx, WX_GPIO_INTSTATUS);
+ if (status & TXGBE_GPIOBIT_2) {
+ set_bit(WX_FLAG_NEED_SFP_RESET, wx->flags);
+ wr32(wx, WX_GPIO_EOI, TXGBE_GPIOBIT_2);
+ wx_service_event_schedule(wx);
+ }
+ if (status & TXGBE_GPIOBIT_3) {
+ set_bit(WX_FLAG_NEED_LINK_CONFIG, wx->flags);
+ wx_service_event_schedule(wx);
+ wr32(wx, WX_GPIO_EOI, TXGBE_GPIOBIT_3);
+ }
+
+ wr32(wx, WX_GPIO_INTMASK, 0);
+ return IRQ_HANDLED;
+}
+
+int txgbe_test_hostif(struct wx *wx)
+{
+ struct txgbe_hic_ephy_getlink buffer;
+
+ if (wx->mac.type != wx_mac_aml)
+ return 0;
+
+ buffer.hdr.cmd = FW_PHY_GET_LINK_CMD;
+ buffer.hdr.buf_len = sizeof(struct txgbe_hic_ephy_getlink) -
+ sizeof(struct wx_hic_hdr);
+ buffer.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
+
+ return wx_host_interface_command(wx, (u32 *)&buffer, sizeof(buffer),
+ WX_HI_COMMAND_TIMEOUT, true);
+}
+
+static int txgbe_identify_sfp_hostif(struct wx *wx, struct txgbe_hic_i2c_read *buffer)
+{
+ buffer->hdr.cmd = FW_READ_SFP_INFO_CMD;
+ buffer->hdr.buf_len = sizeof(struct txgbe_hic_i2c_read) -
+ sizeof(struct wx_hic_hdr);
+ buffer->hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
+
+ return wx_host_interface_command(wx, (u32 *)buffer,
+ sizeof(struct txgbe_hic_i2c_read),
+ WX_HI_COMMAND_TIMEOUT, true);
+}
+
+static int txgbe_set_phy_link_hostif(struct wx *wx, int speed, int autoneg, int duplex)
+{
+ struct txgbe_hic_ephy_setlink buffer;
+
+ buffer.hdr.cmd = FW_PHY_SET_LINK_CMD;
+ buffer.hdr.buf_len = sizeof(struct txgbe_hic_ephy_setlink) -
+ sizeof(struct wx_hic_hdr);
+ buffer.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
+
+ switch (speed) {
+ case SPEED_25000:
+ buffer.speed = TXGBE_LINK_SPEED_25GB_FULL;
+ break;
+ case SPEED_10000:
+ buffer.speed = TXGBE_LINK_SPEED_10GB_FULL;
+ break;
+ }
+
+ buffer.fec_mode = TXGBE_PHY_FEC_AUTO;
+ buffer.autoneg = autoneg;
+ buffer.duplex = duplex;
+
+ return wx_host_interface_command(wx, (u32 *)&buffer, sizeof(buffer),
+ WX_HI_COMMAND_TIMEOUT, true);
+}
+
+static void txgbe_get_link_capabilities(struct wx *wx)
+{
+ struct txgbe *txgbe = wx->priv;
+
+ if (test_bit(PHY_INTERFACE_MODE_25GBASER, txgbe->sfp_interfaces))
+ wx->adv_speed = SPEED_25000;
+ else if (test_bit(PHY_INTERFACE_MODE_10GBASER, txgbe->sfp_interfaces))
+ wx->adv_speed = SPEED_10000;
+ else
+ wx->adv_speed = SPEED_UNKNOWN;
+
+ wx->adv_duplex = wx->adv_speed == SPEED_UNKNOWN ?
+ DUPLEX_HALF : DUPLEX_FULL;
+}
+
+static void txgbe_get_phy_link(struct wx *wx, int *speed)
+{
+ u32 status;
+
+ status = rd32(wx, TXGBE_CFG_PORT_ST);
+ if (!(status & TXGBE_CFG_PORT_ST_LINK_UP))
+ *speed = SPEED_UNKNOWN;
+ else if (status & TXGBE_CFG_PORT_ST_LINK_AML_25G)
+ *speed = SPEED_25000;
+ else if (status & TXGBE_CFG_PORT_ST_LINK_AML_10G)
+ *speed = SPEED_10000;
+ else
+ *speed = SPEED_UNKNOWN;
+}
+
+int txgbe_set_phy_link(struct wx *wx)
+{
+ int speed, err;
+ u32 gpio;
+
+ /* Check RX signal */
+ gpio = rd32(wx, WX_GPIO_EXT);
+ if (gpio & TXGBE_GPIOBIT_3)
+ return -ENODEV;
+
+ txgbe_get_link_capabilities(wx);
+ if (wx->adv_speed == SPEED_UNKNOWN)
+ return -ENODEV;
+
+ txgbe_get_phy_link(wx, &speed);
+ if (speed == wx->adv_speed)
+ return 0;
+
+ err = txgbe_set_phy_link_hostif(wx, wx->adv_speed, 0, wx->adv_duplex);
+ if (err) {
+ wx_err(wx, "Failed to setup link\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int txgbe_sfp_to_linkmodes(struct wx *wx, struct txgbe_sfp_id *id)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(modes) = { 0, };
+ DECLARE_PHY_INTERFACE_MASK(interfaces);
+ struct txgbe *txgbe = wx->priv;
+
+ if (id->com_25g_code & (TXGBE_SFF_25GBASESR_CAPABLE |
+ TXGBE_SFF_25GBASEER_CAPABLE |
+ TXGBE_SFF_25GBASELR_CAPABLE)) {
+ phylink_set(modes, 25000baseSR_Full);
+ __set_bit(PHY_INTERFACE_MODE_25GBASER, interfaces);
+ }
+ if (id->com_10g_code & TXGBE_SFF_10GBASESR_CAPABLE) {
+ phylink_set(modes, 10000baseSR_Full);
+ __set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces);
+ }
+ if (id->com_10g_code & TXGBE_SFF_10GBASELR_CAPABLE) {
+ phylink_set(modes, 10000baseLR_Full);
+ __set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces);
+ }
+
+ if (phy_interface_empty(interfaces)) {
+ wx_err(wx, "unsupported SFP module\n");
+ return -EINVAL;
+ }
+
+ phylink_set(modes, Pause);
+ phylink_set(modes, Asym_Pause);
+ phylink_set(modes, FIBRE);
+ txgbe->link_port = PORT_FIBRE;
+
+ if (!linkmode_equal(txgbe->sfp_support, modes)) {
+ linkmode_copy(txgbe->sfp_support, modes);
+ phy_interface_and(txgbe->sfp_interfaces,
+ wx->phylink_config.supported_interfaces,
+ interfaces);
+ linkmode_copy(txgbe->advertising, modes);
+
+ set_bit(WX_FLAG_NEED_LINK_CONFIG, wx->flags);
+ }
+
+ return 0;
+}
+
+int txgbe_identify_sfp(struct wx *wx)
+{
+ struct txgbe_hic_i2c_read buffer;
+ struct txgbe_sfp_id *id;
+ int err = 0;
+ u32 gpio;
+
+ gpio = rd32(wx, WX_GPIO_EXT);
+ if (gpio & TXGBE_GPIOBIT_2)
+ return -ENODEV;
+
+ err = txgbe_identify_sfp_hostif(wx, &buffer);
+ if (err) {
+ wx_err(wx, "Failed to identify SFP module\n");
+ return err;
+ }
+
+ id = &buffer.id;
+ if (id->identifier != TXGBE_SFF_IDENTIFIER_SFP) {
+ wx_err(wx, "Invalid SFP module\n");
+ return -ENODEV;
+ }
+
+ err = txgbe_sfp_to_linkmodes(wx, id);
+ if (err)
+ return err;
+
+ if (gpio & TXGBE_GPIOBIT_3)
+ set_bit(WX_FLAG_NEED_LINK_CONFIG, wx->flags);
+
+ return 0;
+}
+
+void txgbe_setup_link(struct wx *wx)
+{
+ struct txgbe *txgbe = wx->priv;
+
+ phy_interface_zero(txgbe->sfp_interfaces);
+ linkmode_zero(txgbe->sfp_support);
+
+ txgbe_identify_sfp(wx);
+}
+
+static void txgbe_get_link_state(struct phylink_config *config,
+ struct phylink_link_state *state)
+{
+ struct wx *wx = phylink_to_wx(config);
+ int speed;
+
+ txgbe_get_phy_link(wx, &speed);
+ state->link = speed != SPEED_UNKNOWN;
+ state->speed = speed;
+ state->duplex = state->link ? DUPLEX_FULL : DUPLEX_UNKNOWN;
+}
+
+static void txgbe_reconfig_mac(struct wx *wx)
+{
+ u32 wdg, fc;
+
+ wdg = rd32(wx, WX_MAC_WDG_TIMEOUT);
+ fc = rd32(wx, WX_MAC_RX_FLOW_CTRL);
+
+ wr32(wx, WX_MIS_RST, TXGBE_MIS_RST_MAC_RST(wx->bus.func));
+	/* wait for the MAC reset to complete */
+ usleep_range(1000, 1500);
+
+ wr32m(wx, TXGBE_MAC_MISC_CTL, TXGBE_MAC_MISC_CTL_LINK_STS_MOD,
+ TXGBE_MAC_MISC_CTL_LINK_BOTH);
+ wx_reset_mac(wx);
+
+ wr32(wx, WX_MAC_WDG_TIMEOUT, wdg);
+ wr32(wx, WX_MAC_RX_FLOW_CTRL, fc);
+}
+
+static void txgbe_mac_link_up_aml(struct phylink_config *config,
+ struct phy_device *phy,
+ unsigned int mode,
+ phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ struct wx *wx = phylink_to_wx(config);
+ u32 txcfg;
+
+ wx_fc_enable(wx, tx_pause, rx_pause);
+
+ txgbe_reconfig_mac(wx);
+ txgbe_enable_sec_tx_path(wx);
+
+ txcfg = rd32(wx, TXGBE_AML_MAC_TX_CFG);
+ txcfg &= ~TXGBE_AML_MAC_TX_CFG_SPEED_MASK;
+
+ switch (speed) {
+ case SPEED_25000:
+ txcfg |= TXGBE_AML_MAC_TX_CFG_SPEED_25G;
+ break;
+ case SPEED_10000:
+ txcfg |= TXGBE_AML_MAC_TX_CFG_SPEED_10G;
+ break;
+ default:
+ break;
+ }
+
+ wr32m(wx, WX_MAC_RX_CFG, WX_MAC_RX_CFG_RE, WX_MAC_RX_CFG_RE);
+ wr32(wx, TXGBE_AML_MAC_TX_CFG, txcfg | TXGBE_AML_MAC_TX_CFG_TE);
+
+ wx->speed = speed;
+ wx->last_rx_ptp_check = jiffies;
+ if (test_bit(WX_STATE_PTP_RUNNING, wx->state))
+ wx_ptp_reset_cyclecounter(wx);
+ /* ping all the active vfs to let them know we are going up */
+ wx_ping_all_vfs_with_link_status(wx, true);
+}
+
+static void txgbe_mac_link_down_aml(struct phylink_config *config,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ struct wx *wx = phylink_to_wx(config);
+
+ wr32m(wx, TXGBE_AML_MAC_TX_CFG, TXGBE_AML_MAC_TX_CFG_TE, 0);
+ wr32m(wx, WX_MAC_RX_CFG, WX_MAC_RX_CFG_RE, 0);
+
+ wx->speed = SPEED_UNKNOWN;
+ if (test_bit(WX_STATE_PTP_RUNNING, wx->state))
+ wx_ptp_reset_cyclecounter(wx);
+ /* ping all the active vfs to let them know we are going down */
+ wx_ping_all_vfs_with_link_status(wx, false);
+}
+
+static void txgbe_mac_config_aml(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+}
+
+static const struct phylink_mac_ops txgbe_mac_ops_aml = {
+ .mac_config = txgbe_mac_config_aml,
+ .mac_link_down = txgbe_mac_link_down_aml,
+ .mac_link_up = txgbe_mac_link_up_aml,
+};
+
+int txgbe_phylink_init_aml(struct txgbe *txgbe)
+{
+ struct phylink_link_state state;
+ struct phylink_config *config;
+ struct wx *wx = txgbe->wx;
+ phy_interface_t phy_mode;
+ struct phylink *phylink;
+ int err;
+
+ config = &wx->phylink_config;
+ config->dev = &wx->netdev->dev;
+ config->type = PHYLINK_NETDEV;
+ config->mac_capabilities = MAC_25000FD | MAC_10000FD |
+ MAC_SYM_PAUSE | MAC_ASYM_PAUSE;
+ config->get_fixed_state = txgbe_get_link_state;
+
+ phy_mode = PHY_INTERFACE_MODE_25GBASER;
+ __set_bit(PHY_INTERFACE_MODE_25GBASER, config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_10GBASER, config->supported_interfaces);
+
+ phylink = phylink_create(config, NULL, phy_mode, &txgbe_mac_ops_aml);
+ if (IS_ERR(phylink))
+ return PTR_ERR(phylink);
+
+ state.speed = SPEED_25000;
+ state.duplex = DUPLEX_FULL;
+ err = phylink_set_fixed_link(phylink, &state);
+ if (err) {
+ wx_err(wx, "Failed to set fixed link\n");
+ return err;
+ }
+
+ wx->phylink = phylink;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.h
new file mode 100644
index 000000000000..25d4971ca0d9
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */
+
+#ifndef _TXGBE_AML_H_
+#define _TXGBE_AML_H_
+
+void txgbe_gpio_init_aml(struct wx *wx);
+irqreturn_t txgbe_gpio_irq_handler_aml(int irq, void *data);
+int txgbe_test_hostif(struct wx *wx);
+int txgbe_set_phy_link(struct wx *wx);
+int txgbe_identify_sfp(struct wx *wx);
+void txgbe_setup_link(struct wx *wx);
+int txgbe_phylink_init_aml(struct txgbe *txgbe);
+
+#endif /* _TXGBE_AML_H_ */
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
index d98314b26c19..a4753402660e 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
@@ -12,6 +12,31 @@
#include "txgbe_fdir.h"
#include "txgbe_ethtool.h"
+int txgbe_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct wx *wx = netdev_priv(netdev);
+ struct txgbe *txgbe = wx->priv;
+ int err;
+
+ if (wx->mac.type == wx_mac_aml40)
+ return -EOPNOTSUPP;
+
+ err = wx_get_link_ksettings(netdev, cmd);
+ if (err)
+ return err;
+
+ if (wx->mac.type == wx_mac_sp)
+ return 0;
+
+ cmd->base.port = txgbe->link_port;
+ cmd->base.autoneg = AUTONEG_DISABLE;
+ linkmode_copy(cmd->link_modes.supported, txgbe->sfp_support);
+ linkmode_copy(cmd->link_modes.advertising, txgbe->advertising);
+
+ return 0;
+}
+
static int txgbe_set_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring,
struct kernel_ethtool_ringparam *kernel_ring,
@@ -342,12 +367,19 @@ static int txgbe_add_ethtool_fdir_entry(struct txgbe *txgbe,
queue = TXGBE_RDB_FDIR_DROP_QUEUE;
} else {
u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
+ u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);
- if (ring >= wx->num_rx_queues)
+ if (!vf && ring >= wx->num_rx_queues)
+ return -EINVAL;
+ else if (vf && (vf > wx->num_vfs ||
+ ring >= wx->num_rx_queues_per_pool))
return -EINVAL;
/* Map the ring onto the absolute queue index */
- queue = wx->rx_ring[ring]->reg_idx;
+ if (!vf)
+ queue = wx->rx_ring[ring]->reg_idx;
+ else
+ queue = ((vf - 1) * wx->num_rx_queues_per_pool) + ring;
}
/* Don't allow indexes to exist outside of available space */
@@ -510,7 +542,7 @@ static const struct ethtool_ops txgbe_ethtool_ops = {
.get_drvinfo = wx_get_drvinfo,
.nway_reset = wx_nway_reset,
.get_link = ethtool_op_get_link,
- .get_link_ksettings = wx_get_link_ksettings,
+ .get_link_ksettings = txgbe_get_link_ksettings,
.set_link_ksettings = wx_set_link_ksettings,
.get_sset_count = wx_get_sset_count,
.get_strings = wx_get_strings,
@@ -529,6 +561,8 @@ static const struct ethtool_ops txgbe_ethtool_ops = {
.set_rxnfc = txgbe_set_rxnfc,
.get_msglevel = wx_get_msglevel,
.set_msglevel = wx_set_msglevel,
+ .get_ts_info = wx_get_ts_info,
+ .get_ts_stats = wx_get_ptp_stats,
};
void txgbe_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.h
index ace1b3571012..66dbc8ec1bb6 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.h
@@ -4,6 +4,8 @@
#ifndef _TXGBE_ETHTOOL_H_
#define _TXGBE_ETHTOOL_H_
+int txgbe_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd);
void txgbe_set_ethtool_ops(struct net_device *netdev);
#endif /* _TXGBE_ETHTOOL_H_ */
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_fdir.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_fdir.c
index ef50efbaec0f..a84010828551 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_fdir.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_fdir.c
@@ -307,6 +307,7 @@ void txgbe_atr(struct wx_ring *ring, struct wx_tx_buffer *first, u8 ptype)
int txgbe_fdir_set_input_mask(struct wx *wx, union txgbe_atr_input *input_mask)
{
u32 fdirm = 0, fdirtcpm = 0, flex = 0;
+ int index, offset;
/* Program the relevant mask registers. If src/dst_port or src/dst_addr
* are zero, then assume a full mask for that field. Also assume that
@@ -352,15 +353,17 @@ int txgbe_fdir_set_input_mask(struct wx *wx, union txgbe_atr_input *input_mask)
/* Now mask VM pool and destination IPv6 - bits 5 and 2 */
wr32(wx, TXGBE_RDB_FDIR_OTHER_MSK, fdirm);
- flex = rd32(wx, TXGBE_RDB_FDIR_FLEX_CFG(0));
- flex &= ~TXGBE_RDB_FDIR_FLEX_CFG_FIELD0;
+ index = VMDQ_P(0) / 4;
+ offset = VMDQ_P(0) % 4;
+ flex = rd32(wx, TXGBE_RDB_FDIR_FLEX_CFG(index));
+ flex &= ~(TXGBE_RDB_FDIR_FLEX_CFG_FIELD0 << (offset * 8));
flex |= (TXGBE_RDB_FDIR_FLEX_CFG_BASE_MAC |
- TXGBE_RDB_FDIR_FLEX_CFG_OFST(0x6));
+ TXGBE_RDB_FDIR_FLEX_CFG_OFST(0x6)) << (offset * 8);
switch ((__force u16)input_mask->formatted.flex_bytes & 0xFFFF) {
case 0x0000:
/* Mask Flex Bytes */
- flex |= TXGBE_RDB_FDIR_FLEX_CFG_MSK;
+ flex |= TXGBE_RDB_FDIR_FLEX_CFG_MSK << (offset * 8);
break;
case 0xFFFF:
break;
@@ -368,7 +371,7 @@ int txgbe_fdir_set_input_mask(struct wx *wx, union txgbe_atr_input *input_mask)
wx_err(wx, "Error on flexible byte mask\n");
return -EINVAL;
}
- wr32(wx, TXGBE_RDB_FDIR_FLEX_CFG(0), flex);
+ wr32(wx, TXGBE_RDB_FDIR_FLEX_CFG(index), flex);
/* store the TCP/UDP port masks, bit reversed from port layout */
fdirtcpm = ntohs(input_mask->formatted.dst_port);
@@ -516,14 +519,16 @@ static void txgbe_fdir_enable(struct wx *wx, u32 fdirctrl)
static void txgbe_init_fdir_signature(struct wx *wx)
{
u32 fdirctrl = TXGBE_FDIR_PBALLOC_64K;
+ int index = VMDQ_P(0) / 4;
+ int offset = VMDQ_P(0) % 4;
u32 flex = 0;
- flex = rd32(wx, TXGBE_RDB_FDIR_FLEX_CFG(0));
- flex &= ~TXGBE_RDB_FDIR_FLEX_CFG_FIELD0;
+ flex = rd32(wx, TXGBE_RDB_FDIR_FLEX_CFG(index));
+ flex &= ~(TXGBE_RDB_FDIR_FLEX_CFG_FIELD0 << (offset * 8));
flex |= (TXGBE_RDB_FDIR_FLEX_CFG_BASE_MAC |
- TXGBE_RDB_FDIR_FLEX_CFG_OFST(0x6));
- wr32(wx, TXGBE_RDB_FDIR_FLEX_CFG(0), flex);
+ TXGBE_RDB_FDIR_FLEX_CFG_OFST(0x6)) << (offset * 8);
+ wr32(wx, TXGBE_RDB_FDIR_FLEX_CFG(index), flex);
/* Continue setup of fdirctrl register bits:
* Move the flexible bytes to use the ethertype - shift 6 words
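
A worked note on the indexing introduced above (the helper is a sketch, not part of the patch): each TXGBE_RDB_FDIR_FLEX_CFG register packs four 8-bit flex-byte configurations, so pool VMDQ_P(0) selects register VMDQ_P(0) / 4 and byte lane VMDQ_P(0) % 4, shifted by eight bits per lane.

static void txgbe_flex_cfg_slot_sketch(unsigned int pool,
				       unsigned int *reg_index,
				       unsigned int *bit_shift)
{
	*reg_index = pool / 4;		/* four pools share one register */
	*bit_shift = (pool % 4) * 8;	/* 8-bit lane within that register */
}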
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
index cd1372da92a9..e551ae0e2069 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
@@ -99,9 +99,15 @@ static int txgbe_calc_eeprom_checksum(struct wx *wx, u16 *checksum)
}
local_buffer = eeprom_ptrs;
- for (i = 0; i < TXGBE_EEPROM_LAST_WORD; i++)
+ for (i = 0; i < TXGBE_EEPROM_LAST_WORD; i++) {
+ if (wx->mac.type == wx_mac_aml) {
+ if (i >= TXGBE_EEPROM_I2C_SRART_PTR &&
+ i < TXGBE_EEPROM_I2C_END_PTR)
+ local_buffer[i] = 0xffff;
+ }
if (i != wx->eeprom.sw_region_offset + TXGBE_EEPROM_CHECKSUM)
*checksum += local_buffer[i];
+ }
kvfree(eeprom_ptrs);
@@ -182,7 +188,7 @@ int txgbe_reset_hw(struct wx *wx)
if (status != 0)
return status;
- if (wx->media_type != sp_media_copper) {
+ if (wx->media_type != wx_media_copper) {
u32 val;
val = WX_MIS_RST_LAN_RST(wx->bus.func);
@@ -197,6 +203,12 @@ int txgbe_reset_hw(struct wx *wx)
txgbe_reset_misc(wx);
+ if (wx->mac.type != wx_mac_sp) {
+ wr32(wx, TXGBE_PX_PF_BME, 0x1);
+ wr32m(wx, TXGBE_RDM_RSC_CTL, TXGBE_RDM_RSC_CTL_FREE_CTL,
+ TXGBE_RDM_RSC_CTL_FREE_CTL);
+ }
+
wx_clear_hw_cntrs(wx);
/* Store the permanent mac address */
@@ -206,7 +218,7 @@ int txgbe_reset_hw(struct wx *wx)
* clear the multicast table. Also reset num_rar_entries to 128,
* since we modify this value when programming the SAN MAC address.
*/
- wx->mac.num_rar_entries = TXGBE_SP_RAR_ENTRIES;
+ wx->mac.num_rar_entries = TXGBE_RAR_ENTRIES;
wx_init_rx_addrs(wx);
pci_set_master(wx->pdev);
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
index 0ee73a265545..3885283681ec 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
@@ -6,10 +6,13 @@
#include "../libwx/wx_type.h"
#include "../libwx/wx_lib.h"
+#include "../libwx/wx_ptp.h"
#include "../libwx/wx_hw.h"
+#include "../libwx/wx_sriov.h"
#include "txgbe_type.h"
#include "txgbe_phy.h"
#include "txgbe_irq.h"
+#include "txgbe_aml.h"
/**
* txgbe_irq_enable - Enable default interrupt generation settings
@@ -18,10 +21,17 @@
**/
void txgbe_irq_enable(struct wx *wx, bool queues)
{
- wr32(wx, WX_PX_MISC_IEN, TXGBE_PX_MISC_IEN_MASK);
+ u32 misc_ien = TXGBE_PX_MISC_IEN_MASK;
+
+ if (wx->mac.type == wx_mac_aml) {
+ misc_ien |= TXGBE_PX_MISC_GPIO;
+ txgbe_gpio_init_aml(wx);
+ }
+
+ wr32(wx, WX_PX_MISC_IEN, misc_ien);
/* unmask interrupt */
- wx_intr_enable(wx, TXGBE_INTR_MISC);
+ wx_intr_enable(wx, TXGBE_INTR_MISC(wx));
if (queues)
wx_intr_enable(wx, TXGBE_INTR_QALL(wx));
}
@@ -68,7 +78,6 @@ free_queue_irqs:
free_irq(wx->msix_q_entries[vector].vector,
wx->q_vector[vector]);
}
- wx_reset_interrupt_capability(wx);
return err;
}
@@ -80,6 +89,14 @@ static int txgbe_request_link_irq(struct txgbe *txgbe)
IRQF_ONESHOT, "txgbe-link-irq", txgbe);
}
+static int txgbe_request_gpio_irq(struct txgbe *txgbe)
+{
+ txgbe->gpio_irq = irq_find_mapping(txgbe->misc.domain, TXGBE_IRQ_GPIO);
+ return request_threaded_irq(txgbe->gpio_irq, NULL,
+ txgbe_gpio_irq_handler_aml,
+ IRQF_ONESHOT, "txgbe-gpio-irq", txgbe);
+}
+
static const struct irq_chip txgbe_irq_chip = {
.name = "txgbe-misc-irq",
};
@@ -109,8 +126,15 @@ static irqreturn_t txgbe_misc_irq_handle(int irq, void *data)
struct wx *wx = txgbe->wx;
u32 eicr;
- if (wx->pdev->msix_enabled)
+ if (wx->pdev->msix_enabled) {
+ eicr = wx_misc_isb(wx, WX_ISB_MISC);
+ txgbe->eicr = eicr;
+ if (eicr & TXGBE_PX_MISC_IC_VF_MBOX) {
+ wx_msg_task(txgbe->wx);
+ wx_intr_enable(wx, TXGBE_INTR_MISC(wx));
+ }
return IRQ_WAKE_THREAD;
+ }
eicr = wx_misc_isb(wx, WX_ISB_VEC0);
if (!eicr) {
@@ -129,6 +153,8 @@ static irqreturn_t txgbe_misc_irq_handle(int irq, void *data)
q_vector = wx->q_vector[0];
napi_schedule_irqoff(&q_vector->napi);
+ txgbe->eicr = wx_misc_isb(wx, WX_ISB_MISC);
+
return IRQ_WAKE_THREAD;
}
@@ -140,15 +166,24 @@ static irqreturn_t txgbe_misc_irq_thread_fn(int irq, void *data)
unsigned int sub_irq;
u32 eicr;
- eicr = wx_misc_isb(wx, WX_ISB_MISC);
+ eicr = txgbe->eicr;
if (eicr & (TXGBE_PX_MISC_ETH_LK | TXGBE_PX_MISC_ETH_LKDN |
TXGBE_PX_MISC_ETH_AN)) {
sub_irq = irq_find_mapping(txgbe->misc.domain, TXGBE_IRQ_LINK);
handle_nested_irq(sub_irq);
nhandled++;
}
+ if (eicr & TXGBE_PX_MISC_GPIO) {
+ sub_irq = irq_find_mapping(txgbe->misc.domain, TXGBE_IRQ_GPIO);
+ handle_nested_irq(sub_irq);
+ nhandled++;
+ }
+ if (unlikely(eicr & TXGBE_PX_MISC_IC_TIMESYNC)) {
+ wx_ptp_check_pps_event(wx);
+ nhandled++;
+ }
- wx_intr_enable(wx, TXGBE_INTR_MISC);
+ wx_intr_enable(wx, TXGBE_INTR_MISC(wx));
return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE);
}
@@ -166,9 +201,16 @@ static void txgbe_del_irq_domain(struct txgbe *txgbe)
void txgbe_free_misc_irq(struct txgbe *txgbe)
{
+ if (txgbe->wx->mac.type == wx_mac_aml40)
+ return;
+
+ if (txgbe->wx->mac.type == wx_mac_aml)
+ free_irq(txgbe->gpio_irq, txgbe);
+
free_irq(txgbe->link_irq, txgbe);
free_irq(txgbe->misc.irq, txgbe);
txgbe_del_irq_domain(txgbe);
+ txgbe->wx->misc_irq_domain = false;
}
int txgbe_setup_misc_irq(struct txgbe *txgbe)
@@ -177,9 +219,12 @@ int txgbe_setup_misc_irq(struct txgbe *txgbe)
struct wx *wx = txgbe->wx;
int hwirq, err;
- txgbe->misc.nirqs = 1;
- txgbe->misc.domain = irq_domain_add_simple(NULL, txgbe->misc.nirqs, 0,
- &txgbe_misc_irq_domain_ops, txgbe);
+ if (wx->mac.type == wx_mac_aml40)
+ goto skip_sp_irq;
+
+ txgbe->misc.nirqs = TXGBE_IRQ_MAX;
+ txgbe->misc.domain = irq_domain_create_simple(NULL, txgbe->misc.nirqs, 0,
+ &txgbe_misc_irq_domain_ops, txgbe);
if (!txgbe->misc.domain)
return -ENOMEM;
@@ -206,10 +251,20 @@ int txgbe_setup_misc_irq(struct txgbe *txgbe)
if (err)
goto free_msic_irq;
+ if (wx->mac.type == wx_mac_sp)
+ goto skip_sp_irq;
+
+ err = txgbe_request_gpio_irq(txgbe);
+ if (err)
+ goto free_link_irq;
+
+skip_sp_irq:
wx->misc_irq_domain = true;
return 0;
+free_link_irq:
+ free_irq(txgbe->link_irq, txgbe);
free_msic_irq:
free_irq(txgbe->misc.irq, txgbe);
del_misc_irq:
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
index f77450268036..a5867f3c93fc 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
@@ -8,15 +8,20 @@
#include <linux/string.h>
#include <linux/etherdevice.h>
#include <linux/phylink.h>
+#include <net/udp_tunnel.h>
#include <net/ip.h>
#include <linux/if_vlan.h>
#include "../libwx/wx_type.h"
#include "../libwx/wx_lib.h"
+#include "../libwx/wx_ptp.h"
#include "../libwx/wx_hw.h"
+#include "../libwx/wx_mbx.h"
+#include "../libwx/wx_sriov.h"
#include "txgbe_type.h"
#include "txgbe_hw.h"
#include "txgbe_phy.h"
+#include "txgbe_aml.h"
#include "txgbe_irq.h"
#include "txgbe_fdir.h"
#include "txgbe_ethtool.h"
@@ -34,6 +39,12 @@ char txgbe_driver_name[] = "txgbe";
static const struct pci_device_id txgbe_pci_tbl[] = {
{ PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_SP1000), 0},
{ PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_WX1820), 0},
+ { PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_AML5010), 0},
+ { PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_AML5110), 0},
+ { PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_AML5025), 0},
+ { PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_AML5125), 0},
+ { PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_AML5040), 0},
+ { PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_AML5140), 0},
/* required last entry */
{ .device = 0 }
};
@@ -78,9 +89,62 @@ static int txgbe_enumerate_functions(struct wx *wx)
return physfns;
}
+static void txgbe_sfp_detection_subtask(struct wx *wx)
+{
+ int err;
+
+ if (!test_bit(WX_FLAG_NEED_SFP_RESET, wx->flags))
+ return;
+
+ /* wait for SFP module ready */
+ msleep(200);
+
+ err = txgbe_identify_sfp(wx);
+ if (err)
+ return;
+
+ clear_bit(WX_FLAG_NEED_SFP_RESET, wx->flags);
+}
+
+static void txgbe_link_config_subtask(struct wx *wx)
+{
+ int err;
+
+ if (!test_bit(WX_FLAG_NEED_LINK_CONFIG, wx->flags))
+ return;
+
+ err = txgbe_set_phy_link(wx);
+ if (err)
+ return;
+
+ clear_bit(WX_FLAG_NEED_LINK_CONFIG, wx->flags);
+}
+
+/**
+ * txgbe_service_task - manages and runs subtasks
+ * @work: pointer to work_struct containing our data
+ **/
+static void txgbe_service_task(struct work_struct *work)
+{
+ struct wx *wx = container_of(work, struct wx, service_task);
+
+ txgbe_sfp_detection_subtask(wx);
+ txgbe_link_config_subtask(wx);
+
+ wx_service_event_complete(wx);
+}
+
+static void txgbe_init_service(struct wx *wx)
+{
+ timer_setup(&wx->service_timer, wx_service_timer, 0);
+ INIT_WORK(&wx->service_task, txgbe_service_task);
+ clear_bit(WX_STATE_SERVICE_SCHED, wx->state);
+}
+
static void txgbe_up_complete(struct wx *wx)
{
struct net_device *netdev = wx->netdev;
+ u32 reg;
wx_control_hw(wx, true);
wx_configure_vectors(wx);
@@ -89,7 +153,27 @@ static void txgbe_up_complete(struct wx *wx)
smp_mb__before_atomic();
wx_napi_enable_all(wx);
- phylink_start(wx->phylink);
+ switch (wx->mac.type) {
+ case wx_mac_aml40:
+ reg = rd32(wx, TXGBE_AML_MAC_TX_CFG);
+ reg &= ~TXGBE_AML_MAC_TX_CFG_SPEED_MASK;
+ reg |= TXGBE_AML_MAC_TX_CFG_SPEED_40G;
+ wr32(wx, WX_MAC_TX_CFG, reg);
+ txgbe_enable_sec_tx_path(wx);
+ netif_carrier_on(wx->netdev);
+ break;
+ case wx_mac_aml:
+ /* Enable TX laser */
+ wr32m(wx, WX_GPIO_DR, TXGBE_GPIOBIT_1, 0);
+ txgbe_setup_link(wx);
+ phylink_start(wx->phylink);
+ break;
+ case wx_mac_sp:
+ phylink_start(wx->phylink);
+ break;
+ default:
+ break;
+ }
/* clear any pending interrupts, may auto mask */
rd32(wx, WX_PX_IC(0));
@@ -99,6 +183,13 @@ static void txgbe_up_complete(struct wx *wx)
/* enable transmits */
netif_tx_start_all_queues(netdev);
+ mod_timer(&wx->service_timer, jiffies);
+
+ /* Set PF Reset Done bit so PF/VF Mail Ops can work */
+ wr32m(wx, WX_CFG_PORT_CTL, WX_CFG_PORT_CTL_PFRSTD,
+ WX_CFG_PORT_CTL_PFRSTD);
+	/* update Rx/Tx settings for all active VFs */
+ wx_set_all_vfs(wx);
}
static void txgbe_reset(struct wx *wx)
@@ -116,6 +207,9 @@ static void txgbe_reset(struct wx *wx)
memcpy(old_addr, &wx->mac_table[0].addr, netdev->addr_len);
wx_flush_sw_mac_table(wx);
wx_mac_set_default_filter(wx, old_addr);
+
+ if (test_bit(WX_STATE_PTP_RUNNING, wx->state))
+ wx_ptp_reset(wx);
}
static void txgbe_disable_device(struct wx *wx)
@@ -138,12 +232,24 @@ static void txgbe_disable_device(struct wx *wx)
wx_irq_disable(wx);
wx_napi_disable_all(wx);
+ timer_delete_sync(&wx->service_timer);
+
if (wx->bus.func < 2)
wr32m(wx, TXGBE_MIS_PRB_CTL, TXGBE_MIS_PRB_CTL_LAN_UP(wx->bus.func), 0);
else
wx_err(wx, "%s: invalid bus lan id %d\n",
__func__, wx->bus.func);
+ if (wx->num_vfs) {
+ /* Clear EITR Select mapping */
+ wr32(wx, WX_PX_ITRSEL, 0);
+ /* Mark all the VFs as inactive */
+ for (i = 0; i < wx->num_vfs; i++)
+ wx->vfinfo[i].clear_to_send = 0;
+		/* update Rx/Tx settings for all active VFs */
+ wx_set_all_vfs(wx);
+ }
+
if (!(((wx->subsystem_device_id & WX_NCSI_MASK) == WX_NCSI_SUP) ||
((wx->subsystem_device_id & WX_WOL_MASK) == WX_WOL_SUP))) {
/* disable mac transmiter */
@@ -167,7 +273,22 @@ void txgbe_down(struct wx *wx)
{
txgbe_disable_device(wx);
txgbe_reset(wx);
- phylink_stop(wx->phylink);
+
+ switch (wx->mac.type) {
+ case wx_mac_aml40:
+ netif_carrier_off(wx->netdev);
+ break;
+ case wx_mac_aml:
+ phylink_stop(wx->phylink);
+ /* Disable TX laser */
+ wr32m(wx, WX_GPIO_DR, TXGBE_GPIOBIT_1, TXGBE_GPIOBIT_1);
+ break;
+ case wx_mac_sp:
+ phylink_stop(wx->phylink);
+ break;
+ default:
+ break;
+ }
wx_clean_all_tx_rings(wx);
wx_clean_all_rx_rings(wx);
@@ -176,6 +297,7 @@ void txgbe_down(struct wx *wx)
void txgbe_up(struct wx *wx)
{
wx_configure(wx);
+ wx_ptp_init(wx);
txgbe_up_complete(wx);
}
@@ -192,6 +314,16 @@ static void txgbe_init_type_code(struct wx *wx)
case TXGBE_DEV_ID_WX1820:
wx->mac.type = wx_mac_sp;
break;
+ case TXGBE_DEV_ID_AML5010:
+ case TXGBE_DEV_ID_AML5110:
+ case TXGBE_DEV_ID_AML5025:
+ case TXGBE_DEV_ID_AML5125:
+ wx->mac.type = wx_mac_aml;
+ break;
+ case TXGBE_DEV_ID_AML5040:
+ case TXGBE_DEV_ID_AML5140:
+ wx->mac.type = wx_mac_aml40;
+ break;
default:
wx->mac.type = wx_mac_unknown;
break;
@@ -199,25 +331,25 @@ static void txgbe_init_type_code(struct wx *wx)
switch (device_type) {
case TXGBE_ID_SFP:
- wx->media_type = sp_media_fiber;
+ wx->media_type = wx_media_fiber;
break;
case TXGBE_ID_XAUI:
case TXGBE_ID_SGMII:
- wx->media_type = sp_media_copper;
+ wx->media_type = wx_media_copper;
break;
case TXGBE_ID_KR_KX_KX4:
case TXGBE_ID_MAC_XAUI:
case TXGBE_ID_MAC_SGMII:
- wx->media_type = sp_media_backplane;
+ wx->media_type = wx_media_backplane;
break;
case TXGBE_ID_SFI_XAUI:
if (wx->bus.func == 0)
- wx->media_type = sp_media_fiber;
+ wx->media_type = wx_media_fiber;
else
- wx->media_type = sp_media_copper;
+ wx->media_type = wx_media_copper;
break;
default:
- wx->media_type = sp_media_unknown;
+ wx->media_type = wx_media_unknown;
break;
}
}
@@ -231,13 +363,13 @@ static int txgbe_sw_init(struct wx *wx)
u16 msix_count = 0;
int err;
- wx->mac.num_rar_entries = TXGBE_SP_RAR_ENTRIES;
- wx->mac.max_tx_queues = TXGBE_SP_MAX_TX_QUEUES;
- wx->mac.max_rx_queues = TXGBE_SP_MAX_RX_QUEUES;
- wx->mac.mcft_size = TXGBE_SP_MC_TBL_SIZE;
- wx->mac.vft_size = TXGBE_SP_VFT_TBL_SIZE;
- wx->mac.rx_pb_size = TXGBE_SP_RX_PB_SIZE;
- wx->mac.tx_pb_size = TXGBE_SP_TDB_PB_SZ;
+ wx->mac.num_rar_entries = TXGBE_RAR_ENTRIES;
+ wx->mac.max_tx_queues = TXGBE_MAX_TXQ;
+ wx->mac.max_rx_queues = TXGBE_MAX_RXQ;
+ wx->mac.mcft_size = TXGBE_MC_TBL_SIZE;
+ wx->mac.vft_size = TXGBE_VFT_TBL_SIZE;
+ wx->mac.rx_pb_size = TXGBE_RX_PB_SIZE;
+ wx->mac.tx_pb_size = TXGBE_TDB_PB_SZ;
/* PCI config space info */
err = wx_sw_init(wx);
@@ -265,6 +397,9 @@ static int txgbe_sw_init(struct wx *wx)
wx->atr = txgbe_atr;
wx->configure_fdir = txgbe_configure_fdir;
+ set_bit(WX_FLAG_RSC_CAPABLE, wx->flags);
+ set_bit(WX_FLAG_MULTI_64_FUNC, wx->flags);
+
/* enable itr by default in dynamic mode */
wx->rx_itr_setting = 1;
wx->tx_itr_setting = 1;
@@ -272,12 +407,27 @@ static int txgbe_sw_init(struct wx *wx)
/* set default ring sizes */
wx->tx_ring_count = TXGBE_DEFAULT_TXD;
wx->rx_ring_count = TXGBE_DEFAULT_RXD;
+ wx->mbx.size = WX_VXMAILBOX_SIZE;
/* set default work limits */
wx->tx_work_limit = TXGBE_DEFAULT_TX_WORK;
wx->rx_work_limit = TXGBE_DEFAULT_RX_WORK;
+ wx->setup_tc = txgbe_setup_tc;
wx->do_reset = txgbe_do_reset;
+ set_bit(0, &wx->fwd_bitmask);
+
+ switch (wx->mac.type) {
+ case wx_mac_sp:
+ break;
+ case wx_mac_aml:
+ case wx_mac_aml40:
+ set_bit(WX_FLAG_SWFW_RING, wx->flags);
+ wx->swfw_index = 0;
+ break;
+ default:
+ break;
+ }
return 0;
}
@@ -308,10 +458,14 @@ static int txgbe_open(struct net_device *netdev)
wx_configure(wx);
- err = txgbe_request_queue_irqs(wx);
+ err = txgbe_setup_misc_irq(wx->priv);
if (err)
goto err_free_resources;
+ err = txgbe_request_queue_irqs(wx);
+ if (err)
+ goto err_free_misc_irq;
+
/* Notify the stack of the actual queue counts. */
err = netif_set_real_num_tx_queues(netdev, wx->num_tx_queues);
if (err)
@@ -321,12 +475,17 @@ static int txgbe_open(struct net_device *netdev)
if (err)
goto err_free_irq;
+ wx_ptp_init(wx);
+
txgbe_up_complete(wx);
return 0;
err_free_irq:
wx_free_irq(wx);
+err_free_misc_irq:
+ txgbe_free_misc_irq(wx->priv);
+ wx_reset_interrupt_capability(wx);
err_free_resources:
wx_free_resources(wx);
err_reset:
@@ -344,6 +503,7 @@ err_reset:
*/
static void txgbe_close_suspend(struct wx *wx)
{
+ wx_ptp_suspend(wx);
txgbe_disable_device(wx);
wx_free_resources(wx);
}
@@ -363,8 +523,10 @@ static int txgbe_close(struct net_device *netdev)
{
struct wx *wx = netdev_priv(netdev);
+ wx_ptp_stop(wx);
txgbe_down(wx);
wx_free_irq(wx);
+ txgbe_free_misc_irq(wx->priv);
wx_free_resources(wx);
txgbe_fdir_filter_exit(wx);
wx_control_hw(wx, false);
@@ -410,7 +572,6 @@ static void txgbe_shutdown(struct pci_dev *pdev)
int txgbe_setup_tc(struct net_device *dev, u8 tc)
{
struct wx *wx = netdev_priv(dev);
- struct txgbe *txgbe = wx->priv;
/* Hardware has to reinitialize queues and interrupts to
* match packet buffer alignment. Unfortunately, the
@@ -421,7 +582,6 @@ int txgbe_setup_tc(struct net_device *dev, u8 tc)
else
txgbe_reset(wx);
- txgbe_free_misc_irq(txgbe);
wx_clear_interrupt_scheme(wx);
if (tc)
@@ -430,7 +590,6 @@ int txgbe_setup_tc(struct net_device *dev, u8 tc)
netdev_reset_tc(dev);
wx_init_interrupt_scheme(wx);
- txgbe_setup_misc_irq(txgbe);
if (netif_running(dev))
txgbe_open(dev);
@@ -466,6 +625,39 @@ void txgbe_do_reset(struct net_device *netdev)
txgbe_reset(wx);
}
+static int txgbe_udp_tunnel_sync(struct net_device *dev, unsigned int table)
+{
+ struct wx *wx = netdev_priv(dev);
+ struct udp_tunnel_info ti;
+
+ udp_tunnel_nic_get_port(dev, table, 0, &ti);
+ switch (ti.type) {
+ case UDP_TUNNEL_TYPE_VXLAN:
+ wr32(wx, TXGBE_CFG_VXLAN, ntohs(ti.port));
+ break;
+ case UDP_TUNNEL_TYPE_VXLAN_GPE:
+ wr32(wx, TXGBE_CFG_VXLAN_GPE, ntohs(ti.port));
+ break;
+ case UDP_TUNNEL_TYPE_GENEVE:
+ wr32(wx, TXGBE_CFG_GENEVE, ntohs(ti.port));
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static const struct udp_tunnel_nic_info txgbe_udp_tunnels = {
+ .sync_table = txgbe_udp_tunnel_sync,
+ .flags = UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
+ .tables = {
+ { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
+ { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN_GPE, },
+ { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
+ },
+};
+
static const struct net_device_ops txgbe_netdev_ops = {
.ndo_open = txgbe_open,
.ndo_stop = txgbe_close,
@@ -474,11 +666,14 @@ static const struct net_device_ops txgbe_netdev_ops = {
.ndo_set_rx_mode = wx_set_rx_mode,
.ndo_set_features = wx_set_features,
.ndo_fix_features = wx_fix_features,
+ .ndo_features_check = wx_features_check,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = wx_set_mac,
.ndo_get_stats64 = wx_get_stats64,
.ndo_vlan_rx_add_vid = wx_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = wx_vlan_rx_kill_vid,
+ .ndo_hwtstamp_set = wx_hwtstamp_set,
+ .ndo_hwtstamp_get = wx_hwtstamp_get,
};
/**
@@ -552,14 +747,19 @@ static int txgbe_probe(struct pci_dev *pdev,
goto err_pci_release_regions;
}
+	/* The Sapphire supports up to 63 VFs per PF, but the physical
+	 * function also needs one pool for basic networking.
+	 */
+ pci_sriov_set_totalvfs(pdev, TXGBE_MAX_VFS_DRV_LIMIT);
wx->driver_name = txgbe_driver_name;
txgbe_set_ethtool_ops(netdev);
netdev->netdev_ops = &txgbe_netdev_ops;
+ netdev->udp_tunnel_nic_info = &txgbe_udp_tunnels;
/* setup the private structure */
err = txgbe_sw_init(wx);
if (err)
- goto err_free_mac_table;
+ goto err_pci_release_regions;
/* check if flash load is done after hw power up */
err = wx_check_flash_load(wx, TXGBE_SPI_ILDR_STATUS_PERST);
@@ -600,6 +800,7 @@ static int txgbe_probe(struct pci_dev *pdev,
netdev->features |= NETIF_F_HIGHDMA;
netdev->hw_features |= NETIF_F_GRO;
netdev->features |= NETIF_F_GRO;
+ netdev->features |= NETIF_F_RX_UDP_TUNNEL_PORT;
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->priv_flags |= IFF_SUPP_NOFCS;
@@ -621,9 +822,11 @@ static int txgbe_probe(struct pci_dev *pdev,
eth_hw_addr_set(netdev, wx->mac.perm_addr);
wx_mac_set_default_filter(wx, wx->mac.perm_addr);
+ txgbe_init_service(wx);
+
err = wx_init_interrupt_scheme(wx);
if (err)
- goto err_free_mac_table;
+ goto err_cancel_service;
/* Save off EEPROM version number and Option Rom version which
* together make a unique identify for the eeprom
@@ -666,6 +869,13 @@ static int txgbe_probe(struct pci_dev *pdev,
if (etrack_id < 0x20010)
dev_warn(&pdev->dev, "Please upgrade the firmware to 0x20010 or above.\n");
+ err = txgbe_test_hostif(wx);
+ if (err != 0) {
+ dev_err(&pdev->dev, "Mismatched Firmware version\n");
+ err = -EIO;
+ goto err_release_hw;
+ }
+
txgbe = devm_kzalloc(&pdev->dev, sizeof(*txgbe), GFP_KERNEL);
if (!txgbe) {
err = -ENOMEM;
@@ -677,13 +887,9 @@ static int txgbe_probe(struct pci_dev *pdev,
txgbe_init_fdir(txgbe);
- err = txgbe_setup_misc_irq(txgbe);
- if (err)
- goto err_release_hw;
-
err = txgbe_init_phy(txgbe);
if (err)
- goto err_free_misc_irq;
+ goto err_release_hw;
err = register_netdev(netdev);
if (err)
@@ -711,12 +917,14 @@ static int txgbe_probe(struct pci_dev *pdev,
err_remove_phy:
txgbe_remove_phy(txgbe);
-err_free_misc_irq:
- txgbe_free_misc_irq(txgbe);
err_release_hw:
wx_clear_interrupt_scheme(wx);
wx_control_hw(wx, false);
+err_cancel_service:
+ timer_delete_sync(&wx->service_timer);
+ cancel_work_sync(&wx->service_task);
err_free_mac_table:
+ kfree(wx->rss_key);
kfree(wx->mac_table);
err_pci_release_regions:
pci_release_selected_regions(pdev,
@@ -741,11 +949,13 @@ static void txgbe_remove(struct pci_dev *pdev)
struct txgbe *txgbe = wx->priv;
struct net_device *netdev;
+ cancel_work_sync(&wx->service_task);
+
netdev = wx->netdev;
+ wx_disable_sriov(wx);
unregister_netdev(netdev);
txgbe_remove_phy(txgbe);
- txgbe_free_misc_irq(txgbe);
wx_free_isb_resources(wx);
pci_release_selected_regions(pdev,
@@ -764,11 +974,12 @@ static struct pci_driver txgbe_driver = {
.probe = txgbe_probe,
.remove = txgbe_remove,
.shutdown = txgbe_shutdown,
+ .sriov_configure = wx_pci_sriov_configure,
};
module_pci_driver(txgbe_driver);
MODULE_DEVICE_TABLE(pci, txgbe_pci_tbl);
MODULE_AUTHOR("Beijing WangXun Technology Co., Ltd, <software@trustnetic.com>");
-MODULE_DESCRIPTION("WangXun(R) 10 Gigabit PCI Express Network Driver");
+MODULE_DESCRIPTION("WangXun(R) 10/25/40 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");
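
A condensed, illustrative view (the function below is hypothetical) of the deferred link handling this series adds: interrupt context only raises a flag and schedules the service task, while txgbe_service_task() later runs txgbe_sfp_detection_subtask() and txgbe_link_config_subtask() in process context and clears the flags on success.

static void txgbe_link_event_sketch(struct wx *wx)
{
	/* from IRQ context: mark the pending work and kick the service task */
	set_bit(WX_FLAG_NEED_LINK_CONFIG, wx->flags);
	wx_service_event_schedule(wx);

	/* later, in process context, txgbe_service_task() calls
	 * txgbe_link_config_subtask() -> txgbe_set_phy_link() and clears
	 * the flag when it succeeds.
	 */
}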
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
index 1ae68f94dd49..03f1b9bc604d 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
@@ -15,8 +15,12 @@
#include "../libwx/wx_type.h"
#include "../libwx/wx_lib.h"
+#include "../libwx/wx_ptp.h"
+#include "../libwx/wx_sriov.h"
+#include "../libwx/wx_mbx.h"
#include "../libwx/wx_hw.h"
#include "txgbe_type.h"
+#include "txgbe_aml.h"
#include "txgbe_phy.h"
#include "txgbe_hw.h"
@@ -162,7 +166,7 @@ static struct phylink_pcs *txgbe_phylink_mac_select(struct phylink_config *confi
struct wx *wx = phylink_to_wx(config);
struct txgbe *txgbe = wx->priv;
- if (wx->media_type != sp_media_copper)
+ if (wx->media_type != wx_media_copper)
return txgbe->pcs;
return NULL;
@@ -179,6 +183,12 @@ static void txgbe_mac_link_down(struct phylink_config *config,
struct wx *wx = phylink_to_wx(config);
wr32m(wx, WX_MAC_TX_CFG, WX_MAC_TX_CFG_TE, 0);
+
+ wx->speed = SPEED_UNKNOWN;
+ if (test_bit(WX_STATE_PTP_RUNNING, wx->state))
+ wx_ptp_reset_cyclecounter(wx);
+ /* ping all the active vfs to let them know we are going down */
+ wx_ping_all_vfs_with_link_status(wx, false);
}
static void txgbe_mac_link_up(struct phylink_config *config,
@@ -215,6 +225,13 @@ static void txgbe_mac_link_up(struct phylink_config *config,
wr32(wx, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR);
wdg = rd32(wx, WX_MAC_WDG_TIMEOUT);
wr32(wx, WX_MAC_WDG_TIMEOUT, wdg);
+
+ wx->speed = speed;
+ wx->last_rx_ptp_check = jiffies;
+ if (test_bit(WX_STATE_PTP_RUNNING, wx->state))
+ wx_ptp_reset_cyclecounter(wx);
+ /* ping all the active vfs to let them know we are going up */
+ wx_ping_all_vfs_with_link_status(wx, true);
}
static int txgbe_mac_prepare(struct phylink_config *config, unsigned int mode,
@@ -262,7 +279,7 @@ static int txgbe_phylink_init(struct txgbe *txgbe)
config->mac_capabilities = MAC_10000FD | MAC_1000FD | MAC_100FD |
MAC_SYM_PAUSE | MAC_ASYM_PAUSE;
- if (wx->media_type == sp_media_copper) {
+ if (wx->media_type == wx_media_copper) {
phy_mode = PHY_INTERFACE_MODE_XAUI;
__set_bit(PHY_INTERFACE_MODE_XAUI, config->supported_interfaces);
} else {
@@ -302,7 +319,10 @@ irqreturn_t txgbe_link_irq_handler(int irq, void *data)
status = rd32(wx, TXGBE_CFG_PORT_ST);
up = !!(status & TXGBE_CFG_PORT_ST_LINK_UP);
- phylink_pcs_change(txgbe->pcs, up);
+ if (txgbe->pcs)
+ phylink_pcs_change(txgbe->pcs, up);
+ else
+ phylink_mac_change(wx->phylink, up);
return IRQ_HANDLED;
}
@@ -557,8 +577,18 @@ int txgbe_init_phy(struct txgbe *txgbe)
struct wx *wx = txgbe->wx;
int ret;
- if (txgbe->wx->media_type == sp_media_copper)
- return txgbe_ext_phy_init(txgbe);
+ switch (wx->mac.type) {
+ case wx_mac_aml40:
+ return 0;
+ case wx_mac_aml:
+ return txgbe_phylink_init_aml(txgbe);
+ case wx_mac_sp:
+ if (wx->media_type == wx_media_copper)
+ return txgbe_ext_phy_init(txgbe);
+ break;
+ default:
+ break;
+ }
ret = txgbe_swnodes_register(txgbe);
if (ret) {
@@ -621,10 +651,21 @@ err_unregister_swnode:
void txgbe_remove_phy(struct txgbe *txgbe)
{
- if (txgbe->wx->media_type == sp_media_copper) {
- phylink_disconnect_phy(txgbe->wx->phylink);
+ switch (txgbe->wx->mac.type) {
+ case wx_mac_aml40:
+ return;
+ case wx_mac_aml:
phylink_destroy(txgbe->wx->phylink);
return;
+ case wx_mac_sp:
+ if (txgbe->wx->media_type == wx_media_copper) {
+ phylink_disconnect_phy(txgbe->wx->phylink);
+ phylink_destroy(txgbe->wx->phylink);
+ return;
+ }
+ break;
+ default:
+ break;
}
platform_device_unregister(txgbe->sfp_dev);
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h
index 3938985355ed..a32b19d71ea2 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h
@@ -8,4 +8,4 @@ irqreturn_t txgbe_link_irq_handler(int irq, void *data);
int txgbe_init_phy(struct txgbe *txgbe);
void txgbe_remove_phy(struct txgbe *txgbe);
-#endif /* _TXGBE_NODE_H_ */
+#endif /* _TXGBE_PHY_H_ */
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
index 629a13e96b85..41915d7dd372 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
@@ -6,10 +6,18 @@
#include <linux/property.h>
#include <linux/irq.h>
+#include <linux/phy.h>
+#include "../libwx/wx_type.h"
/* Device IDs */
#define TXGBE_DEV_ID_SP1000 0x1001
#define TXGBE_DEV_ID_WX1820 0x2001
+#define TXGBE_DEV_ID_AML5010 0x5010
+#define TXGBE_DEV_ID_AML5110 0x5110
+#define TXGBE_DEV_ID_AML5025 0x5025
+#define TXGBE_DEV_ID_AML5125 0x5125
+#define TXGBE_DEV_ID_AML5040 0x5040
+#define TXGBE_DEV_ID_AML5140 0x5140
/* Subsystem IDs */
/* SFP */
@@ -44,6 +52,8 @@
/**************** SP Registers ****************************/
/* chip control Registers */
+#define TXGBE_MIS_RST 0x1000C
+#define TXGBE_MIS_RST_MAC_RST(_i) BIT(20 - (_i) * 3)
#define TXGBE_MIS_PRB_CTL 0x10010
#define TXGBE_MIS_PRB_CTL_LAN_UP(_i) BIT(1 - (_i))
/* FMGR Registers */
@@ -56,6 +66,11 @@
#define TXGBE_TS_CTL 0x10300
#define TXGBE_TS_CTL_EVAL_MD BIT(31)
+/* MAC Misc Registers */
+#define TXGBE_MAC_MISC_CTL 0x11F00
+#define TXGBE_MAC_MISC_CTL_LINK_STS_MOD BIT(0)
+#define TXGBE_MAC_MISC_CTL_LINK_PCS FIELD_PREP(BIT(0), 0)
+#define TXGBE_MAC_MISC_CTL_LINK_BOTH FIELD_PREP(BIT(0), 1)
/* GPIO register bit */
#define TXGBE_GPIOBIT_0 BIT(0) /* I:tx fault */
#define TXGBE_GPIOBIT_1 BIT(1) /* O:tx disabled */
@@ -67,19 +82,27 @@
/* Extended Interrupt Enable Set */
#define TXGBE_PX_MISC_ETH_LKDN BIT(8)
#define TXGBE_PX_MISC_DEV_RST BIT(10)
+#define TXGBE_PX_MISC_IC_TIMESYNC BIT(11)
#define TXGBE_PX_MISC_ETH_EVENT BIT(17)
#define TXGBE_PX_MISC_ETH_LK BIT(18)
#define TXGBE_PX_MISC_ETH_AN BIT(19)
#define TXGBE_PX_MISC_INT_ERR BIT(20)
+#define TXGBE_PX_MISC_IC_VF_MBOX BIT(23)
#define TXGBE_PX_MISC_GPIO BIT(26)
#define TXGBE_PX_MISC_IEN_MASK \
(TXGBE_PX_MISC_ETH_LKDN | TXGBE_PX_MISC_DEV_RST | \
TXGBE_PX_MISC_ETH_EVENT | TXGBE_PX_MISC_ETH_LK | \
- TXGBE_PX_MISC_ETH_AN | TXGBE_PX_MISC_INT_ERR)
+ TXGBE_PX_MISC_ETH_AN | TXGBE_PX_MISC_INT_ERR | \
+ TXGBE_PX_MISC_IC_VF_MBOX | TXGBE_PX_MISC_IC_TIMESYNC)
/* Port cfg registers */
#define TXGBE_CFG_PORT_ST 0x14404
#define TXGBE_CFG_PORT_ST_LINK_UP BIT(0)
+#define TXGBE_CFG_PORT_ST_LINK_AML_25G BIT(3)
+#define TXGBE_CFG_PORT_ST_LINK_AML_10G BIT(4)
+#define TXGBE_CFG_VXLAN 0x14410
+#define TXGBE_CFG_VXLAN_GPE 0x14414
+#define TXGBE_CFG_GENEVE 0x14418
/* I2C registers */
#define TXGBE_I2C_BASE 0x14900
@@ -137,6 +160,17 @@
#define TXGBE_RDB_FDIR_FLEX_CFG_MSK BIT(2)
#define TXGBE_RDB_FDIR_FLEX_CFG_OFST(v) FIELD_PREP(GENMASK(7, 3), v)
+/*************************** Amber Lite Registers ****************************/
+#define TXGBE_PX_PF_BME 0x4B8
+#define TXGBE_AML_MAC_TX_CFG 0x11000
+#define TXGBE_AML_MAC_TX_CFG_TE BIT(0)
+#define TXGBE_AML_MAC_TX_CFG_SPEED_MASK GENMASK(30, 27)
+#define TXGBE_AML_MAC_TX_CFG_SPEED_40G FIELD_PREP(GENMASK(30, 27), 0)
+#define TXGBE_AML_MAC_TX_CFG_SPEED_25G FIELD_PREP(GENMASK(30, 27), 2)
+#define TXGBE_AML_MAC_TX_CFG_SPEED_10G FIELD_PREP(GENMASK(30, 27), 8)
+#define TXGBE_RDM_RSC_CTL 0x1200C
+#define TXGBE_RDM_RSC_CTL_FREE_CTL BIT(7)
+
/* Checksum and EEPROM pointers */
#define TXGBE_EEPROM_LAST_WORD 0x800
#define TXGBE_EEPROM_CHECKSUM 0x2F
@@ -144,6 +178,8 @@
#define TXGBE_EEPROM_VERSION_L 0x1D
#define TXGBE_EEPROM_VERSION_H 0x1E
#define TXGBE_ISCSI_BOOT_CONFIG 0x07
+#define TXGBE_EEPROM_I2C_SRART_PTR 0x580
+#define TXGBE_EEPROM_I2C_END_PTR 0x800
#define TXGBE_MAX_MSIX_VECTORS 64
#define TXGBE_MAX_FDIR_INDICES 63
@@ -152,13 +188,15 @@
#define TXGBE_MAX_RX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1)
#define TXGBE_MAX_TX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1)
-#define TXGBE_SP_MAX_TX_QUEUES 128
-#define TXGBE_SP_MAX_RX_QUEUES 128
-#define TXGBE_SP_RAR_ENTRIES 128
-#define TXGBE_SP_MC_TBL_SIZE 128
-#define TXGBE_SP_VFT_TBL_SIZE 128
-#define TXGBE_SP_RX_PB_SIZE 512
-#define TXGBE_SP_TDB_PB_SZ (160 * 1024) /* 160KB Packet Buffer */
+#define TXGBE_MAX_TXQ 128
+#define TXGBE_MAX_RXQ 128
+#define TXGBE_RAR_ENTRIES 128
+#define TXGBE_MC_TBL_SIZE 128
+#define TXGBE_VFT_TBL_SIZE 128
+#define TXGBE_RX_PB_SIZE 512
+#define TXGBE_TDB_PB_SZ (160 * 1024) /* 160KB Packet Buffer */
+
+#define TXGBE_MAX_VFS_DRV_LIMIT 63
#define TXGBE_DEFAULT_ATR_SAMPLE_RATE 20
@@ -249,7 +287,7 @@ struct txgbe_fdir_filter {
struct hlist_node fdir_node;
union txgbe_atr_input filter;
u16 sw_idx;
- u16 action;
+ u64 action;
};
/* TX/RX descriptor defines */
@@ -264,8 +302,8 @@ struct txgbe_fdir_filter {
#define TXGBE_DEFAULT_RX_WORK 128
#endif
-#define TXGBE_INTR_MISC BIT(0)
-#define TXGBE_INTR_QALL(A) GENMASK((A)->num_q_vectors, 1)
+#define TXGBE_INTR_MISC(A) BIT((A)->num_q_vectors)
+#define TXGBE_INTR_QALL(A) (TXGBE_INTR_MISC(A) - 1)
#define TXGBE_MAX_EITR GENMASK(11, 3)
@@ -276,6 +314,72 @@ void txgbe_up(struct wx *wx);
int txgbe_setup_tc(struct net_device *dev, u8 tc);
void txgbe_do_reset(struct net_device *netdev);
+#define TXGBE_LINK_SPEED_10GB_FULL 4
+#define TXGBE_LINK_SPEED_25GB_FULL 0x10
+
+#define TXGBE_SFF_IDENTIFIER_SFP 0x3
+#define TXGBE_SFF_DA_PASSIVE_CABLE 0x4
+#define TXGBE_SFF_DA_ACTIVE_CABLE 0x8
+#define TXGBE_SFF_DA_SPEC_ACTIVE_LIMIT 0x4
+#define TXGBE_SFF_FCPI4_LIMITING 0x3
+#define TXGBE_SFF_10GBASESR_CAPABLE 0x10
+#define TXGBE_SFF_10GBASELR_CAPABLE 0x20
+#define TXGBE_SFF_25GBASESR_CAPABLE 0x2
+#define TXGBE_SFF_25GBASELR_CAPABLE 0x3
+#define TXGBE_SFF_25GBASEER_CAPABLE 0x4
+#define TXGBE_SFF_25GBASECR_91FEC 0xB
+#define TXGBE_SFF_25GBASECR_74FEC 0xC
+#define TXGBE_SFF_25GBASECR_NOFEC 0xD
+
+#define TXGBE_PHY_FEC_RS BIT(0)
+#define TXGBE_PHY_FEC_BASER BIT(1)
+#define TXGBE_PHY_FEC_OFF BIT(2)
+#define TXGBE_PHY_FEC_AUTO (TXGBE_PHY_FEC_OFF | \
+ TXGBE_PHY_FEC_BASER |\
+ TXGBE_PHY_FEC_RS)
+
+#define FW_PHY_GET_LINK_CMD 0xC0
+#define FW_PHY_SET_LINK_CMD 0xC1
+#define FW_READ_SFP_INFO_CMD 0xC5
+
+struct txgbe_sfp_id {
+ u8 identifier; /* A0H 0x00 */
+ u8 com_1g_code; /* A0H 0x06 */
+ u8 com_10g_code; /* A0H 0x03 */
+ u8 com_25g_code; /* A0H 0x24 */
+ u8 cable_spec; /* A0H 0x3C */
+ u8 cable_tech; /* A0H 0x08 */
+ u8 vendor_oui0; /* A0H 0x25 */
+ u8 vendor_oui1; /* A0H 0x26 */
+ u8 vendor_oui2; /* A0H 0x27 */
+ u8 reserved[3];
+};
+
+struct txgbe_hic_i2c_read {
+ struct wx_hic_hdr hdr;
+ struct txgbe_sfp_id id;
+};
+
+struct txgbe_hic_ephy_setlink {
+ struct wx_hic_hdr hdr;
+ u8 speed;
+ u8 duplex;
+ u8 autoneg;
+ u8 fec_mode;
+ u8 resv[4];
+};
+
+struct txgbe_hic_ephy_getlink {
+ struct wx_hic_hdr hdr;
+ u8 speed;
+ u8 duplex;
+ u8 autoneg;
+ u8 flow_ctl;
+ u8 power;
+ u8 fec_mode;
+ u8 resv[6];
+};
+
#define NODE_PROP(_NAME, _PROP) \
(const struct software_node) { \
.name = _NAME, \
@@ -313,6 +417,7 @@ struct txgbe_nodes {
enum txgbe_misc_irqs {
TXGBE_IRQ_LINK = 0,
+ TXGBE_IRQ_GPIO,
TXGBE_IRQ_MAX
};
@@ -334,12 +439,19 @@ struct txgbe {
struct clk *clk;
struct gpio_chip *gpio;
unsigned int link_irq;
+ unsigned int gpio_irq;
+ u32 eicr;
/* flow director */
struct hlist_head fdir_filter_list;
union txgbe_atr_input fdir_mask;
int fdir_filter_count;
spinlock_t fdir_perfect_lock; /* spinlock for FDIR */
+
+ DECLARE_PHY_INTERFACE_MASK(sfp_interfaces);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(sfp_support);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
+ u8 link_port;
};
#endif /* _TXGBE_TYPE_H_ */
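[Note] The TXGBE_INTR_MISC/TXGBE_INTR_QALL change above moves the misc interrupt from bit 0 to the bit just above the per-queue vectors, so the queue mask becomes a contiguous run starting at bit 0. A stand-alone sketch of the resulting masks; the value 4 for num_q_vectors is only an example, not a driver constant.

#include <stdio.h>

#define BIT(n)		(1U << (n))
/* mirrors the new macros: misc sits one bit above the queue vectors */
#define INTR_MISC(nq)	BIT(nq)
#define INTR_QALL(nq)	(INTR_MISC(nq) - 1)

int main(void)
{
	unsigned int num_q_vectors = 4;	/* example value */

	printf("queue mask: 0x%x\n", INTR_QALL(num_q_vectors)); /* 0x0f, bits 0..3 */
	printf("misc mask:  0x%x\n", INTR_MISC(num_q_vectors)); /* 0x10, bit 4 */
	return 0;
}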
diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig
index 35d96c633a33..7502214cc7d5 100644
--- a/drivers/net/ethernet/xilinx/Kconfig
+++ b/drivers/net/ethernet/xilinx/Kconfig
@@ -28,6 +28,7 @@ config XILINX_AXI_EMAC
depends on HAS_IOMEM
depends on XILINX_DMA
select PHYLINK
+ select DIMLIB
help
This driver supports the 10/100/1000 Ethernet from Xilinx for the
AXI bus interface used in Xilinx Virtex FPGAs and Soc's.
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index d64b8abcf018..5ff742103beb 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -9,6 +9,7 @@
#ifndef XILINX_AXIENET_H
#define XILINX_AXIENET_H
+#include <linux/dim.h>
#include <linux/netdevice.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
@@ -112,19 +113,18 @@
#define XAXIDMA_DELAY_MASK 0xFF000000 /* Delay timeout counter */
#define XAXIDMA_COALESCE_MASK 0x00FF0000 /* Coalesce counter */
-#define XAXIDMA_DELAY_SHIFT 24
-#define XAXIDMA_COALESCE_SHIFT 16
-
#define XAXIDMA_IRQ_IOC_MASK 0x00001000 /* Completion intr */
#define XAXIDMA_IRQ_DELAY_MASK 0x00002000 /* Delay interrupt */
#define XAXIDMA_IRQ_ERROR_MASK 0x00004000 /* Error interrupt */
#define XAXIDMA_IRQ_ALL_MASK 0x00007000 /* All interrupts */
+/* Constant to convert delay counts to microseconds */
+#define XAXIDMA_DELAY_SCALE (125ULL * USEC_PER_SEC)
+
/* Default TX/RX Threshold and delay timer values for SGDMA mode */
#define XAXIDMA_DFT_TX_THRESHOLD 24
#define XAXIDMA_DFT_TX_USEC 50
-#define XAXIDMA_DFT_RX_THRESHOLD 1
-#define XAXIDMA_DFT_RX_USEC 50
+#define XAXIDMA_DFT_RX_USEC 16
#define XAXIDMA_BD_CTRL_TXSOF_MASK 0x08000000 /* First tx packet */
#define XAXIDMA_BD_CTRL_TXEOF_MASK 0x04000000 /* Last tx packet */
@@ -484,7 +484,12 @@ struct skbuf_dma_descriptor {
* @regs: Base address for the axienet_local device address space
* @dma_regs: Base address for the axidma device address space
* @napi_rx: NAPI RX control structure
+ * @rx_dim: DIM state for the receive queue
+ * @rx_dim_enabled: Whether DIM is enabled or not
+ * @rx_irqs: Number of interrupts
+ * @rx_cr_lock: Lock protecting @rx_dma_cr, its register, and @rx_dma_started
* @rx_dma_cr: Nominal content of RX DMA control register
+ * @rx_dma_started: Set when RX DMA is started
* @rx_bd_v: Virtual address of the RX buffer descriptor ring
* @rx_bd_p: Physical address(start address) of the RX buffer descr. ring
* @rx_bd_num: Size of RX buffer descriptor ring
@@ -494,7 +499,9 @@ struct skbuf_dma_descriptor {
* @rx_bytes: RX byte count for statistics
* @rx_stat_sync: Synchronization object for RX stats
* @napi_tx: NAPI TX control structure
+ * @tx_cr_lock: Lock protecting @tx_dma_cr, its register, and @tx_dma_started
* @tx_dma_cr: Nominal content of TX DMA control register
+ * @tx_dma_started: Set when TX DMA is started
* @tx_bd_v: Virtual address of the TX buffer descriptor ring
* @tx_bd_p: Physical address(start address) of the TX buffer descr. ring
* @tx_bd_num: Size of TX buffer descriptor ring
@@ -529,10 +536,6 @@ struct skbuf_dma_descriptor {
* supported, the maximum frame size would be 9k. Else it is
* 1522 bytes (assuming support for basic VLAN)
* @rxmem: Stores rx memory size for jumbo frame handling.
- * @coalesce_count_rx: Store the irq coalesce on RX side.
- * @coalesce_usec_rx: IRQ coalesce delay for RX
- * @coalesce_count_tx: Store the irq coalesce on TX side.
- * @coalesce_usec_tx: IRQ coalesce delay for TX
* @use_dmaengine: flag to check dmaengine framework usage.
* @tx_chan: TX DMA channel.
* @rx_chan: RX DMA channel.
@@ -566,7 +569,12 @@ struct axienet_local {
void __iomem *dma_regs;
struct napi_struct napi_rx;
+ struct dim rx_dim;
+ bool rx_dim_enabled;
+ u16 rx_irqs;
+ spinlock_t rx_cr_lock;
u32 rx_dma_cr;
+ bool rx_dma_started;
struct axidma_bd *rx_bd_v;
dma_addr_t rx_bd_p;
u32 rx_bd_num;
@@ -576,7 +584,9 @@ struct axienet_local {
struct u64_stats_sync rx_stat_sync;
struct napi_struct napi_tx;
+ spinlock_t tx_cr_lock;
u32 tx_dma_cr;
+ bool tx_dma_started;
struct axidma_bd *tx_bd_v;
dma_addr_t tx_bd_p;
u32 tx_bd_num;
@@ -607,10 +617,6 @@ struct axienet_local {
u32 max_frm_size;
u32 rxmem;
- u32 coalesce_count_rx;
- u32 coalesce_usec_rx;
- u32 coalesce_count_tx;
- u32 coalesce_usec_tx;
u8 use_dmaengine;
struct dma_chan *tx_chan;
struct dma_chan *rx_chan;
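[Note] XAXIDMA_DELAY_SCALE above folds the "1 timeout interval = 125 SG clock periods" rule into one constant, so a delay in microseconds converts to a timer count as usec * clk_rate / (125 * USEC_PER_SEC). A small stand-alone check of that arithmetic; the 125 MHz clock matches the driver's fallback guess and is used here only as an example.

#include <stdint.h>
#include <stdio.h>

#define USEC_PER_SEC		1000000ULL
#define XAXIDMA_DELAY_SCALE	(125ULL * USEC_PER_SEC)

int main(void)
{
	uint64_t clk_rate = 125000000;	/* example SG clock rate */
	uint64_t usec = 16;		/* XAXIDMA_DFT_RX_USEC */
	/* round-to-closest division, as DIV64_U64_ROUND_CLOSEST() does */
	uint64_t timer = (usec * clk_rate + XAXIDMA_DELAY_SCALE / 2) / XAXIDMA_DELAY_SCALE;

	/* at 125 MHz one timer tick is exactly 1 us, so this prints 16 */
	printf("timer ticks: %llu\n", (unsigned long long)timer);
	return 0;
}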
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 0f4b02fe6f85..6011d7eae0c7 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -223,26 +223,62 @@ static void axienet_dma_bd_release(struct net_device *ndev)
lp->rx_bd_p);
}
+static u64 axienet_dma_rate(struct axienet_local *lp)
+{
+ if (lp->axi_clk)
+ return clk_get_rate(lp->axi_clk);
+ return 125000000; /* arbitrary guess if no clock rate set */
+}
+
/**
- * axienet_usec_to_timer - Calculate IRQ delay timer value
- * @lp: Pointer to the axienet_local structure
- * @coalesce_usec: Microseconds to convert into timer value
+ * axienet_calc_cr() - Calculate control register value
+ * @lp: Device private data
+ * @count: Number of completions before an interrupt
+ * @usec: Microseconds after the last completion before an interrupt
+ *
+ * Calculate a control register value based on the coalescing settings. The
+ * run/stop bit is not set.
*/
-static u32 axienet_usec_to_timer(struct axienet_local *lp, u32 coalesce_usec)
+static u32 axienet_calc_cr(struct axienet_local *lp, u32 count, u32 usec)
{
- u32 result;
- u64 clk_rate = 125000000; /* arbitrary guess if no clock rate set */
+ u32 cr;
- if (lp->axi_clk)
- clk_rate = clk_get_rate(lp->axi_clk);
+ cr = FIELD_PREP(XAXIDMA_COALESCE_MASK, count) | XAXIDMA_IRQ_IOC_MASK |
+ XAXIDMA_IRQ_ERROR_MASK;
+ /* Only set interrupt delay timer if not generating an interrupt on
+ * the first packet. Otherwise leave at 0 to disable delay interrupt.
+ */
+ if (count > 1) {
+ u64 clk_rate = axienet_dma_rate(lp);
+ u32 timer;
+
+ /* 1 Timeout Interval = 125 * (clock period of SG clock) */
+ timer = DIV64_U64_ROUND_CLOSEST((u64)usec * clk_rate,
+ XAXIDMA_DELAY_SCALE);
- /* 1 Timeout Interval = 125 * (clock period of SG clock) */
- result = DIV64_U64_ROUND_CLOSEST((u64)coalesce_usec * clk_rate,
- (u64)125000000);
- if (result > 255)
- result = 255;
+ timer = min(timer, FIELD_MAX(XAXIDMA_DELAY_MASK));
+ cr |= FIELD_PREP(XAXIDMA_DELAY_MASK, timer) |
+ XAXIDMA_IRQ_DELAY_MASK;
+ }
+
+ return cr;
+}
+
+/**
+ * axienet_coalesce_params() - Extract coalesce parameters from the CR
+ * @lp: Device private data
+ * @cr: The control register to parse
+ * @count: Number of packets before an interrupt
+ * @usec: Idle time (in usec) before an interrupt
+ */
+static void axienet_coalesce_params(struct axienet_local *lp, u32 cr,
+ u32 *count, u32 *usec)
+{
+ u64 clk_rate = axienet_dma_rate(lp);
+ u64 timer = FIELD_GET(XAXIDMA_DELAY_MASK, cr);
- return result;
+ *count = FIELD_GET(XAXIDMA_COALESCE_MASK, cr);
+ *usec = DIV64_U64_ROUND_CLOSEST(timer * XAXIDMA_DELAY_SCALE, clk_rate);
}
/**
@@ -251,30 +287,12 @@ static u32 axienet_usec_to_timer(struct axienet_local *lp, u32 coalesce_usec)
*/
static void axienet_dma_start(struct axienet_local *lp)
{
+ spin_lock_irq(&lp->rx_cr_lock);
+
/* Start updating the Rx channel control register */
- lp->rx_dma_cr = (lp->coalesce_count_rx << XAXIDMA_COALESCE_SHIFT) |
- XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
- /* Only set interrupt delay timer if not generating an interrupt on
- * the first RX packet. Otherwise leave at 0 to disable delay interrupt.
- */
- if (lp->coalesce_count_rx > 1)
- lp->rx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_rx)
- << XAXIDMA_DELAY_SHIFT) |
- XAXIDMA_IRQ_DELAY_MASK;
+ lp->rx_dma_cr &= ~XAXIDMA_CR_RUNSTOP_MASK;
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
- /* Start updating the Tx channel control register */
- lp->tx_dma_cr = (lp->coalesce_count_tx << XAXIDMA_COALESCE_SHIFT) |
- XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
- /* Only set interrupt delay timer if not generating an interrupt on
- * the first TX packet. Otherwise leave at 0 to disable delay interrupt.
- */
- if (lp->coalesce_count_tx > 1)
- lp->tx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_tx)
- << XAXIDMA_DELAY_SHIFT) |
- XAXIDMA_IRQ_DELAY_MASK;
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
-
/* Populate the tail pointer and bring the Rx Axi DMA engine out of
* halted state. This will make the Rx side ready for reception.
*/
@@ -283,6 +301,14 @@ static void axienet_dma_start(struct axienet_local *lp)
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
(sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
+ lp->rx_dma_started = true;
+
+ spin_unlock_irq(&lp->rx_cr_lock);
+ spin_lock_irq(&lp->tx_cr_lock);
+
+ /* Start updating the Tx channel control register */
+ lp->tx_dma_cr &= ~XAXIDMA_CR_RUNSTOP_MASK;
+ axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
/* Write to the RS (Run-stop) bit in the Tx channel control register.
* Tx channel is now ready to run. But only after we write to the
@@ -291,6 +317,9 @@ static void axienet_dma_start(struct axienet_local *lp)
axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
lp->tx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
+ lp->tx_dma_started = true;
+
+ spin_unlock_irq(&lp->tx_cr_lock);
}
/**
@@ -626,14 +655,22 @@ static void axienet_dma_stop(struct axienet_local *lp)
int count;
u32 cr, sr;
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
+ spin_lock_irq(&lp->rx_cr_lock);
+
+ cr = lp->rx_dma_cr & ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
+ lp->rx_dma_started = false;
+
+ spin_unlock_irq(&lp->rx_cr_lock);
synchronize_irq(lp->rx_irq);
- cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
+ spin_lock_irq(&lp->tx_cr_lock);
+
+ cr = lp->tx_dma_cr & ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
+ lp->tx_dma_started = false;
+
+ spin_unlock_irq(&lp->tx_cr_lock);
synchronize_irq(lp->tx_irq);
/* Give DMAs a chance to halt gracefully */
@@ -843,7 +880,7 @@ static void axienet_dma_tx_cb(void *data, const struct dmaengine_result *result)
dev_consume_skb_any(skbuf_dma->skb);
netif_txq_completed_wake(txq, 1, len,
CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
- 2 * MAX_SKB_FRAGS);
+ 2);
}
/**
@@ -877,7 +914,7 @@ axienet_start_xmit_dmaengine(struct sk_buff *skb, struct net_device *ndev)
dma_dev = lp->tx_chan->device;
sg_len = skb_shinfo(skb)->nr_frags + 1;
- if (CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX) <= sg_len) {
+ if (CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX) <= 1) {
netif_stop_queue(ndev);
if (net_ratelimit())
netdev_warn(ndev, "TX ring unexpectedly full\n");
@@ -927,7 +964,7 @@ axienet_start_xmit_dmaengine(struct sk_buff *skb, struct net_device *ndev)
txq = skb_get_tx_queue(lp->ndev, skb);
netdev_tx_sent_queue(txq, skb->len);
netif_txq_maybe_stop(txq, CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
- MAX_SKB_FRAGS + 1, 2 * MAX_SKB_FRAGS);
+ 1, 2);
dmaengine_submit(dma_tx_desc);
dma_async_issue_pending(lp->tx_chan);
@@ -965,6 +1002,7 @@ static int axienet_tx_poll(struct napi_struct *napi, int budget)
&size, budget);
if (packets) {
+ netdev_completed_queue(ndev, packets, size);
u64_stats_update_begin(&lp->tx_stat_sync);
u64_stats_add(&lp->tx_packets, packets);
u64_stats_add(&lp->tx_bytes, size);
@@ -982,7 +1020,9 @@ static int axienet_tx_poll(struct napi_struct *napi, int budget)
* cause an immediate interrupt if any TX packets are
* already pending.
*/
+ spin_lock_irq(&lp->tx_cr_lock);
axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
+ spin_unlock_irq(&lp->tx_cr_lock);
}
return packets;
}
@@ -1086,6 +1126,7 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
if (++new_tail_ptr >= lp->tx_bd_num)
new_tail_ptr = 0;
WRITE_ONCE(lp->tx_bd_tail, new_tail_ptr);
+ netdev_sent_queue(ndev, skb->len);
/* Start the transfer */
axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
@@ -1244,11 +1285,25 @@ static int axienet_rx_poll(struct napi_struct *napi, int budget)
axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
if (packets < budget && napi_complete_done(napi, packets)) {
+ if (READ_ONCE(lp->rx_dim_enabled)) {
+ struct dim_sample sample = {
+ .time = ktime_get(),
+ /* Safe because we are the only writer */
+ .pkt_ctr = u64_stats_read(&lp->rx_packets),
+ .byte_ctr = u64_stats_read(&lp->rx_bytes),
+ .event_ctr = READ_ONCE(lp->rx_irqs),
+ };
+
+ net_dim(&lp->rx_dim, &sample);
+ }
+
/* Re-enable RX completion interrupts. This should
* cause an immediate interrupt if any RX packets are
* already pending.
*/
+ spin_lock_irq(&lp->rx_cr_lock);
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
+ spin_unlock_irq(&lp->rx_cr_lock);
}
return packets;
}
@@ -1286,11 +1341,14 @@ static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
/* Disable further TX completion interrupts and schedule
* NAPI to handle the completions.
*/
- u32 cr = lp->tx_dma_cr;
-
- cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
if (napi_schedule_prep(&lp->napi_tx)) {
+ u32 cr;
+
+ spin_lock(&lp->tx_cr_lock);
+ cr = lp->tx_dma_cr;
+ cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
+ spin_unlock(&lp->tx_cr_lock);
__napi_schedule(&lp->napi_tx);
}
}
@@ -1331,11 +1389,16 @@ static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
/* Disable further RX completion interrupts and schedule
* NAPI receive.
*/
- u32 cr = lp->rx_dma_cr;
-
- cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
+ WRITE_ONCE(lp->rx_irqs, READ_ONCE(lp->rx_irqs) + 1);
if (napi_schedule_prep(&lp->napi_rx)) {
+ u32 cr;
+
+ spin_lock(&lp->rx_cr_lock);
+ cr = lp->rx_dma_cr;
+ cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
+ spin_unlock(&lp->rx_cr_lock);
+
__napi_schedule(&lp->napi_rx);
}
}
@@ -1628,6 +1691,7 @@ err_free_eth_irq:
if (lp->eth_irq > 0)
free_irq(lp->eth_irq, ndev);
err_phy:
+ cancel_work_sync(&lp->rx_dim.work);
cancel_delayed_work_sync(&lp->stats_work);
phylink_stop(lp->phylink);
phylink_disconnect_phy(lp->phylink);
@@ -1657,6 +1721,7 @@ static int axienet_stop(struct net_device *ndev)
napi_disable(&lp->napi_rx);
}
+ cancel_work_sync(&lp->rx_dim.work);
cancel_delayed_work_sync(&lp->stats_work);
phylink_stop(lp->phylink);
@@ -1688,6 +1753,7 @@ static int axienet_stop(struct net_device *ndev)
dma_release_channel(lp->tx_chan);
}
+ netdev_reset_queue(ndev);
axienet_iow(lp, XAE_IE_OFFSET, 0);
if (lp->eth_irq > 0)
@@ -2002,6 +2068,87 @@ axienet_ethtools_set_pauseparam(struct net_device *ndev,
}
/**
+ * axienet_update_coalesce_rx() - Set RX CR
+ * @lp: Device private data
+ * @cr: Value to write to the RX CR
+ * @mask: Bits to set from @cr
+ */
+static void axienet_update_coalesce_rx(struct axienet_local *lp, u32 cr,
+ u32 mask)
+{
+ spin_lock_irq(&lp->rx_cr_lock);
+ lp->rx_dma_cr &= ~mask;
+ lp->rx_dma_cr |= cr;
+ /* If DMA isn't started, then the settings will be applied the next
+ * time dma_start() is called.
+ */
+ if (lp->rx_dma_started) {
+ u32 reg = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
+
+ /* Don't enable IRQs if they are disabled by NAPI */
+ if (reg & XAXIDMA_IRQ_ALL_MASK)
+ cr = lp->rx_dma_cr;
+ else
+ cr = lp->rx_dma_cr & ~XAXIDMA_IRQ_ALL_MASK;
+ axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
+ }
+ spin_unlock_irq(&lp->rx_cr_lock);
+}
+
+/**
+ * axienet_dim_coalesce_count_rx() - RX coalesce count for DIM
+ * @lp: Device private data
+ */
+static u32 axienet_dim_coalesce_count_rx(struct axienet_local *lp)
+{
+ return min(1 << (lp->rx_dim.profile_ix << 1), 255);
+}
+
+/**
+ * axienet_rx_dim_work() - Adjust RX DIM settings
+ * @work: The work struct
+ */
+static void axienet_rx_dim_work(struct work_struct *work)
+{
+ struct axienet_local *lp =
+ container_of(work, struct axienet_local, rx_dim.work);
+ u32 cr = axienet_calc_cr(lp, axienet_dim_coalesce_count_rx(lp), 0);
+ u32 mask = XAXIDMA_COALESCE_MASK | XAXIDMA_IRQ_IOC_MASK |
+ XAXIDMA_IRQ_ERROR_MASK;
+
+ axienet_update_coalesce_rx(lp, cr, mask);
+ lp->rx_dim.state = DIM_START_MEASURE;
+}
+
+/**
+ * axienet_update_coalesce_tx() - Set TX CR
+ * @lp: Device private data
+ * @cr: Value to write to the TX CR
+ * @mask: Bits to set from @cr
+ */
+static void axienet_update_coalesce_tx(struct axienet_local *lp, u32 cr,
+ u32 mask)
+{
+ spin_lock_irq(&lp->tx_cr_lock);
+ lp->tx_dma_cr &= ~mask;
+ lp->tx_dma_cr |= cr;
+ /* If DMA isn't started, then the settings will be applied the next
+ * time dma_start() is called.
+ */
+ if (lp->tx_dma_started) {
+ u32 reg = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
+
+ /* Don't enable IRQs if they are disabled by NAPI */
+ if (reg & XAXIDMA_IRQ_ALL_MASK)
+ cr = lp->tx_dma_cr;
+ else
+ cr = lp->tx_dma_cr & ~XAXIDMA_IRQ_ALL_MASK;
+ axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
+ }
+ spin_unlock_irq(&lp->tx_cr_lock);
+}
+
+/**
* axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count.
* @ndev: Pointer to net_device structure
* @ecoalesce: Pointer to ethtool_coalesce structure
@@ -2021,11 +2168,23 @@ axienet_ethtools_get_coalesce(struct net_device *ndev,
struct netlink_ext_ack *extack)
{
struct axienet_local *lp = netdev_priv(ndev);
-
- ecoalesce->rx_max_coalesced_frames = lp->coalesce_count_rx;
- ecoalesce->rx_coalesce_usecs = lp->coalesce_usec_rx;
- ecoalesce->tx_max_coalesced_frames = lp->coalesce_count_tx;
- ecoalesce->tx_coalesce_usecs = lp->coalesce_usec_tx;
+ u32 cr;
+
+ ecoalesce->use_adaptive_rx_coalesce = lp->rx_dim_enabled;
+
+ spin_lock_irq(&lp->rx_cr_lock);
+ cr = lp->rx_dma_cr;
+ spin_unlock_irq(&lp->rx_cr_lock);
+ axienet_coalesce_params(lp, cr,
+ &ecoalesce->rx_max_coalesced_frames,
+ &ecoalesce->rx_coalesce_usecs);
+
+ spin_lock_irq(&lp->tx_cr_lock);
+ cr = lp->tx_dma_cr;
+ spin_unlock_irq(&lp->tx_cr_lock);
+ axienet_coalesce_params(lp, cr,
+ &ecoalesce->tx_max_coalesced_frames,
+ &ecoalesce->tx_coalesce_usecs);
return 0;
}
@@ -2049,22 +2208,56 @@ axienet_ethtools_set_coalesce(struct net_device *ndev,
struct netlink_ext_ack *extack)
{
struct axienet_local *lp = netdev_priv(ndev);
+ bool new_dim = ecoalesce->use_adaptive_rx_coalesce;
+ bool old_dim = lp->rx_dim_enabled;
+ u32 cr, mask = ~XAXIDMA_CR_RUNSTOP_MASK;
- if (netif_running(ndev)) {
+ if (ecoalesce->rx_max_coalesced_frames > 255 ||
+ ecoalesce->tx_max_coalesced_frames > 255) {
+ NL_SET_ERR_MSG(extack, "frames must be less than 256");
+ return -EINVAL;
+ }
+
+ if (!ecoalesce->rx_max_coalesced_frames ||
+ !ecoalesce->tx_max_coalesced_frames) {
+ NL_SET_ERR_MSG(extack, "frames must be non-zero");
+ return -EINVAL;
+ }
+
+ if (((ecoalesce->rx_max_coalesced_frames > 1 || new_dim) &&
+ !ecoalesce->rx_coalesce_usecs) ||
+ (ecoalesce->tx_max_coalesced_frames > 1 &&
+ !ecoalesce->tx_coalesce_usecs)) {
NL_SET_ERR_MSG(extack,
- "Please stop netif before applying configuration");
- return -EBUSY;
+ "usecs must be non-zero when frames is greater than one");
+ return -EINVAL;
+ }
+
+ if (new_dim && !old_dim) {
+ cr = axienet_calc_cr(lp, axienet_dim_coalesce_count_rx(lp),
+ ecoalesce->rx_coalesce_usecs);
+ } else if (!new_dim) {
+ if (old_dim) {
+ WRITE_ONCE(lp->rx_dim_enabled, false);
+ napi_synchronize(&lp->napi_rx);
+ flush_work(&lp->rx_dim.work);
+ }
+
+ cr = axienet_calc_cr(lp, ecoalesce->rx_max_coalesced_frames,
+ ecoalesce->rx_coalesce_usecs);
+ } else {
+ /* Dummy value for count just to calculate timer */
+ cr = axienet_calc_cr(lp, 2, ecoalesce->rx_coalesce_usecs);
+ mask = XAXIDMA_DELAY_MASK | XAXIDMA_IRQ_DELAY_MASK;
}
- if (ecoalesce->rx_max_coalesced_frames)
- lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
- if (ecoalesce->rx_coalesce_usecs)
- lp->coalesce_usec_rx = ecoalesce->rx_coalesce_usecs;
- if (ecoalesce->tx_max_coalesced_frames)
- lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;
- if (ecoalesce->tx_coalesce_usecs)
- lp->coalesce_usec_tx = ecoalesce->tx_coalesce_usecs;
+ axienet_update_coalesce_rx(lp, cr, mask);
+ if (new_dim && !old_dim)
+ WRITE_ONCE(lp->rx_dim_enabled, true);
+ cr = axienet_calc_cr(lp, ecoalesce->tx_max_coalesced_frames,
+ ecoalesce->tx_coalesce_usecs);
+ axienet_update_coalesce_tx(lp, cr, ~XAXIDMA_CR_RUNSTOP_MASK);
return 0;
}
@@ -2302,7 +2495,8 @@ axienet_ethtool_get_rmon_stats(struct net_device *dev,
static const struct ethtool_ops axienet_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
- ETHTOOL_COALESCE_USECS,
+ ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
.get_drvinfo = axienet_ethtools_get_drvinfo,
.get_regs_len = axienet_ethtools_get_regs_len,
.get_regs = axienet_ethtools_get_regs,
@@ -2331,11 +2525,12 @@ static struct axienet_local *pcs_to_axienet_local(struct phylink_pcs *pcs)
}
static void axienet_pcs_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
- phylink_mii_c22_pcs_get_state(pcs_phy, state);
+ phylink_mii_c22_pcs_get_state(pcs_phy, neg_mode, state);
}
static void axienet_pcs_an_restart(struct phylink_pcs *pcs)
@@ -2484,6 +2679,7 @@ static void axienet_dma_err_handler(struct work_struct *work)
~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
axienet_dma_stop(lp);
+ netdev_reset_queue(ndev);
for (i = 0; i < lp->tx_bd_num; i++) {
cur_p = &lp->tx_bd_v[i];
@@ -2784,7 +2980,7 @@ static int axienet_probe(struct platform_device *pdev)
}
}
if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) {
- dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit archecture\n");
+ dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit architecture\n");
ret = -EINVAL;
goto cleanup_clk;
}
@@ -2843,10 +3039,15 @@ static int axienet_probe(struct platform_device *pdev)
axienet_set_mac_address(ndev, NULL);
}
- lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
- lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
- lp->coalesce_usec_rx = XAXIDMA_DFT_RX_USEC;
- lp->coalesce_usec_tx = XAXIDMA_DFT_TX_USEC;
+ spin_lock_init(&lp->rx_cr_lock);
+ spin_lock_init(&lp->tx_cr_lock);
+ INIT_WORK(&lp->rx_dim.work, axienet_rx_dim_work);
+ lp->rx_dim_enabled = true;
+ lp->rx_dim.profile_ix = 1;
+ lp->rx_dma_cr = axienet_calc_cr(lp, axienet_dim_coalesce_count_rx(lp),
+ XAXIDMA_DFT_RX_USEC);
+ lp->tx_dma_cr = axienet_calc_cr(lp, XAXIDMA_DFT_TX_THRESHOLD,
+ XAXIDMA_DFT_TX_USEC);
ret = axienet_mdio_setup(lp);
if (ret)
@@ -2876,12 +3077,12 @@ static int axienet_probe(struct platform_device *pdev)
}
of_node_put(np);
lp->pcs.ops = &axienet_pcs_ops;
- lp->pcs.neg_mode = true;
lp->pcs.poll = true;
}
lp->phylink_config.dev = &ndev->dev;
lp->phylink_config.type = PHYLINK_NETDEV;
+ lp->phylink_config.mac_managed_pm = true;
lp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
MAC_10FD | MAC_100FD | MAC_1000FD;
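[Note] axienet_dim_coalesce_count_rx() above maps the DIM profile index to an RX frame-coalescing count of 1 << (profile_ix << 1), clamped to 255, giving the progression 1, 4, 16, 64, 255 for indices 0..4; the default profile_ix of 1 therefore starts at 4 frames. A quick stand-alone check of that progression (the helper below is a copy of the arithmetic, not driver code):

#include <stdio.h>

static unsigned int dim_coalesce_count_rx(unsigned int profile_ix)
{
	unsigned int count = 1U << (profile_ix << 1);

	return count < 255 ? count : 255;	/* same clamp as min(..., 255) */
}

int main(void)
{
	for (unsigned int ix = 0; ix <= 4; ix++)
		printf("profile %u -> %u frames\n", ix, dim_coalesce_count_rx(ix));
	/* prints 1, 4, 16, 64, 255 */
	return 0;
}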
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index a2ab1c150822..e1e7f65553e7 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -394,16 +394,20 @@ static void ixp_tx_timestamp(struct port *port, struct sk_buff *skb)
__raw_writel(TX_SNAPSHOT_LOCKED, &regs->channel[ch].ch_event);
}
-static int hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
+static int ixp4xx_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
{
- struct hwtstamp_config cfg;
struct ixp46x_ts_regs *regs;
struct port *port = netdev_priv(netdev);
int ret;
int ch;
- if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
- return -EFAULT;
+ if (!cpu_is_ixp46x())
+ return -EOPNOTSUPP;
+
+ if (!netif_running(netdev))
+ return -EINVAL;
ret = ixp46x_ptp_find(&port->timesync_regs, &port->phc_index);
if (ret)
@@ -412,10 +416,10 @@ static int hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
ch = PORT2CHANNEL(port);
regs = port->timesync_regs;
- if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON)
+ if (cfg->tx_type != HWTSTAMP_TX_OFF && cfg->tx_type != HWTSTAMP_TX_ON)
return -ERANGE;
- switch (cfg.rx_filter) {
+ switch (cfg->rx_filter) {
case HWTSTAMP_FILTER_NONE:
port->hwts_rx_en = 0;
break;
@@ -431,39 +435,45 @@ static int hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
return -ERANGE;
}
- port->hwts_tx_en = cfg.tx_type == HWTSTAMP_TX_ON;
+ port->hwts_tx_en = cfg->tx_type == HWTSTAMP_TX_ON;
/* Clear out any old time stamps. */
__raw_writel(TX_SNAPSHOT_LOCKED | RX_SNAPSHOT_LOCKED,
&regs->channel[ch].ch_event);
- return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+ return 0;
}
-static int hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
+static int ixp4xx_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *cfg)
{
- struct hwtstamp_config cfg;
struct port *port = netdev_priv(netdev);
- cfg.flags = 0;
- cfg.tx_type = port->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ if (!cpu_is_ixp46x())
+ return -EOPNOTSUPP;
+
+ if (!netif_running(netdev))
+ return -EINVAL;
+
+ cfg->flags = 0;
+ cfg->tx_type = port->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
switch (port->hwts_rx_en) {
case 0:
- cfg.rx_filter = HWTSTAMP_FILTER_NONE;
+ cfg->rx_filter = HWTSTAMP_FILTER_NONE;
break;
case PTP_SLAVE_MODE:
- cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
+ cfg->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
break;
case PTP_MASTER_MODE:
- cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
+ cfg->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
break;
default:
WARN_ON_ONCE(1);
return -ERANGE;
}
- return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+ return 0;
}
static int ixp4xx_mdio_cmd(struct mii_bus *bus, int phy_id, int location,
@@ -985,21 +995,6 @@ static void eth_set_mcast_list(struct net_device *dev)
}
-static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
-{
- if (!netif_running(dev))
- return -EINVAL;
-
- if (cpu_is_ixp46x()) {
- if (cmd == SIOCSHWTSTAMP)
- return hwtstamp_set(dev, req);
- if (cmd == SIOCGHWTSTAMP)
- return hwtstamp_get(dev, req);
- }
-
- return phy_mii_ioctl(dev->phydev, req, cmd);
-}
-
/* ethtool support */
static void ixp4xx_get_drvinfo(struct net_device *dev,
@@ -1433,9 +1428,11 @@ static const struct net_device_ops ixp4xx_netdev_ops = {
.ndo_change_mtu = ixp4xx_eth_change_mtu,
.ndo_start_xmit = eth_xmit,
.ndo_set_rx_mode = eth_set_mcast_list,
- .ndo_eth_ioctl = eth_ioctl,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
+ .ndo_hwtstamp_get = ixp4xx_hwtstamp_get,
+ .ndo_hwtstamp_set = ixp4xx_hwtstamp_set,
};
static struct eth_plat_info *ixp4xx_of_get_platdata(struct device *dev)